diff --git a/Source/Engine/Audio/AudioBackend.h b/Source/Engine/Audio/AudioBackend.h
index bce418beb..6f94df859 100644
--- a/Source/Engine/Audio/AudioBackend.h
+++ b/Source/Engine/Audio/AudioBackend.h
@@ -16,6 +16,13 @@ class AudioBackend
 public:
+    enum class FeatureFlags
+    {
+        None = 0,
+        // Supports multi-channel (incl. stereo) audio playback for spatial sources (3D), otherwise 3d audio needs to be in mono format.
+        SpatialMultiChannel = 1,
+    };
+
     static AudioBackend* Instance;
 
 private:
@@ -56,6 +63,7 @@ private:
     // Base
     virtual const Char* Base_Name() = 0;
+    virtual FeatureFlags Base_Features() = 0;
     virtual void Base_OnActiveDeviceChanged() = 0;
     virtual void Base_SetDopplerFactor(float value) = 0;
     virtual void Base_SetVolume(float value) = 0;
@@ -232,6 +240,11 @@ public:
         return Instance->Base_Name();
     }
 
+    FORCE_INLINE static FeatureFlags Features()
+    {
+        return Instance->Base_Features();
+    }
+
     FORCE_INLINE static void OnActiveDeviceChanged()
     {
         Instance->Base_OnActiveDeviceChanged();
diff --git a/Source/Engine/Audio/AudioClip.cpp b/Source/Engine/Audio/AudioClip.cpp
index 68f2e3471..d91eb358c 100644
--- a/Source/Engine/Audio/AudioClip.cpp
+++ b/Source/Engine/Audio/AudioClip.cpp
@@ -32,7 +32,6 @@ bool AudioClip::StreamingTask::Run()
     {
         const auto idx = queue[i];
         uint32& bufferId = clip->Buffers[idx];
-
         if (bufferId == AUDIO_BUFFER_ID_INVALID)
         {
             AudioBackend::Buffer::Create(bufferId);
@@ -45,64 +44,13 @@ bool AudioClip::StreamingTask::Run()
         }
     }
 
-    // Load missing buffers data
-    const auto format = clip->Format();
-    AudioDataInfo info = clip->AudioHeader.Info;
-    const uint32 bytesPerSample = info.BitDepth / 8;
+    // Load missing buffers data (from asset chunks)
     for (int32 i = 0; i < queue.Count(); i++)
     {
-        const auto idx = queue[i];
-        const uint32 bufferId = clip->Buffers[idx];
-        if (bufferId == AUDIO_BUFFER_ID_INVALID)
-            continue;
-
-        byte* data;
-        uint32 dataSize;
-        Array<byte> outTmp;
-
-        const auto chunk = clip->GetChunk(idx);
-        if (chunk == nullptr || chunk->IsMissing())
+        if (clip->WriteBuffer(queue[i]))
         {
-            LOG(Warning, "Missing audio streaming data chunk.");
             return true;
         }
-
-        // Get raw data or decompress it
-        switch (format)
-        {
-        case AudioFormat::Vorbis:
-        {
-#if COMPILE_WITH_OGG_VORBIS
-            OggVorbisDecoder decoder;
-            MemoryReadStream stream(chunk->Get(), chunk->Size());
-            AudioDataInfo outInfo;
-            if (decoder.Convert(&stream, outInfo, outTmp))
-            {
-                LOG(Warning, "Audio data decode failed (OggVorbisDecoder).");
-                return true;
-            }
-            // TODO: validate decompressed data header info?
-            data = outTmp.Get();
-            dataSize = outTmp.Count();
-#else
-            LOG(Warning, "OggVorbisDecoder is disabled.");
-            return true;
-#endif
-        }
-        break;
-        case AudioFormat::Raw:
-        {
-            data = chunk->Get();
-            dataSize = chunk->Size();
-        }
-        break;
-        default:
-            return true;
-        }
-
-        // Write samples to the audio buffer
-        info.NumSamples = dataSize / bytesPerSample;
-        AudioBackend::Buffer::Write(bufferId, data, info);
     }
 
     // Update the sources
@@ -360,7 +308,7 @@ bool AudioClip::init(AssetInitData& initData)
     }
     if (initData.CustomData.Length() != sizeof(AudioHeader))
     {
-        LOG(Warning, "Missing audio clip header.");
+        LOG(Warning, "Missing audio data.");
         return true;
     }
 
@@ -420,44 +368,15 @@ Asset::LoadResult AudioClip::load()
     // Load the whole audio at once
     if (LoadChunk(0))
         return LoadResult::CannotLoadData;
-    auto chunk0 = GetChunk(0);
-    if (chunk0 == nullptr || chunk0->IsMissing())
-        return LoadResult::MissingDataChunk;
 
     // Create single buffer
-    if (!AudioBackend::Instance)
-        return LoadResult::Failed;
     uint32 bufferId;
     AudioBackend::Buffer::Create(bufferId);
     Buffers[0] = bufferId;
 
-    // Write samples to the audio buffer
-    switch (AudioHeader.Format)
-    {
-    case AudioFormat::Vorbis:
-    {
-#if COMPILE_WITH_OGG_VORBIS
-        OggVorbisDecoder decoder;
-        MemoryReadStream stream(chunk0->Get(), chunk0->Size());
-        AudioDataInfo outInfo;
-        Array<byte> outTmp;
-        if (decoder.Convert(&stream, outInfo, outTmp))
-        {
-            LOG(Warning, "Audio data decode failed (OggVorbisDecoder).");
-            return LoadResult::InvalidData;
-        }
-        AudioBackend::Buffer::Write(bufferId, outTmp.Get(), outInfo);
-#endif
-        break;
-    }
-    case AudioFormat::Raw:
-    {
-        AudioBackend::Buffer::Write(bufferId, chunk0->Get(), AudioHeader.Info);
-        break;
-    }
-    default:
-        return LoadResult::InvalidData;
-    }
+    // Write data to audio buffer
+    if (WriteBuffer(0))
+        return LoadResult::Failed;
 
     return LoadResult::Ok;
 }
@@ -493,3 +412,74 @@ void AudioClip::unload(bool isReloading)
     _totalChunks = 0;
     Platform::MemoryClear(&AudioHeader, sizeof(AudioHeader));
 }
+
+bool AudioClip::WriteBuffer(int32 chunkIndex)
+{
+    // Ignore if buffer is not created
+    const uint32 bufferId = Buffers[chunkIndex];
+    if (bufferId == AUDIO_BUFFER_ID_INVALID)
+        return false;
+
+    // Ensure audio backend exists
+    if (AudioBackend::Instance == nullptr)
+        return true;
+
+    const auto chunk = GetChunk(chunkIndex);
+    if (chunk == nullptr || chunk->IsMissing())
+    {
+        LOG(Warning, "Missing audio data.");
+        return true;
+    }
+    Span<byte> data;
+    Array<byte> tmp1, tmp2;
+    AudioDataInfo info = AudioHeader.Info;
+    const uint32 bytesPerSample = info.BitDepth / 8;
+
+    // Get raw data or decompress it
+    switch (Format())
+    {
+    case AudioFormat::Vorbis:
+    {
+#if COMPILE_WITH_OGG_VORBIS
+        OggVorbisDecoder decoder;
+        MemoryReadStream stream(chunk->Get(), chunk->Size());
+        AudioDataInfo tmpInfo;
+        if (decoder.Convert(&stream, tmpInfo, tmp1))
+        {
+            LOG(Warning, "Audio data decode failed (OggVorbisDecoder).");
+            return true;
+        }
+        // TODO: validate decompressed data header info?
+        data = Span<byte>(tmp1.Get(), tmp1.Count());
+#else
+        LOG(Warning, "OggVorbisDecoder is disabled.");
+        return true;
+#endif
+    }
+    break;
+    case AudioFormat::Raw:
+    {
+        data = Span<byte>(chunk->Get(), chunk->Size());
+    }
+    break;
+    default:
+        return true;
+    }
+    info.NumSamples = data.Length() / bytesPerSample;
+
+    // Convert to Mono if used as 3D source and backend doesn't support it
+    if (Is3D() && info.NumChannels > 1 && EnumHasNoneFlags(AudioBackend::Features(), AudioBackend::FeatureFlags::SpatialMultiChannel))
+    {
+        const uint32 samplesPerChannel = info.NumSamples / info.NumChannels;
+        const uint32 monoBufferSize = samplesPerChannel * bytesPerSample;
+        tmp2.Resize(monoBufferSize);
+        AudioTool::ConvertToMono(data.Get(), tmp2.Get(), info.BitDepth, samplesPerChannel, info.NumChannels);
+        info.NumChannels = 1;
+        info.NumSamples = samplesPerChannel;
+        data = Span<byte>(tmp2.Get(), tmp2.Count());
+    }
+
+    // Write samples to the audio buffer
+    AudioBackend::Buffer::Write(bufferId, data.Get(), info);
+    return false;
+}
diff --git a/Source/Engine/Audio/AudioClip.h b/Source/Engine/Audio/AudioClip.h
index a8f064afe..3cf9029f6 100644
--- a/Source/Engine/Audio/AudioClip.h
+++ b/Source/Engine/Audio/AudioClip.h
@@ -17,9 +17,9 @@ class AudioSource;
 /// </summary>
 API_CLASS(NoSpawn) class FLAXENGINE_API AudioClip : public BinaryAsset, public StreamableResource
 {
-DECLARE_BINARY_ASSET_HEADER(AudioClip, 2);
-public:
+    DECLARE_BINARY_ASSET_HEADER(AudioClip, 2);
+public:
 
     /// <summary>
     /// Audio Clip resource header structure, version 2. Added on 08.08.2019.
     /// </summary>
@@ -40,12 +40,10 @@ public:
     class StreamingTask : public ThreadPoolTask
     {
     private:
-
         WeakAssetReference<AudioClip> _asset;
         FlaxStorage::LockData _dataLock;
 
     public:
-
         /// <summary>
         /// Init
         /// </summary>
@@ -57,7 +55,6 @@ public:
         }
 
     public:
-
         // [ThreadPoolTask]
         bool HasReference(Object* resource) const override
         {
@@ -65,28 +62,24 @@ public:
         }
 
     protected:
-
         // [ThreadPoolTask]
         bool Run() override;
         void OnEnd() override;
     };
 
 private:
-
     int32 _totalChunks;
     int32 _totalChunksSize;
     StreamingTask* _streamingTask;
     float _buffersStartTimes[ASSET_FILE_DATA_CHUNKS + 1];
 
 public:
-
     /// <summary>
     /// Finalizes an instance of the <see cref="AudioClip"/> class.
     /// </summary>
     ~AudioClip();
 
 public:
-
     /// <summary>
     /// The audio clip header data.
     /// </summary>
@@ -103,7 +96,6 @@ public:
     Array<uint32, FixedAllocation<ASSET_FILE_DATA_CHUNKS>> StreamingQueue;
 
 public:
-
     /// <summary>
     /// Gets the audio data format.
     /// </summary>
@@ -153,7 +145,6 @@ public:
     }
 
 public:
-
     /// <summary>
     /// Gets the buffer start time (in seconds).
     /// </summary>
@@ -170,7 +161,6 @@ public:
     int32 GetFirstBufferIndex(float time, float& offset) const;
 
 public:
-
     /// <summary>
     /// Extracts the source audio data from the asset storage. Loads the whole asset. The result data is in an asset format.
     /// </summary>
@@ -196,10 +186,9 @@ public:
     API_FUNCTION() bool ExtractDataRaw(API_PARAM(Out) Array<byte>& resultData, API_PARAM(Out) AudioDataInfo& resultDataInfo);
 
 public:
-
     // [BinaryAsset]
     void CancelStreaming() override;
-    
+
     // [StreamableResource]
     int32 GetMaxResidency() const override;
     int32 GetCurrentResidency() const override;
@@ -210,9 +199,12 @@ public:
     void CancelStreamingTasks() override;
 
 protected:
-
     // [BinaryAsset]
     bool init(AssetInitData& initData) override;
     LoadResult load() override;
     void unload(bool isReloading) override;
+
+private:
+    // Writes audio samples into Audio Backend buffer and handles automatic decompression or format conversion for runtime playback.
+    bool WriteBuffer(int32 chunkIndex);
 };
diff --git a/Source/Engine/Audio/AudioSource.cpp b/Source/Engine/Audio/AudioSource.cpp
index f3c335573..dae6aaa4a 100644
--- a/Source/Engine/Audio/AudioSource.cpp
+++ b/Source/Engine/Audio/AudioSource.cpp
@@ -20,6 +20,7 @@ AudioSource::AudioSource(const SpawnParams& params)
     , _attenuation(1.0f)
     , _loop(false)
     , _playOnStart(false)
+    , _allowSpatialization(true)
 {
     Clip.Changed.Bind<AudioSource, &AudioSource::OnClipChanged>(this);
     Clip.Loaded.Bind<AudioSource, &AudioSource::OnClipLoaded>(this);
@@ -77,7 +78,15 @@ void AudioSource::SetAttenuation(float value)
     if (Math::NearEqual(_attenuation, value))
         return;
     _attenuation = value;
+    if (SourceIDs.HasItems())
+        AudioBackend::Source::SpatialSetupChanged(this);
+}
 
+void AudioSource::SetAllowSpatialization(bool value)
+{
+    if (_allowSpatialization == value)
+        return;
+    _allowSpatialization = value;
     if (SourceIDs.HasItems())
         AudioBackend::Source::SpatialSetupChanged(this);
 }
@@ -213,7 +222,7 @@ bool AudioSource::Is3D() const
 {
     if (Clip == nullptr || Clip->WaitForLoaded())
         return false;
-    return Clip->Is3D();
+    return _allowSpatialization && Clip->Is3D();
 }
 
 void AudioSource::RequestStreamingBuffersUpdate()
@@ -325,6 +334,7 @@ void AudioSource::Serialize(SerializeStream& stream, const void* otherObj)
     SERIALIZE_MEMBER(Attenuation, _attenuation);
     SERIALIZE_MEMBER(Loop, _loop);
     SERIALIZE_MEMBER(PlayOnStart, _playOnStart);
+    SERIALIZE_MEMBER(AllowSpatialization, _allowSpatialization);
 }
 
 void AudioSource::Deserialize(DeserializeStream& stream, ISerializeModifier* modifier)
@@ -339,6 +349,7 @@ void AudioSource::Deserialize(DeserializeStream& stream, ISerializeModifier* mod
     DESERIALIZE_MEMBER(Attenuation, _attenuation);
     DESERIALIZE_MEMBER(Loop, _loop);
     DESERIALIZE_MEMBER(PlayOnStart, _playOnStart);
+    DESERIALIZE_MEMBER(AllowSpatialization, _allowSpatialization);
 }
 
 bool AudioSource::HasContentLoaded() const
diff --git a/Source/Engine/Audio/AudioSource.h b/Source/Engine/Audio/AudioSource.h
index 0ed2c87a0..8057fbe23 100644
--- a/Source/Engine/Audio/AudioSource.h
+++ b/Source/Engine/Audio/AudioSource.h
@@ -50,6 +50,7 @@ private:
     float _attenuation;
     bool _loop;
     bool _playOnStart;
+    bool _allowSpatialization;
     bool _isActuallyPlayingSth = false;
     bool _needToUpdateStreamingBuffers = false;
 
@@ -163,6 +164,20 @@ public:
     /// </summary>
     API_PROPERTY() void SetAttenuation(float value);
 
+    /// <summary>
+    /// If checked, source can play spatial 3d audio (when audio clip supports it), otherwise will always play as 2d sound.
+    /// </summary>
+    API_PROPERTY(Attributes="EditorOrder(80), DefaultValue(true), EditorDisplay(\"Audio Source\")")
+    FORCE_INLINE bool GetAllowSpatialization() const
+    {
+        return _allowSpatialization;
+    }
+
+    /// <summary>
+    /// If checked, source can play spatial 3d audio (when audio clip supports it), otherwise will always play as 2d sound.
+    /// </summary>
+    API_PROPERTY() void SetAllowSpatialization(bool value);
+
 public:
     /// <summary>
     /// Starts playing the currently assigned audio clip.
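
Editor's note (illustration only, not part of the patch): with these changes a source is spatial only when both the clip is 3D and AllowSpatialization is enabled, and AudioClip::WriteBuffer downmixes to mono only when the active backend lacks the SpatialMultiChannel feature. A self-contained C++ sketch of that decision, with the engine's EnumHasNoneFlags helper spelled out explicitly (helper names here are illustrative):

#include <cstdint>

// Mirrors AudioBackend::FeatureFlags from this patch.
enum class FeatureFlags : uint32_t
{
    None = 0,
    SpatialMultiChannel = 1,
};

// Stand-in for EnumHasNoneFlags: true when none of the requested flag bits are set.
static bool HasNoneFlags(FeatureFlags value, FeatureFlags flags)
{
    return (static_cast<uint32_t>(value) & static_cast<uint32_t>(flags)) == 0;
}

// Downmix to mono only for spatial (3D) sources with multi-channel data,
// and only when the backend cannot spatialize multi-channel buffers itself.
static bool NeedsMonoDownmix(bool sourceIs3D, uint32_t numChannels, FeatureFlags backendFeatures)
{
    return sourceIs3D && numChannels > 1 && HasNoneFlags(backendFeatures, FeatureFlags::SpatialMultiChannel);
}
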
diff --git a/Source/Engine/Audio/None/AudioBackendNone.cpp b/Source/Engine/Audio/None/AudioBackendNone.cpp
index a1a747763..f35ffd0c8 100644
--- a/Source/Engine/Audio/None/AudioBackendNone.cpp
+++ b/Source/Engine/Audio/None/AudioBackendNone.cpp
@@ -128,6 +128,11 @@ const Char* AudioBackendNone::Base_Name()
     return TEXT("None");
 }
 
+AudioBackend::FeatureFlags AudioBackendNone::Base_Features()
+{
+    return FeatureFlags::None;
+}
+
 void AudioBackendNone::Base_OnActiveDeviceChanged()
 {
 }
diff --git a/Source/Engine/Audio/None/AudioBackendNone.h b/Source/Engine/Audio/None/AudioBackendNone.h
index 2b6bb9606..59bc076c7 100644
--- a/Source/Engine/Audio/None/AudioBackendNone.h
+++ b/Source/Engine/Audio/None/AudioBackendNone.h
@@ -43,6 +43,7 @@ public:
     void Buffer_Delete(uint32& bufferId) override;
     void Buffer_Write(uint32 bufferId, byte* samples, const AudioDataInfo& info) override;
     const Char* Base_Name() override;
+    FeatureFlags Base_Features() override;
     void Base_OnActiveDeviceChanged() override;
     void Base_SetDopplerFactor(float value) override;
     void Base_SetVolume(float value) override;
diff --git a/Source/Engine/Audio/OpenAL/AudioBackendOAL.cpp b/Source/Engine/Audio/OpenAL/AudioBackendOAL.cpp
index 813f25ab1..68c3a8c60 100644
--- a/Source/Engine/Audio/OpenAL/AudioBackendOAL.cpp
+++ b/Source/Engine/Audio/OpenAL/AudioBackendOAL.cpp
@@ -60,6 +60,7 @@ namespace ALC
 {
     ALCdevice* Device = nullptr;
     Array<ALCcontext*> Contexts;
+    AudioBackend::FeatureFlags Features = AudioBackend::FeatureFlags::None;
 
     bool IsExtensionSupported(const char* extension)
     {
@@ -146,6 +147,9 @@ namespace ALC
         alSourcei(sourceID, AL_BUFFER, 0);
         if (is3D)
         {
+#ifdef AL_SOFT_source_spatialize
+            alSourcei(sourceID, AL_SOURCE_SPATIALIZE_SOFT, AL_TRUE);
+#endif
             alSourcef(sourceID, AL_ROLLOFF_FACTOR, source->GetAttenuation());
             alSourcef(sourceID, AL_REFERENCE_DISTANCE, FLAX_DST_TO_OAL(source->GetMinDistance()));
             alSource3f(sourceID, AL_POSITION, FLAX_POS_TO_OAL(source->GetPosition()));
@@ -683,6 +687,11 @@ const Char* AudioBackendOAL::Base_Name()
     return TEXT("OpenAL");
 }
 
+AudioBackend::FeatureFlags AudioBackendOAL::Base_Features()
+{
+    return ALC::Features;
+}
+
 void AudioBackendOAL::Base_OnActiveDeviceChanged()
 {
     // Cleanup
@@ -820,6 +829,10 @@ bool AudioBackendOAL::Base_Init()
     alDistanceModel(AL_INVERSE_DISTANCE_CLAMPED); // Default attenuation model
     ALC::RebuildContexts(true);
     Audio::SetActiveDeviceIndex(activeDeviceIndex);
+#ifdef AL_SOFT_source_spatialize
+    if (ALC::IsExtensionSupported("AL_SOFT_source_spatialize"))
+        ALC::Features = (FeatureFlags)((uint32)ALC::Features | (uint32)FeatureFlags::SpatialMultiChannel);
+#endif
 
     // Log service info
     LOG(Info, "{0} ({1})", String(alGetString(AL_RENDERER)), String(alGetString(AL_VERSION)));
diff --git a/Source/Engine/Audio/OpenAL/AudioBackendOAL.h b/Source/Engine/Audio/OpenAL/AudioBackendOAL.h
index 2f6fed237..82844a7a2 100644
--- a/Source/Engine/Audio/OpenAL/AudioBackendOAL.h
+++ b/Source/Engine/Audio/OpenAL/AudioBackendOAL.h
@@ -43,6 +43,7 @@ public:
     void Buffer_Delete(uint32& bufferId) override;
     void Buffer_Write(uint32 bufferId, byte* samples, const AudioDataInfo& info) override;
     const Char* Base_Name() override;
+    FeatureFlags Base_Features() override;
     void Base_OnActiveDeviceChanged() override;
     void Base_SetDopplerFactor(float value) override;
     void Base_SetVolume(float value) override;
diff --git a/Source/Engine/Audio/XAudio2/AudioBackendXAudio2.cpp b/Source/Engine/Audio/XAudio2/AudioBackendXAudio2.cpp
index 2d4fe5841..2bdd88000 100644
--- a/Source/Engine/Audio/XAudio2/AudioBackendXAudio2.cpp
+++ b/Source/Engine/Audio/XAudio2/AudioBackendXAudio2.cpp
@@ -368,7 +368,7 @@ void AudioBackendXAudio2::Source_OnAdd(AudioSource* source)
     auto& header = clip->AudioHeader;
     auto& format = aSource->Format;
     format.wFormatTag = WAVE_FORMAT_PCM;
-    format.nChannels = header.Info.NumChannels;
+    format.nChannels = source->Is3D() ? 1 : header.Info.NumChannels; // 3d audio is always mono (AudioClip auto-converts before buffer write)
     format.nSamplesPerSec = header.Info.SampleRate;
     format.wBitsPerSample = header.Info.BitDepth;
     format.nBlockAlign = (WORD)(format.nChannels * (format.wBitsPerSample / 8));
@@ -394,7 +394,7 @@ void AudioBackendXAudio2::Source_OnAdd(AudioSource* source)
     // Prepare source state
     aSource->Callback.Source = source;
     aSource->IsDirty = true;
-    aSource->Data.ChannelCount = header.Info.NumChannels;
+    aSource->Data.ChannelCount = format.nChannels;
     aSource->Data.InnerRadius = FLAX_DST_TO_XAUDIO(source->GetMinDistance());
     aSource->Is3D = source->Is3D();
     aSource->Pitch = source->GetPitch();
@@ -591,7 +591,8 @@ float AudioBackendXAudio2::Source_GetCurrentBufferTime(const AudioSource* source
         const auto& clipInfo = source->Clip->AudioHeader.Info;
         XAUDIO2_VOICE_STATE state;
         aSource->Voice->GetState(&state);
-        const UINT32 totalSamples = clipInfo.NumSamples / clipInfo.NumChannels;
+        const uint32 numChannels = source->Is3D() ? 1 : clipInfo.NumChannels; // 3d audio is always mono (AudioClip auto-converts before buffer write)
+        const UINT32 totalSamples = clipInfo.NumSamples / numChannels;
         state.SamplesPlayed -= aSource->LastBufferStartSamplesPlayed % totalSamples; // Offset by the last buffer start to get time relative to its begin
         time = aSource->StartTime + (state.SamplesPlayed % totalSamples) / static_cast<float>(Math::Max(1U, clipInfo.SampleRate));
     }
@@ -716,6 +717,11 @@ const Char* AudioBackendXAudio2::Base_Name()
     return TEXT("XAudio2");
 }
 
+AudioBackend::FeatureFlags AudioBackendXAudio2::Base_Features()
+{
+    return FeatureFlags::None;
+}
+
 void AudioBackendXAudio2::Base_OnActiveDeviceChanged()
 {
 }
diff --git a/Source/Engine/Audio/XAudio2/AudioBackendXAudio2.h b/Source/Engine/Audio/XAudio2/AudioBackendXAudio2.h
index 23e43be42..9fff0af03 100644
--- a/Source/Engine/Audio/XAudio2/AudioBackendXAudio2.h
+++ b/Source/Engine/Audio/XAudio2/AudioBackendXAudio2.h
@@ -43,6 +43,7 @@ public:
     void Buffer_Delete(uint32& bufferId) override;
     void Buffer_Write(uint32 bufferId, byte* samples, const AudioDataInfo& info) override;
     const Char* Base_Name() override;
+    FeatureFlags Base_Features() override;
     void Base_OnActiveDeviceChanged() override;
     void Base_SetDopplerFactor(float value) override;
     void Base_SetVolume(float value) override;
diff --git a/Source/Engine/ContentImporters/ImportAudio.cpp b/Source/Engine/ContentImporters/ImportAudio.cpp
index 73556dc26..a9955a4d4 100644
--- a/Source/Engine/ContentImporters/ImportAudio.cpp
+++ b/Source/Engine/ContentImporters/ImportAudio.cpp
@@ -114,22 +114,6 @@ CreateAssetResult ImportAudio::Import(CreateAssetContext& context, AudioDecoder&
     DataContainer sampleBuffer;
     sampleBuffer.Link(audioData.Get());
 
-    // Convert to Mono if used as 3D source
-    if (options.Is3D && info.NumChannels > 1)
-    {
-        const uint32 numSamplesPerChannel = info.NumSamples / info.NumChannels;
-
-        const uint32 monoBufferSize = numSamplesPerChannel * bytesPerSample;
-        sampleBuffer.Allocate(monoBufferSize);
-
-        AudioTool::ConvertToMono(audioData.Get(), sampleBuffer.Get(), info.BitDepth, numSamplesPerChannel, info.NumChannels);
-
-        info.NumSamples = numSamplesPerChannel;
-        info.NumChannels = 1;
-
-        bufferSize = monoBufferSize;
-    }
-
     // Convert bit depth if need to
     if (options.BitDepth != static_cast<int32>(info.BitDepth))
     {
diff --git a/Source/Engine/Graphics/Textures/StreamingTexture.cpp b/Source/Engine/Graphics/Textures/StreamingTexture.cpp
index 4793d340e..409125838 100644
--- a/Source/Engine/Graphics/Textures/StreamingTexture.cpp
+++ b/Source/Engine/Graphics/Textures/StreamingTexture.cpp
@@ -404,7 +404,7 @@ protected:
         if (_streamingTexture)
         {
             // Stop streaming on fail
-            _streamingTexture->CancelStreaming();
+            _streamingTexture->ResetStreaming();
         }
 
         GPUUploadTextureMipTask::OnFail();
diff --git a/Source/Engine/Streaming/StreamableResource.h b/Source/Engine/Streaming/StreamableResource.h
index ae44eb442..866ec8469 100644
--- a/Source/Engine/Streaming/StreamableResource.h
+++ b/Source/Engine/Streaming/StreamableResource.h
@@ -131,7 +131,7 @@ public:
     /// <summary>
     /// Stops the streaming (eg. on streaming fail).
     /// </summary>
-    void CancelStreaming();
+    void ResetStreaming();
 
 protected:
diff --git a/Source/Engine/Streaming/Streaming.cpp b/Source/Engine/Streaming/Streaming.cpp
index 77ac7a49c..19b845fb1 100644
--- a/Source/Engine/Streaming/Streaming.cpp
+++ b/Source/Engine/Streaming/Streaming.cpp
@@ -84,7 +84,7 @@ void StreamableResource::RequestStreamingUpdate()
     Streaming.LastUpdate = 0;
 }
 
-void StreamableResource::CancelStreaming()
+void StreamableResource::ResetStreaming()
 {
     Streaming.TargetResidency = 0;
     Streaming.LastUpdate = DateTime::MaxValue().Ticks;
@@ -169,7 +169,7 @@ void UpdateResource(StreamableResource* resource, DateTime now, double currentTi
         else if (resource->GetAllocatedResidency() < targetResidency)
         {
            // Allocation failed (eg. texture format is not supported or run out of memory)
-            resource->CancelStreaming();
+            resource->ResetStreaming();
             return;
         }
     }
diff --git a/Source/Engine/Tools/AudioTool/AudioTool.cpp b/Source/Engine/Tools/AudioTool/AudioTool.cpp
index 2a047117b..a5a5d4663 100644
--- a/Source/Engine/Tools/AudioTool/AudioTool.cpp
+++ b/Source/Engine/Tools/AudioTool/AudioTool.cpp
@@ -4,6 +4,11 @@
 #include "Engine/Core/Core.h"
 #include "Engine/Core/Memory/Allocation.h"
 
+#define CONVERT_TO_MONO_AVG 1
+#if !CONVERT_TO_MONO_AVG
+#include "Engine/Core/Math/Math.h"
+#endif
+
 void ConvertToMono8(const int8* input, uint8* output, uint32 numSamples, uint32 numChannels)
 {
     for (uint32 i = 0; i < numSamples; i++)
@@ -15,7 +20,11 @@ void ConvertToMono8(const int8* input, uint8* output, uint32 numSamples, uint32
             ++input;
         }
 
-        *output = sum / numChannels;
+#if CONVERT_TO_MONO_AVG
+        *output = (uint8)(sum / numChannels);
+#else
+        *output = (uint8)Math::Clamp(sum, 0, MAX_uint8);
+#endif
         ++output;
     }
 }
@@ -31,7 +40,11 @@ void ConvertToMono16(const int16* input, int16* output, uint32 numSamples, uint3
             ++input;
         }
 
-        *output = sum / numChannels;
+#if CONVERT_TO_MONO_AVG
+        *output = (int16)(sum / numChannels);
+#else
+        *output = (int16)Math::Clamp(sum, MIN_int16, MAX_int16);
+#endif
         ++output;
     }
 }
@@ -55,8 +68,12 @@ void ConvertToMono24(const uint8* input, uint8* output, uint32 numSamples, uint3
             input += 3;
         }
 
-        const int32 avg = (int32)(sum / numChannels);
-        Convert32To24Bits(avg, output);
+#if CONVERT_TO_MONO_AVG
+        const int32 val = (int32)(sum / numChannels);
+#else
+        const int32 val = (int32)Math::Clamp(sum, MIN_int16, MAX_int16);
+#endif
+        Convert32To24Bits(val, output);
         output += 3;
     }
 }
@@ -72,7 +89,11 @@ void ConvertToMono32(const int32* input, int32* output, uint32 numSamples, uint3
             ++input;
        }
 
+#if CONVERT_TO_MONO_AVG
         *output = (int32)(sum / numChannels);
+#else
+        *output = (int32)Math::Clamp(sum, MIN_int16, MAX_int16);
+#endif
         ++output;
     }
 }
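
Editor's note (illustration only, not part of the patch): with CONVERT_TO_MONO_AVG enabled, the converters above average each interleaved frame across channels. A standalone sketch equivalent to the 16-bit averaging path, for reference (names are illustrative):

#include <cstdint>

// Averages interleaved multi-channel 16-bit PCM down to mono, one output sample per frame,
// matching ConvertToMono16 with CONVERT_TO_MONO_AVG enabled.
void DownmixToMono16(const int16_t* input, int16_t* output, uint32_t numSamplesPerChannel, uint32_t numChannels)
{
    for (uint32_t i = 0; i < numSamplesPerChannel; i++)
    {
        int32_t sum = 0;
        for (uint32_t j = 0; j < numChannels; j++)
            sum += *input++;
        *output++ = static_cast<int16_t>(sum / static_cast<int32_t>(numChannels));
    }
}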