// A single-producer/single-consumer queue of Elements, backed by an AbstractFifo.
template <typename Element>
class Queue
{
public:
    explicit Queue (int size)
        : fifo (size), storage (static_cast<size_t> (size)) {}

    bool push (Element& element) noexcept
    {
        if (fifo.getFreeSpace() == 0)
            return false;

        const auto writer = fifo.write (1);

        if (writer.blockSize1 != 0)
            storage[static_cast<size_t> (writer.startIndex1)] = std::move (element);
        else if (writer.blockSize2 != 0)
            storage[static_cast<size_t> (writer.startIndex2)] = std::move (element);

        return true;
    }

    template <typename Fn>
    void pop (Fn&& fn)     { popN (1, std::forward<Fn> (fn)); }

    template <typename Fn>
    void popAll (Fn&& fn)  { popN (fifo.getNumReady(), std::forward<Fn> (fn)); }

    bool hasPendingMessages() const noexcept { return fifo.getNumReady() > 0; }

private:
    template <typename Fn>
    void popN (int n, Fn&& fn)
    {
        fifo.read (n).forEach ([&] (int index)
                               {
                                   fn (storage[static_cast<size_t> (index)]);
                               });
    }

    AbstractFifo fifo;
    std::vector<Element> storage;
};
class BackgroundMessageQueue : private Thread
{
public:
    explicit BackgroundMessageQueue (int entries)
        : Thread ("Convolution background loader"), queue (entries) {}

    using IncomingCommand = FixedSizeFunction<400, void()>;

    // Wait-free; only safe to call from a single thread at a time.
    bool push (IncomingCommand& command) { return queue.push (command); }

    void popAll()
    {
        const ScopedLock lock (popMutex);
        queue.popAll ([] (IncomingCommand& command) { command(); command = nullptr; });
    }

    using Thread::startThread;
    using Thread::stopThread;

private:
    void run() override
    {
        while (! threadShouldExit())
        {
            const auto tryPop = [&]
            {
                const ScopedLock lock (popMutex);

                if (! queue.hasPendingMessages())
                    return false;

                queue.pop ([] (IncomingCommand& command) { command(); command = nullptr; });
                return true;
            };

            if (! tryPop())
                sleep (10);
        }
    }

    CriticalSection popMutex;
    Queue<IncomingCommand> queue;

    JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (BackgroundMessageQueue)
};
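
// Usage sketch (hedged illustration, assuming only the classes above): work is handed to
// the background thread as FixedSizeFunction commands. The queue size and the lambda body
// below are illustrative only.
static void backgroundQueueUsageSketch()
{
    BackgroundMessageQueue demoQueue (128);   // room for 128 pending commands
    demoQueue.startThread();

    BackgroundMessageQueue::IncomingCommand command = [] { /* expensive work goes here */ };

    // push() is wait-free: it returns false when the FIFO is full, in which case the
    // caller keeps ownership of the command and can retry later.
    const auto accepted = demoQueue.push (command);
    ignoreUnused (accepted);

    demoQueue.stopThread (-1);
}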
struct ConvolutionMessageQueue::Impl : public BackgroundMessageQueue
{
    using BackgroundMessageQueue::BackgroundMessageQueue;
};

ConvolutionMessageQueue::ConvolutionMessageQueue (int entries)
    : pimpl (std::make_unique<Impl> (entries))
{
    pimpl->startThread();
}

ConvolutionMessageQueue::~ConvolutionMessageQueue() noexcept
{
    pimpl->stopThread (-1);
}

ConvolutionMessageQueue::ConvolutionMessageQueue (ConvolutionMessageQueue&&) noexcept = default;
ConvolutionMessageQueue& ConvolutionMessageQueue::operator= (ConvolutionMessageQueue&&) noexcept = default;
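
// Usage sketch (hedged illustration of the public API): several Convolution instances can
// share a single background loader thread by being constructed with the same
// ConvolutionMessageQueue. The struct and member names are illustrative only.
struct SharedQueueExample
{
    ConvolutionMessageQueue sharedQueue;        // declared first so it outlives both engines
    Convolution convolutionA { sharedQueue };
    Convolution convolutionB { sharedQueue };
};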
struct ConvolutionEngine
{
    ConvolutionEngine (const float* samples,
                       size_t numSamples,
                       size_t maxBlockSize)
        : blockSize ((size_t) nextPowerOfTwo ((int) maxBlockSize)),
          fftSize (blockSize > 128 ? 2 * blockSize : 4 * blockSize),
          fftObject (std::make_unique<FFT> (roundToInt (std::log2 (fftSize)))),
          numSegments (numSamples / (fftSize - blockSize) + 1u),
          numInputSegments ((blockSize > 128 ? numSegments : 3 * numSegments)),
          bufferInput      (1, static_cast<int> (fftSize)),
          bufferOutput     (1, static_cast<int> (fftSize * 2)),
          bufferTempOutput (1, static_cast<int> (fftSize * 2)),
          bufferOverlap    (1, static_cast<int> (fftSize))
    {
        bufferOutput.clear();

        auto updateSegmentsIfNecessary = [this] (size_t numSegmentsToUpdate,
                                                 std::vector<AudioBuffer<float>>& segments)
        {
            if (numSegmentsToUpdate == 0
                || numSegmentsToUpdate != (size_t) segments.size()
                || (size_t) segments[0].getNumSamples() != fftSize * 2)
            {
                segments.clear();

                for (size_t i = 0; i < numSegmentsToUpdate; ++i)
                    segments.push_back ({ 1, static_cast<int> (fftSize * 2) });
            }
        };

        updateSegmentsIfNecessary (numInputSegments, buffersInputSegments);
        updateSegmentsIfNecessary (numSegments,      buffersImpulseSegments);

        auto FFTTempObject = std::make_unique<FFT> (roundToInt (std::log2 (fftSize)));
        size_t currentPtr = 0;

        for (auto& buf : buffersImpulseSegments)
        {
            buf.clear();

            auto* impulseResponse = buf.getWritePointer (0);

            if (&buf == &buffersImpulseSegments.front())
                impulseResponse[0] = 1.0f;

            FloatVectorOperations::copy (impulseResponse,
                                         samples + currentPtr,
                                         static_cast<int> (jmin (fftSize - blockSize, numSamples - currentPtr)));

            FFTTempObject->performRealOnlyForwardTransform (impulseResponse);
            prepareForConvolution (impulseResponse);

            currentPtr += (fftSize - blockSize);
        }

        reset();
    }

    void reset()
    {
        bufferInput.clear();
        bufferOverlap.clear();
        bufferTempOutput.clear();
        bufferOutput.clear();

        for (auto& buf : buffersInputSegments)
            buf.clear();

        currentSegment = 0;
        inputDataPos = 0;
    }
    void processSamples (const float* input, float* output, size_t numSamples)
    {
        // Overlap-add, zero-latency convolution with uniform partitioning
        size_t numSamplesProcessed = 0;

        auto indexStep = numInputSegments / numSegments;

        auto* inputData      = bufferInput.getWritePointer (0);
        auto* outputTempData = bufferTempOutput.getWritePointer (0);
        auto* outputData     = bufferOutput.getWritePointer (0);
        auto* overlapData    = bufferOverlap.getWritePointer (0);

        while (numSamplesProcessed < numSamples)
        {
            const bool inputDataWasEmpty = (inputDataPos == 0);
            auto numSamplesToProcess = jmin (numSamples - numSamplesProcessed, blockSize - inputDataPos);

            FloatVectorOperations::copy (inputData + inputDataPos, input + numSamplesProcessed, static_cast<int> (numSamplesToProcess));

            auto* inputSegmentData = buffersInputSegments[currentSegment].getWritePointer (0);
            FloatVectorOperations::copy (inputSegmentData, inputData, static_cast<int> (fftSize));

            fftObject->performRealOnlyForwardTransform (inputSegmentData);
            prepareForConvolution (inputSegmentData);

            // Complex multiplication
            if (inputDataWasEmpty)
            {
                FloatVectorOperations::fill (outputTempData, 0, static_cast<int> (fftSize + 1));

                auto index = currentSegment;

                for (size_t i = 1; i < numSegments; ++i)
                {
                    index += indexStep;

                    if (index >= numInputSegments)
                        index -= numInputSegments;

                    convolutionProcessingAndAccumulate (buffersInputSegments[index].getWritePointer (0),
                                                        buffersImpulseSegments[i].getWritePointer (0),
                                                        outputTempData);
                }
            }

            FloatVectorOperations::copy (outputData, outputTempData, static_cast<int> (fftSize + 1));

            convolutionProcessingAndAccumulate (inputSegmentData,
                                                buffersImpulseSegments.front().getWritePointer (0),
                                                outputData);

            updateSymmetricFrequencyDomainData (outputData);
            fftObject->performRealOnlyInverseTransform (outputData);

            // Add the overlap from the previous block
            FloatVectorOperations::add (&output[numSamplesProcessed], &outputData[inputDataPos], &overlapData[inputDataPos], (int) numSamplesToProcess);

            // Input buffer full => next block
            inputDataPos += numSamplesToProcess;

            if (inputDataPos == blockSize)
            {
                // Input buffer is empty again now
                FloatVectorOperations::fill (inputData, 0.0f, static_cast<int> (fftSize));

                inputDataPos = 0;

                // Extra step for segment sizes larger than the block size
                FloatVectorOperations::add (&(outputData[blockSize]), &(overlapData[blockSize]), static_cast<int> (fftSize - 2 * blockSize));

                // Save the overlap
                FloatVectorOperations::copy (overlapData, &(outputData[blockSize]), static_cast<int> (fftSize - blockSize));

                currentSegment = (currentSegment > 0) ? (currentSegment - 1) : (numInputSegments - 1);
            }

            numSamplesProcessed += numSamplesToProcess;
        }
    }
    void processSamplesWithAddedLatency (const float* input, float* output, size_t numSamples)
    {
        // Overlap-add convolution with uniform partitioning and one block of added latency
        size_t numSamplesProcessed = 0;

        auto indexStep = numInputSegments / numSegments;

        auto* inputData      = bufferInput.getWritePointer (0);
        auto* outputTempData = bufferTempOutput.getWritePointer (0);
        auto* outputData     = bufferOutput.getWritePointer (0);
        auto* overlapData    = bufferOverlap.getWritePointer (0);

        while (numSamplesProcessed < numSamples)
        {
            auto numSamplesToProcess = jmin (numSamples - numSamplesProcessed, blockSize - inputDataPos);

            FloatVectorOperations::copy (inputData + inputDataPos, input + numSamplesProcessed, static_cast<int> (numSamplesToProcess));

            FloatVectorOperations::copy (output + numSamplesProcessed, outputData + inputDataPos, static_cast<int> (numSamplesToProcess));

            numSamplesProcessed += numSamplesToProcess;
            inputDataPos += numSamplesToProcess;

            // The actual processing happens once a whole block has been gathered (hence the latency)
            if (inputDataPos == blockSize)
            {
                // Copy the input data into the current input segment
                auto* inputSegmentData = buffersInputSegments[currentSegment].getWritePointer (0);
                FloatVectorOperations::copy (inputSegmentData, inputData, static_cast<int> (fftSize));

                fftObject->performRealOnlyForwardTransform (inputSegmentData);
                prepareForConvolution (inputSegmentData);

                // Complex multiplication
                FloatVectorOperations::fill (outputTempData, 0, static_cast<int> (fftSize + 1));

                auto index = currentSegment;

                for (size_t i = 1; i < numSegments; ++i)
                {
                    index += indexStep;

                    if (index >= numInputSegments)
                        index -= numInputSegments;

                    convolutionProcessingAndAccumulate (buffersInputSegments[index].getWritePointer (0),
                                                        buffersImpulseSegments[i].getWritePointer (0),
                                                        outputTempData);
                }

                FloatVectorOperations::copy (outputData, outputTempData, static_cast<int> (fftSize + 1));

                convolutionProcessingAndAccumulate (inputSegmentData,
                                                    buffersImpulseSegments.front().getWritePointer (0),
                                                    outputData);

                updateSymmetricFrequencyDomainData (outputData);
                fftObject->performRealOnlyInverseTransform (outputData);

                // Add the overlap
                FloatVectorOperations::add (outputData, overlapData, static_cast<int> (blockSize));

                // Input buffer is empty again now
                FloatVectorOperations::fill (inputData, 0.0f, static_cast<int> (fftSize));

                // Extra step for segment sizes larger than the block size
                FloatVectorOperations::add (&(outputData[blockSize]), &(overlapData[blockSize]), static_cast<int> (fftSize - 2 * blockSize));

                // Save the overlap
                FloatVectorOperations::copy (overlapData, &(outputData[blockSize]), static_cast<int> (fftSize - blockSize));

                currentSegment = (currentSegment > 0) ? (currentSegment - 1) : (numInputSegments - 1);

                inputDataPos = 0;
            }
        }
    }
    // Called after each forward FFT: packs the real parts of the non-negative bins into the
    // first half of the buffer and the imaginary parts into the second half, so the
    // convolution below needs only a handful of vector operations.
    void prepareForConvolution (float* samples) noexcept
    {
        auto FFTSizeDiv2 = fftSize / 2;

        for (size_t i = 0; i < FFTSizeDiv2; i++)
            samples[i] = samples[i << 1];

        samples[FFTSizeDiv2] = 0;

        for (size_t i = 1; i < FFTSizeDiv2; i++)
            samples[i + FFTSizeDiv2] = -samples[((fftSize - i) << 1) + 1];
    }
    // Performs the complex multiply-accumulate on the packed (half-spectrum) data.
    void convolutionProcessingAndAccumulate (const float* input, const float* impulse, float* output)
    {
        auto FFTSizeDiv2 = fftSize / 2;

        FloatVectorOperations::addWithMultiply      (output, input, impulse, static_cast<int> (FFTSizeDiv2));
        FloatVectorOperations::subtractWithMultiply (output, &(input[FFTSizeDiv2]), &(impulse[FFTSizeDiv2]), static_cast<int> (FFTSizeDiv2));

        FloatVectorOperations::addWithMultiply (&(output[FFTSizeDiv2]), input, &(impulse[FFTSizeDiv2]), static_cast<int> (FFTSizeDiv2));
        FloatVectorOperations::addWithMultiply (&(output[FFTSizeDiv2]), &(input[FFTSizeDiv2]), impulse, static_cast<int> (FFTSizeDiv2));

        output[fftSize] += input[fftSize] * impulse[fftSize];
    }
    // Undoes the reorganisation done in prepareForConvolution and restores the conjugate-symmetric
    // second half of the spectrum, so the inverse transform yields real time-domain samples.
    void updateSymmetricFrequencyDomainData (float* samples) noexcept
    {
        auto FFTSizeDiv2 = fftSize / 2;

        for (size_t i = 1; i < FFTSizeDiv2; i++)
        {
            samples[(fftSize - i) << 1] = samples[i];
            samples[((fftSize - i) << 1) + 1] = -samples[FFTSizeDiv2 + i];
        }

        samples[1] = 0.f;

        for (size_t i = 1; i < FFTSizeDiv2; i++)
        {
            samples[i << 1] = samples[(fftSize - i) << 1];
            samples[(i << 1) + 1] = -samples[((fftSize - i) << 1) + 1];
        }
    }
    const size_t blockSize;
    const size_t fftSize;
    const std::unique_ptr<FFT> fftObject;
    const size_t numSegments;
    const size_t numInputSegments;
    size_t currentSegment = 0, inputDataPos = 0;

    AudioBuffer<float> bufferInput, bufferOutput, bufferTempOutput, bufferOverlap;
    std::vector<AudioBuffer<float>> buffersInputSegments, buffersImpulseSegments;
};
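
// Hedged sketch of the partitioning maths used in ConvolutionEngine's constructor above.
// `PartitionLayout` and `computePartitionLayout` are names invented for this illustration;
// the engine itself does not use them.
struct PartitionLayout
{
    size_t blockSize, fftSize, numSegments, numInputSegments;
};

static PartitionLayout computePartitionLayout (size_t maxBlockSize, size_t numSamples)
{
    PartitionLayout p {};
    p.blockSize        = (size_t) nextPowerOfTwo ((int) maxBlockSize);
    p.fftSize          = p.blockSize > 128 ? 2 * p.blockSize : 4 * p.blockSize;
    p.numSegments      = numSamples / (p.fftSize - p.blockSize) + 1u;
    p.numInputSegments = p.blockSize > 128 ? p.numSegments : 3 * p.numSegments;

    // e.g. maxBlockSize = 512 and a 48000-sample IR give blockSize 512, fftSize 1024,
    // numSegments 94 (48000 / 512 + 1) and numInputSegments 94.
    return p;
}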
class MultichannelEngine
{
public:
    MultichannelEngine (const AudioBuffer<float>& buf,
                        int maxBlockSize,
                        int maxBufferSize,
                        Convolution::NonUniform headSizeIn,
                        bool isZeroDelayIn)
        : tailBuffer (1, maxBlockSize),
          latency (isZeroDelayIn ? 0 : maxBufferSize),
          irSize (buf.getNumSamples()),
          blockSize (maxBlockSize),
          isZeroDelay (isZeroDelayIn)
    {
        constexpr auto numChannels = 2;

        const auto makeEngine = [&] (int channel, int offset, int length, uint32 thisBlockSize)
        {
            return std::make_unique<ConvolutionEngine> (buf.getReadPointer (jmin (buf.getNumChannels() - 1, channel), offset),
                                                        static_cast<size_t> (length),
                                                        static_cast<size_t> (thisBlockSize));
        };

        if (headSizeIn.headSizeInSamples == 0)
        {
            for (int i = 0; i < numChannels; ++i)
                head.emplace_back (makeEngine (i, 0, buf.getNumSamples(), static_cast<uint32> (maxBufferSize)));
        }
        else
        {
            const auto size = jmin (buf.getNumSamples(), headSizeIn.headSizeInSamples);

            for (int i = 0; i < numChannels; ++i)
                head.emplace_back (makeEngine (i, 0, size, static_cast<uint32> (maxBufferSize)));

            const auto tailBufferSize = static_cast<uint32> (headSizeIn.headSizeInSamples + (isZeroDelay ? 0 : maxBufferSize));

            if (size != buf.getNumSamples())
                for (int i = 0; i < numChannels; ++i)
                    tail.emplace_back (makeEngine (i, size, buf.getNumSamples() - size, tailBufferSize));
        }
    }
    void reset()
    {
        for (const auto& e : head)
            e->reset();

        for (const auto& e : tail)
            e->reset();
    }

    void processSamples (const AudioBlock<const float>& input, AudioBlock<float>& output)
    {
        const auto numChannels = jmin (head.size(), input.getNumChannels(), output.getNumChannels());
        const auto numSamples  = jmin (input.getNumSamples(), output.getNumSamples());

        const AudioBlock<float> fullTailBlock (tailBuffer);
        const auto tailBlock = fullTailBlock.getSubBlock (0, (size_t) numSamples);

        const auto isUniform = tail.empty();

        for (size_t channel = 0; channel < numChannels; ++channel)
        {
            if (! isUniform)
                tail[channel]->processSamplesWithAddedLatency (input.getChannelPointer (channel),
                                                               tailBlock.getChannelPointer (0),
                                                               numSamples);

            if (isZeroDelay)
                head[channel]->processSamples (input.getChannelPointer (channel),
                                               output.getChannelPointer (channel),
                                               numSamples);
            else
                head[channel]->processSamplesWithAddedLatency (input.getChannelPointer (channel),
                                                               output.getChannelPointer (channel),
                                                               numSamples);

            if (! isUniform)
                output.getSingleChannelBlock (channel) += tailBlock;
        }

        const auto numOutputChannels = output.getNumChannels();

        for (auto i = numChannels; i < numOutputChannels; ++i)
            output.getSingleChannelBlock (i).copyFrom (output.getSingleChannelBlock (0));
    }

    int getIRSize() const noexcept    { return irSize; }
    int getLatency() const noexcept   { return latency; }
    int getBlockSize() const noexcept { return blockSize; }

    std::vector<std::unique_ptr<ConvolutionEngine>> head, tail;

private:
    AudioBuffer<float> tailBuffer;

    const int latency;
    const int irSize;
    const int blockSize;
    const bool isZeroDelay;
};
static AudioBuffer<float> fixNumChannels (const AudioBuffer<float>& buf, Convolution::Stereo stereo)
{
    const auto numChannels = jmin (buf.getNumChannels(), stereo == Convolution::Stereo::yes ? 2 : 1);
    const auto numSamples = buf.getNumSamples();

    AudioBuffer<float> result (numChannels, buf.getNumSamples());

    for (auto channel = 0; channel != numChannels; ++channel)
        result.copyFrom (channel, 0, buf.getReadPointer (channel), numSamples);

    if (result.getNumSamples() == 0 || result.getNumChannels() == 0)
    {
        result.setSize (1, 1);
        result.setSample (0, 0, 1.0f);
    }

    return result;
}
static AudioBuffer<float> trimImpulseResponse (const AudioBuffer<float>& buf)
{
    const auto thresholdTrim = Decibels::decibelsToGain (-80.0f);

    const auto numChannels = buf.getNumChannels();
    const auto numSamples  = buf.getNumSamples();

    std::ptrdiff_t offsetBegin = numSamples;
    std::ptrdiff_t offsetEnd   = numSamples;

    for (auto channel = 0; channel < numChannels; ++channel)
    {
        const auto indexAboveThreshold = [&] (auto begin, auto end)
        {
            return std::distance (begin, std::find_if (begin, end, [&] (float sample)
            {
                return std::abs (sample) >= thresholdTrim;
            }));
        };

        const auto channelBegin = buf.getReadPointer (channel);
        const auto channelEnd   = channelBegin + numSamples;
        const auto itStart = indexAboveThreshold (channelBegin, channelEnd);
        const auto itEnd   = indexAboveThreshold (std::make_reverse_iterator (channelEnd),
                                                  std::make_reverse_iterator (channelBegin));

        offsetBegin = jmin (offsetBegin, itStart);
        offsetEnd   = jmin (offsetEnd,   itEnd);
    }

    if (offsetBegin == numSamples)
    {
        auto result = AudioBuffer<float> (numChannels, 1);
        result.clear();
        return result;
    }

    const auto newLength = jmax (1, numSamples - static_cast<int> (offsetBegin + offsetEnd));

    AudioBuffer<float> result (numChannels, newLength);

    for (auto channel = 0; channel < numChannels; ++channel)
    {
        result.copyFrom (channel,
                         0,
                         buf.getReadPointer (channel, static_cast<int> (offsetBegin)),
                         result.getNumSamples());
    }

    return result;
}
static float calculateNormalisationFactor (float sumSquaredMagnitude)
{
    if (sumSquaredMagnitude < 1e-8f)
        return 1.0f;

    return 0.125f / std::sqrt (sumSquaredMagnitude);
}
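
// Hedged numeric check of the factor above (sketch only): an impulse response whose largest
// per-channel energy is 4.0 gets a gain of 0.125 / sqrt (4.0) = 0.0625, so its sum of squared
// samples becomes 4.0 * 0.0625^2 = 0.015625 after scaling.
static void normalisationFactorSketch()
{
    const auto factor = calculateNormalisationFactor (4.0f);
    jassert (approximatelyEqual (factor, 0.0625f));
    ignoreUnused (factor);
}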
static void normaliseImpulseResponse (AudioBuffer<float>& buf)
{
    const auto numChannels = buf.getNumChannels();
    const auto numSamples  = buf.getNumSamples();
    const auto channelPtrs = buf.getArrayOfWritePointers();

    const auto maxSumSquaredMag = std::accumulate (channelPtrs, channelPtrs + numChannels, 0.0f, [numSamples] (auto max, auto* channel)
    {
        return jmax (max, std::accumulate (channel, channel + numSamples, 0.0f, [] (auto sum, auto samp)
        {
            return sum + (samp * samp);
        }));
    });

    const auto normalisationFactor = calculateNormalisationFactor (maxSumSquaredMag);

    std::for_each (channelPtrs, channelPtrs + numChannels, [normalisationFactor, numSamples] (auto* channel)
    {
        FloatVectorOperations::multiply (channel, normalisationFactor, numSamples);
    });
}
static AudioBuffer<float> resampleImpulseResponse (const AudioBuffer<float>& buf,
                                                   const double srcSampleRate,
                                                   const double destSampleRate)
{
    if (approximatelyEqual (srcSampleRate, destSampleRate))
        return buf;

    const auto factorReading = srcSampleRate / destSampleRate;

    AudioBuffer<float> original = buf;
    MemoryAudioSource memorySource (original, false);
    ResamplingAudioSource resamplingSource (&memorySource, false, buf.getNumChannels());

    const auto finalSize = roundToInt (jmax (1.0, buf.getNumSamples() / factorReading));
    resamplingSource.setResamplingRatio (factorReading);
    resamplingSource.prepareToPlay (finalSize, srcSampleRate);

    AudioBuffer<float> result (buf.getNumChannels(), finalSize);
    resamplingSource.getNextAudioBlock ({ &result, 0, result.getNumSamples() });

    return result;
}
template <typename Element>
class TryLockedPtr
{
public:
    void set (std::unique_ptr<Element> p)
    {
        const SpinLock::ScopedLockType lock (mutex);
        ptr = std::move (p);
    }

    std::unique_ptr<MultichannelEngine> get()
    {
        const SpinLock::ScopedTryLockType lock (mutex);
        return lock.isLocked() ? std::move (ptr) : nullptr;
    }

private:
    SpinLock mutex;
    std::unique_ptr<Element> ptr;
};

struct BufferWithSampleRate
{
    BufferWithSampleRate() = default;

    BufferWithSampleRate (AudioBuffer<float>&& bufferIn, double sampleRateIn)
        : buffer (std::move (bufferIn)), sampleRate (sampleRateIn) {}

    AudioBuffer<float> buffer;
    double sampleRate = 0.0;
};

static BufferWithSampleRate loadStreamToBuffer (std::unique_ptr<InputStream> stream, size_t maxLength)
{
    AudioFormatManager manager;
    manager.registerBasicFormats();
    std::unique_ptr<AudioFormatReader> formatReader (manager.createReaderFor (std::move (stream)));

    if (formatReader == nullptr)
        return {};

    const auto fileLength = static_cast<size_t> (formatReader->lengthInSamples);
    const auto lengthToLoad = maxLength == 0 ? fileLength : jmin (maxLength, fileLength);

    BufferWithSampleRate result { { jlimit (1, 2, static_cast<int> (formatReader->numChannels)),
                                    static_cast<int> (lengthToLoad) },
                                  formatReader->sampleRate };

    formatReader->read (result.buffer.getArrayOfWritePointers(),
                        result.buffer.getNumChannels(),
                        0,
                        result.buffer.getNumSamples());

    return result;
}
// Caches the data needed to build a new convolution engine (impulse response data and a
// ProcessSpec). Calls to setProcessSpec and setImpulseResponse construct a new engine,
// which can be retrieved by calling getEngine.
class ConvolutionEngineFactory
{
public:
    ConvolutionEngineFactory (Convolution::Latency requiredLatency,
                              Convolution::NonUniform requiredHeadSize)
        : latency  { (requiredLatency.latencyInSamples   <= 0) ? 0 : jmax (64, nextPowerOfTwo (requiredLatency.latencyInSamples)) },
          headSize { (requiredHeadSize.headSizeInSamples <= 0) ? 0 : jmax (64, nextPowerOfTwo (requiredHeadSize.headSizeInSamples)) },
          shouldBeZeroLatency (requiredLatency.latencyInSamples == 0)
    {}

    void setProcessSpec (const ProcessSpec& spec)
    {
        const std::lock_guard<std::mutex> lock (mutex);
        processSpec = spec;

        engine.set (makeEngine());
    }

    void setImpulseResponse (BufferWithSampleRate&& buf,
                             Convolution::Stereo stereo,
                             Convolution::Trim trim,
                             Convolution::Normalise normalise)
    {
        const std::lock_guard<std::mutex> lock (mutex);
        wantsNormalise = normalise;
        originalSampleRate = buf.sampleRate;

        impulseResponse = [&]
        {
            auto corrected = fixNumChannels (buf.buffer, stereo);
            return trim == Convolution::Trim::yes ? trimImpulseResponse (corrected) : corrected;
        }();

        engine.set (makeEngine());
    }

    // Returns the most recently created engine, or nullptr if there is no pending engine
    // or if it is currently being created on another thread.
    std::unique_ptr<MultichannelEngine> getEngine() { return engine.get(); }

private:
    std::unique_ptr<MultichannelEngine> makeEngine()
    {
        auto resampled = resampleImpulseResponse (impulseResponse, originalSampleRate, processSpec.sampleRate);

        if (wantsNormalise == Convolution::Normalise::yes)
            normaliseImpulseResponse (resampled);
        else
            resampled.applyGain ((float) (originalSampleRate / processSpec.sampleRate));

        const auto currentLatency = jmax (processSpec.maximumBlockSize, (uint32) latency.latencyInSamples);
        const auto maxBufferSize = shouldBeZeroLatency ? static_cast<int> (processSpec.maximumBlockSize)
                                                       : nextPowerOfTwo (static_cast<int> (currentLatency));

        return std::make_unique<MultichannelEngine> (resampled,
                                                     static_cast<int> (processSpec.maximumBlockSize),
                                                     maxBufferSize,
                                                     headSize,
                                                     shouldBeZeroLatency);
    }

    static AudioBuffer<float> makeImpulseBuffer()
    {
        AudioBuffer<float> result (1, 1);
        result.setSample (0, 0, 1.0f);
        return result;
    }

    ProcessSpec processSpec { 44100.0, 128, 2 };
    AudioBuffer<float> impulseResponse = makeImpulseBuffer();
    double originalSampleRate = processSpec.sampleRate;
    Convolution::Normalise wantsNormalise = Convolution::Normalise::no;
    const Convolution::Latency latency;
    const Convolution::NonUniform headSize;
    const bool shouldBeZeroLatency;

    TryLockedPtr<MultichannelEngine> engine;

    mutable std::mutex mutex;
};
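
// Hedged illustration of how the factory constructor above quantises a requested latency:
// non-positive requests map to zero, everything else is rounded up to a power of two and
// clamped to at least 64 samples. `roundedLatencySketch` is a name invented for this sketch.
static int roundedLatencySketch (int requestedLatencyInSamples)
{
    // e.g. requests of 1..64 -> 64, 100 -> 128, 4000 -> 4096
    return requestedLatencyInSamples <= 0
               ? 0
               : jmax (64, nextPowerOfTwo (requestedLatencyInSamples));
}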
static void setImpulseResponse (ConvolutionEngineFactory& factory,
                                const void* sourceData,
                                size_t sourceDataSize,
                                Convolution::Stereo stereo,
                                Convolution::Trim trim,
                                size_t size,
                                Convolution::Normalise normalise)
{
    factory.setImpulseResponse (loadStreamToBuffer (std::make_unique<MemoryInputStream> (sourceData, sourceDataSize, false), size),
                                stereo, trim, normalise);
}

static void setImpulseResponse (ConvolutionEngineFactory& factory,
                                const File& fileImpulseResponse,
                                Convolution::Stereo stereo,
                                Convolution::Trim trim,
                                size_t size,
                                Convolution::Normalise normalise)
{
    factory.setImpulseResponse (loadStreamToBuffer (std::make_unique<FileInputStream> (fileImpulseResponse), size),
                                stereo, trim, normalise);
}
// Acts as a destination for convolution engines which are loaded on a background thread.
// Deriving from enable_shared_from_this allows lambdas posted to the background queue to
// hold a weak reference to this object.
class ConvolutionEngineQueue final : public std::enable_shared_from_this<ConvolutionEngineQueue>
{
public:
    ConvolutionEngineQueue (BackgroundMessageQueue& queue,
                            Convolution::Latency latencyIn,
                            Convolution::NonUniform headSizeIn)
        : messageQueue (queue), factory (latencyIn, headSizeIn) {}

    void loadImpulseResponse (AudioBuffer<float>&& buffer,
                              double sr,
                              Convolution::Stereo stereo,
                              Convolution::Trim trim,
                              Convolution::Normalise normalise)
    {
        callLater ([b = std::move (buffer), sr, stereo, trim, normalise] (ConvolutionEngineFactory& f) mutable
        {
            f.setImpulseResponse ({ std::move (b), sr }, stereo, trim, normalise);
        });
    }

    void loadImpulseResponse (const void* sourceData,
                              size_t sourceDataSize,
                              Convolution::Stereo stereo,
                              Convolution::Trim trim,
                              size_t size,
                              Convolution::Normalise normalise)
    {
        callLater ([sourceData, sourceDataSize, stereo, trim, size, normalise] (ConvolutionEngineFactory& f) mutable
        {
            setImpulseResponse (f, sourceData, sourceDataSize, stereo, trim, size, normalise);
        });
    }

    void loadImpulseResponse (const File& fileImpulseResponse,
                              Convolution::Stereo stereo,
                              Convolution::Trim trim,
                              size_t size,
                              Convolution::Normalise normalise)
    {
        callLater ([fileImpulseResponse, stereo, trim, size, normalise] (ConvolutionEngineFactory& f) mutable
        {
            setImpulseResponse (f, fileImpulseResponse, stereo, trim, size, normalise);
        });
    }

    void prepare (const ProcessSpec& spec)
    {
        factory.setProcessSpec (spec);
    }

    // Call this regularly to retry any pending message, so the most recently requested state
    // is eventually applied even if the message queue fills up.
    void postPendingCommand()
    {
        if (pendingCommand == nullptr)
            return;

        if (messageQueue.push (pendingCommand))
            pendingCommand = nullptr;
    }

    std::unique_ptr<MultichannelEngine> getEngine() { return factory.getEngine(); }

private:
    template <typename Fn>
    void callLater (Fn&& fn)
    {
        pendingCommand = [weak = weakFromThis(), callback = std::forward<Fn> (fn)]() mutable
        {
            if (auto t = weak.lock())
                callback (t->factory);
        };

        postPendingCommand();
    }

    std::weak_ptr<ConvolutionEngineQueue> weakFromThis() { return shared_from_this(); }

    BackgroundMessageQueue& messageQueue;
    ConvolutionEngineFactory factory;
    BackgroundMessageQueue::IncomingCommand pendingCommand;
};
class CrossoverMixer
{
public:
    void reset()
    {
        smoother.setCurrentAndTargetValue (1.0f);
    }

    void prepare (const ProcessSpec& spec)
    {
        smoother.reset (spec.sampleRate, 0.05);
        smootherBuffer.setSize (1, static_cast<int> (spec.maximumBlockSize));
        mixBuffer.setSize (static_cast<int> (spec.numChannels), static_cast<int> (spec.maximumBlockSize));
        reset();
    }

    template <typename ProcessCurrent, typename ProcessPrevious, typename NotifyDone>
    void processSamples (const AudioBlock<const float>& input,
                         AudioBlock<float>& output,
                         ProcessCurrent&& current,
                         ProcessPrevious&& previous,
                         NotifyDone&& notifyDone)
    {
        if (smoother.isSmoothing())
        {
            const auto numSamples = static_cast<int> (input.getNumSamples());

            for (auto sample = 0; sample != numSamples; ++sample)
                smootherBuffer.setSample (0, sample, smoother.getNextValue());

            AudioBlock<float> mixBlock (mixBuffer);
            previous (input, mixBlock);

            for (size_t channel = 0; channel != output.getNumChannels(); ++channel)
            {
                FloatVectorOperations::multiply (mixBlock.getChannelPointer (channel),
                                                 smootherBuffer.getReadPointer (0),
                                                 numSamples);
            }

            FloatVectorOperations::multiply (smootherBuffer.getWritePointer (0), -1.0f, numSamples);
            FloatVectorOperations::add (smootherBuffer.getWritePointer (0), 1.0f, numSamples);

            current (input, output);

            for (size_t channel = 0; channel != output.getNumChannels(); ++channel)
            {
                FloatVectorOperations::multiply (output.getChannelPointer (channel),
                                                 smootherBuffer.getReadPointer (0),
                                                 numSamples);
                FloatVectorOperations::add (output.getChannelPointer (channel),
                                            mixBlock.getChannelPointer (channel),
                                            numSamples);
            }

            if (! smoother.isSmoothing())
                notifyDone();
        }
        else
        {
            current (input, output);
        }
    }

    void beginTransition()
    {
        smoother.setCurrentAndTargetValue (1.0f);
        smoother.setTargetValue (0.0f);
    }

private:
    LinearSmoothedValue<float> smoother;
    AudioBuffer<float> smootherBuffer;
    AudioBuffer<float> mixBuffer;
};
using OptionalQueue = OptionalScopedPointer<ConvolutionMessageQueue>;

class Convolution::Impl
{
public:
    Impl (Latency requiredLatency,
          NonUniform requiredHeadSize,
          OptionalQueue&& queue)
        : messageQueue (std::move (queue)),
          engineQueue (std::make_shared<ConvolutionEngineQueue> (*messageQueue->pimpl,
                                                                 requiredLatency,
                                                                 requiredHeadSize))
    {}

    void reset()
    {
        mixer.reset();

        if (currentEngine != nullptr)
            currentEngine->reset();

        destroyPreviousEngine();
    }

    void prepare (const ProcessSpec& spec)
    {
        messageQueue->pimpl->popAll();
        mixer.prepare (spec);
        engineQueue->prepare (spec);

        if (auto newEngine = engineQueue->getEngine())
            currentEngine = std::move (newEngine);

        previousEngine = nullptr;
        jassert (currentEngine != nullptr);
    }

    void processSamples (const AudioBlock<const float>& input, AudioBlock<float>& output)
    {
        engineQueue->postPendingCommand();

        if (previousEngine == nullptr)
            installPendingEngine();

        mixer.processSamples (input,
                              output,
                              [this] (const AudioBlock<const float>& in, AudioBlock<float>& out)
                              {
                                  currentEngine->processSamples (in, out);
                              },
                              [this] (const AudioBlock<const float>& in, AudioBlock<float>& out)
                              {
                                  if (previousEngine != nullptr)
                                      previousEngine->processSamples (in, out);
                              },
                              [this] { destroyPreviousEngine(); });
    }

    int getCurrentIRSize() const { return currentEngine != nullptr ? currentEngine->getIRSize() : 0; }

    int getLatency() const { return currentEngine != nullptr ? currentEngine->getLatency() : 0; }

    void loadImpulseResponse (AudioBuffer<float>&& buffer,
                              double originalSampleRate,
                              Stereo stereo,
                              Trim trim,
                              Normalise normalise)
    {
        engineQueue->loadImpulseResponse (std::move (buffer), originalSampleRate, stereo, trim, normalise);
    }

    void loadImpulseResponse (const void* sourceData,
                              size_t sourceDataSize,
                              Stereo stereo,
                              Trim trim,
                              size_t size,
                              Normalise normalise)
    {
        engineQueue->loadImpulseResponse (sourceData, sourceDataSize, stereo, trim, size, normalise);
    }

    void loadImpulseResponse (const File& fileImpulseResponse,
                              Stereo stereo,
                              Trim trim,
                              size_t size,
                              Normalise normalise)
    {
        engineQueue->loadImpulseResponse (fileImpulseResponse, stereo, trim, size, normalise);
    }

private:
    void destroyPreviousEngine()
    {
        // If the queue is full, the engine is destroyed straight away on this thread instead.
        BackgroundMessageQueue::IncomingCommand command = [p = std::move (previousEngine)]() mutable { p = nullptr; };
        messageQueue->pimpl->push (command);
    }

    void installNewEngine (std::unique_ptr<MultichannelEngine> newEngine)
    {
        destroyPreviousEngine();
        previousEngine = std::move (currentEngine);
        currentEngine = std::move (newEngine);
        mixer.beginTransition();
    }

    void installPendingEngine()
    {
        if (auto newEngine = engineQueue->getEngine())
            installNewEngine (std::move (newEngine));
    }

    OptionalQueue messageQueue;
    std::shared_ptr<ConvolutionEngineQueue> engineQueue;
    std::unique_ptr<MultichannelEngine> previousEngine, currentEngine;
    CrossoverMixer mixer;
};
void Convolution::Mixer::prepare (const ProcessSpec& spec)
{
    for (auto& dry : volumeDry)
        dry.reset (spec.sampleRate, 0.05);

    for (auto& wet : volumeWet)
        wet.reset (spec.sampleRate, 0.05);

    sampleRate = spec.sampleRate;

    dryBlock = AudioBlock<float> (dryBlockStorage,
                                  jmin (spec.numChannels, 2u),
                                  spec.maximumBlockSize);
}
template <typename ProcessWet>
void Convolution::Mixer::processSamples (const AudioBlock<const float>& input,
                                         AudioBlock<float>& output,
                                         bool isBypassed,
                                         ProcessWet&& processWet) noexcept
{
    const auto numChannels = jmin (input.getNumChannels(), volumeDry.size());
    const auto numSamples  = jmin (input.getNumSamples(), output.getNumSamples());

    auto dry = dryBlock.getSubsetChannelBlock (0, numChannels);

    if (volumeDry[0].isSmoothing())
    {
        dry.copyFrom (input);

        for (size_t channel = 0; channel < numChannels; ++channel)
            volumeDry[channel].applyGain (dry.getChannelPointer (channel), (int) numSamples);

        processWet (input, output);

        for (size_t channel = 0; channel < numChannels; ++channel)
            volumeWet[channel].applyGain (output.getChannelPointer (channel), (int) numSamples);

        output += dry;
    }
    else
    {
        if (! currentIsBypassed)
            processWet (input, output);

        if (isBypassed != currentIsBypassed)
        {
            currentIsBypassed = isBypassed;

            for (size_t channel = 0; channel < numChannels; ++channel)
            {
                volumeDry[channel].setTargetValue (isBypassed ? 0.0f : 1.0f);
                volumeDry[channel].reset (sampleRate, 0.05);
                volumeDry[channel].setTargetValue (isBypassed ? 1.0f : 0.0f);

                volumeWet[channel].setTargetValue (isBypassed ? 1.0f : 0.0f);
                volumeWet[channel].reset (sampleRate, 0.05);
                volumeWet[channel].setTargetValue (isBypassed ? 0.0f : 1.0f);
            }
        }
    }
}
void Convolution::Mixer::reset() { dryBlock.clear(); }

Convolution::Convolution (const Latency& requiredLatency)
    : Convolution (requiredLatency, {}, OptionalQueue { std::make_unique<ConvolutionMessageQueue>() }) {}

Convolution::Convolution (const NonUniform& nonUniform)
    : Convolution ({}, nonUniform, OptionalQueue { std::make_unique<ConvolutionMessageQueue>() }) {}

Convolution::Convolution (const Latency& latency,
                          const NonUniform& nonUniform,
                          OptionalQueue&& queue)
    : pimpl (std::make_unique<Impl> (latency, nonUniform, std::move (queue)))
{}

Convolution::~Convolution() noexcept = default;

void Convolution::loadImpulseResponse (const void* sourceData, size_t sourceDataSize,
                                       Stereo stereo, Trim trim, size_t size, Normalise normalise)
{
    pimpl->loadImpulseResponse (sourceData, sourceDataSize, stereo, trim, size, normalise);
}

void Convolution::loadImpulseResponse (const File& fileImpulseResponse,
                                       Stereo stereo, Trim trim, size_t size, Normalise normalise)
{
    pimpl->loadImpulseResponse (fileImpulseResponse, stereo, trim, size, normalise);
}

void Convolution::loadImpulseResponse (AudioBuffer<float>&& buffer, double originalSampleRate,
                                       Stereo stereo, Trim trim, Normalise normalise)
{
    pimpl->loadImpulseResponse (std::move (buffer), originalSampleRate, stereo, trim, normalise);
}

void Convolution::prepare (const ProcessSpec& spec)
{
    mixer.prepare (spec);
    pimpl->prepare (spec);
}

void Convolution::processSamples (const AudioBlock<const float>& input, AudioBlock<float>& output,
                                  bool isBypassed) noexcept
{
    jassert (input.getNumChannels() == output.getNumChannels());
    jassert (isPositiveAndBelow (input.getNumChannels(), static_cast<size_t> (3))); // only mono and stereo are supported

    mixer.processSamples (input, output, isBypassed, [this] (const auto& in, auto& out)
    {
        pimpl->processSamples (in, out);
    });
}
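
// Usage sketch for the public API implemented above (hedged; the file path, block size and
// channel count are illustrative only): prepare the processor, request an impulse response
// to be loaded on the background thread, then process blocks as usual.
static void convolutionUsageSketch()
{
    Convolution convolution;

    convolution.prepare ({ 48000.0, 512, 2 });                      // sampleRate, maximumBlockSize, numChannels

    convolution.loadImpulseResponse (File ("~/impulses/hall.wav"),
                                     Convolution::Stereo::yes,
                                     Convolution::Trim::yes,
                                     0,                             // 0 == load the whole file
                                     Convolution::Normalise::yes);

    AudioBuffer<float> buffer (2, 512);
    buffer.clear();

    AudioBlock<float> block (buffer);
    ProcessContextReplacing<float> context (block);
    convolution.process (context);                                  // the engine swap happens here once loading finishes
}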