Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
28 commits
Select commit Hold shift + click to select a range
39e172d
refactor: simpler implementations
mdydek Mar 30, 2026
3c00523
chore: merge
closetcaiman Apr 9, 2026
024101d
feat: add shared buffer processor
closetcaiman Apr 20, 2026
d5c7e38
feat: add SingleBufferCursor
closetcaiman Apr 20, 2026
7c37eca
chore(merge): integrate main
closetcaiman Apr 20, 2026
62cfbdf
feat(tests): add AudioBufferSourceNode tests
closetcaiman Apr 21, 2026
03bdcd9
chore(ios): update pods
closetcaiman Apr 21, 2026
f55cdf6
feat: add base class implementation
closetcaiman Apr 21, 2026
aa4201e
feat(tests): add long playback
closetcaiman Apr 21, 2026
e60a5a0
fix(test): bump long test len
closetcaiman Apr 21, 2026
fe48468
feat: add queue cursor
closetcaiman Apr 28, 2026
7cf2228
feat: add queue processor
closetcaiman Apr 28, 2026
26a599e
refactor: processing
closetcaiman May 4, 2026
f7bd578
fix: uncomment tests
closetcaiman May 4, 2026
af103ca
fix: infinite loop on float truncation
closetcaiman May 4, 2026
ea6819d
chore: remove test screen from fabric
closetcaiman May 4, 2026
dbdc395
chore: merge
closetcaiman May 4, 2026
d8c066f
feat: add negative playbackrate loop test
closetcaiman May 4, 2026
5ca86b7
chore: remove unused navigation param
closetcaiman May 4, 2026
38bffe6
fix: hpp -> h
closetcaiman May 5, 2026
46fc831
fix: clangd warning pragma
closetcaiman May 5, 2026
ac2a1bb
refactor: processor creation, fatfunction
closetcaiman May 5, 2026
c9521f2
fix: remove unnecessary header
closetcaiman May 5, 2026
d9b8bba
refactor: boundary checks
closetcaiman May 5, 2026
4fefe26
refactor: unnecessary check
closetcaiman May 5, 2026
6a4adb2
fix: add negative playback rate support
closetcaiman May 5, 2026
750df6c
fix: proper clamping withoutPitchCorrection
closetcaiman May 5, 2026
37bb26d
fix!: interpolation
closetcaiman May 7, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
32 changes: 19 additions & 13 deletions apps/common-app/src/examples/AudioTag/AudioTag.tsx
Original file line number Diff line number Diff line change
@@ -1,6 +1,9 @@
import React, { useRef } from 'react';
import { Button, View } from 'react-native';
import { Audio, AudioTagHandle } from 'react-native-audio-api/development/react';
import {
Audio,
AudioTagHandle,
} from 'react-native-audio-api/development/react';

import { Container } from '../../components';

Expand Down Expand Up @@ -35,18 +38,21 @@ const AudioTag: React.FC = () => {
<Container disablePadding>
<View style={{ flex: 1, justifyContent: 'center', alignItems: 'center' }}>
<View style={{ width: '90%' }}>
<Audio source={DEMO_AUDIO_URL} ref={audioRef} controls
onLoadStart={() => console.log('onLoadStart')}
onLoad={() => console.log('onLoad')}
onError={(error) => console.log('onError', error)}
onPositionChange={(seconds) =>
console.log('onPositionChange', seconds)
}
onEnded={() => console.log('onEnded')}
onPlay={() => console.log('onPlay')}
onPause={() => console.log('onPause')}
onVolumeChange={(volume) => console.log('onVolumeChange', volume)}
/>
<Audio
source={DEMO_AUDIO_URL}
ref={audioRef}
controls
onLoadStart={() => console.log('onLoadStart')}
onLoad={() => console.log('onLoad')}
onError={(error) => console.log('onError', error)}
onPositionChange={(seconds) =>
console.log('onPositionChange', seconds)
}
onEnded={() => console.log('onEnded')}
onPlay={() => console.log('onPlay')}
onPause={() => console.log('onPause')}
onVolumeChange={(volume) => console.log('onVolumeChange', volume)}
/>
</View>
</View>
</Container>
Expand Down
11 changes: 9 additions & 2 deletions apps/common-app/src/examples/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -9,13 +9,14 @@ import Metronome from './Metronome';
import OfflineRendering from './OfflineRendering';
import Oscillator from './Oscillator';
import Piano from './Piano';
import PlaybackSpeed from './PlaybackSpeed/PlaybackSpeed';
import PlaybackSpeed from './PlaybackSpeed';
import Record from './Record/Record';
import Streaming from './Streaming/Streaming';
import Worklets from './Worklets/Worklets';
import AudioStream from './AudioTag/AudioTag';
import ConvolverIR from './ConvolverIR';
import AudioParamPipeline from './AudioParamPipeline';
import { TestScreen } from '../../../../packages/test-app-screen';

type NavigationParamList = {
Oscillator: undefined;
Expand All @@ -33,8 +34,8 @@ type NavigationParamList = {
Streamer: undefined;
AudioTag: undefined;
ConvolverIR: undefined;
TestScreen: undefined;
AudioParamPipeline: undefined;
TestScreen: undefined;
};

export type ExampleKey = keyof NavigationParamList;
Expand Down Expand Up @@ -143,4 +144,10 @@ export const Examples: Example[] = [
Icon: icons.SquareStack,
screen: AudioParamPipeline,
},
{
key: 'TestScreen',
title: 'Test Screen',
Icon: icons.TestTube,
screen: TestScreen,
},
] as const;
8 changes: 4 additions & 4 deletions apps/fabric-example/ios/Podfile.lock
Original file line number Diff line number Diff line change
Expand Up @@ -2476,7 +2476,7 @@ EXTERNAL SOURCES:

SPEC CHECKSUMS:
FBLazyVector: c00c20551d40126351a6783c47ce75f5b374851b
hermes-engine: c399a2e224a0b13c589d76b4fc05e14bdd76fa88
hermes-engine: 146211e12d60a1951d9eb0287be07211e86cf5d5
RCTDeprecation: 3bb167081b134461cfeb875ff7ae1945f8635257
RCTRequired: 74839f55d5058a133a0bc4569b0afec750957f64
RCTSwiftUI: 87a316382f3eab4dd13d2a0d0fd2adcce917361a
Expand All @@ -2485,7 +2485,7 @@ SPEC CHECKSUMS:
React: 1b1536b9099195944034e65b1830f463caaa8390
React-callinvoker: 6dff6d17d1d6cc8fdf85468a649bafed473c65f5
React-Core: 00faa4d038298089a1d5a5b21dde8660c4f0820d
React-Core-prebuilt: ab26be1216323aea7c76f96ca450bffa7bcd4a72
React-Core-prebuilt: ef40616103ee11f8c2517697c3aa4f48ce790549
React-CoreModules: a17807f849bfd86045b0b9a75ec8c19373b482f6
React-cxxreact: c7b53ace5827be54048288bce5c55f337c41e95f
React-debug: e1f00fcd2cef58a2897471a6d76a4ef5f5f90c74
Expand Down Expand Up @@ -2549,8 +2549,8 @@ SPEC CHECKSUMS:
ReactAppDependencyProvider: 5787b37b8e2e51dfeab697ec031cc7c4080dcea2
ReactCodegen: d07ee3c8db75b43d1cbe479ae6affebf9925c733
ReactCommon: fe2a3af8975e63efa60f95fca8c34dc85deee360
ReactNativeDependencies: 212738cc51e6c4cc34ee487890497d6f41979ec0
RNAudioAPI: a36dcdaa5905d2b0e1f7170f7b5cb3cec944c029
ReactNativeDependencies: 54189f1570b1308686cb21564e755e1daa77ea03
RNAudioAPI: 0da654a83adfff638b0ccf05f7b07869a8e78cbe
RNGestureHandler: 187c5c7936abf427bc4d22d6c3b1ac80ad1f63c0
RNReanimated: 64f4b3b33b48b19e0ba76a352571b52b1e931981
RNScreens: 01b065ded2dfe7987bcce770ff3a196be417ff41
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -117,7 +117,8 @@ void AudioBufferBaseSourceNode::processWithPitchCorrection(

playbackRateBuffer_->zero();

auto framesNeededToStretch = static_cast<int>(playbackRate * static_cast<float>(framesToProcess));
auto framesNeededToStretch =
std::abs(static_cast<int>(playbackRate * static_cast<float>(framesToProcess)));

updatePlaybackInfo(
playbackRateBuffer_,
Expand All @@ -132,7 +133,7 @@ void AudioBufferBaseSourceNode::processWithPitchCorrection(
return;
}

processWithoutInterpolation(playbackRateBuffer_, startOffset, offsetLength, playbackRate);
runBufferProcessor(playbackRateBuffer_, startOffset, offsetLength, playbackRate, false);

stretch_->process(
playbackRateBuffer_.get()[0],
Expand Down Expand Up @@ -176,16 +177,19 @@ void AudioBufferBaseSourceNode::processWithoutPitchCorrection(
}

if (std::fabs(computedPlaybackRate) == 1.0) {
processWithoutInterpolation(processingBuffer, startOffset, offsetLength, computedPlaybackRate);
runBufferProcessor(processingBuffer, startOffset, offsetLength, computedPlaybackRate, false);
} else {
processWithInterpolation(processingBuffer, startOffset, offsetLength, computedPlaybackRate);
runBufferProcessor(processingBuffer, startOffset, offsetLength, computedPlaybackRate, true);
}

sendOnPositionChangedEvent();
}

float AudioBufferBaseSourceNode::getComputedPlaybackRateValue(int framesToProcess, double time) {
auto playbackRate = playbackRateParam_->processKRateParam(framesToProcess, time);
auto playbackRate = std::clamp(
playbackRateParam_->processKRateParam(framesToProcess, time),
MIN_PLAYBACK_RATE,
MAX_PLAYBACK_RATE);
auto detune = std::pow(
2.0f, //NOLINT(cppcoreguidelines-avoid-magic-numbers, readability-magic-numbers)
detuneParam_->processKRateParam(framesToProcess, time) / static_cast<float>(OCTAVE_RANGE));
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -45,17 +45,12 @@ class AudioBufferBaseSourceNode : public AudioScheduledSourceNode {

virtual bool isEmpty() const = 0;

virtual void processWithoutInterpolation(
virtual void runBufferProcessor(
const std::shared_ptr<DSPAudioBuffer> &processingBuffer,
size_t startOffset,
size_t offsetLength,
float playbackRate) = 0;

virtual void processWithInterpolation(
const std::shared_ptr<DSPAudioBuffer> &processingBuffer,
size_t startOffset,
size_t offsetLength,
float playbackRate) = 0;
float playbackRate,
bool interpolate) = 0;

private:
// pitch correction parameters
Expand All @@ -64,7 +59,7 @@ class AudioBufferBaseSourceNode : public AudioScheduledSourceNode {
std::shared_ptr<signalsmith::stretch::SignalsmithStretch<float>> stretch_;
std::shared_ptr<DSPAudioBuffer> playbackRateBuffer_;
static constexpr float MAX_PLAYBACK_RATE = 3.0f;
static constexpr float MIN_PLAYBACK_RATE = 0.0f;
static constexpr float MIN_PLAYBACK_RATE = -3.0f;

// k-rate params
const std::shared_ptr<AudioParam> detuneParam_;
Expand All @@ -79,6 +74,7 @@ class AudioBufferBaseSourceNode : public AudioScheduledSourceNode {
void processWithPitchCorrection(
const std::shared_ptr<DSPAudioBuffer> &processingBuffer,
int framesToProcess);

void processWithoutPitchCorrection(
const std::shared_ptr<DSPAudioBuffer> &processingBuffer,
int framesToProcess);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
#include <audioapi/core/utils/AudioGraphManager.h>
#include <audioapi/core/utils/Constants.h>
#include <audioapi/core/utils/Locker.h>
#include <audioapi/core/utils/buffer/QueueBufferProcessor.h>
#include <audioapi/dsp/AudioUtils.hpp>
#include <audioapi/events/AudioEventHandlerRegistry.h>
#include <audioapi/types/NodeOptions.h>
Expand All @@ -26,6 +27,22 @@ AudioBufferQueueSourceNode::AudioBufferQueueSourceNode(
}

isInitialized_.store(true, std::memory_order_release);

auto graphManager = context->getGraphManager();

auto onBufferConsumed = [this, graphManager](
size_t bufferId,
std::shared_ptr<AudioBuffer> buffer,
bool isLastInQueue,
bool fireBufferEndedEvent) {
playedBuffersDuration_ += buffer->getDuration();
if (fireBufferEndedEvent) {
sendOnBufferEndedEvent(bufferId, isLastInQueue);
}
graphManager->addAudioBufferForDestruction(std::move(buffer));
};

processor_ = std::make_unique<QueueBufferProcessor>(&buffers_, &vReadIndex_, onBufferConsumed);
}

void AudioBufferQueueSourceNode::stop(double when) {
Expand Down Expand Up @@ -152,147 +169,33 @@ bool AudioBufferQueueSourceNode::isEmpty() const {
return buffers_.empty();
}

// todo: refactor so its less complex and more readable
void AudioBufferQueueSourceNode::processWithoutInterpolation(
void AudioBufferQueueSourceNode::runBufferProcessor(
const std::shared_ptr<DSPAudioBuffer> &processingBuffer,
size_t startOffset,
size_t offsetLength,
float playbackRate) {
if (auto context = context_.lock()) {
auto readIndex = static_cast<size_t>(vReadIndex_);
size_t writeIndex = startOffset;

auto data = buffers_.front();
auto bufferId = data.first;
auto buffer = data.second;

size_t framesLeft = offsetLength;

while (framesLeft > 0) {
size_t framesToEnd = buffer->getSize() - readIndex;
size_t framesToCopy = std::min(framesToEnd, framesLeft);
framesToCopy = framesToCopy > 0 ? framesToCopy : 0;

assert(readIndex >= 0);
assert(writeIndex >= 0);
assert(readIndex + framesToCopy <= buffer->getSize());
assert(writeIndex + framesToCopy <= processingBuffer->getSize());

processingBuffer->copy(*buffer, readIndex, writeIndex, framesToCopy);

writeIndex += framesToCopy;
readIndex += framesToCopy;
framesLeft -= framesToCopy;

if (readIndex >= buffer->getSize()) {
playedBuffersDuration_ += buffer->getDuration();
buffers_.pop_front();

if (!(buffers_.empty() && addExtraTailFrames_)) {
sendOnBufferEndedEvent(bufferId, buffers_.empty());
}

if (buffers_.empty()) {
if (addExtraTailFrames_) {
buffers_.emplace_back(bufferId, tailBuffer_);
addExtraTailFrames_ = false;
} else {
context->getGraphManager()->addAudioBufferForDestruction(std::move(buffer));
processingBuffer->zero(writeIndex, framesLeft);
readIndex = 0;

break;
}
}

context->getGraphManager()->addAudioBufferForDestruction(std::move(buffer));
data = buffers_.front();
bufferId = data.first;
buffer = data.second;
readIndex = 0;
}
}
float playbackRate,
bool interpolate) {
if (!processingBuffer) {
return;
}

// update reading index for next render quantum
vReadIndex_ = static_cast<double>(readIndex);
if (buffers_.empty()) {
processingBuffer->zero(startOffset, offsetLength);
return;
}
}

// todo: refactor so its less complex and more readable
void AudioBufferQueueSourceNode::processWithInterpolation(
const std::shared_ptr<DSPAudioBuffer> &processingBuffer,
size_t startOffset,
size_t offsetLength,
float playbackRate) {
if (auto context = context_.lock()) {
size_t writeIndex = startOffset;
size_t framesLeft = offsetLength;

auto data = buffers_.front();
auto bufferId = data.first;
auto buffer = data.second;

while (framesLeft > 0) {
auto readIndex = static_cast<size_t>(vReadIndex_);
size_t nextReadIndex = readIndex + 1;
auto factor = static_cast<float>(vReadIndex_ - static_cast<double>(readIndex));

bool crossBufferInterpolation = false;
std::shared_ptr<AudioBuffer> nextBuffer = nullptr;

if (nextReadIndex >= buffer->getSize()) {
if (buffers_.size() > 1) {
auto tempQueue = buffers_;
tempQueue.pop_front();
nextBuffer = tempQueue.front().second;
nextReadIndex = 0;
crossBufferInterpolation = true;
} else {
nextReadIndex = readIndex;
}
}
if (addExtraTailFrames_ && tailBuffer_ != nullptr) {
processor_->setPendingTail(tailBuffer_);
}

for (size_t i = 0; i < processingBuffer->getNumberOfChannels(); i += 1) {
const auto destination = processingBuffer->getChannel(i)->span();
const auto currentSource = buffer->getChannel(i)->span();

if (crossBufferInterpolation) {
const auto nextSource = nextBuffer->getChannel(i)->span();
float currentSample = currentSource[readIndex];
float nextSample = nextSource[nextReadIndex];
destination[writeIndex] = currentSample + factor * (nextSample - currentSample);
} else {
destination[writeIndex] =
dsp::linearInterpolate(currentSource, readIndex, nextReadIndex, factor);
}
}
processor_->setPosition(vReadIndex_);
processor_->process(processingBuffer, startOffset, offsetLength, playbackRate, interpolate);

writeIndex += 1;
// queue source node always uses positive playbackRate
vReadIndex_ += std::abs(playbackRate);
framesLeft -= 1;

if (vReadIndex_ >= static_cast<double>(buffer->getSize())) {
playedBuffersDuration_ += buffer->getDuration();
buffers_.pop_front();

sendOnBufferEndedEvent(bufferId, buffers_.empty());

if (buffers_.empty()) {
context->getGraphManager()->addAudioBufferForDestruction(std::move(buffer));
processingBuffer->zero(writeIndex, framesLeft);
vReadIndex_ = 0.0;
break;
}

vReadIndex_ = vReadIndex_ - static_cast<double>(buffer->getSize());
context->getGraphManager()->addAudioBufferForDestruction(std::move(buffer));
data = buffers_.front();
bufferId = data.first;
buffer = data.second;
}
}
if (processor_->didConsumeTail()) {
addExtraTailFrames_ = false;
}

vReadIndex_ = processor_->getPosition();
}

} // namespace audioapi
Loading
Loading