Handle AV sync timestamps when draining the audio sink during tunneling
When audio processors are enabled during tunneling, they must produce
output immediately, ensuring that the timestamps of the output samples
correspond to the input and that no additional samples are produced.
This requirement is documented in the Javadoc of DefaultAudioSink.
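As an illustration only (not part of the commit): a processor satisfying this contract makes all output for a queued input available before the queueing call returns, with one output sample per input sample. The class and method names below are hypothetical, not ExoPlayer APIs; they are a minimal sketch of the contract.

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

// Hypothetical processor honoring the tunneling contract: all output for a
// queued input is available immediately, with a 1:1 sample correspondence.
final class ImmediateGainProcessor {
  private ByteBuffer output = ByteBuffer.allocate(0);

  /** Queues 16-bit PCM input; the matching output exists when this returns. */
  public void queueInput(ByteBuffer input) {
    ByteBuffer processed =
        ByteBuffer.allocate(input.remaining()).order(ByteOrder.LITTLE_ENDIAN);
    input.order(ByteOrder.LITTLE_ENDIAN);
    while (input.remaining() >= 2) {
      processed.putShort((short) (input.getShort() / 2)); // Fixed -6 dB gain.
    }
    processed.flip();
    output = processed; // No samples are held back or added.
  }

  /** Returns the pending output produced by the last queueInput call. */
  public ByteBuffer getOutput() {
    ByteBuffer pending = output;
    output = ByteBuffer.allocate(0);
    return pending;
  }
}

A processor that buffered samples internally (a resampler, say) would break AV sync during tunneling, because output bytes would no longer map one-to-one to the input timestamps.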

However, this alone doesn't guarantee that all buffers are immediately
written to the AudioTrack, because AudioTrack writes are non-blocking
and may need multiple attempts.
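For context, here is a minimal sketch of one such non-blocking attempt, assuming an AV-sync (HW_AV_SYNC) track. AudioTrack.write with WRITE_NON_BLOCKING and a timestamp is the real framework API (API 21+, timestamp in nanoseconds); the wrapper class and method name are invented for this example.

import android.media.AudioTrack;
import java.nio.ByteBuffer;

final class NonBlockingWriteExample {
  // Returns true if the buffer was fully consumed. A false return means the
  // same buffer (and its timestamp) must be offered again later, which is why
  // data can still be pending when the sink is drained at end of stream.
  static boolean tryWrite(AudioTrack track, ByteBuffer buffer, long avSyncTimeNs) {
    int written =
        track.write(buffer, buffer.remaining(), AudioTrack.WRITE_NON_BLOCKING, avSyncTimeNs);
    if (written < 0) {
      throw new IllegalStateException("AudioTrack write error: " + written);
    }
    return !buffer.hasRemaining();
  }
}

A partial write advances the buffer's position, so the caller keeps the same buffer and timestamp and retries on a later feed.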

When draining the audio sink at the end of the stream, we currently
fail in this situation because we assert that the timestamp must be
set (that is, we assume the drain operation is a no-op). But this may
not hold when the previous non-blocking write wasn't fully handled
yet. We can fix this by saving the last timestamp and reusing it
during draining, as the diff below shows.
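The shape of the fix, distilled into a standalone sketch: the field name and the C.TIME_END_OF_SOURCE sentinel match what the diff below introduces, but the holder class and resolve method are hypothetical, since the real change inlines this logic in writeBuffer.

import com.google.android.exoplayer2.C;

final class AvSyncTimestampHolder {
  private long lastTunnelingAvSyncPresentationTimeUs;

  // During normal writes the timestamp is remembered; during draining
  // (signaled by C.TIME_END_OF_SOURCE) the remembered value is reused.
  long resolve(long avSyncPresentationTimeUs) {
    if (avSyncPresentationTimeUs == C.TIME_END_OF_SOURCE) {
      return lastTunnelingAvSyncPresentationTimeUs;
    }
    lastTunnelingAvSyncPresentationTimeUs = avSyncPresentationTimeUs;
    return avSyncPresentationTimeUs;
  }
}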

Issue: #10847
PiperOrigin-RevId: 500943891
tonihei authored and christosts committed Jan 10, 2023
1 parent 2693b60 commit 3c6b8c5
Showing 1 changed file with 21 additions and 2 deletions.
@@ -511,6 +511,7 @@ public DefaultAudioSink build() {
   private AuxEffectInfo auxEffectInfo;
   @Nullable private AudioDeviceInfoApi23 preferredDevice;
   private boolean tunneling;
+  private long lastTunnelingAvSyncPresentationTimeUs;
   private long lastFeedElapsedRealtimeMs;
   private boolean offloadDisabledUntilNextConfiguration;
   private boolean isWaitingForOffloadEndOfStreamHandled;
@@ -1001,6 +1002,9 @@ private void registerStreamEventCallbackV29(AudioTrack audioTrack) {
    * <p>If the {@link AudioProcessingPipeline} is not {@linkplain
    * AudioProcessingPipeline#isOperational() operational}, input buffers are passed straight to
    * {@link #writeBuffer(ByteBuffer, long)}.
+   *
+   * @param avSyncPresentationTimeUs The tunneling AV sync presentation time for the current buffer,
+   *     or {@link C#TIME_END_OF_SOURCE} when draining remaining buffers at the end of the stream.
    */
   private void processBuffers(long avSyncPresentationTimeUs) throws WriteException {
     if (!audioProcessingPipeline.isOperational()) {
@@ -1034,17 +1038,24 @@ private boolean drainToEndOfStream() throws WriteException {
       if (outputBuffer == null) {
         return true;
       }
-      writeBuffer(outputBuffer, C.TIME_UNSET);
+      writeBuffer(outputBuffer, C.TIME_END_OF_SOURCE);
       return outputBuffer == null;
     }
 
     audioProcessingPipeline.queueEndOfStream();
-    processBuffers(C.TIME_UNSET);
+    processBuffers(C.TIME_END_OF_SOURCE);
     return audioProcessingPipeline.isEnded()
         && (outputBuffer == null || !outputBuffer.hasRemaining());
   }
 
   @SuppressWarnings("ReferenceEquality")
+  /**
+   * Writes the provided buffer to the audio track.
+   *
+   * @param buffer The buffer to write.
+   * @param avSyncPresentationTimeUs The tunneling AV sync presentation time for the buffer, or
+   *     {@link C#TIME_END_OF_SOURCE} when draining remaining buffers at the end of the stream.
+   */
   private void writeBuffer(ByteBuffer buffer, long avSyncPresentationTimeUs) throws WriteException {
     if (!buffer.hasRemaining()) {
       return;
@@ -1080,6 +1091,14 @@ private void writeBuffer(ByteBuffer buffer, long avSyncPresentationTimeUs) throws WriteException {
       }
     } else if (tunneling) {
       Assertions.checkState(avSyncPresentationTimeUs != C.TIME_UNSET);
+      if (avSyncPresentationTimeUs == C.TIME_END_OF_SOURCE) {
+        // Audio processors during tunneling are required to produce buffers immediately when
+        // queuing, so we can assume the timestamp during draining at the end of the stream is the
+        // same as the timestamp of the last sample we processed.
+        avSyncPresentationTimeUs = lastTunnelingAvSyncPresentationTimeUs;
+      } else {
+        lastTunnelingAvSyncPresentationTimeUs = avSyncPresentationTimeUs;
+      }
       bytesWrittenOrError =
           writeNonBlockingWithAvSyncV21(
               audioTrack, buffer, bytesRemaining, avSyncPresentationTimeUs);
