--- a/dom/media/platforms/apple/AppleVTDecoder.cpp
+++ b/dom/media/platforms/apple/AppleVTDecoder.cpp
@@ -29,21 +29,19 @@ AppleVTDecoder::AppleVTDecoder(const Vid
MediaDataDecoderCallback* aCallback,
layers::ImageContainer* aImageContainer)
: mExtraData(aConfig.mExtraData)
, mCallback(aCallback)
, mPictureWidth(aConfig.mImage.width)
, mPictureHeight(aConfig.mImage.height)
, mDisplayWidth(aConfig.mDisplay.width)
, mDisplayHeight(aConfig.mDisplay.height)
- , mQueuedSamples(0)
, mTaskQueue(aTaskQueue)
, mMaxRefFrames(mp4_demuxer::H264::ComputeMaxRefFrames(aConfig.mExtraData))
, mImageContainer(aImageContainer)
- , mInputIncoming(0)
, mIsShutDown(false)
#ifdef MOZ_WIDGET_UIKIT
, mUseSoftwareImages(true)
#else
, mUseSoftwareImages(false)
#endif
, mIsFlushing(false)
, mMonitor("AppleVideoDecoder")
@@ -83,34 +81,30 @@ AppleVTDecoder::Input(MediaRawData* aSam
LOG("mp4 input sample %p pts %lld duration %lld us%s %d bytes",
aSample,
aSample->mTime,
aSample->mDuration,
aSample->mKeyframe ? " keyframe" : "",
aSample->Size());
- mInputIncoming++;
-
mTaskQueue->Dispatch(NewRunnableMethod<RefPtr<MediaRawData>>(
this, &AppleVTDecoder::ProcessDecode, aSample));
return NS_OK;
}
nsresult
AppleVTDecoder::Flush()
{
MOZ_ASSERT(mCallback->OnReaderTaskQueue());
mIsFlushing = true;
nsCOMPtr<nsIRunnable> runnable =
NewRunnableMethod(this, &AppleVTDecoder::ProcessFlush);
SyncRunnable::DispatchToThread(mTaskQueue, runnable);
mIsFlushing = false;
- // All ProcessDecode() tasks should be done.
- MOZ_ASSERT(mInputIncoming == 0);
mSeekTargetThreshold.reset();
return NS_OK;
}
nsresult
AppleVTDecoder::Drain()
@@ -137,28 +131,21 @@ AppleVTDecoder::Shutdown()
return NS_OK;
}
nsresult
AppleVTDecoder::ProcessDecode(MediaRawData* aSample)
{
AssertOnTaskQueueThread();
- mInputIncoming--;
-
if (mIsFlushing) {
return NS_OK;
}
auto rv = DoDecode(aSample);
- // Ask for more data.
- if (NS_SUCCEEDED(rv) && !mInputIncoming && mQueuedSamples <= mMaxRefFrames) {
- LOG("%s task queue empty; requesting more data", GetDescriptionName());
- mCallback->InputExhausted();
- }
return rv;
}
void
AppleVTDecoder::ProcessShutdown()
{
if (mSession) {
@@ -208,27 +195,25 @@ AppleVTDecoder::CreateAppleFrameRef(cons
void
AppleVTDecoder::DrainReorderedFrames()
{
MonitorAutoLock mon(mMonitor);
while (!mReorderQueue.IsEmpty()) {
mCallback->Output(mReorderQueue.Pop().get());
}
- mQueuedSamples = 0;
}
void
AppleVTDecoder::ClearReorderedFrames()
{
MonitorAutoLock mon(mMonitor);
while (!mReorderQueue.IsEmpty()) {
mReorderQueue.Pop();
}
- mQueuedSamples = 0;
}
void
AppleVTDecoder::SetSeekThreshold(const media::TimeUnit& aTime)
{
LOG("SetSeekThreshold %lld", aTime.ToMicroseconds());
mSeekTargetThreshold = Some(aTime);
}
@@ -283,26 +268,20 @@ AppleVTDecoder::OutputFrame(CVPixelBuffe
LOG("mp4 output frame %lld dts %lld pts %lld duration %lld us%s",
aFrameRef.byte_offset,
aFrameRef.decode_timestamp.ToMicroseconds(),
aFrameRef.composition_timestamp.ToMicroseconds(),
aFrameRef.duration.ToMicroseconds(),
aFrameRef.is_sync_point ? " keyframe" : ""
);
- if (mQueuedSamples > mMaxRefFrames) {
- // We had stopped requesting more input because we had received too much at
- // the time. We can ask for more once again.
+ if (!aImage) {
+    // Image was dropped by the decoder, or no image has been returned yet.
+ // We need more input to continue.
mCallback->InputExhausted();
- }
- MOZ_ASSERT(mQueuedSamples);
- mQueuedSamples--;
-
- if (!aImage) {
- // Image was dropped by decoder.
return NS_OK;
}
bool useNullSample = false;
if (mSeekTargetThreshold.isSome()) {
if ((aFrameRef.composition_timestamp + aFrameRef.duration) < mSeekTargetThreshold.ref()) {
useNullSample = true;
} else {
@@ -405,19 +384,20 @@ AppleVTDecoder::OutputFrame(CVPixelBuffe
mCallback->Error(MediaDataDecoderError::FATAL_ERROR);
return NS_ERROR_FAILURE;
}
// Frames come out in DTS order but we need to output them
// in composition order.
MonitorAutoLock mon(mMonitor);
mReorderQueue.Push(data);
- while (mReorderQueue.Length() > mMaxRefFrames) {
+ if (mReorderQueue.Length() > mMaxRefFrames) {
mCallback->Output(mReorderQueue.Pop().get());
}
+ mCallback->InputExhausted();
LOG("%llu decoded frames queued",
static_cast<unsigned long long>(mReorderQueue.Length()));
return NS_OK;
}
nsresult
AppleVTDecoder::WaitForAsynchronousFrames()
@@ -475,18 +455,16 @@ AppleVTDecoder::DoDecode(MediaRawData* a
}
CMSampleTimingInfo timestamp = TimingInfoFromSample(aSample);
rv = CMSampleBufferCreate(kCFAllocatorDefault, block, true, 0, 0, mFormat, 1, 1, ×tamp, 0, NULL, sample.receive());
if (rv != noErr) {
NS_ERROR("Couldn't create CMSampleBuffer");
return NS_ERROR_FAILURE;
}
- mQueuedSamples++;
-
VTDecodeFrameFlags decodeFlags =
kVTDecodeFrame_EnableAsynchronousDecompression;
rv = VTDecompressionSessionDecodeFrame(mSession,
sample,
decodeFlags,
CreateAppleFrameRef(aSample),
&infoFlags);
if (rv != noErr && !(infoFlags & kVTDecodeInfo_FrameDropped)) {