Bug 1294154: Handle case where WMF is unable to determine time and duration. r?mattwoodrow
The WMF decoder doesn't properly handle the case where only a single frame is given to it to decode.
When draining, the output is a correctly decoded frame, but with a time of 0 and a duration set to 1/30th of a second.
This patch is a workaround: when draining after a single input sample, the known time and duration of that last input sample are restored on the decoded output.
MozReview-Commit-ID: JbjgNmPXKIM
--- a/dom/media/platforms/wmf/WMFMediaDataDecoder.h
+++ b/dom/media/platforms/wmf/WMFMediaDataDecoder.h
@@ -32,22 +32,23 @@ public:
// or until no more is able to be produced.
// Returns S_OK on success, or MF_E_TRANSFORM_NEED_MORE_INPUT if there's not
// enough data to produce more output. If this returns a failure code other
// than MF_E_TRANSFORM_NEED_MORE_INPUT, an error will be reported to the
// MP4Reader.
virtual HRESULT Output(int64_t aStreamOffset,
RefPtr<MediaData>& aOutput) = 0;
- void Flush() {
+ virtual void Flush()
+ {
mDecoder->Flush();
mSeekTargetThreshold.reset();
}
- void Drain()
+ virtual void Drain()
{
if (FAILED(mDecoder->SendMFTMessage(MFT_MESSAGE_COMMAND_DRAIN, 0))) {
NS_WARNING("Failed to send DRAIN command to MFT");
}
}
// Destroys all resources.
virtual void Shutdown() = 0;
--- a/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
+++ b/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
@@ -491,16 +491,18 @@ WMFVideoMFTManager::Input(MediaRawData*
HRESULT hr = mDecoder->CreateInputSample(aSample->Data(),
uint32_t(aSample->Size()),
aSample->mTime,
&mLastInput);
NS_ENSURE_TRUE(SUCCEEDED(hr) && mLastInput != nullptr, hr);
mLastDuration = aSample->mDuration;
+ mLastTime = aSample->mTime;
+ mSamplesCount++;
// Forward sample data to the decoder.
return mDecoder->Input(mLastInput);
}
class SupportsConfigEvent : public Runnable {
public:
SupportsConfigEvent(DXVA2Manager* aDXVA2Manager, IMFMediaType* aMediaType, float aFramerate)
@@ -798,16 +800,25 @@ WMFVideoMFTManager::CreateD3DVideoFrame(
HRESULT
WMFVideoMFTManager::Output(int64_t aStreamOffset,
RefPtr<MediaData>& aOutData)
{
RefPtr<IMFSample> sample;
HRESULT hr;
aOutData = nullptr;
int typeChangeCount = 0;
+ bool wasDraining = mDraining;
+ int64_t sampleCount = mSamplesCount;
+ if (wasDraining) {
+ mSamplesCount = 0;
+ mDraining = false;
+ }
+
+ media::TimeUnit pts;
+ media::TimeUnit duration;
// Loop until we decode a sample, or an unexpected error that we can't
// handle occurs.
while (true) {
hr = mDecoder->Output(&sample);
if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
return MF_E_TRANSFORM_NEED_MORE_INPUT;
}
@@ -839,22 +850,31 @@ WMFVideoMFTManager::Output(int64_t aStre
++mNullOutputCount;
if (mNullOutputCount > 250) {
LOG("Excessive Video MFTDecoder returning success but no output; giving up");
mGotExcessiveNullOutput = true;
return E_FAIL;
}
continue;
}
+ pts = GetSampleTime(sample);
+ duration = GetSampleDuration(sample);
+ if (!pts.IsValid() || !duration.IsValid()) {
+ return E_FAIL;
+ }
+ if (wasDraining && sampleCount == 1 && pts == media::TimeUnit()) {
+ // WMF is unable to calculate a duration if only a single sample
+ // was parsed. Additionally, the pts always comes out at 0 under those
+ // circumstances.
+ // Seeing that we've only fed the decoder a single frame, the pts
+ // and duration are known, it's of the last sample.
+ pts = media::TimeUnit::FromMicroseconds(mLastTime);
+ duration = media::TimeUnit::FromMicroseconds(mLastDuration);
+ }
if (mSeekTargetThreshold.isSome()) {
- media::TimeUnit pts = GetSampleTime(sample);
- media::TimeUnit duration = GetSampleDuration(sample);
- if (!pts.IsValid() || !duration.IsValid()) {
- return E_FAIL;
- }
if ((pts + duration) < mSeekTargetThreshold.ref()) {
LOG("Dropping video frame which pts is smaller than seek target.");
// It is necessary to clear the pointer to release the previous output
// buffer.
sample = nullptr;
continue;
}
mSeekTargetThreshold.reset();
@@ -873,16 +893,19 @@ WMFVideoMFTManager::Output(int64_t aStre
hr = CreateBasicVideoFrame(sample, aStreamOffset, getter_AddRefs(frame));
}
// Frame should be non null only when we succeeded.
MOZ_ASSERT((frame != nullptr) == SUCCEEDED(hr));
NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
NS_ENSURE_TRUE(frame, E_FAIL);
aOutData = frame;
+ // Set the potentially corrected pts and duration.
+ aOutData->mTime = pts.ToMicroseconds();
+ aOutData->mDuration = duration.ToMicroseconds();
if (mNullOutputCount) {
mGotValidOutputAfterNullOutput = true;
}
return S_OK;
}
--- a/dom/media/platforms/wmf/WMFVideoMFTManager.h
+++ b/dom/media/platforms/wmf/WMFVideoMFTManager.h
@@ -44,16 +44,29 @@ public:
const char* GetDescriptionName() const override
{
nsCString failureReason;
return IsHardwareAccelerated(failureReason)
? "wmf hardware video decoder" : "wmf software video decoder";
}
+ void Flush() override
+ {
+ MFTManager::Flush();
+ mDraining = false;
+ mSamplesCount = 0;
+ }
+
+ void Drain() override
+ {
+ MFTManager::Drain();
+ mDraining = true;
+ }
+
private:
bool InitializeDXVA(bool aForceD3D9);
bool InitInternal(bool aForceD3D9);
HRESULT ConfigureVideoFrameGeometry();
@@ -74,16 +87,19 @@ private:
uint32_t mVideoStride;
nsIntSize mImageSize;
RefPtr<layers::ImageContainer> mImageContainer;
nsAutoPtr<DXVA2Manager> mDXVA2Manager;
RefPtr<IMFSample> mLastInput;
float mLastDuration;
+ int64_t mLastTime = 0;
+ bool mDraining = false;
+ int64_t mSamplesCount = 0;
bool mDXVAEnabled;
const layers::LayersBackend mLayersBackend;
bool mUseHwAccel;
nsCString mDXVAFailureReason;
enum StreamType {