Bug 1243538: P1. Make MediaInfo::mImage an nsIntSize again and introduce a mImageRect member. r?mattwoodrow draft
author Jean-Yves Avenard <jyavenard@mozilla.com>
Fri, 15 Apr 2016 16:10:24 +1000
changeset 352917 d373d7ba54b3c2017e6cf08287d080a01d5aae12
parent 352861 67ac40fb8f680ea5e03805552187ba1b5e8392a1
child 352918 b44b7873f89dd23ab153a71d3027eaccc4e49738
push id 15841
push user bmo:jyavenard@mozilla.com
push date Tue, 19 Apr 2016 00:09:30 +0000
reviewers mattwoodrow
bugs 1243538
milestone 48.0a1
Bug 1243538: P1. Make MediaInfo::mImage an nsIntSize again and introduce a mImageRect member. r?mattwoodrow MozReview-Commit-ID: 3iVCA1b7bHc
dom/media/MediaInfo.h
dom/media/fmp4/MP4Decoder.cpp
dom/media/platforms/agnostic/VPXDecoder.cpp
dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h
dom/media/platforms/omx/GonkOmxPlatformLayer.cpp
dom/media/platforms/omx/OmxDataDecoder.cpp
dom/media/platforms/wmf/WMFVideoMFTManager.cpp
dom/media/webm/WebMDemuxer.cpp
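
At a glance, this patch splits the old dual-purpose mImage field into two members. A rough before/after sketch of the affected VideoInfo fields, paraphrased from the MediaInfo.h hunk below:

    // Before: one rect carried both the decoded frame size and the visible area.
    nsIntRect mImage;      // visible area of the decoded video's image

    // After: size and crop are distinct; the crop is private behind accessors.
    nsIntSize mImage;      // size of the decoded video's image
    nsIntRect mImageRect;  // visible (cropped) region; negative w/h = no crop
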
--- a/dom/media/MediaInfo.h
+++ b/dom/media/MediaInfo.h
@@ -179,29 +179,31 @@ public:
   {
   }
 
   VideoInfo(int32_t aWidth, int32_t aHeight)
     : TrackInfo(kVideoTrack, NS_LITERAL_STRING("2"), NS_LITERAL_STRING("main"),
                 EmptyString(), EmptyString(), true, 2)
     , mDisplay(nsIntSize(aWidth, aHeight))
     , mStereoMode(StereoMode::MONO)
-    , mImage(nsIntRect(0, 0, aWidth, aHeight))
+    , mImage(nsIntSize(aWidth, aHeight))
     , mCodecSpecificConfig(new MediaByteBuffer)
     , mExtraData(new MediaByteBuffer)
+    , mImageRect(nsIntRect(0, 0, aWidth, aHeight))
   {
   }
 
   VideoInfo(const VideoInfo& aOther)
     : TrackInfo(aOther)
     , mDisplay(aOther.mDisplay)
     , mStereoMode(aOther.mStereoMode)
     , mImage(aOther.mImage)
     , mCodecSpecificConfig(aOther.mCodecSpecificConfig)
     , mExtraData(aOther.mExtraData)
+    , mImageRect(aOther.mImageRect)
   {
   }
 
   bool IsValid() const override
   {
     return mDisplay.width > 0 && mDisplay.height > 0;
   }
 
@@ -215,27 +217,46 @@ public:
     return this;
   }
 
   UniquePtr<TrackInfo> Clone() const override
   {
     return MakeUnique<VideoInfo>(*this);
   }
 
+  nsIntRect ImageRect() const
+  {
+    if (mImageRect.width < 0 || mImageRect.height < 0) {
+      return nsIntRect(0, 0, mImage.width, mImage.height);
+    }
+    return mImageRect;
+  }
+
+  void SetImageRect(const nsIntRect& aRect)
+  {
+    mImageRect = aRect;
+  }
+
   // Size in pixels at which the video is rendered. This is after it has
   // been scaled by its aspect ratio.
   nsIntSize mDisplay;
 
   // Indicates the frame layout for single track stereo videos.
   StereoMode mStereoMode;
 
-  // Visible area of the decoded video's image.
-  nsIntRect mImage;
+  // Size of the decoded video's image.
+  nsIntSize mImage;
+
   RefPtr<MediaByteBuffer> mCodecSpecificConfig;
   RefPtr<MediaByteBuffer> mExtraData;
+
+private:
+  // Visible region of mImage after cropping; currently only the WebM
+  // container sets this. A negative width or height indicates no cropping.
+  nsIntRect mImageRect;
 };
 
 class AudioInfo : public TrackInfo {
 public:
   AudioInfo()
     : TrackInfo(kAudioTrack, NS_LITERAL_STRING("1"), NS_LITERAL_STRING("main"),
                 EmptyString(), EmptyString(), true, 1)
     , mRate(0)
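
A short usage sketch of the new accessors (illustrative only, not compilable outside the tree; it assumes a default-constructed VideoInfo leaves mImageRect with a negative width or height, which ImageRect() treats as "no crop"):

    VideoInfo info(1920, 1080);
    // The two-argument constructor initializes mImageRect to the full frame,
    // so ImageRect() returns it unchanged.
    nsIntRect r1 = info.ImageRect();                 // (0, 0, 1920, 1080)

    // A demuxer that parsed cropping information (today, only WebM) narrows it.
    info.SetImageRect(nsIntRect(0, 0, 1916, 1080));
    nsIntRect r2 = info.ImageRect();                 // (0, 0, 1916, 1080)

    // Were a negative width or height stored, ImageRect() would fall back to
    // nsIntRect(0, 0, mImage.width, mImage.height), i.e. the uncropped frame.
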
--- a/dom/media/fmp4/MP4Decoder.cpp
+++ b/dom/media/fmp4/MP4Decoder.cpp
@@ -216,18 +216,17 @@ static already_AddRefed<MediaDataDecoder
 CreateTestH264Decoder(layers::LayersBackend aBackend,
                       VideoInfo& aConfig,
                       FlushableTaskQueue* aTaskQueue)
 {
   aConfig.mMimeType = "video/avc";
   aConfig.mId = 1;
   aConfig.mDuration = 40000;
   aConfig.mMediaTime = 0;
-  aConfig.mDisplay = nsIntSize(640, 360);
-  aConfig.mImage = nsIntRect(0, 0, 640, 360);
+  aConfig.mImage = aConfig.mDisplay = nsIntSize(640, 360);
   aConfig.mExtraData = new MediaByteBuffer();
   aConfig.mExtraData->AppendElements(sTestH264ExtraData,
                                      MOZ_ARRAY_LENGTH(sTestH264ExtraData));
 
   PDMFactory::Init();
 
   RefPtr<PDMFactory> platform = new PDMFactory();
   RefPtr<MediaDataDecoder> decoder(
--- a/dom/media/platforms/agnostic/VPXDecoder.cpp
+++ b/dom/media/platforms/agnostic/VPXDecoder.cpp
@@ -146,27 +146,25 @@ VPXDecoder::DoDecodeFrame(MediaRawData* 
 
       b.mPlanes[2].mHeight = img->d_h;
       b.mPlanes[2].mWidth = img->d_w;
     } else {
       LOG("VPX Unknown image format");
       return -1;
     }
 
-    VideoInfo info;
-    info.mDisplay = mInfo.mDisplay;
-    RefPtr<VideoData> v = VideoData::Create(info,
-                                              mImageContainer,
-                                              aSample->mOffset,
-                                              aSample->mTime,
-                                              aSample->mDuration,
-                                              b,
-                                              aSample->mKeyframe,
-                                              aSample->mTimecode,
-                                              mInfo.mImage);
+    RefPtr<VideoData> v = VideoData::Create(mInfo,
+                                            mImageContainer,
+                                            aSample->mOffset,
+                                            aSample->mTime,
+                                            aSample->mDuration,
+                                            b,
+                                            aSample->mKeyframe,
+                                            aSample->mTimecode,
+                                            mInfo.ImageRect());
 
     if (!v) {
       LOG("Image allocation error source %ldx%ld display %ldx%ld picture %ldx%ld",
           img->d_w, img->d_h, mInfo.mDisplay.width, mInfo.mDisplay.height,
           mInfo.mImage.width, mInfo.mImage.height);
       return -1;
     }
     mCallback->Output(v);
--- a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
@@ -101,18 +101,17 @@ FFmpegVideoDecoder<LIBAV_VER>::PtsCorrec
 }
 
 FFmpegVideoDecoder<LIBAV_VER>::FFmpegVideoDecoder(FFmpegLibWrapper* aLib,
   FlushableTaskQueue* aTaskQueue, MediaDataDecoderCallback* aCallback,
   const VideoInfo& aConfig,
   ImageContainer* aImageContainer)
   : FFmpegDataDecoder(aLib, aTaskQueue, aCallback, GetCodecId(aConfig.mMimeType))
   , mImageContainer(aImageContainer)
-  , mDisplay(aConfig.mDisplay)
-  , mImage(aConfig.mImage)
+  , mInfo(aConfig)
   , mCodecParser(nullptr)
 {
   MOZ_COUNT_CTOR(FFmpegVideoDecoder);
   // Use a new MediaByteBuffer as the object will be modified during initialization.
   mExtraData = new MediaByteBuffer;
   mExtraData->AppendElements(*aConfig.mExtraData);
 }
 
@@ -124,28 +123,28 @@ FFmpegVideoDecoder<LIBAV_VER>::Init()
   }
 
   return InitPromise::CreateAndResolve(TrackInfo::kVideoTrack, __func__);
 }
 
 void
 FFmpegVideoDecoder<LIBAV_VER>::InitCodecContext()
 {
-  mCodecContext->width = mImage.width;
-  mCodecContext->height = mImage.height;
+  mCodecContext->width = mInfo.mImage.width;
+  mCodecContext->height = mInfo.mImage.height;
 
  // We use the same logic as libvpx to determine the number of decode
  // threads, so that we behave the same way with ffmpeg as we would with
  // libvpx; diverging here has previously caused crashes (see bug 1236167).
   int decode_threads = 1;
-  if (mDisplay.width >= 2048) {
+  if (mInfo.mDisplay.width >= 2048) {
     decode_threads = 8;
-  } else if (mDisplay.width >= 1024) {
+  } else if (mInfo.mDisplay.width >= 1024) {
     decode_threads = 4;
-  } else if (mDisplay.width >= 320) {
+  } else if (mInfo.mDisplay.width >= 320) {
     decode_threads = 2;
   }
 
   decode_threads = std::min(decode_threads, PR_GetNumberOfProcessors());
   mCodecContext->thread_count = decode_threads;
   if (decode_threads > 1) {
     mCodecContext->thread_type = FF_THREAD_SLICE | FF_THREAD_FRAME;
   }
@@ -267,19 +266,16 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecodeF
       NS_WARNING("Unable to retrieve duration from map");
       duration = aSample->mDuration;
      // dts are probably incorrectly reported; so clear the map as we're
       // unlikely to find them in the future anyway. This also guards
       // against the map becoming extremely big.
       mDurationMap.Clear();
     }
 
-    VideoInfo info;
-    info.mDisplay = mDisplay;
-
     VideoData::YCbCrBuffer b;
     b.mPlanes[0].mData = mFrame->data[0];
     b.mPlanes[1].mData = mFrame->data[1];
     b.mPlanes[2].mData = mFrame->data[2];
 
     b.mPlanes[0].mStride = mFrame->linesize[0];
     b.mPlanes[1].mStride = mFrame->linesize[1];
     b.mPlanes[2].mStride = mFrame->linesize[2];
@@ -293,25 +289,25 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecodeF
     if (mCodecContext->pix_fmt == AV_PIX_FMT_YUV444P) {
       b.mPlanes[1].mWidth = b.mPlanes[2].mWidth = mFrame->width;
       b.mPlanes[1].mHeight = b.mPlanes[2].mHeight = mFrame->height;
     } else {
       b.mPlanes[1].mWidth = b.mPlanes[2].mWidth = (mFrame->width + 1) >> 1;
       b.mPlanes[1].mHeight = b.mPlanes[2].mHeight = (mFrame->height + 1) >> 1;
     }
 
-    RefPtr<VideoData> v = VideoData::Create(info,
-                                              mImageContainer,
-                                              aSample->mOffset,
-                                              pts,
-                                              duration,
-                                              b,
-                                              !!mFrame->key_frame,
-                                              -1,
-                                              mImage);
+    RefPtr<VideoData> v = VideoData::Create(mInfo,
+                                            mImageContainer,
+                                            aSample->mOffset,
+                                            pts,
+                                            duration,
+                                            b,
+                                            !!mFrame->key_frame,
+                                            -1,
+                                            mInfo.ImageRect());
     if (!v) {
       NS_WARNING("image allocation error.");
       mCallback->Error();
       return DecodeResult::DECODE_ERROR;
     }
     mCallback->Output(v);
     return DecodeResult::DECODE_FRAME;
   }
--- a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h
@@ -66,18 +66,17 @@ private:
    * Currently it only supports Planar YUV420, which appears to be the only
    * non-hardware accelerated image format that FFmpeg's H264 decoder is
    * capable of outputting.
    */
   int AllocateYUV420PVideoBuffer(AVCodecContext* aCodecContext,
                                  AVFrame* aFrame);
 
   RefPtr<ImageContainer> mImageContainer;
-  nsIntSize mDisplay;
-  nsIntRect mImage;
+  VideoInfo mInfo;
 
   // Parser used for VP8 and VP9 decoding.
   AVCodecParserContext* mCodecParser;
 
   class PtsCorrectionContext {
   public:
     PtsCorrectionContext();
     int64_t GuessCorrectPts(int64_t aPts, int64_t aDts);
--- a/dom/media/platforms/omx/GonkOmxPlatformLayer.cpp
+++ b/dom/media/platforms/omx/GonkOmxPlatformLayer.cpp
@@ -303,28 +303,26 @@ GonkBufferData::GetPlatformMediaData()
     return nullptr;
   }
 
   if (!mTextureClientRecycleHandler) {
    // There is no GraphicBuffer; it should fall back to normal YUV420 VideoData.
     return nullptr;
   }
 
-  VideoInfo info;
-  info.mDisplay = mGonkPlatformLayer->GetTrackInfo()->GetAsVideoInfo()->mDisplay;
-  info.mImage = mGonkPlatformLayer->GetTrackInfo()->GetAsVideoInfo()->mImage;
+  VideoInfo info(*mGonkPlatformLayer->GetTrackInfo()->GetAsVideoInfo());
   RefPtr<VideoData> data = VideoData::Create(info,
                                              mGonkPlatformLayer->GetImageContainer(),
                                              0,
                                              mBuffer->nTimeStamp,
                                              1,
                                              mTextureClientRecycleHandler->GetTextureClient(),
                                              false,
                                              0,
-                                             info.mImage);
+                                             info.ImageRect());
   LOG("%p, disp width %d, height %d, pic width %d, height %d, time %ld",
       this, info.mDisplay.width, info.mDisplay.height,
       info.mImage.width, info.mImage.height, mBuffer->nTimeStamp);
 
   // Get TextureClient Promise here to wait for resolved.
   RefPtr<GonkBufferData> self(this);
   mTextureClientRecycleHandler->WaitforRecycle()
     ->Then(mGonkPlatformLayer->GetTaskQueue(), __func__,
--- a/dom/media/platforms/omx/OmxDataDecoder.cpp
+++ b/dom/media/platforms/omx/OmxDataDecoder.cpp
@@ -1022,28 +1022,26 @@ MediaDataHelper::CreateYUV420VideoData(B
 
   b.mPlanes[2].mData = yuv420p_v;
  b.mPlanes[2].mWidth = (width + 1) / 2;
   b.mPlanes[2].mHeight = (height + 1) / 2;
   b.mPlanes[2].mStride = (stride + 1) / 2;
   b.mPlanes[2].mOffset = 0;
   b.mPlanes[2].mSkip = 0;
 
-  VideoInfo info;
-  info.mDisplay = mTrackInfo->GetAsVideoInfo()->mDisplay;
-  info.mImage = mTrackInfo->GetAsVideoInfo()->mImage;
+  VideoInfo info(*mTrackInfo->GetAsVideoInfo());
   RefPtr<VideoData> data = VideoData::Create(info,
                                              mImageContainer,
                                              0, // Filled later by caller.
                                              0, // Filled later by caller.
                                              1, // We don't know the duration.
                                              b,
                                              0, // Filled later by caller.
                                              -1,
-                                             info.mImage);
+                                             info.ImageRect());
 
   LOG("YUV420 VideoData: disp width %d, height %d, pic width %d, height %d, time %ld",
       info.mDisplay.width, info.mDisplay.height, info.mImage.width,
       info.mImage.height, aBufferData->mBuffer->nTimeStamp);
 
   return data.forget();
 }
 
--- a/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
+++ b/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
@@ -399,22 +399,20 @@ WMFVideoMFTManager::ConfigureVideoFrameG
   // is a planar format.
   GUID videoFormat;
   hr = mediaType->GetGUID(MF_MT_SUBTYPE, &videoFormat);
   NS_ENSURE_TRUE(videoFormat == MFVideoFormat_NV12 || !mUseHwAccel, E_FAIL);
   NS_ENSURE_TRUE(videoFormat == MFVideoFormat_YV12 || mUseHwAccel, E_FAIL);
 
   UINT32 width = mVideoInfo.mImage.width;
   UINT32 height = mVideoInfo.mImage.height;
-  nsIntRect pictureRegion = mVideoInfo.mImage;
+  nsIntRect pictureRegion = mVideoInfo.ImageRect();
   // Calculate and validate the picture region and frame dimensions after
   // scaling by the pixel aspect ratio.
-  nsIntSize frameSize = nsIntSize(width, height);
-  nsIntSize displaySize = nsIntSize(mVideoInfo.mDisplay.width, mVideoInfo.mDisplay.height);
-  if (!IsValidVideoRegion(frameSize, pictureRegion, displaySize)) {
+  if (!IsValidVideoRegion(mVideoInfo.mImage, pictureRegion, mVideoInfo.mDisplay)) {
     // Video track's frame sizes will overflow. Ignore the video track.
     return E_FAIL;
   }
 
   if (mDXVA2Manager) {
     hr = mDXVA2Manager->ConfigureForSize(width, height);
     NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
   }
@@ -513,29 +511,29 @@ WMFVideoMFTManager::CreateBasicVideoFram
   NS_ENSURE_TRUE(duration.IsValid(), E_FAIL);
 
   RefPtr<layers::PlanarYCbCrImage> image =
     new IMFYCbCrImage(buffer, twoDBuffer);
 
   VideoData::SetVideoDataToImage(image,
                                  mVideoInfo,
                                  b,
-                                 mVideoInfo.mImage,
+                                 mVideoInfo.ImageRect(),
                                  false);
 
   RefPtr<VideoData> v =
     VideoData::CreateFromImage(mVideoInfo,
                                mImageContainer,
                                aStreamOffset,
                                pts.ToMicroseconds(),
                                duration.ToMicroseconds(),
                                image.forget(),
                                false,
                                -1,
-                               mVideoInfo.mImage);
+                               mVideoInfo.ImageRect());
 
   v.forget(aOutVideoData);
   return S_OK;
 }
 
 HRESULT
 WMFVideoMFTManager::CreateD3DVideoFrame(IMFSample* aSample,
                                         int64_t aStreamOffset,
@@ -546,17 +544,17 @@ WMFVideoMFTManager::CreateD3DVideoFrame(
   NS_ENSURE_TRUE(mDXVA2Manager, E_ABORT);
   NS_ENSURE_TRUE(mUseHwAccel, E_ABORT);
 
   *aOutVideoData = nullptr;
   HRESULT hr;
 
   RefPtr<Image> image;
   hr = mDXVA2Manager->CopyToImage(aSample,
-                                  mVideoInfo.mImage,
+                                  mVideoInfo.ImageRect(),
                                   mImageContainer,
                                   getter_AddRefs(image));
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
   NS_ENSURE_TRUE(image, E_FAIL);
 
   media::TimeUnit pts = GetSampleTime(aSample);
   NS_ENSURE_TRUE(pts.IsValid(), E_FAIL);
   media::TimeUnit duration = GetSampleDuration(aSample);
@@ -564,17 +562,17 @@ WMFVideoMFTManager::CreateD3DVideoFrame(
   RefPtr<VideoData> v = VideoData::CreateFromImage(mVideoInfo,
                                                      mImageContainer,
                                                      aStreamOffset,
                                                      pts.ToMicroseconds(),
                                                      duration.ToMicroseconds(),
                                                      image.forget(),
                                                      false,
                                                      -1,
-                                                     mVideoInfo.mImage);
+                                                     mVideoInfo.ImageRect());
 
   NS_ENSURE_TRUE(v, E_FAIL);
   v.forget(aOutVideoData);
 
   return S_OK;
 }
 
 // Blocks until a decoded sample is produced by the decoder.
--- a/dom/media/webm/WebMDemuxer.cpp
+++ b/dom/media/webm/WebMDemuxer.cpp
@@ -335,17 +335,18 @@ WebMDemuxer::ReadMetadata()
         // Video track's frame sizes will overflow. Ignore the video track.
         continue;
       }
 
       mVideoTrack = track;
       mHasVideo = true;
 
       mInfo.mVideo.mDisplay = displaySize;
-      mInfo.mVideo.mImage = pictureRect;
+      mInfo.mVideo.mImage = frameSize;
+      mInfo.mVideo.SetImageRect(pictureRect);
 
       switch (params.stereo_mode) {
         case NESTEGG_VIDEO_MONO:
           mInfo.mVideo.mStereoMode = StereoMode::MONO;
           break;
         case NESTEGG_VIDEO_STEREO_LEFT_RIGHT:
           mInfo.mVideo.mStereoMode = StereoMode::LEFT_RIGHT;
           break;
@@ -579,18 +580,18 @@ WebMDemuxer::GetNextPacket(TrackInfo::Tr
         // We only look for resolution changes on keyframes for both VP8 and
         // VP9. Other resolution changes are invalid.
         if (mLastSeenFrameWidth.isSome() && mLastSeenFrameHeight.isSome() &&
             (si.w != mLastSeenFrameWidth.value() ||
              si.h != mLastSeenFrameHeight.value())) {
           // We ignore cropping information on resizes during streams.
           // Cropping alone is rare, and we do not consider cropping to
          // still be valid after a resolution change.
-          mInfo.mVideo.mDisplay = nsIntSize(si.w, si.h);
-          mInfo.mVideo.mImage = nsIntRect(0, 0, si.w, si.h);
+          mInfo.mVideo.mImage = mInfo.mVideo.mDisplay = nsIntSize(si.w, si.h);
+          mInfo.mVideo.SetImageRect(nsIntRect(0, 0, si.w, si.h));
           mSharedVideoTrackInfo = new SharedTrackInfo(mInfo.mVideo, ++sStreamSourceID);
         }
         mLastSeenFrameWidth = Some(si.w);
         mLastSeenFrameHeight = Some(si.h);
       }
     }
 
     WEBM_DEBUG("push sample tstamp: %ld next_tstamp: %ld length: %ld kf: %d",
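
Context for the ReadMetadata() hunk above: frameSize and pictureRect are computed from the nestegg video parameters a few lines earlier (not shown). A sketch of that derivation, assuming nestegg's usual crop_* fields; the local names here are hypothetical:

    nsIntSize frameSize(params.width, params.height);
    // The picture rect is the coded frame minus the crop borders; frameSize
    // becomes mImage and pictureRect is stored via SetImageRect().
    nsIntRect pictureRect(params.crop_left,
                          params.crop_top,
                          params.width - (params.crop_left + params.crop_right),
                          params.height - (params.crop_top + params.crop_bottom));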