Bug 1355048: P7. Use Image's dimensions when available. r?jesup draft
author	Jean-Yves Avenard <jyavenard@mozilla.com>
Sat, 01 Jul 2017 01:00:19 +0200
changeset 609797 4423d1aec2895b94933ad9ab7ac49ce91303cc02
parent 609796 6852d6c3e92e76687f9363db1272ff69207e71a6
child 609798 7d339dcf86c1a8ec504163d2300cfa6adae3d444
push id	68676
push user	bmo:jyavenard@mozilla.com
push date	Mon, 17 Jul 2017 13:51:59 +0000
reviewers	jesup
bugs	1355048
milestone	56.0a1
Bug 1355048: P7. Use Image's dimensions when available. r?jesup

Don't rely solely on the dimensions retrieved via FrameSizeChange: both
the webrtc::VideoFrameBuffer object and the layers::Image know their own
dimensions. We still need to keep the FrameSizeChange mechanism so that
the attached media element knows its size before a frame is ready to be
displayed; we will revisit this problem later.

Additionally, don't assume that the frame's stride is the same as the
frame's width. That happens to be true with the software decoders
currently in use, but it is not the norm.

MozReview-Commit-ID: BTY8lImoUbc
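To illustrate the stride point, here is a minimal sketch (not part of the
patch) of stride-aware plane access. CopyLumaPlane is a hypothetical helper
and the include path is an assumption; DataY()/StrideY()/width()/height()
are the webrtc::VideoFrameBuffer accessors used in the diff below.

    #include <cstdint>
    #include <cstring>

    #include "webrtc/api/video/video_frame_buffer.h"  // assumed include path

    // Copy the visible luma pixels row by row. A decoder may pad each row
    // for alignment, so StrideY() can exceed width(); indexing the plane
    // with the width alone would misread every row after the first.
    void CopyLumaPlane(const webrtc::VideoFrameBuffer& buffer, uint8_t* dst)
    {
      const uint8_t* src = buffer.DataY();
      for (int row = 0; row < buffer.height(); ++row) {
        std::memcpy(dst, src, buffer.width());  // only the visible pixels
        src += buffer.StrideY();                // advance by stride, not width
        dst += buffer.width();                  // destination is tightly packed
      }
    }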
media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
--- a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
@@ -2122,17 +2122,18 @@ void MediaPipelineReceiveAudio::DetachMe
 
     if (stream_->GraphImpl()) {
       stream_->RemoveListener(listener_);
     }
     stream_ = nullptr;
   }
 }
 
-nsresult MediaPipelineReceiveAudio::Init() {
+nsresult MediaPipelineReceiveAudio::Init()
+{
   ASSERT_ON_THREAD(main_thread_);
   MOZ_MTLOG(ML_DEBUG, __FUNCTION__);
 
   description_ = pc_ + "| Receive audio[";
   description_ += track_id_;
   description_ += "]";
 
   listener_->AddSelf();
@@ -2166,18 +2167,18 @@ public:
     RefPtr<Image> image = image_;
     StreamTime delta = desired_time - played_ticks_;
 
     // Don't append if we've already provided a frame that supposedly
     // goes past the current aDesiredTime. Doing so means a negative
     // delta and thus messes up handling of the graph.
     if (delta > 0) {
       VideoSegment segment;
-      segment.AppendFrame(image.forget(), delta, IntSize(width_, height_),
-                          principal_handle_);
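+      // Prefer the dimensions the Image itself reports; fall back to the
+      // size recorded via FrameSizeChange when no image is available yet.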
+      IntSize size = image ? image->GetSize() : IntSize(width_, height_);
+      segment.AppendFrame(image.forget(), delta, size, principal_handle_);
       // Handle track not actually added yet or removed/finished
       if (source_->AppendToTrack(track_id_, &segment)) {
         played_ticks_ = desired_time;
       } else {
         MOZ_MTLOG(ML_ERROR, "AppendToTrack failed");
         return;
       }
     }
@@ -2192,51 +2193,34 @@ public:
     width_ = width;
     height_ = height;
   }
 
   void RenderVideoFrame(const webrtc::VideoFrameBuffer& buffer,
                         uint32_t time_stamp,
                         int64_t render_time)
   {
-    RenderVideoFrame(buffer.DataY(),
-                     buffer.StrideY(),
-                     buffer.DataU(),
-                     buffer.StrideU(),
-                     buffer.DataV(),
-                     buffer.StrideV(),
-                     time_stamp, render_time);
-  }
-
-  void RenderVideoFrame(const uint8_t* buffer_y,
-                        uint32_t y_stride,
-                        const uint8_t* buffer_u,
-                        uint32_t u_stride,
-                        const uint8_t* buffer_v,
-                        uint32_t v_stride,
-                        uint32_t time_stamp,
-                        int64_t render_time)
-  {
-    MOZ_ASSERT(buffer_y);
+    MOZ_ASSERT(buffer.DataY());
     // Create a video frame using |buffer|.
     RefPtr<PlanarYCbCrImage> yuvImage =
       image_container_->CreatePlanarYCbCrImage();
 
     PlanarYCbCrData yuvData;
-    yuvData.mYChannel = const_cast<uint8_t*>(buffer_y);
-    yuvData.mYSize = IntSize(y_stride, height_);
-    yuvData.mYStride = y_stride;
-    MOZ_ASSERT(u_stride == v_stride);
-    yuvData.mCbCrStride = u_stride;
-    yuvData.mCbChannel = const_cast<uint8_t*>(buffer_u);
-    yuvData.mCrChannel = const_cast<uint8_t*>(buffer_v);
-    yuvData.mCbCrSize = IntSize(yuvData.mCbCrStride, (height_ + 1) >> 1);
+    yuvData.mYChannel = const_cast<uint8_t*>(buffer.DataY());
+    yuvData.mYSize = IntSize(buffer.width(), buffer.height());
+    yuvData.mYStride = buffer.StrideY();
+    MOZ_ASSERT(buffer.StrideU() == buffer.StrideV());
+    yuvData.mCbCrStride = buffer.StrideU();
+    yuvData.mCbChannel = const_cast<uint8_t*>(buffer.DataU());
+    yuvData.mCrChannel = const_cast<uint8_t*>(buffer.DataV());
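+    // I420 chroma planes are half the luma size in each dimension; round
+    // up so odd widths/heights still cover the full picture.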
+    yuvData.mCbCrSize =
+      IntSize((buffer.width() + 1) >> 1, (buffer.height() + 1) >> 1);
     yuvData.mPicX = 0;
     yuvData.mPicY = 0;
-    yuvData.mPicSize = IntSize(width_, height_);
+    yuvData.mPicSize = IntSize(buffer.width(), buffer.height());
     yuvData.mStereoMode = StereoMode::MONO;
 
     if (!yuvImage->CopyData(yuvData)) {
       MOZ_ASSERT(false);
       return;
     }
 
     MutexAutoLock lock(mutex_);