Bug 1355048: P7. Use Image's dimensions when available. r?jesup
Do not rely solely on the dimensions retrieved via FrameSizeChange; both the webrtc::VideoFrameBuffer object and the layers::Image know their own dimensions.
We still need to keep the FrameSizeChange mechanism so that the attached media element knows its size before a frame is ready to be displayed.
We will revisit this problem later.
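For illustration only (not part of this patch; ChooseFrameSize is a hypothetical helper, not an existing API): the receive path can prefer the frame's intrinsic size and fall back to the cached FrameSizeChange dimensions while no frame has arrived yet, roughly:

  // Sketch, assuming Gecko's layers::Image and gfx::IntSize.
  gfx::IntSize ChooseFrameSize(layers::Image* aImage,
                               int32_t aCachedWidth, int32_t aCachedHeight)
  {
    // Prefer the decoded frame's own size; otherwise fall back to the
    // dimensions last reported through FrameSizeChange.
    return aImage ? aImage->GetSize()
                  : gfx::IntSize(aCachedWidth, aCachedHeight);
  }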
Additionally, don't assume that a frame's stride equals its width. That happens to be true for the software decoders currently in use, but it is not the norm.
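To make the stride point concrete, here is a minimal sketch (not part of this patch; CopyPlane is a hypothetical helper) of walking an I420 plane while honouring its stride, since each row may carry padding beyond the visible width:

  #include <cstdint>
  #include <cstring>

  // Copy `aWidth` bytes per row; the trailing `stride - width` bytes of
  // each source row are padding/alignment and must be skipped, not copied.
  static void CopyPlane(uint8_t* aDst, int aDstStride,
                        const uint8_t* aSrc, int aSrcStride,
                        int aWidth, int aHeight)
  {
    for (int row = 0; row < aHeight; ++row) {
      memcpy(aDst, aSrc, aWidth);
      aDst += aDstStride;
      aSrc += aSrcStride;
    }
  }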
MozReview-Commit-ID: BTY8lImoUbc
--- a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
@@ -2122,17 +2122,18 @@ void MediaPipelineReceiveAudio::DetachMe
if (stream_->GraphImpl()) {
stream_->RemoveListener(listener_);
}
stream_ = nullptr;
}
}
-nsresult MediaPipelineReceiveAudio::Init() {
+nsresult MediaPipelineReceiveAudio::Init()
+{
ASSERT_ON_THREAD(main_thread_);
MOZ_MTLOG(ML_DEBUG, __FUNCTION__);
description_ = pc_ + "| Receive audio[";
description_ += track_id_;
description_ += "]";
listener_->AddSelf();
@@ -2166,18 +2167,18 @@ public:
RefPtr<Image> image = image_;
StreamTime delta = desired_time - played_ticks_;
// Don't append if we've already provided a frame that supposedly
// goes past the current aDesiredTime Doing so means a negative
// delta and thus messes up handling of the graph
if (delta > 0) {
VideoSegment segment;
- segment.AppendFrame(image.forget(), delta, IntSize(width_, height_),
- principal_handle_);
+ IntSize size = image ? image->GetSize() : IntSize(width_, height_);
+ segment.AppendFrame(image.forget(), delta, size, principal_handle_);
// Handle track not actually added yet or removed/finished
if (source_->AppendToTrack(track_id_, &segment)) {
played_ticks_ = desired_time;
} else {
MOZ_MTLOG(ML_ERROR, "AppendToTrack failed");
return;
}
}
@@ -2192,51 +2193,34 @@ public:
width_ = width;
height_ = height;
}
void RenderVideoFrame(const webrtc::VideoFrameBuffer& buffer,
uint32_t time_stamp,
int64_t render_time)
{
- RenderVideoFrame(buffer.DataY(),
- buffer.StrideY(),
- buffer.DataU(),
- buffer.StrideU(),
- buffer.DataV(),
- buffer.StrideV(),
- time_stamp, render_time);
- }
-
- void RenderVideoFrame(const uint8_t* buffer_y,
- uint32_t y_stride,
- const uint8_t* buffer_u,
- uint32_t u_stride,
- const uint8_t* buffer_v,
- uint32_t v_stride,
- uint32_t time_stamp,
- int64_t render_time)
- {
- MOZ_ASSERT(buffer_y);
+ MOZ_ASSERT(buffer.DataY());
// Create a video frame using |buffer|.
RefPtr<PlanarYCbCrImage> yuvImage =
image_container_->CreatePlanarYCbCrImage();
PlanarYCbCrData yuvData;
- yuvData.mYChannel = const_cast<uint8_t*>(buffer_y);
- yuvData.mYSize = IntSize(y_stride, height_);
- yuvData.mYStride = y_stride;
- MOZ_ASSERT(u_stride == v_stride);
- yuvData.mCbCrStride = u_stride;
- yuvData.mCbChannel = const_cast<uint8_t*>(buffer_u);
- yuvData.mCrChannel = const_cast<uint8_t*>(buffer_v);
- yuvData.mCbCrSize = IntSize(yuvData.mCbCrStride, (height_ + 1) >> 1);
+ yuvData.mYChannel = const_cast<uint8_t*>(buffer.DataY());
+ yuvData.mYSize = IntSize(buffer.width(), buffer.height());
+ yuvData.mYStride = buffer.StrideY();
+ MOZ_ASSERT(buffer.StrideU() == buffer.StrideV());
+ yuvData.mCbCrStride = buffer.StrideU();
+ yuvData.mCbChannel = const_cast<uint8_t*>(buffer.DataU());
+ yuvData.mCrChannel = const_cast<uint8_t*>(buffer.DataV());
+ yuvData.mCbCrSize =
+ IntSize((buffer.width() + 1) >> 1, (buffer.height() + 1) >> 1);
yuvData.mPicX = 0;
yuvData.mPicY = 0;
- yuvData.mPicSize = IntSize(width_, height_);
+ yuvData.mPicSize = IntSize(buffer.width(), buffer.height());
yuvData.mStereoMode = StereoMode::MONO;
if (!yuvImage->CopyData(yuvData)) {
MOZ_ASSERT(false);
return;
}
MutexAutoLock lock(mutex_);