Refactored YCbCrBuffer to YCbCrABuffer. r?jya
draft
Refactored YCbCrBuffer to YCbCrABuffer. r?jya
MozReview-Commit-ID: LcZi4jLjB1T
--- a/dom/media/MediaData.cpp
+++ b/dom/media/MediaData.cpp
@@ -80,29 +80,29 @@ AudioData::TransferAndUpdateTimestampAnd
aOther->mFrames,
Move(aOther->mAudioData),
aOther->mChannels,
aOther->mRate);
return v.forget();
}
static bool
-ValidatePlane(const VideoData::YCbCrBuffer::Plane& aPlane)
+ValidatePlane(const VideoData::YCbCrABuffer::Plane& aPlane)
{
return aPlane.mWidth <= PlanarYCbCrImage::MAX_DIMENSION &&
aPlane.mHeight <= PlanarYCbCrImage::MAX_DIMENSION &&
aPlane.mWidth * aPlane.mHeight < MAX_VIDEO_WIDTH * MAX_VIDEO_HEIGHT &&
aPlane.mStride > 0;
}
#ifdef MOZ_WIDGET_GONK
static bool
-IsYV12Format(const VideoData::YCbCrBuffer::Plane& aYPlane,
- const VideoData::YCbCrBuffer::Plane& aCbPlane,
- const VideoData::YCbCrBuffer::Plane& aCrPlane)
+IsYV12Format(const VideoData::YCbCrABuffer::Plane& aYPlane,
+ const VideoData::YCbCrABuffer::Plane& aCbPlane,
+ const VideoData::YCbCrABuffer::Plane& aCrPlane)
{
return
aYPlane.mWidth % 2 == 0 &&
aYPlane.mHeight % 2 == 0 &&
aYPlane.mWidth / 2 == aCbPlane.mWidth &&
aYPlane.mHeight / 2 == aCbPlane.mHeight &&
aCbPlane.mWidth == aCrPlane.mWidth &&
aCbPlane.mHeight == aCrPlane.mHeight;
@@ -204,26 +204,26 @@ VideoData::ShallowCopyUpdateTimestampAnd
aOther->mFrameID);
v->mImage = aOther->mImage;
return v.forget();
}
/* static */
bool VideoData::SetVideoDataToImage(PlanarYCbCrImage* aVideoImage,
const VideoInfo& aInfo,
- const YCbCrBuffer &aBuffer,
+ const YCbCrABuffer &aBuffer,
const IntRect& aPicture,
bool aCopyData)
{
if (!aVideoImage) {
return false;
}
- const YCbCrBuffer::Plane &Y = aBuffer.mPlanes[0];
- const YCbCrBuffer::Plane &Cb = aBuffer.mPlanes[1];
- const YCbCrBuffer::Plane &Cr = aBuffer.mPlanes[2];
+ const YCbCrABuffer::Plane &Y = aBuffer.mPlanes[0];
+ const YCbCrABuffer::Plane &Cb = aBuffer.mPlanes[1];
+ const YCbCrABuffer::Plane &Cr = aBuffer.mPlanes[2];
PlanarYCbCrData data;
data.mYChannel = Y.mData + Y.mOffset;
data.mYSize = IntSize(Y.mWidth, Y.mHeight);
data.mYStride = Y.mStride;
data.mYSkip = Y.mSkip;
data.mCbChannel = Cb.mData + Cb.mOffset;
data.mCrChannel = Cr.mData + Cr.mOffset;
@@ -247,17 +247,17 @@ bool VideoData::SetVideoDataToImage(Plan
/* static */
already_AddRefed<VideoData>
VideoData::CreateAndCopyData(const VideoInfo& aInfo,
ImageContainer* aContainer,
int64_t aOffset,
int64_t aTime,
int64_t aDuration,
- const YCbCrBuffer& aBuffer,
+ const YCbCrABuffer& aBuffer,
bool aKeyframe,
int64_t aTimecode,
const IntRect& aPicture)
{
if (!aContainer) {
// Create a dummy VideoData with no image. This gives us something to
// send to media streams if necessary.
RefPtr<VideoData> v(new VideoData(aOffset,
@@ -306,19 +306,19 @@ VideoData::CreateAndCopyData(const Video
RefPtr<VideoData> v(new VideoData(aOffset,
aTime,
aDuration,
aKeyframe,
aTimecode,
aInfo.mDisplay,
0));
#ifdef MOZ_WIDGET_GONK
- const YCbCrBuffer::Plane &Y = aBuffer.mPlanes[0];
- const YCbCrBuffer::Plane &Cb = aBuffer.mPlanes[1];
- const YCbCrBuffer::Plane &Cr = aBuffer.mPlanes[2];
+ const YCbCrABuffer::Plane &Y = aBuffer.mPlanes[0];
+ const YCbCrABuffer::Plane &Cb = aBuffer.mPlanes[1];
+ const YCbCrABuffer::Plane &Cr = aBuffer.mPlanes[2];
#endif
// Currently our decoder only knows how to output to ImageFormat::PLANAR_YCBCR
// format.
#ifdef MOZ_WIDGET_GONK
if (IsYV12Format(Y, Cb, Cr) && !IsInEmulator()) {
v->mImage = new layers::GrallocImage();
}
--- a/dom/media/MediaData.h
+++ b/dom/media/MediaData.h
@@ -412,44 +412,47 @@ protected:
namespace layers {
class TextureClient;
class PlanarYCbCrImage;
} // namespace layers
class VideoInfo;
-// Holds a decoded video frame, in YCbCr format. These are queued in the reader.
+// Holds a decoded video frame, in YCbCrA format. These are queued in the reader.
class VideoData : public MediaData {
public:
typedef gfx::IntRect IntRect;
typedef gfx::IntSize IntSize;
typedef layers::ImageContainer ImageContainer;
typedef layers::Image Image;
typedef layers::PlanarYCbCrImage PlanarYCbCrImage;
static const Type sType = VIDEO_DATA;
static const char* sTypeName;
- // YCbCr data obtained from decoding the video. The index's are:
+  // YCbCrA data obtained from decoding the video. The indices are:
// 0 = Y
// 1 = Cb
// 2 = Cr
- struct YCbCrBuffer {
+ // 3 = A
+ struct YCbCrABuffer {
struct Plane {
uint8_t* mData;
uint32_t mWidth;
uint32_t mHeight;
uint32_t mStride;
uint32_t mOffset;
uint32_t mSkip;
};
- Plane mPlanes[3];
+ Plane mPlanes[4];
YUVColorSpace mYUVColorSpace = YUVColorSpace::BT601;
+    // The alpha plane's mData stays nullptr unless a decoder explicitly sets it;
+    // like the other planes, its remaining fields are left uninitialized.
+ YCbCrABuffer() { mPlanes[3].mData = nullptr; }
};
// Constructs a VideoData object. If aImage is nullptr, creates a new Image
// holding a copy of the YCbCr data passed in aBuffer. If aImage is not
// nullptr, it's stored as the underlying video image and aBuffer is assumed
// to point to memory within aImage so no copy is made. aTimecode is a codec
// specific number representing the timestamp of the frame of video data.
// Returns nsnull if an error occurs. This may indicate that memory couldn't
@@ -459,17 +462,17 @@ public:
// Creates a new VideoData containing a deep copy of aBuffer. May use aContainer
// to allocate an Image to hold the copied data.
static already_AddRefed<VideoData> CreateAndCopyData(const VideoInfo& aInfo,
ImageContainer* aContainer,
int64_t aOffset,
int64_t aTime,
int64_t aDuration,
- const YCbCrBuffer &aBuffer,
+ const YCbCrABuffer &aBuffer,
bool aKeyframe,
int64_t aTimecode,
const IntRect& aPicture);
static already_AddRefed<VideoData> CreateAndCopyIntoTextureClient(const VideoInfo& aInfo,
int64_t aOffset,
int64_t aTime,
int64_t aDuration,
@@ -509,17 +512,17 @@ public:
static already_AddRefed<VideoData>
ShallowCopyUpdateTimestampAndDuration(const VideoData* aOther, int64_t aTimestamp,
int64_t aDuration);
// Initialize PlanarYCbCrImage. Only When aCopyData is true,
// video data is copied to PlanarYCbCrImage.
static bool SetVideoDataToImage(PlanarYCbCrImage* aVideoImage,
const VideoInfo& aInfo,
- const YCbCrBuffer &aBuffer,
+ const YCbCrABuffer &aBuffer,
const IntRect& aPicture,
bool aCopyData);
size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const;
// Dimensions at which to display the video frame. The picture region
// will be scaled to this size. This is should be the picture region's
// dimensions scaled with respect to its aspect ratio.
--- a/dom/media/android/AndroidMediaReader.cpp
+++ b/dom/media/android/AndroidMediaReader.cpp
@@ -190,17 +190,17 @@ bool AndroidMediaReader::DecodeVideoFram
frame.mTimeUs,
1, // We don't know the duration yet.
currentImage,
frame.mKeyFrame,
-1,
picture);
} else {
// Assume YUV
- VideoData::YCbCrBuffer b;
+ VideoData::YCbCrABuffer b;
b.mPlanes[0].mData = static_cast<uint8_t *>(frame.Y.mData);
b.mPlanes[0].mStride = frame.Y.mStride;
b.mPlanes[0].mHeight = frame.Y.mHeight;
b.mPlanes[0].mWidth = frame.Y.mWidth;
b.mPlanes[0].mOffset = frame.Y.mOffset;
b.mPlanes[0].mSkip = frame.Y.mSkip;
b.mPlanes[1].mData = static_cast<uint8_t *>(frame.Cb.mData);
--- a/dom/media/platforms/agnostic/BlankDecoderModule.cpp
+++ b/dom/media/platforms/agnostic/BlankDecoderModule.cpp
@@ -120,17 +120,17 @@ public:
Create(const media::TimeUnit& aDTS, const media::TimeUnit& aDuration, int64_t aOffsetInStream)
{
// Create a fake YUV buffer in a 420 format. That is, an 8bpp Y plane,
// with a U and V plane that are half the size of the Y plane, i.e 8 bit,
// 2x2 subsampled.
const int sizeY = mFrameWidth * mFrameHeight;
const int sizeCbCr = ((mFrameWidth + 1) / 2) * ((mFrameHeight + 1) / 2);
auto frame = MakeUnique<uint8_t[]>(sizeY + sizeCbCr);
- VideoData::YCbCrBuffer buffer;
+ VideoData::YCbCrABuffer buffer;
// Y plane.
buffer.mPlanes[0].mData = frame.get();
buffer.mPlanes[0].mStride = mFrameWidth;
buffer.mPlanes[0].mHeight = mFrameHeight;
buffer.mPlanes[0].mWidth = mFrameWidth;
buffer.mPlanes[0].mOffset = 0;
buffer.mPlanes[0].mSkip = 0;
--- a/dom/media/platforms/agnostic/TheoraDecoder.cpp
+++ b/dom/media/platforms/agnostic/TheoraDecoder.cpp
@@ -137,17 +137,17 @@ TheoraDecoder::DoDecode(MediaRawData* aS
int ret = th_decode_packetin(mTheoraDecoderContext, &pkt, nullptr);
if (ret == 0 || ret == TH_DUPFRAME) {
th_ycbcr_buffer ycbcr;
th_decode_ycbcr_out(mTheoraDecoderContext, ycbcr);
int hdec = !(mTheoraInfo.pixel_fmt & 1);
int vdec = !(mTheoraInfo.pixel_fmt & 2);
- VideoData::YCbCrBuffer b;
+ VideoData::YCbCrABuffer b;
b.mPlanes[0].mData = ycbcr[0].data;
b.mPlanes[0].mStride = ycbcr[0].stride;
b.mPlanes[0].mHeight = mTheoraInfo.frame_height;
b.mPlanes[0].mWidth = mTheoraInfo.frame_width;
b.mPlanes[0].mOffset = b.mPlanes[0].mSkip = 0;
b.mPlanes[1].mData = ycbcr[1].data;
b.mPlanes[1].mStride = ycbcr[1].stride;
--- a/dom/media/platforms/agnostic/VPXDecoder.cpp
+++ b/dom/media/platforms/agnostic/VPXDecoder.cpp
@@ -147,17 +147,17 @@ VPXDecoder::DoDecode(MediaRawData* aSamp
vpx_image_t *img_alpha = nullptr;
MediaResult rv = DecodeAlpha(img, &img_alpha, aSample);
if (NS_FAILED(rv)) {
return(rv);
}
// Chroma shifts are rounded down as per the decoding examples in the SDK
- VideoData::YCbCrBuffer b;
+ VideoData::YCbCrABuffer b;
b.mPlanes[0].mData = img->planes[0];
b.mPlanes[0].mStride = img->stride[0];
b.mPlanes[0].mHeight = img->d_h;
b.mPlanes[0].mWidth = img->d_w;
b.mPlanes[0].mOffset = b.mPlanes[0].mSkip = 0;
b.mPlanes[1].mData = img->planes[1];
b.mPlanes[1].mStride = img->stride[1];
--- a/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp
+++ b/dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp
@@ -20,17 +20,17 @@ extern bool IsOnGMPThread();
void
VideoCallbackAdapter::Decoded(GMPVideoi420Frame* aDecodedFrame)
{
GMPUniquePtr<GMPVideoi420Frame> decodedFrame(aDecodedFrame);
MOZ_ASSERT(IsOnGMPThread());
- VideoData::YCbCrBuffer b;
+ VideoData::YCbCrABuffer b;
for (int i = 0; i < kGMPNumOfPlanes; ++i) {
b.mPlanes[i].mData = decodedFrame->Buffer(GMPPlaneType(i));
b.mPlanes[i].mStride = decodedFrame->Stride(GMPPlaneType(i));
if (i == kGMPYPlane) {
b.mPlanes[i].mWidth = decodedFrame->Width();
b.mPlanes[i].mHeight = decodedFrame->Height();
} else {
b.mPlanes[i].mWidth = (decodedFrame->Width() + 1) / 2;
--- a/dom/media/platforms/apple/AppleVTDecoder.cpp
+++ b/dom/media/platforms/apple/AppleVTDecoder.cpp
@@ -299,17 +299,17 @@ AppleVTDecoder::OutputFrame(CVPixelBuffe
aFrameRef.composition_timestamp.ToMicroseconds(),
aFrameRef.duration.ToMicroseconds());
} else if (mUseSoftwareImages) {
size_t width = CVPixelBufferGetWidth(aImage);
size_t height = CVPixelBufferGetHeight(aImage);
DebugOnly<size_t> planes = CVPixelBufferGetPlaneCount(aImage);
MOZ_ASSERT(planes == 2, "Likely not NV12 format and it must be.");
- VideoData::YCbCrBuffer buffer;
+ VideoData::YCbCrABuffer buffer;
// Lock the returned image data.
CVReturn rv = CVPixelBufferLockBaseAddress(aImage, kCVPixelBufferLock_ReadOnly);
if (rv != kCVReturnSuccess) {
NS_ERROR("error locking pixel data");
mCallback->Error(
MediaResult(NS_ERROR_DOM_MEDIA_DECODE_ERR,
RESULT_DETAIL("CVPixelBufferLockBaseAddress:%x", rv)));
--- a/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
+++ b/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
@@ -274,17 +274,17 @@ FFmpegVideoDecoder<LIBAV_VER>::DoDecode(
// dts are probably incorrectly reported ; so clear the map as we're
// unlikely to find them in the future anyway. This also guards
// against the map becoming extremely big.
mDurationMap.Clear();
}
FFMPEG_LOG("Got one frame output with pts=%lld dts=%lld duration=%lld opaque=%lld",
pts, mFrame->pkt_dts, duration, mCodecContext->reordered_opaque);
- VideoData::YCbCrBuffer b;
+ VideoData::YCbCrABuffer b;
b.mPlanes[0].mData = mFrame->data[0];
b.mPlanes[1].mData = mFrame->data[1];
b.mPlanes[2].mData = mFrame->data[2];
b.mPlanes[0].mStride = mFrame->linesize[0];
b.mPlanes[1].mStride = mFrame->linesize[1];
b.mPlanes[2].mStride = mFrame->linesize[2];
--- a/dom/media/platforms/omx/OmxDataDecoder.cpp
+++ b/dom/media/platforms/omx/OmxDataDecoder.cpp
@@ -989,17 +989,17 @@ MediaDataHelper::CreateYUV420VideoData(B
}
size_t yuv420p_y_size = stride * slice_height;
size_t yuv420p_u_size = ((stride + 1) / 2) * ((slice_height + 1) / 2);
uint8_t *yuv420p_y = yuv420p_buffer;
uint8_t *yuv420p_u = yuv420p_y + yuv420p_y_size;
uint8_t *yuv420p_v = yuv420p_u + yuv420p_u_size;
- VideoData::YCbCrBuffer b;
+ VideoData::YCbCrABuffer b;
b.mPlanes[0].mData = yuv420p_y;
b.mPlanes[0].mWidth = width;
b.mPlanes[0].mHeight = height;
b.mPlanes[0].mStride = stride;
b.mPlanes[0].mOffset = 0;
b.mPlanes[0].mSkip = 0;
b.mPlanes[1].mData = yuv420p_u;
--- a/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
+++ b/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
@@ -712,17 +712,17 @@ WMFVideoMFTManager::CreateBasicVideoFram
} else {
hr = buffer->Lock(&data, nullptr, nullptr);
NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
stride = mVideoStride;
}
// YV12, planar format: [YYYY....][VVVV....][UUUU....]
// i.e., Y, then V, then U.
- VideoData::YCbCrBuffer b;
+ VideoData::YCbCrABuffer b;
uint32_t videoWidth = mImageSize.width;
uint32_t videoHeight = mImageSize.height;
// Y (Y') plane
b.mPlanes[0].mData = data;
b.mPlanes[0].mStride = stride;
b.mPlanes[0].mHeight = videoHeight;