--- a/dom/media/MediaData.cpp
+++ b/dom/media/MediaData.cpp
@@ -84,74 +84,76 @@ AudioData::TransferAndUpdateTimestampAnd
aOther->mChannels,
aOther->mRate);
return v.forget();
}
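+// Sanity-check a single image plane: both dimensions must stay within the
+// compositor's limits, the total area must be bounded, and the stride must
+// be positive, so later copies cannot index out of range.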
static bool
ValidatePlane(const VideoData::YCbCrBuffer::Plane& aPlane)
{
- return aPlane.mWidth <= PlanarYCbCrImage::MAX_DIMENSION &&
- aPlane.mHeight <= PlanarYCbCrImage::MAX_DIMENSION &&
- aPlane.mWidth * aPlane.mHeight < MAX_VIDEO_WIDTH * MAX_VIDEO_HEIGHT &&
- aPlane.mStride > 0;
+ return aPlane.mWidth <= PlanarYCbCrImage::MAX_DIMENSION
+ && aPlane.mHeight <= PlanarYCbCrImage::MAX_DIMENSION
+ && aPlane.mWidth * aPlane.mHeight < MAX_VIDEO_WIDTH * MAX_VIDEO_HEIGHT
+ && aPlane.mStride > 0;
}
static bool ValidateBufferAndPicture(const VideoData::YCbCrBuffer& aBuffer,
const IntRect& aPicture)
{
// The following situation should never happen unless there is a bug
// in the decoder
- if (aBuffer.mPlanes[1].mWidth != aBuffer.mPlanes[2].mWidth ||
- aBuffer.mPlanes[1].mHeight != aBuffer.mPlanes[2].mHeight) {
+ if (aBuffer.mPlanes[1].mWidth != aBuffer.mPlanes[2].mWidth
+ || aBuffer.mPlanes[1].mHeight != aBuffer.mPlanes[2].mHeight) {
NS_ERROR("C planes with different sizes");
return false;
}
// The following situations could be triggered by invalid input
if (aPicture.width <= 0 || aPicture.height <= 0) {
// In debug mode, makes the error more noticeable
MOZ_ASSERT(false, "Empty picture rect");
return false;
}
- if (!ValidatePlane(aBuffer.mPlanes[0]) ||
- !ValidatePlane(aBuffer.mPlanes[1]) ||
- !ValidatePlane(aBuffer.mPlanes[2])) {
+ if (!ValidatePlane(aBuffer.mPlanes[0])
+ || !ValidatePlane(aBuffer.mPlanes[1])
+ || !ValidatePlane(aBuffer.mPlanes[2])) {
NS_WARNING("Invalid plane size");
return false;
}
// Ensure the picture size specified in the headers can be extracted out of
// the frame we've been supplied without indexing out of bounds.
CheckedUint32 xLimit = aPicture.x + CheckedUint32(aPicture.width);
CheckedUint32 yLimit = aPicture.y + CheckedUint32(aPicture.height);
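+  // CheckedUint32 flags any wrap-around in the x + width / y + height sums;
+  // the isValid() tests below reject an overflowed rect.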
- if (!xLimit.isValid() || xLimit.value() > aBuffer.mPlanes[0].mStride ||
- !yLimit.isValid() || yLimit.value() > aBuffer.mPlanes[0].mHeight)
+ if (!xLimit.isValid()
+ || xLimit.value() > aBuffer.mPlanes[0].mStride
+ || !yLimit.isValid()
+ || yLimit.value() > aBuffer.mPlanes[0].mHeight)
{
// The specified picture dimensions can't be contained inside the video
// frame, we'll stomp memory if we try to copy it. Fail.
NS_WARNING("Overflowing picture rect");
return false;
}
return true;
}
#ifdef MOZ_WIDGET_GONK
static bool
IsYV12Format(const VideoData::YCbCrBuffer::Plane& aYPlane,
const VideoData::YCbCrBuffer::Plane& aCbPlane,
const VideoData::YCbCrBuffer::Plane& aCrPlane)
{
return
- aYPlane.mWidth % 2 == 0 &&
- aYPlane.mHeight % 2 == 0 &&
- aYPlane.mWidth / 2 == aCbPlane.mWidth &&
- aYPlane.mHeight / 2 == aCbPlane.mHeight &&
- aCbPlane.mWidth == aCrPlane.mWidth &&
- aCbPlane.mHeight == aCrPlane.mHeight;
+ aYPlane.mWidth % 2 == 0
+ && aYPlane.mHeight % 2 == 0
+ && aYPlane.mWidth / 2 == aCbPlane.mWidth
+ && aYPlane.mHeight / 2 == aCbPlane.mHeight
+ && aCbPlane.mWidth == aCrPlane.mWidth
+ && aCbPlane.mHeight == aCrPlane.mHeight;
}
static bool
IsInEmulator()
{
char propQemu[PROPERTY_VALUE_MAX];
property_get("ro.kernel.qemu", propQemu, "");
return !strncmp(propQemu, "1", 1);
@@ -178,17 +180,18 @@ VideoData::VideoData(int64_t aOffset,
VideoData::~VideoData()
{
}
void
VideoData::SetListener(UniquePtr<Listener> aListener)
{
- MOZ_ASSERT(!mSentToCompositor, "Listener should be registered before sending data");
+ MOZ_ASSERT(!mSentToCompositor,
+ "Listener should be registered before sending data");
mListener = Move(aListener);
}
void
VideoData::MarkSentToCompositor()
{
if (mSentToCompositor) {
--- a/dom/media/MediaData.h
+++ b/dom/media/MediaData.h
@@ -61,17 +61,18 @@ template <typename Type, int Alignment =
class AlignedBuffer
{
public:
AlignedBuffer()
: mData(nullptr)
, mLength(0)
, mBuffer(nullptr)
, mCapacity(0)
- {}
+ {
+ }
explicit AlignedBuffer(size_t aLength)
: mData(nullptr)
, mLength(0)
, mBuffer(nullptr)
, mCapacity(0)
{
if (EnsureCapacity(aLength)) {
@@ -85,17 +86,18 @@ public:
if (!mData) {
return;
}
PodCopy(mData, aData, aLength);
}
AlignedBuffer(const AlignedBuffer& aOther)
: AlignedBuffer(aOther.Data(), aOther.Length())
- {}
+ {
+ }
AlignedBuffer(AlignedBuffer&& aOther)
: mData(aOther.mData)
, mLength(aOther.mLength)
, mBuffer(Move(aOther.mBuffer))
, mCapacity(aOther.mCapacity)
{
aOther.mData = nullptr;
@@ -349,28 +351,29 @@ protected:
, mTime(0)
, mTimecode(0)
, mDuration(0)
, mFrames(aFrames)
, mKeyframe(false)
{
}
- virtual ~MediaData() {}
+ virtual ~MediaData() { }
};
// NullData is for a decoder generating a sample which doesn't need to be
// rendered.
class NullData : public MediaData
{
public:
NullData(int64_t aOffset, int64_t aTime, int64_t aDuration)
: MediaData(NULL_DATA, aOffset, aTime, aDuration, 0)
- {}
+ {
+ }
static const Type sType = NULL_DATA;
};
// Holds a chunk of decoded audio frames.
class AudioData : public MediaData
{
public:
@@ -380,17 +383,19 @@ public:
int64_t aDuration,
uint32_t aFrames,
AlignedAudioBuffer&& aData,
uint32_t aChannels,
uint32_t aRate)
: MediaData(sType, aOffset, aTime, aDuration, aFrames)
, mChannels(aChannels)
, mRate(aRate)
- , mAudioData(Move(aData)) {}
+ , mAudioData(Move(aData))
+ {
+ }
static const Type sType = AUDIO_DATA;
static const char* sTypeName;
// Creates a new AudioData identical to aOther, but with a different
// specified timestamp and duration. All data from aOther is copied
// into the new AudioData except the audio data, which is transferred rather
// than copied. After such a call, the original aOther is unusable.
@@ -459,50 +464,50 @@ public:
Plane mPlanes[3];
YUVColorSpace mYUVColorSpace = YUVColorSpace::BT601;
};
class Listener
{
public:
virtual void OnSentToCompositor() = 0;
- virtual ~Listener() {}
+ virtual ~Listener() { }
};
// Constructs a VideoData object. If aImage is nullptr, creates a new Image
// holding a copy of the YCbCr data passed in aBuffer. If aImage is not
// nullptr, it's stored as the underlying video image and aBuffer is assumed
// to point to memory within aImage so no copy is made. aTimecode is a codec
// specific number representing the timestamp of the frame of video data.
  // Returns nullptr if an error occurs. This may indicate that memory couldn't
// be allocated to create the VideoData object, or it may indicate some
// problem with the input data (e.g. negative stride).
- // Creates a new VideoData containing a deep copy of aBuffer. May use aContainer
- // to allocate an Image to hold the copied data.
+ // Creates a new VideoData containing a deep copy of aBuffer. May use
+ // aContainer to allocate an Image to hold the copied data.
static already_AddRefed<VideoData> CreateAndCopyData(
const VideoInfo& aInfo,
ImageContainer* aContainer,
int64_t aOffset,
int64_t aTime,
int64_t aDuration,
- const YCbCrBuffer &aBuffer,
+ const YCbCrBuffer& aBuffer,
bool aKeyframe,
int64_t aTimecode,
const IntRect& aPicture);
static already_AddRefed<VideoData> CreateAndCopyData(
const VideoInfo& aInfo,
ImageContainer* aContainer,
int64_t aOffset,
int64_t aTime,
int64_t aDuration,
- const YCbCrBuffer &aBuffer,
- const YCbCrBuffer::Plane &aAlphaPlane,
+ const YCbCrBuffer& aBuffer,
+ const YCbCrBuffer::Plane& aAlphaPlane,
bool aKeyframe,
int64_t aTimecode,
const IntRect& aPicture);
static already_AddRefed<VideoData> CreateAndCopyIntoTextureClient(
const VideoInfo& aInfo,
int64_t aOffset,
int64_t aTime,
@@ -521,17 +526,17 @@ public:
bool aKeyframe,
int64_t aTimecode,
const IntRect& aPicture);
  // Initialize PlanarYCbCrImage. Only when aCopyData is true,
// video data is copied to PlanarYCbCrImage.
static bool SetVideoDataToImage(PlanarYCbCrImage* aVideoImage,
const VideoInfo& aInfo,
- const YCbCrBuffer &aBuffer,
+ const YCbCrBuffer& aBuffer,
const IntRect& aPicture,
bool aCopyData);
size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const;
// Dimensions at which to display the video frame. The picture region
  // will be scaled to this size. This should be the picture region's
// dimensions scaled with respect to its aspect ratio.
@@ -683,17 +688,18 @@ private:
friend class MediaRawDataWriter;
AlignedByteBuffer mBuffer;
AlignedByteBuffer mAlphaBuffer;
CryptoSample mCryptoInternal;
MediaRawData(const MediaRawData&); // Not implemented
};
// MediaByteBuffer is a ref counted infallible TArray.
-class MediaByteBuffer : public nsTArray<uint8_t> {
+class MediaByteBuffer : public nsTArray<uint8_t>
+{
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaByteBuffer);
MediaByteBuffer() = default;
explicit MediaByteBuffer(size_t aCapacity) : nsTArray<uint8_t>(aCapacity) { }
private:
~MediaByteBuffer() { }
};
--- a/dom/media/gmp/widevine-adapter/WidevineVideoDecoder.cpp
+++ b/dom/media/gmp/widevine-adapter/WidevineVideoDecoder.cpp
@@ -122,28 +122,32 @@ WidevineVideoDecoder::Decode(GMPVideoEnc
if (mCodecType == kGMPVideoCodecH264) {
// Convert input from AVCC, which GMPAPI passes in, to AnnexB, which
// Chromium uses internally.
mp4_demuxer::AnnexB::ConvertSampleToAnnexB(raw);
}
const GMPEncryptedBufferMetadata* crypto = aInputFrame->GetDecryptionData();
nsTArray<SubsampleEntry> subsamples;
- InitInputBuffer(crypto, aInputFrame->TimeStamp(), raw->Data(), raw->Size(), sample, subsamples);
+ InitInputBuffer(crypto, aInputFrame->TimeStamp(), raw->Data(), raw->Size(),
+ sample, subsamples);
// For keyframes, ConvertSampleToAnnexB will stick the AnnexB extra data
// at the start of the input. So we need to account for that as clear data
// in the subsamples.
- if (raw->mKeyframe && !subsamples.IsEmpty() && mCodecType == kGMPVideoCodecH264) {
+ if (raw->mKeyframe
+ && !subsamples.IsEmpty()
+ && mCodecType == kGMPVideoCodecH264) {
subsamples[0].clear_bytes += mAnnexB->Length();
}
WidevineVideoFrame frame;
Status rv = CDM()->DecryptAndDecodeFrame(sample, &frame);
- Log("WidevineVideoDecoder::Decode(timestamp=%lld) rv=%d", sample.timestamp, rv);
+ Log("WidevineVideoDecoder::Decode(timestamp=%lld) rv=%d", sample.timestamp,
+ rv);
// Destroy frame, so that the shmem is now free to be used to return
// output to the Gecko process.
aInputFrame->Destroy();
aInputFrame = nullptr;
if (rv == kSuccess) {
if (!ReturnOutput(frame)) {
@@ -161,17 +165,18 @@ WidevineVideoDecoder::Decode(GMPVideoEnc
mCallback->InputDataExhausted();
}
} else if (rv == kNeedMoreData) {
MOZ_ASSERT(mCDMWrapper);
mCallback->InputDataExhausted();
} else {
mCallback->Error(ToGMPErr(rv));
}
- // Finish a drain if pending and we have no pending ReturnOutput calls on the stack.
+ // Finish a drain if pending and we have no pending ReturnOutput calls on the
+ // stack.
if (mDrainPending && mReturnOutputCallDepth == 0) {
Drain();
}
}
// Util class to assist with counting mReturnOutputCallDepth.
-class CounterHelper {
+class CounterHelper
+{
public:
@@ -190,22 +195,20 @@ public:
private:
int32_t& mCounter;
};
// Util class to make sure GMP frames are freed. Holds a GMPVideoi420Frame*
// and will destroy it when the helper is destroyed unless the held frame
// is forgotten with ForgetFrame.
-class FrameDestroyerHelper {
+class FrameDestroyerHelper
+{
public:
- explicit FrameDestroyerHelper(GMPVideoi420Frame*& frame)
- : frame(frame)
- {
- }
+ explicit FrameDestroyerHelper(GMPVideoi420Frame*& frame) : frame(frame) { }
// RAII, destroy frame if held.
~FrameDestroyerHelper()
{
if (frame) {
frame->Destroy();
}
frame = nullptr;
@@ -262,21 +265,23 @@ WidevineVideoDecoder::ReturnOutput(Widev
// other IPC calls can happen during this call, resulting in calls
// being made to the CDM. After this call state can have changed,
// and should be reevaluated.
err = gmpFrame->CreateEmptyFrame(size.width,
size.height,
yStride,
uStride,
vStride);
- // Assert possible reentrant calls or resets haven't altered level unexpectedly.
+ // Assert possible reentrant calls or resets haven't altered level
+ // unexpectedly.
MOZ_ASSERT(mReturnOutputCallDepth == 1);
ENSURE_GMP_SUCCESS(err, false);
- // If a reset started we need to dump the current frame and complete the reset.
+ // If a reset started we need to dump the current frame and complete the
+ // reset.
if (mResetInProgress) {
MOZ_ASSERT(mCDMWrapper);
MOZ_ASSERT(mFrameAllocationQueue.empty());
CompleteReset();
return true;
}
err = gmpFrame->SetWidth(size.width);
--- a/dom/media/gmp/widevine-adapter/WidevineVideoDecoder.h
+++ b/dom/media/gmp/widevine-adapter/WidevineVideoDecoder.h
@@ -15,17 +15,18 @@
#include "nsTArray.h"
#include "WidevineDecryptor.h"
#include "WidevineVideoFrame.h"
#include <map>
#include <deque>
namespace mozilla {
-class WidevineVideoDecoder : public GMPVideoDecoder {
+class WidevineVideoDecoder : public GMPVideoDecoder
+{
public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(WidevineVideoDecoder)
WidevineVideoDecoder(GMPVideoHost* aVideoHost,
RefPtr<CDMWrapper> aCDMWrapper);
void InitDecode(const GMPVideoCodec& aCodecSettings,
const uint8_t* aCodecSpecific,
@@ -40,17 +41,18 @@ public:
void Reset() override;
void Drain() override;
void DecodingComplete() override;
private:
~WidevineVideoDecoder();
- cdm::ContentDecryptionModule_8* CDM() const {
+ cdm::ContentDecryptionModule_8* CDM() const
+ {
// CDM should only be accessed before 'DecodingComplete'.
MOZ_ASSERT(mCDMWrapper);
    // CDMWrapper ensures the CDM is non-null, no need to check again.
return mCDMWrapper->GetCDM();
}
bool ReturnOutput(WidevineVideoFrame& aFrame);
void CompleteReset();
--- a/dom/media/ipc/VideoDecoderParent.cpp
+++ b/dom/media/ipc/VideoDecoderParent.cpp
@@ -72,17 +72,18 @@ VideoDecoderParent::VideoDecoderParent(V
CreateDecoderParams params(aVideoInfo);
params.mTaskQueue = mDecodeTaskQueue;
params.mKnowsCompositor = mKnowsCompositor;
params.mImageContainer = new layers::ImageContainer();
mDecoder = pdm->CreateVideoDecoder(params);
#else
- MOZ_ASSERT(false, "Can't use RemoteVideoDecoder on non-Windows platforms yet");
+ MOZ_ASSERT(false,
+ "Can't use RemoteVideoDecoder on non-Windows platforms yet");
#endif
*aSuccess = !!mDecoder;
}
VideoDecoderParent::~VideoDecoderParent()
{
MOZ_COUNT_DTOR(VideoDecoderParent);
@@ -101,34 +102,35 @@ mozilla::ipc::IPCResult
VideoDecoderParent::RecvInit()
{
MOZ_ASSERT(OnManagerThread());
RefPtr<VideoDecoderParent> self = this;
mDecoder->Init()->Then(mManagerTaskQueue, __func__,
[self] (TrackInfo::TrackType aTrack) {
if (self->mDecoder) {
nsCString hardwareReason;
- bool hardwareAccelerated = self->mDecoder->IsHardwareAccelerated(hardwareReason);
+ bool hardwareAccelerated =
+ self->mDecoder->IsHardwareAccelerated(hardwareReason);
Unused << self->SendInitComplete(hardwareAccelerated, hardwareReason);
}
},
[self] (MediaResult aReason) {
if (!self->mDestroyed) {
Unused << self->SendInitFailed(aReason);
}
});
return IPC_OK();
}
mozilla::ipc::IPCResult
VideoDecoderParent::RecvInput(const MediaRawDataIPDL& aData)
{
MOZ_ASSERT(OnManagerThread());
- // XXX: This copies the data into a buffer owned by the MediaRawData. Ideally we'd just take ownership
- // of the shmem.
+ // XXX: This copies the data into a buffer owned by the MediaRawData. Ideally
+ // we'd just take ownership of the shmem.
RefPtr<MediaRawData> data = new MediaRawData(aData.buffer().get<uint8_t>(),
aData.buffer().Size<uint8_t>());
if (!data->Data()) {
// OOM
Error(NS_ERROR_OUT_OF_MEMORY);
return IPC_OK();
}
data->mOffset = aData.base().offset();
--- a/dom/media/ogg/OggCodecState.cpp
+++ b/dom/media/ogg/OggCodecState.cpp
@@ -3,27 +3,26 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include <string.h>
#include "mozilla/EndianUtils.h"
#include <stdint.h>
+#include <algorithm>
+#include <opus/opus.h>
-#include "nsDebug.h"
#include "OggCodecState.h"
#include "OpusDecoder.h"
#include "OpusParser.h"
#include "VideoUtils.h"
-#include <algorithm>
-
-#include <opus/opus.h>
+#include "XiphExtradata.h"
+#include "nsDebug.h"
#include "opus/opus_multistream.h"
-#include "XiphExtradata.h"
// On Android JellyBean, the hardware.h header redefines version_major and
// version_minor, which breaks our build. See:
// https://bugzilla.mozilla.org/show_bug.cgi?id=912702#c6
#ifdef MOZ_WIDGET_GONK
#ifdef version_major
#undef version_major
#endif
@@ -200,18 +199,19 @@ Clone(ogg_packet* aPacket)
p->packet = new unsigned char[p->bytes];
memcpy(p->packet, aPacket->packet, p->bytes);
return p;
}
void
OggCodecState::ReleasePacket(ogg_packet* aPacket)
{
- if (aPacket)
+ if (aPacket) {
delete [] aPacket->packet;
+ }
delete aPacket;
}
void
OggPacketQueue::Append(ogg_packet* aPacket)
{
nsDeque::Push(aPacket);
}
@@ -236,32 +236,34 @@ OggCodecState::PacketPeek()
{
if (mPackets.IsEmpty()) {
return nullptr;
}
return mPackets.PeekFront();
}
void
-OggCodecState::PushFront(OggPacketQueue &&aOther)
+OggCodecState::PushFront(OggPacketQueue&& aOther)
{
while (!aOther.IsEmpty()) {
mPackets.PushFront(aOther.Pop());
}
}
already_AddRefed<MediaRawData>
OggCodecState::PacketOutAsMediaRawData()
{
ogg_packet* packet = PacketOut();
if (!packet) {
return nullptr;
}
- NS_ASSERTION(!IsHeader(packet), "PacketOutAsMediaRawData can only be called on non-header packets");
+ NS_ASSERTION(
+ !IsHeader(packet),
+ "PacketOutAsMediaRawData can only be called on non-header packets");
RefPtr<MediaRawData> sample = new MediaRawData(packet->packet, packet->bytes);
if (!sample->Data()) {
// OOM.
ReleasePacket(packet);
return nullptr;
}
int64_t end_tstamp = Time(packet->granulepos);
@@ -369,17 +371,18 @@ TheoraState::Init()
int64_t d = mTheoraInfo.aspect_denominator;
float aspectRatio =
(n == 0 || d == 0) ? 1.0f : static_cast<float>(n) / static_cast<float>(d);
// Ensure the frame and picture regions aren't larger than our prescribed
// maximum, or zero sized.
nsIntSize frame(mTheoraInfo.frame_width, mTheoraInfo.frame_height);
- nsIntRect picture(mTheoraInfo.pic_x, mTheoraInfo.pic_y, mTheoraInfo.pic_width, mTheoraInfo.pic_height);
+ nsIntRect picture(mTheoraInfo.pic_x, mTheoraInfo.pic_y,
+ mTheoraInfo.pic_width, mTheoraInfo.pic_height);
nsIntSize display(mTheoraInfo.pic_width, mTheoraInfo.pic_height);
ScaleDisplayByAspectRatio(display, aspectRatio);
if (!IsValidVideoRegion(frame, picture, display)) {
return mActive = false;
}
mCtx = th_decode_alloc(&mTheoraInfo, mSetup);
if (!mCtx) {
@@ -450,19 +453,19 @@ TheoraState::Time(int64_t granulepos)
bool
TheoraState::IsHeader(ogg_packet* aPacket)
{
return th_packet_isheader(aPacket);
}
# define TH_VERSION_CHECK(_info,_maj,_min,_sub) \
- (((_info)->version_major>(_maj)||(_info)->version_major==(_maj))&& \
- (((_info)->version_minor>(_min)||(_info)->version_minor==(_min))&& \
- (_info)->version_subminor>=(_sub)))
+ (((_info)->version_major>(_maj)||(_info)->version_major==(_maj)) \
+ && (((_info)->version_minor>(_min)||(_info)->version_minor==(_min)) \
+ && (_info)->version_subminor>=(_sub)))
int64_t
TheoraState::Time(th_info* aInfo, int64_t aGranulepos)
{
if (aGranulepos < 0 || aInfo->fps_numerator == 0) {
return -1;
}
// Implementation of th_granule_frame inlined here to operate
@@ -568,19 +571,19 @@ TheoraState::PageIn(ogg_page* aPage)
// version (maj,min,sub) or later, otherwise returns 0.
int
TheoraVersion(th_info* info,
unsigned char maj,
unsigned char min,
unsigned char sub)
{
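+  // Pack each (major, minor, sub) triple into a single integer so the two
+  // versions can be compared with one >=.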
ogg_uint32_t ver = (maj << 16) + (min << 8) + sub;
- ogg_uint32_t th_ver = (info->version_major << 16) +
- (info->version_minor << 8) +
- info->version_subminor;
+ ogg_uint32_t th_ver = (info->version_major << 16)
+ + (info->version_minor << 8)
+ + info->version_subminor;
return (th_ver >= ver) ? 1 : 0;
}
void
TheoraState::ReconstructTheoraGranulepos()
{
if (mUnstamped.Length() == 0) {
return;
@@ -589,18 +592,18 @@ TheoraState::ReconstructTheoraGranulepos
NS_ASSERTION(lastGranulepos != -1, "Must know last granulepos");
// Reconstruct the granulepos (and thus timestamps) of the decoded
// frames. Granulepos are stored as ((keyframe<<shift)+offset). We
// know the granulepos of the last frame in the list, so we can infer
// the granulepos of the intermediate frames using their frame numbers.
ogg_int64_t shift = mTheoraInfo.keyframe_granule_shift;
ogg_int64_t version_3_2_1 = TheoraVersion(&mTheoraInfo,3,2,1);
- ogg_int64_t lastFrame = th_granule_frame(mCtx,
- lastGranulepos) + version_3_2_1;
+ ogg_int64_t lastFrame =
+ th_granule_frame(mCtx, lastGranulepos) + version_3_2_1;
ogg_int64_t firstFrame = lastFrame - mUnstamped.Length() + 1;
// Until we encounter a keyframe, we'll assume that the "keyframe"
// segment of the granulepos is the first frame, or if that causes
// the "offset" segment to overflow, we assume the required
  // keyframe is maximally offset. Until we encounter a keyframe,
// the granulepos will probably be wrong, but we can't decode the
// frame anyway (since we don't have its keyframe) so it doesn't really
@@ -615,52 +618,56 @@ TheoraState::ReconstructTheoraGranulepos
ogg_int64_t frame = firstFrame + i;
ogg_int64_t granulepos;
ogg_packet* packet = mUnstamped[i];
bool isKeyframe = th_packet_iskeyframe(packet) == 1;
if (isKeyframe) {
granulepos = frame << shift;
keyframe = frame;
- } else if (frame >= keyframe &&
- frame - keyframe < ((ogg_int64_t)1 << shift))
+ } else if (frame >= keyframe
+ && frame - keyframe < ((ogg_int64_t)1 << shift))
{
// (frame - keyframe) won't overflow the "offset" segment of the
// granulepos, so it's safe to calculate the granulepos.
granulepos = (keyframe << shift) + (frame - keyframe);
} else {
// (frame - keyframeno) will overflow the "offset" segment of the
// granulepos, so we take "keyframe" to be the max possible offset
// frame instead.
- ogg_int64_t k = std::max(frame - (((ogg_int64_t)1 << shift) - 1), version_3_2_1);
+ ogg_int64_t k =
+ std::max(frame - (((ogg_int64_t)1 << shift) - 1), version_3_2_1);
granulepos = (k << shift) + (frame - k);
}
// Theora 3.2.1+ granulepos store frame number [1..N], so granulepos
// should be > 0.
// Theora 3.2.0 granulepos store the frame index [0..(N-1)], so
// granulepos should be >= 0.
NS_ASSERTION(granulepos >= version_3_2_1,
"Invalid granulepos for Theora version");
// Check that the frame's granule number is one more than the
// previous frame's.
- NS_ASSERTION(i == 0 ||
- th_granule_frame(mCtx, granulepos) ==
- th_granule_frame(mCtx, mUnstamped[i-1]->granulepos) + 1,
+ NS_ASSERTION(i == 0
+ || th_granule_frame(mCtx, granulepos)
+ == th_granule_frame(mCtx, mUnstamped[i-1]->granulepos)
+ + 1,
"Granulepos calculation is incorrect!");
packet->granulepos = granulepos;
}
// Check that the second to last frame's granule number is one less than
  // the last frame's (the known granule number). If not, our granulepos
// recovery missed a beat.
- NS_ASSERTION(mUnstamped.Length() < 2 ||
- th_granule_frame(mCtx, mUnstamped[mUnstamped.Length()-2]->granulepos) + 1 ==
- th_granule_frame(mCtx, lastGranulepos),
+ NS_ASSERTION(
+ mUnstamped.Length() < 2
+ || th_granule_frame(mCtx, mUnstamped[mUnstamped.Length() - 2]->granulepos)
+ + 1
+ == th_granule_frame(mCtx, lastGranulepos),
"Granulepos recovery should catch up with packet->granulepos!");
}
nsresult
VorbisState::Reset()
{
nsresult res = NS_OK;
if (mActive && vorbis_synthesis_restart(&mDsp) != 0) {
@@ -955,18 +962,19 @@ VorbisState::ReconstructVorbisGranulepos
ogg_packet* first = mUnstamped[0];
long blockSize = vorbis_packet_blocksize(&mVorbisInfo, first);
if (blockSize < 0) {
mPrevVorbisBlockSize = 0;
blockSize = 0;
}
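+  // A Vorbis packet decodes to (previous blocksize + current blocksize) / 4
+  // samples because of the MDCT window overlap, hence the quarter-block sum.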
- long samples = (mPrevVorbisBlockSize == 0) ? 0 :
- mPrevVorbisBlockSize / 4 + blockSize / 4;
+ long samples = (mPrevVorbisBlockSize == 0)
+ ? 0
+ : mPrevVorbisBlockSize / 4 + blockSize / 4;
int64_t start = first->granulepos - samples;
RecordVorbisPacketSamples(first, samples);
if (last->e_o_s && start < mGranulepos) {
// We've calculated that there are more samples in this page than its
// granulepos claims, and it's the last page in the stream. This is legal,
// and we will need to prune the trailing samples when we come to decode it.
// We must correct the timestamps so that they follow the last Vorbis page's
@@ -1109,17 +1117,18 @@ OpusState::DecodeHeader(ogg_packet* aPac
/* Construct and return a tags hashmap from our internal array */
MetadataTags*
OpusState::GetTags()
{
MetadataTags* tags;
tags = new MetadataTags;
for (uint32_t i = 0; i < mParser->mTags.Length(); i++) {
- AddVorbisComment(tags, mParser->mTags[i].Data(), mParser->mTags[i].Length());
+ AddVorbisComment(tags, mParser->mTags[i].Data(),
+ mParser->mTags[i].Length());
}
return tags;
}
/* Return the timestamp (in microseconds) equivalent to a granulepos. */
int64_t
OpusState::Time(int64_t aGranulepos)
@@ -1141,19 +1150,19 @@ OpusState::Time(int aPreSkip, int64_t aG
// Ogg Opus always runs at a granule rate of 48 kHz.
CheckedInt64 t = SaferMultDiv(aGranulepos - aPreSkip, USECS_PER_S, 48000);
return t.isValid() ? t.value() : -1;
}
bool
OpusState::IsHeader(ogg_packet* aPacket)
{
- return aPacket->bytes >= 16 &&
- (!memcmp(aPacket->packet, "OpusHead", 8) ||
- !memcmp(aPacket->packet, "OpusTags", 8));
+ return aPacket->bytes >= 16
+ && (!memcmp(aPacket->packet, "OpusHead", 8)
+ || !memcmp(aPacket->packet, "OpusTags", 8));
}
nsresult
OpusState::PageIn(ogg_page* aPage)
{
if (!mActive) {
return NS_OK;
}
@@ -1427,17 +1436,17 @@ FlacState::ReconstructFlacGranulepos(voi
int64_t gp;
gp = last->granulepos;
// Loop through the packets backwards, subtracting the next
// packet's duration from its granulepos to get the value
// for the current packet.
for (uint32_t i = mUnstamped.Length() - 1; i > 0; i--) {
int offset =
- mParser.BlockDuration(mUnstamped[i]->packet, mUnstamped[i]->bytes);
+ mParser.BlockDuration(mUnstamped[i]->packet, mUnstamped[i]->bytes);
// Check for error (negative offset) and overflow.
if (offset >= 0) {
if (offset <= gp) {
gp -= offset;
} else {
// If the granule position of the first data page is smaller than the
// number of decodable audio samples on that page, then we MUST reject
// the stream.
@@ -1511,51 +1520,51 @@ static const size_t INDEX_KEYPOINT_OFFSE
static const size_t FISBONE_MSG_FIELDS_OFFSET = 8;
static const size_t FISBONE_SERIALNO_OFFSET = 12;
static bool
IsSkeletonBOS(ogg_packet* aPacket)
{
static_assert(SKELETON_MIN_HEADER_LEN >= 8,
"Minimum length of skeleton BOS header incorrect");
- return aPacket->bytes >= SKELETON_MIN_HEADER_LEN &&
- memcmp(reinterpret_cast<char*>(aPacket->packet), "fishead", 8) == 0;
+ return aPacket->bytes >= SKELETON_MIN_HEADER_LEN
+ && memcmp(reinterpret_cast<char*>(aPacket->packet), "fishead", 8) == 0;
}
static bool
IsSkeletonIndex(ogg_packet* aPacket)
{
static_assert(SKELETON_4_0_MIN_INDEX_LEN >= 5,
"Minimum length of skeleton index header incorrect");
- return aPacket->bytes >= SKELETON_4_0_MIN_INDEX_LEN &&
- memcmp(reinterpret_cast<char*>(aPacket->packet), "index", 5) == 0;
+ return aPacket->bytes >= SKELETON_4_0_MIN_INDEX_LEN
+ && memcmp(reinterpret_cast<char*>(aPacket->packet), "index", 5) == 0;
}
static bool
IsSkeletonFisbone(ogg_packet* aPacket)
{
static_assert(SKELETON_MIN_FISBONE_LEN >= 8,
"Minimum length of skeleton fisbone header incorrect");
- return aPacket->bytes >= SKELETON_MIN_FISBONE_LEN &&
- memcmp(reinterpret_cast<char*>(aPacket->packet), "fisbone", 8) == 0;
+ return aPacket->bytes >= SKELETON_MIN_FISBONE_LEN
+ && memcmp(reinterpret_cast<char*>(aPacket->packet), "fisbone", 8) == 0;
}
// Reads a variable length encoded integer at p. Will not read
// past aLimit. Returns pointer to character after end of integer.
static const unsigned char*
ReadVariableLengthInt(const unsigned char* p,
const unsigned char* aLimit,
int64_t& n)
{
int shift = 0;
int64_t byte = 0;
n = 0;
- while (p < aLimit &&
- (byte & 0x80) != 0x80 &&
- shift < 57)
+ while (p < aLimit
+ && (byte & 0x80) != 0x80
+ && shift < 57)
{
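+    // Each byte contributes its low 7 bits, least-significant first; a set
+    // high bit marks the final byte, and shift < 57 bounds the loop so n
+    // stays within a signed 64-bit value.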
byte = static_cast<int64_t>(*p);
n |= ((byte & 0x7f) << shift);
shift += 7;
p++;
}
return p;
}
@@ -1564,23 +1573,26 @@ bool
SkeletonState::DecodeIndex(ogg_packet* aPacket)
{
NS_ASSERTION(aPacket->bytes >= SKELETON_4_0_MIN_INDEX_LEN,
"Index must be at least minimum size");
if (!mActive) {
return false;
}
- uint32_t serialno = LittleEndian::readUint32(aPacket->packet + INDEX_SERIALNO_OFFSET);
- int64_t numKeyPoints = LittleEndian::readInt64(aPacket->packet + INDEX_NUM_KEYPOINTS_OFFSET);
+ uint32_t serialno =
+ LittleEndian::readUint32(aPacket->packet + INDEX_SERIALNO_OFFSET);
+ int64_t numKeyPoints =
+ LittleEndian::readInt64(aPacket->packet + INDEX_NUM_KEYPOINTS_OFFSET);
int64_t endTime = 0, startTime = 0;
const unsigned char* p = aPacket->packet;
- int64_t timeDenom = LittleEndian::readInt64(aPacket->packet + INDEX_TIME_DENOM_OFFSET);
+ int64_t timeDenom =
+ LittleEndian::readInt64(aPacket->packet + INDEX_TIME_DENOM_OFFSET);
if (timeDenom == 0) {
LOG(LogLevel::Debug, ("Ogg Skeleton Index packet for stream %u has 0 "
"timestamp denominator.", serialno));
return (mActive = false);
}
// Extract the start time.
int64_t timeRawInt = LittleEndian::readInt64(p + INDEX_FIRST_NUMER_OFFSET);
@@ -1606,19 +1618,19 @@ SkeletonState::DecodeIndex(ogg_packet* a
(CheckedInt64(numKeyPoints) * MIN_KEY_POINT_SIZE) + INDEX_KEYPOINT_OFFSET;
if (!minPacketSize.isValid())
{
return (mActive = false);
}
int64_t sizeofIndex = aPacket->bytes - INDEX_KEYPOINT_OFFSET;
int64_t maxNumKeyPoints = sizeofIndex / MIN_KEY_POINT_SIZE;
- if (aPacket->bytes < minPacketSize.value() ||
- numKeyPoints > maxNumKeyPoints ||
- numKeyPoints < 0) {
+ if (aPacket->bytes < minPacketSize.value()
+ || numKeyPoints > maxNumKeyPoints
+ || numKeyPoints < 0) {
// Packet size is less than the theoretical minimum size, or the packet is
// claiming to store more keypoints than it's capable of storing. This means
// that the numKeyPoints field is too large or small for the packet to
    // possibly contain as many keypoints as it claims to, so the numKeyPoints
    // field is possibly malicious. Don't try decoding this index; we may run
// out of memory.
LOG(LogLevel::Debug, ("Possibly malicious number of key points reported "
"(%lld) in index packet for stream %u.",
@@ -1633,27 +1645,27 @@ SkeletonState::DecodeIndex(ogg_packet* a
const unsigned char* limit = aPacket->packet + aPacket->bytes;
int64_t numKeyPointsRead = 0;
CheckedInt64 offset = 0;
CheckedInt64 time = 0;
while (p < limit && numKeyPointsRead < numKeyPoints) {
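+    // Keypoints are delta-coded: each entry stores its byte offset and time
+    // as differences from the previous entry, accumulated into the checked
+    // 'offset' and 'time' totals.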
int64_t delta = 0;
p = ReadVariableLengthInt(p, limit, delta);
offset += delta;
- if (p == limit ||
- !offset.isValid() ||
- offset.value() > mLength ||
- offset.value() < 0) {
+ if (p == limit
+ || !offset.isValid()
+ || offset.value() > mLength
+ || offset.value() < 0) {
return (mActive = false);
}
p = ReadVariableLengthInt(p, limit, delta);
time += delta;
- if (!time.isValid() ||
- time.value() > endTime ||
- time.value() < startTime) {
+ if (!time.isValid()
+ || time.value() > endTime
+ || time.value() < startTime) {
return (mActive = false);
}
CheckedInt64 timeUsecs = SaferMultDiv(time.value(), USECS_PER_S, timeDenom);
if (!timeUsecs.isValid()) {
return (mActive = false);
}
keyPoints->Add(offset.value(), timeUsecs.value());
numKeyPointsRead++;
@@ -1672,18 +1684,20 @@ SkeletonState::DecodeIndex(ogg_packet* a
nsresult
SkeletonState::IndexedSeekTargetForTrack(uint32_t aSerialno,
int64_t aTarget,
nsKeyPoint& aResult)
{
nsKeyFrameIndex* index = nullptr;
mIndex.Get(aSerialno, &index);
- if (!index || index->Length() == 0 ||
- aTarget < index->mStartTime || aTarget > index->mEndTime) {
+ if (!index
+ || index->Length() == 0
+ || aTarget < index->mStartTime
+ || aTarget > index->mEndTime) {
return NS_ERROR_FAILURE;
}
// Binary search to find the last key point with time less than target.
int start = 0;
int end = index->Length() - 1;
while (end > start) {
int mid = start + ((end - start + 1) >> 1);
@@ -1712,18 +1726,18 @@ SkeletonState::IndexedSeekTarget(int64_t
}
// Loop over all requested tracks' indexes, and get the keypoint for that
// seek target. Record the keypoint with the lowest offset, this will be
// our seek result. User must seek to the one with lowest offset to ensure we
// pass "keyframes" on all tracks when we decode forwards to the seek target.
nsSeekTarget r;
for (uint32_t i=0; i<aTracks.Length(); i++) {
nsKeyPoint k;
- if (NS_SUCCEEDED(IndexedSeekTargetForTrack(aTracks[i], aTarget, k)) &&
- k.mOffset < r.mKeyPoint.mOffset) {
+ if (NS_SUCCEEDED(IndexedSeekTargetForTrack(aTracks[i], aTarget, k))
+ && k.mOffset < r.mKeyPoint.mOffset) {
r.mKeyPoint = k;
r.mSerial = aTracks[i];
}
}
if (r.IsNull()) {
return NS_ERROR_FAILURE;
}
LOG(LogLevel::Debug, ("Indexed seek target for time %lld is offset %lld",
@@ -1731,20 +1745,20 @@ SkeletonState::IndexedSeekTarget(int64_t
aResult = r;
return NS_OK;
}
nsresult
SkeletonState::GetDuration(const nsTArray<uint32_t>& aTracks,
int64_t& aDuration)
{
- if (!mActive ||
- mVersion < SKELETON_VERSION(4,0) ||
- !HasIndex() ||
- aTracks.Length() == 0) {
+ if (!mActive
+ || mVersion < SKELETON_VERSION(4,0)
+ || !HasIndex()
+ || aTracks.Length() == 0) {
return NS_ERROR_FAILURE;
}
int64_t endTime = INT64_MIN;
int64_t startTime = INT64_MAX;
for (uint32_t i=0; i<aTracks.Length(); i++) {
nsKeyFrameIndex* index = nullptr;
mIndex.Get(aTracks[i], &index);
if (!index) {
@@ -1816,20 +1830,21 @@ SkeletonState::DecodeFisbone(ogg_packet*
if (i != 0 && !isContentTypeParsed) {
return false;
}
if ((i == 0 && IsASCII(strMsg)) || (i != 0 && IsUTF8(strMsg))) {
EMsgHeaderType eHeaderType = kFieldTypeMaps[i].mMsgHeaderType;
if (!field->mValuesStore.Contains(eHeaderType)) {
uint32_t nameLen = strlen(kFieldTypeMaps[i].mPatternToRecognize);
- field->mValuesStore.Put(eHeaderType, new nsCString(msgHead+nameLen,
- msgProbe-msgHead-nameLen));
+ field->mValuesStore.Put(
+ eHeaderType,
+ new nsCString(msgHead + nameLen, msgProbe - msgHead - nameLen));
}
- isContentTypeParsed = i==0 ? true : isContentTypeParsed;
+ isContentTypeParsed = i == 0 ? true : isContentTypeParsed;
}
break;
}
}
msgProbe += 2;
msgLength -= 2;
msgHead = msgProbe;
continue;
@@ -1854,28 +1869,29 @@ SkeletonState::DecodeHeader(ogg_packet*
if (IsSkeletonBOS(aPacket)) {
uint16_t verMajor =
LittleEndian::readUint16(aPacket->packet + SKELETON_VERSION_MAJOR_OFFSET);
uint16_t verMinor =
LittleEndian::readUint16(aPacket->packet + SKELETON_VERSION_MINOR_OFFSET);
// Read the presentation time. We read this before the version check as the
// presentation time exists in all versions.
- int64_t n =
- LittleEndian::readInt64(aPacket->packet + SKELETON_PRESENTATION_TIME_NUMERATOR_OFFSET);
- int64_t d =
- LittleEndian::readInt64(aPacket->packet + SKELETON_PRESENTATION_TIME_DENOMINATOR_OFFSET);
+ int64_t n = LittleEndian::readInt64(
+ aPacket->packet + SKELETON_PRESENTATION_TIME_NUMERATOR_OFFSET);
+ int64_t d = LittleEndian::readInt64(
+ aPacket->packet + SKELETON_PRESENTATION_TIME_DENOMINATOR_OFFSET);
mPresentationTime =
- d == 0 ? 0 : (static_cast<float>(n) / static_cast<float>(d)) * USECS_PER_S;
+ d == 0 ? 0
+ : (static_cast<float>(n) / static_cast<float>(d)) * USECS_PER_S;
mVersion = SKELETON_VERSION(verMajor, verMinor);
    // We only care to parse Skeleton version 4.0+.
- if (mVersion < SKELETON_VERSION(4,0) ||
- mVersion >= SKELETON_VERSION(5,0) ||
- aPacket->bytes < SKELETON_4_0_MIN_HEADER_LEN) {
+ if (mVersion < SKELETON_VERSION(4,0)
+ || mVersion >= SKELETON_VERSION(5,0)
+ || aPacket->bytes < SKELETON_4_0_MIN_HEADER_LEN) {
return false;
}
// Extract the segment length.
mLength =
LittleEndian::readInt64(aPacket->packet + SKELETON_FILE_LENGTH_OFFSET);
LOG(LogLevel::Debug, ("Skeleton segment length: %lld", mLength));
--- a/dom/media/ogg/OggCodecState.h
+++ b/dom/media/ogg/OggCodecState.h
@@ -3,23 +3,23 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#if !defined(OggCodecState_h_)
#define OggCodecState_h_
#include <ogg/ogg.h>
// For MOZ_SAMPLE_TYPE_*
+#include "FlacFrameParser.h"
+#include "VideoUtils.h"
#include <nsAutoPtr.h>
#include <nsAutoRef.h>
#include <nsDeque.h>
#include <nsTArray.h>
#include <nsClassHashtable.h>
-#include "VideoUtils.h"
-#include "FlacFrameParser.h"
#include <theora/theoradec.h>
#ifdef MOZ_TREMOR
#include <tremor/ivorbiscodec.h>
#else
#include <vorbis/codec.h>
#endif
@@ -56,22 +56,28 @@ class OggPacketDeallocator : public nsDe
// new 4KB page to the bitstream, which kills performance on Windows. This
// also gives us the option to timestamp packets rather than decoded
// frames/samples, reducing the amount of frames/samples we must decode to
// determine start-time at a particular offset, and gives us finer control
// over memory usage.
class OggPacketQueue : private nsDeque
{
public:
- OggPacketQueue() : nsDeque(new OggPacketDeallocator()) {}
+ OggPacketQueue() : nsDeque(new OggPacketDeallocator()) { }
~OggPacketQueue() { Erase(); }
bool IsEmpty() { return nsDeque::GetSize() == 0; }
void Append(ogg_packet* aPacket);
- ogg_packet* PopFront() { return static_cast<ogg_packet*>(nsDeque::PopFront()); }
- ogg_packet* PeekFront() { return static_cast<ogg_packet*>(nsDeque::PeekFront()); }
+ ogg_packet* PopFront()
+ {
+ return static_cast<ogg_packet*>(nsDeque::PopFront());
+ }
+ ogg_packet* PeekFront()
+ {
+ return static_cast<ogg_packet*>(nsDeque::PeekFront());
+ }
ogg_packet* Pop() { return static_cast<ogg_packet*>(nsDeque::Pop()); }
ogg_packet* operator[](size_t aIndex) const
{
return static_cast<ogg_packet*>(nsDeque::ObjectAt(aIndex));
}
size_t Length() const { return nsDeque::GetSize(); }
void PushFront(ogg_packet* aPacket) { nsDeque::PushFront(aPacket); }
void Erase() { nsDeque::Erase(); }
@@ -268,17 +274,18 @@ protected:
// can be pushed over to mPackets. Used by PageIn() implementations in
// subclasses.
nsresult PacketOutUntilGranulepos(bool& aFoundGranulepos);
// Temporary buffer in which to store packets while we're reading packets
// in order to capture granulepos.
nsTArray<ogg_packet*> mUnstamped;
- bool SetCodecSpecificConfig(MediaByteBuffer* aBuffer, OggPacketQueue& aHeaders);
+ bool SetCodecSpecificConfig(MediaByteBuffer* aBuffer,
+ OggPacketQueue& aHeaders);
private:
bool InternalInit();
};
class VorbisState : public OggCodecState
{
public:
@@ -392,17 +399,16 @@ private:
OggPacketQueue mHeaders;
// Reconstructs the granulepos of Theora packets stored in the
// mUnstamped array. mUnstamped must be filled with consecutive packets from
// the stream, with the last packet having a known granulepos. Using this
// known granulepos, and the known frame numbers, we recover the granulepos
// of all frames in the array. This enables us to determine their timestamps.
void ReconstructTheoraGranulepos();
-
};
class OpusState : public OggCodecState
{
public:
explicit OpusState(ogg_page* aBosPage);
virtual ~OpusState();
@@ -446,17 +452,18 @@ private:
AudioInfo mInfo;
OggPacketQueue mHeaders;
};
// Constructs a 32bit version number out of two 16 bit major,minor
// version numbers.
#define SKELETON_VERSION(major, minor) (((major)<<16)|(minor))
-enum EMsgHeaderType {
+enum EMsgHeaderType
+{
eContentType,
eRole,
eName,
eLanguage,
eTitle,
eDisplayHint,
eAltitude,
eTrackOrder,
@@ -517,17 +524,17 @@ public:
}
};
// Stores a keyframe's byte-offset, presentation time and the serialno
// of the stream it belongs to.
class nsSeekTarget
{
public:
- nsSeekTarget() : mSerial(0) {}
+ nsSeekTarget() : mSerial(0) { }
nsKeyPoint mKeyPoint;
uint32_t mSerial;
bool IsNull()
{
return mKeyPoint.IsNull() && mSerial == 0;
}
};
--- a/dom/media/webm/WebMDemuxer.cpp
+++ b/dom/media/webm/WebMDemuxer.cpp
@@ -71,25 +71,27 @@ static int webmdemux_read(void* aBuffer,
context->GetResource()->Read(static_cast<char*>(aBuffer), count, &bytes);
bool eof = bytes < aLength;
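+  // nestegg's read callback contract: return 1 on success, 0 at end of
+  // stream, and -1 on error.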
return NS_FAILED(rv) ? -1 : eof ? 0 : 1;
}
static int webmdemux_seek(int64_t aOffset, int aWhence, void* aUserData)
{
MOZ_ASSERT(aUserData);
- WebMDemuxer::NestEggContext* context = reinterpret_cast<WebMDemuxer::NestEggContext*>(aUserData);
+ WebMDemuxer::NestEggContext* context =
+ reinterpret_cast<WebMDemuxer::NestEggContext*>(aUserData);
nsresult rv = context->GetResource()->Seek(aWhence, aOffset);
return NS_SUCCEEDED(rv) ? 0 : -1;
}
static int64_t webmdemux_tell(void* aUserData)
{
MOZ_ASSERT(aUserData);
- WebMDemuxer::NestEggContext* context = reinterpret_cast<WebMDemuxer::NestEggContext*>(aUserData);
+ WebMDemuxer::NestEggContext* context =
+ reinterpret_cast<WebMDemuxer::NestEggContext*>(aUserData);
return context->GetResource()->Tell();
}
static void webmdemux_log(nestegg* aContext,
unsigned int aSeverity,
char const* aFormat, ...)
{
if (!MOZ_LOG_TEST(gNesteggLog, LogLevel::Debug)) {
@@ -183,22 +185,24 @@ WebMDemuxer::~WebMDemuxer()
}
RefPtr<WebMDemuxer::InitPromise>
WebMDemuxer::Init()
{
InitBufferedState();
if (NS_FAILED(ReadMetadata())) {
- return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_METADATA_ERR, __func__);
+ return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_METADATA_ERR,
+ __func__);
}
- if (!GetNumberTracks(TrackInfo::kAudioTrack) &&
- !GetNumberTracks(TrackInfo::kVideoTrack)) {
- return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_METADATA_ERR, __func__);
+ if (!GetNumberTracks(TrackInfo::kAudioTrack)
+ && !GetNumberTracks(TrackInfo::kVideoTrack)) {
+ return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_METADATA_ERR,
+ __func__);
}
return InitPromise::CreateAndResolve(NS_OK, __func__);
}
void
WebMDemuxer::InitBufferedState()
{
@@ -286,18 +290,18 @@ WebMDemuxer::ReadMetadata()
}
mBufferedState->NotifyDataArrived(buffer->Elements(), buffer->Length(), 0);
if (mBufferedState->GetInitEndOffset() < 0) {
return NS_ERROR_FAILURE;
}
MOZ_ASSERT(mBufferedState->GetInitEndOffset() <= resource.Tell());
}
mInitData = resource.MediaReadAt(0, mBufferedState->GetInitEndOffset());
- if (!mInitData ||
- mInitData->Length() != size_t(mBufferedState->GetInitEndOffset())) {
+ if (!mInitData
+ || mInitData->Length() != size_t(mBufferedState->GetInitEndOffset())) {
return NS_ERROR_FAILURE;
}
unsigned int ntracks = 0;
r = nestegg_track_count(context, &ntracks);
if (r == -1) {
return NS_ERROR_FAILURE;
}
@@ -331,20 +335,20 @@ WebMDemuxer::ReadMetadata()
unsigned int cropH = params.crop_right + params.crop_left;
unsigned int cropV = params.crop_bottom + params.crop_top;
nsIntRect pictureRect(params.crop_left,
params.crop_top,
params.width - cropH,
params.height - cropV);
// If the cropping data appears invalid then use the frame data
- if (pictureRect.width <= 0 ||
- pictureRect.height <= 0 ||
- pictureRect.x < 0 ||
- pictureRect.y < 0) {
+ if (pictureRect.width <= 0
+ || pictureRect.height <= 0
+ || pictureRect.x < 0
+ || pictureRect.y < 0) {
pictureRect.x = 0;
pictureRect.y = 0;
pictureRect.width = params.width;
pictureRect.height = params.height;
}
// Validate the container-reported frame and pictureRect sizes. This
// ensures that our video frame creation code doesn't overflow.
@@ -378,38 +382,42 @@ WebMDemuxer::ReadMetadata()
break;
case NESTEGG_VIDEO_STEREO_RIGHT_LEFT:
mInfo.mVideo.mStereoMode = StereoMode::RIGHT_LEFT;
break;
}
uint64_t duration = 0;
r = nestegg_duration(context, &duration);
if (!r) {
- mInfo.mVideo.mDuration = media::TimeUnit::FromNanoseconds(duration).ToMicroseconds();
+ mInfo.mVideo.mDuration =
+ media::TimeUnit::FromNanoseconds(duration).ToMicroseconds();
}
mInfo.mVideo.mCrypto = GetTrackCrypto(TrackInfo::kVideoTrack, track);
if (mInfo.mVideo.mCrypto.mValid) {
- mCrypto.AddInitData(NS_LITERAL_STRING("webm"), mInfo.mVideo.mCrypto.mKeyId);
+ mCrypto.AddInitData(NS_LITERAL_STRING("webm"),
+ mInfo.mVideo.mCrypto.mKeyId);
}
} else if (type == NESTEGG_TRACK_AUDIO && !mHasAudio) {
nestegg_audio_params params;
      r = nestegg_track_audio_params(context, track, &params);
if (r == -1) {
return NS_ERROR_FAILURE;
}
mAudioTrack = track;
mHasAudio = true;
mAudioCodec = nestegg_track_codec_id(context, track);
if (mAudioCodec == NESTEGG_CODEC_VORBIS) {
mInfo.mAudio.mMimeType = "audio/vorbis";
} else if (mAudioCodec == NESTEGG_CODEC_OPUS) {
mInfo.mAudio.mMimeType = "audio/opus";
- OpusDataDecoder::AppendCodecDelay(mInfo.mAudio.mCodecSpecificConfig,
- media::TimeUnit::FromNanoseconds(params.codec_delay).ToMicroseconds());
+ OpusDataDecoder::AppendCodecDelay(
+ mInfo.mAudio.mCodecSpecificConfig,
+ media::TimeUnit::FromNanoseconds(params.codec_delay)
+ .ToMicroseconds());
}
mSeekPreroll = params.seek_preroll;
mInfo.mAudio.mRate = params.rate;
mInfo.mAudio.mChannels = params.channels;
unsigned int nheaders = 0;
r = nestegg_track_codec_data_count(context, track, &nheaders);
if (r == -1) {
@@ -442,39 +450,41 @@ WebMDemuxer::ReadMetadata()
-      }
-      else {
+      } else {
mInfo.mAudio.mCodecSpecificConfig->AppendElements(headers[0],
headerLens[0]);
}
uint64_t duration = 0;
r = nestegg_duration(context, &duration);
if (!r) {
- mInfo.mAudio.mDuration = media::TimeUnit::FromNanoseconds(duration).ToMicroseconds();
+ mInfo.mAudio.mDuration =
+ media::TimeUnit::FromNanoseconds(duration).ToMicroseconds();
}
mInfo.mAudio.mCrypto = GetTrackCrypto(TrackInfo::kAudioTrack, track);
if (mInfo.mAudio.mCrypto.mValid) {
- mCrypto.AddInitData(NS_LITERAL_STRING("webm"), mInfo.mAudio.mCrypto.mKeyId);
+ mCrypto.AddInitData(NS_LITERAL_STRING("webm"),
+ mInfo.mAudio.mCrypto.mKeyId);
}
}
}
return NS_OK;
}
bool
WebMDemuxer::IsSeekable() const
{
- return Context(TrackInfo::kVideoTrack) &&
- nestegg_has_cues(Context(TrackInfo::kVideoTrack));
+ return Context(TrackInfo::kVideoTrack)
+ && nestegg_has_cues(Context(TrackInfo::kVideoTrack));
}
bool
WebMDemuxer::IsSeekableOnlyInBufferedRanges() const
{
- return Context(TrackInfo::kVideoTrack) &&
- !nestegg_has_cues(Context(TrackInfo::kVideoTrack));
+ return Context(TrackInfo::kVideoTrack)
+ && !nestegg_has_cues(Context(TrackInfo::kVideoTrack));
}
void
WebMDemuxer::EnsureUpToDateIndex()
{
if (!mNeedReIndex || !mInitData) {
return;
}
@@ -503,36 +513,39 @@ WebMDemuxer::NotifyDataArrived()
mNeedReIndex = true;
}
void
WebMDemuxer::NotifyDataRemoved()
{
mBufferedState->Reset();
if (mInitData) {
- mBufferedState->NotifyDataArrived(mInitData->Elements(), mInitData->Length(), 0);
+ mBufferedState->NotifyDataArrived(mInitData->Elements(),
+ mInitData->Length(), 0);
}
mNeedReIndex = true;
}
UniquePtr<EncryptionInfo>
WebMDemuxer::GetCrypto()
{
return mCrypto.IsEncrypted() ? MakeUnique<EncryptionInfo>(mCrypto) : nullptr;
}
CryptoTrack
-WebMDemuxer::GetTrackCrypto(TrackInfo::TrackType aType, size_t aTrackNumber) {
+WebMDemuxer::GetTrackCrypto(TrackInfo::TrackType aType, size_t aTrackNumber)
+{
const int WEBM_IV_SIZE = 16;
const unsigned char * contentEncKeyId;
size_t contentEncKeyIdLength;
CryptoTrack crypto;
nestegg* context = Context(aType);
- int r = nestegg_track_content_enc_key_id(context, aTrackNumber, &contentEncKeyId, &contentEncKeyIdLength);
+ int r = nestegg_track_content_enc_key_id(
+ context, aTrackNumber, &contentEncKeyId, &contentEncKeyIdLength);
if (r == -1) {
WEBM_DEBUG("nestegg_track_content_enc_key_id failed r=%d", r);
return crypto;
}
uint32_t i;
nsTArray<uint8_t> initData;
@@ -546,17 +559,18 @@ WebMDemuxer::GetTrackCrypto(TrackInfo::T
crypto.mIVSize = WEBM_IV_SIZE;
crypto.mKeyId = Move(initData);
}
return crypto;
}
bool
-WebMDemuxer::GetNextPacket(TrackInfo::TrackType aType, MediaRawDataQueue *aSamples)
+WebMDemuxer::GetNextPacket(TrackInfo::TrackType aType,
+                           MediaRawDataQueue* aSamples)
{
if (mIsMediaSource) {
// To ensure mLastWebMBlockOffset is properly up to date.
EnsureUpToDateIndex();
}
RefPtr<NesteggPacketHolder> holder(NextPacket(aType));
@@ -580,33 +594,33 @@ WebMDemuxer::GetNextPacket(TrackInfo::Tr
int64_t next_tstamp = INT64_MIN;
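+  // WebM blocks need not carry an explicit duration, so bound this packet by
+  // peeking at the next packet's timestamp, falling back to the block
+  // duration or to extrapolation from the previous frame time.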
if (aType == TrackInfo::kAudioTrack) {
RefPtr<NesteggPacketHolder> next_holder(NextPacket(aType));
if (next_holder) {
next_tstamp = next_holder->Timestamp();
PushAudioPacket(next_holder);
} else if (duration >= 0) {
next_tstamp = tstamp + duration;
- } else if (!mIsMediaSource ||
- (mIsMediaSource && mLastAudioFrameTime.isSome())) {
+ } else if (!mIsMediaSource
+ || (mIsMediaSource && mLastAudioFrameTime.isSome())) {
next_tstamp = tstamp;
next_tstamp += tstamp - mLastAudioFrameTime.refOr(0);
} else {
PushAudioPacket(holder);
}
mLastAudioFrameTime = Some(tstamp);
} else if (aType == TrackInfo::kVideoTrack) {
RefPtr<NesteggPacketHolder> next_holder(NextPacket(aType));
if (next_holder) {
next_tstamp = next_holder->Timestamp();
PushVideoPacket(next_holder);
} else if (duration >= 0) {
next_tstamp = tstamp + duration;
- } else if (!mIsMediaSource ||
- (mIsMediaSource && mLastVideoFrameTime.isSome())) {
+ } else if (!mIsMediaSource
+ || (mIsMediaSource && mLastVideoFrameTime.isSome())) {
next_tstamp = tstamp;
next_tstamp += tstamp - mLastVideoFrameTime.refOr(0);
} else {
PushVideoPacket(holder);
}
mLastVideoFrameTime = Some(tstamp);
}
@@ -645,38 +659,41 @@ WebMDemuxer::GetNextPacket(TrackInfo::Tr
}
}
bool isKeyframe = false;
if (aType == TrackInfo::kAudioTrack) {
isKeyframe = true;
} else if (aType == TrackInfo::kVideoTrack) {
if (packetEncryption == NESTEGG_PACKET_HAS_SIGNAL_BYTE_ENCRYPTED) {
// Packet is encrypted, can't peek, use packet info
- isKeyframe = nestegg_packet_has_keyframe(holder->Packet()) == NESTEGG_PACKET_HAS_KEYFRAME_TRUE;
+ isKeyframe = nestegg_packet_has_keyframe(holder->Packet())
+ == NESTEGG_PACKET_HAS_KEYFRAME_TRUE;
} else {
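+      // Unencrypted frame: let libvpx peek at the stream header to learn the
+      // keyframe flag and the frame's intrinsic size.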
vpx_codec_stream_info_t si;
PodZero(&si);
si.sz = sizeof(si);
switch (mVideoCodec) {
case NESTEGG_CODEC_VP8:
vpx_codec_peek_stream_info(vpx_codec_vp8_dx(), data, length, &si);
break;
case NESTEGG_CODEC_VP9:
vpx_codec_peek_stream_info(vpx_codec_vp9_dx(), data, length, &si);
break;
}
isKeyframe = si.is_kf;
if (isKeyframe) {
// We only look for resolution changes on keyframes for both VP8 and
// VP9. Other resolution changes are invalid.
- if (mLastSeenFrameWidth.isSome() && mLastSeenFrameHeight.isSome() &&
- (si.w != mLastSeenFrameWidth.value() ||
- si.h != mLastSeenFrameHeight.value())) {
+ if (mLastSeenFrameWidth.isSome()
+ && mLastSeenFrameHeight.isSome()
+ && (si.w != mLastSeenFrameWidth.value()
+ || si.h != mLastSeenFrameHeight.value())) {
mInfo.mVideo.mDisplay = nsIntSize(si.w, si.h);
- mSharedVideoTrackInfo = new SharedTrackInfo(mInfo.mVideo, ++sStreamSourceID);
+ mSharedVideoTrackInfo =
+ new SharedTrackInfo(mInfo.mVideo, ++sStreamSourceID);
}
mLastSeenFrameWidth = Some(si.w);
mLastSeenFrameHeight = Some(si.h);
}
}
}
WEBM_DEBUG("push sample tstamp: %ld next_tstamp: %ld length: %ld kf: %d",
@@ -712,18 +729,18 @@ WebMDemuxer::GetNextPacket(TrackInfo::Tr
discardFrames = TimeUnitToFrames(
media::TimeUnit::FromNanoseconds(discardPadding), mInfo.mAudio.mRate);
}
if (discardFrames.isValid()) {
sample->mDiscardPadding = discardFrames.value();
}
}
- if (packetEncryption == NESTEGG_PACKET_HAS_SIGNAL_BYTE_UNENCRYPTED ||
- packetEncryption == NESTEGG_PACKET_HAS_SIGNAL_BYTE_ENCRYPTED) {
+ if (packetEncryption == NESTEGG_PACKET_HAS_SIGNAL_BYTE_UNENCRYPTED
+ || packetEncryption == NESTEGG_PACKET_HAS_SIGNAL_BYTE_ENCRYPTED) {
nsAutoPtr<MediaRawDataWriter> writer(sample->CreateWriter());
unsigned char const* iv;
size_t ivLength;
nestegg_packet_iv(holder->Packet(), &iv, &ivLength);
writer->mCrypto.mValid = true;
writer->mCrypto.mIVSize = ivLength;
if (ivLength == 0) {
// Frame is not encrypted
@@ -1007,17 +1024,18 @@ WebMTrackDemuxer::GetSamples(int32_t aNu
continue;
}
mNeedKeyframe = false;
samples->mSamples.AppendElement(sample);
aNumSamples--;
}
if (samples->mSamples.IsEmpty()) {
- return SamplesPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_END_OF_STREAM, __func__);
+ return SamplesPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_END_OF_STREAM,
+ __func__);
} else {
UpdateSamples(samples->mSamples);
return SamplesPromise::CreateAndResolve(samples, __func__);
}
}
void
WebMTrackDemuxer::SetNextKeyFrameTime()
@@ -1051,32 +1069,34 @@ WebMTrackDemuxer::SetNextKeyFrameTime()
if (sample->mKeyframe) {
frameTime = sample->mTime;
foundKeyframe = true;
}
int64_t sampleTimecode = sample->mTimecode;
skipSamplesQueue.Push(sample.forget());
if (!startTime) {
startTime.emplace(sampleTimecode);
- } else if (!foundKeyframe &&
- sampleTimecode > startTime.ref() + MAX_LOOK_AHEAD) {
+ } else if (!foundKeyframe
+ && sampleTimecode > startTime.ref() + MAX_LOOK_AHEAD) {
WEBM_DEBUG("Couldn't find keyframe in a reasonable time, aborting");
break;
}
}
// We may have demuxed more than intended, so ensure that all frames are kept
// in the right order.
mSamples.PushFront(Move(skipSamplesQueue));
if (frameTime != -1) {
mNextKeyframeTime.emplace(media::TimeUnit::FromMicroseconds(frameTime));
WEBM_DEBUG("Next Keyframe %f (%u queued %.02fs)",
mNextKeyframeTime.value().ToSeconds(),
uint32_t(mSamples.GetSize()),
- media::TimeUnit::FromMicroseconds(mSamples.Last()->mTimecode - mSamples.First()->mTimecode).ToSeconds());
+ media::TimeUnit::FromMicroseconds(mSamples.Last()->mTimecode
+ - mSamples.First()->mTimecode)
+ .ToSeconds());
} else {
WEBM_DEBUG("Couldn't determine next keyframe time (%u queued)",
uint32_t(mSamples.GetSize()));
}
}
void
WebMTrackDemuxer::Reset()
@@ -1099,18 +1119,19 @@ WebMTrackDemuxer::UpdateSamples(nsTArray
for (const auto& sample : aSamples) {
if (sample->mCrypto.mValid) {
nsAutoPtr<MediaRawDataWriter> writer(sample->CreateWriter());
writer->mCrypto.mMode = mInfo->mCrypto.mMode;
writer->mCrypto.mIVSize = mInfo->mCrypto.mIVSize;
writer->mCrypto.mKeyId.AppendElements(mInfo->mCrypto.mKeyId);
}
}
- if (mNextKeyframeTime.isNothing() ||
- aSamples.LastElement()->mTime >= mNextKeyframeTime.value().ToMicroseconds()) {
+ if (mNextKeyframeTime.isNothing()
+ || aSamples.LastElement()->mTime
+ >= mNextKeyframeTime.value().ToMicroseconds()) {
SetNextKeyFrameTime();
}
}
nsresult
WebMTrackDemuxer::GetNextRandomAccessPoint(media::TimeUnit* aTime)
{
if (mNextKeyframeTime.isNothing()) {
@@ -1119,17 +1140,18 @@ WebMTrackDemuxer::GetNextRandomAccessPoi
media::TimeUnit::FromMicroseconds(std::numeric_limits<int64_t>::max());
} else {
*aTime = mNextKeyframeTime.ref();
}
return NS_OK;
}
RefPtr<WebMTrackDemuxer::SkipAccessPointPromise>
-WebMTrackDemuxer::SkipToNextRandomAccessPoint(const media::TimeUnit& aTimeThreshold)
+WebMTrackDemuxer::SkipToNextRandomAccessPoint(
+ const media::TimeUnit& aTimeThreshold)
{
uint32_t parsed = 0;
bool found = false;
RefPtr<MediaRawData> sample;
int64_t sampleTime;
WEBM_DEBUG("TimeThreshold: %f", aTimeThreshold.ToSeconds());
while (!found && (sample = NextSample())) {
--- a/dom/media/webm/WebMDemuxer.h
+++ b/dom/media/webm/WebMDemuxer.h
@@ -13,99 +13,112 @@
typedef struct nestegg nestegg;
namespace mozilla {
class WebMBufferedState;
// Queue for holding MediaRawData samples
-class MediaRawDataQueue {
+class MediaRawDataQueue
+{
public:
- uint32_t GetSize() {
+ uint32_t GetSize()
+ {
return mQueue.size();
}
- void Push(MediaRawData* aItem) {
+ void Push(MediaRawData* aItem)
+ {
mQueue.push_back(aItem);
}
- void Push(already_AddRefed<MediaRawData>&& aItem) {
+ void Push(already_AddRefed<MediaRawData>&& aItem)
+ {
mQueue.push_back(Move(aItem));
}
-  void PushFront(MediaRawData* aItem) {
+  void PushFront(MediaRawData* aItem)
+  {
mQueue.push_front(aItem);
}
- void PushFront(already_AddRefed<MediaRawData>&& aItem) {
+ void PushFront(already_AddRefed<MediaRawData>&& aItem)
+ {
mQueue.push_front(Move(aItem));
}
- void PushFront(MediaRawDataQueue&& aOther) {
+ void PushFront(MediaRawDataQueue&& aOther)
+ {
while (!aOther.mQueue.empty()) {
PushFront(aOther.Pop());
}
}
- already_AddRefed<MediaRawData> PopFront() {
+ already_AddRefed<MediaRawData> PopFront()
+ {
RefPtr<MediaRawData> result = mQueue.front().forget();
mQueue.pop_front();
return result.forget();
}
- already_AddRefed<MediaRawData> Pop() {
+ already_AddRefed<MediaRawData> Pop()
+ {
RefPtr<MediaRawData> result = mQueue.back().forget();
mQueue.pop_back();
return result.forget();
}
- void Reset() {
+ void Reset()
+ {
while (!mQueue.empty()) {
mQueue.pop_front();
}
}
- MediaRawDataQueue& operator=(const MediaRawDataQueue& aOther) {
+ MediaRawDataQueue& operator=(const MediaRawDataQueue& aOther)
+ {
mQueue = aOther.mQueue;
return *this;
}
- const RefPtr<MediaRawData>& First() const {
+ const RefPtr<MediaRawData>& First() const
+ {
return mQueue.front();
}
- const RefPtr<MediaRawData>& Last() const {
+ const RefPtr<MediaRawData>& Last() const
+ {
return mQueue.back();
}
private:
std::deque<RefPtr<MediaRawData>> mQueue;
};
class WebMTrackDemuxer;
class WebMDemuxer : public MediaDataDemuxer
{
public:
explicit WebMDemuxer(MediaResource* aResource);
  // Indicates whether the WebMDemuxer is to be used with MediaSource, in
  // which case the demuxer will stop reads at the last known complete block.
WebMDemuxer(MediaResource* aResource, bool aIsMediaSource);
-  
+
RefPtr<InitPromise> Init() override;
bool HasTrackType(TrackInfo::TrackType aType) const override;
uint32_t GetNumberTracks(TrackInfo::TrackType aType) const override;
- UniquePtr<TrackInfo> GetTrackInfo(TrackInfo::TrackType aType, size_t aTrackNumber) const;
+ UniquePtr<TrackInfo> GetTrackInfo(TrackInfo::TrackType aType,
+ size_t aTrackNumber) const;
- already_AddRefed<MediaTrackDemuxer> GetTrackDemuxer(TrackInfo::TrackType aType,
- uint32_t aTrackNumber) override;
+ already_AddRefed<MediaTrackDemuxer>
+ GetTrackDemuxer(TrackInfo::TrackType aType, uint32_t aTrackNumber) override;
bool IsSeekable() const override;
bool IsSeekableOnlyInBufferedRanges() const override;
UniquePtr<EncryptionInfo> GetCrypto() override;
bool GetOffsetForTime(uint64_t aTime, int64_t* aOffset);
@@ -127,35 +140,39 @@ public:
return mIsMediaSource;
}
int64_t LastWebMBlockOffset() const
{
return mLastWebMBlockOffset;
}
- struct NestEggContext {
+ struct NestEggContext
+ {
NestEggContext(WebMDemuxer* aParent, MediaResource* aResource)
- : mParent(aParent)
- , mResource(aResource)
- , mContext(nullptr) {}
+ : mParent(aParent)
+ , mResource(aResource)
+ , mContext(nullptr)
+ {
+ }
~NestEggContext();
int Init();
// Public accessor for nestegg callbacks
bool IsMediaSource() const { return mParent->IsMediaSource(); }
MediaResourceIndex* GetResource() { return &mResource; }
int64_t GetEndDataOffset() const
{
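+      // For MediaSource the readable region ends at the last complete block;
+      // otherwise the whole resource length is available.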
return (!mParent->IsMediaSource() || mParent->LastWebMBlockOffset() < 0)
- ? mResource.GetLength() : mParent->LastWebMBlockOffset();
+ ? mResource.GetLength()
+ : mParent->LastWebMBlockOffset();
}
WebMDemuxer* mParent;
MediaResourceIndex mResource;
nestegg* mContext;
};
private:
@@ -258,17 +275,18 @@ public:
RefPtr<SeekPromise> Seek(const media::TimeUnit& aTime) override;
RefPtr<SamplesPromise> GetSamples(int32_t aNumSamples = 1) override;
void Reset() override;
nsresult GetNextRandomAccessPoint(media::TimeUnit* aTime) override;
- RefPtr<SkipAccessPointPromise> SkipToNextRandomAccessPoint(const media::TimeUnit& aTimeThreshold) override;
+ RefPtr<SkipAccessPointPromise> SkipToNextRandomAccessPoint(
+ const media::TimeUnit& aTimeThreshold) override;
media::TimeIntervals GetBuffered() override;
int64_t GetEvictionOffset(const media::TimeUnit& aTime) override;
void BreakCycles() override;
private: