--- a/dom/media/ogg/OggDemuxer.cpp
+++ b/dom/media/ogg/OggDemuxer.cpp
@@ -121,33 +121,34 @@ OggDemuxer::InitTrack(MessageField* aMsg
}
OggDemuxer::OggDemuxer(MediaResource* aResource)
: mTheoraState(nullptr)
, mVorbisState(nullptr)
, mOpusState(nullptr)
, mOpusEnabled(MediaDecoder::IsOpusEnabled())
, mSkeletonState(nullptr)
+ , mAudioOggState(aResource)
+ , mVideoOggState(aResource)
, mVorbisSerial(0)
, mOpusSerial(0)
, mTheoraSerial(0)
, mOpusPreSkip(0)
, mIsChained(false)
, mDecodedAudioFrames(0)
- , mResource(aResource)
{
MOZ_COUNT_CTOR(OggDemuxer);
PodZero(&mTheoraInfo);
}
OggDemuxer::~OggDemuxer()
{
- Reset();
- Cleanup();
MOZ_COUNT_DTOR(OggDemuxer);
+ Reset(TrackInfo::kAudioTrack);
+ Reset(TrackInfo::kVideoTrack);
if (HasAudio() || HasVideo()) {
// If we were able to initialize our decoders, report whether we encountered
// a chained stream or not.
bool isChained = mIsChained;
nsCOMPtr<nsIRunnable> task = NS_NewRunnableFunction([=]() -> void {
OGG_DEBUG("Reporting telemetry MEDIA_OGG_LOADED_IS_CHAINED=%d", isChained);
Telemetry::Accumulate(Telemetry::ID::MEDIA_OGG_LOADED_IS_CHAINED, isChained);
});
@@ -178,20 +179,38 @@ const
int64_t
OggDemuxer::StartTime() const
{
MOZ_ASSERT(HaveStartTime());
return mStartTime.ref();
}
+bool
+OggDemuxer::HaveStartTime(TrackInfo::TrackType aType)
+{
+ return (aType == TrackInfo::kAudioTrack ? mAudioOggState : mVideoOggState)
+ .mStartTime.isSome();
+}
+
+int64_t
+OggDemuxer::StartTime(TrackInfo::TrackType aType)
+{
+ return (aType == TrackInfo::kAudioTrack ? mAudioOggState : mVideoOggState)
+ .mStartTime.refOr(TimeUnit::FromMicroseconds(0)).ToMicroseconds();
+}
+
RefPtr<OggDemuxer::InitPromise>
OggDemuxer::Init()
{
- int ret = ogg_sync_init(&mOggState);
+ int ret = ogg_sync_init(OggState(TrackInfo::kAudioTrack));
+ if (ret != 0) {
+ return InitPromise::CreateAndReject(DemuxerFailureReason::DEMUXER_ERROR, __func__);
+ }
+ ret = ogg_sync_init(OggState(TrackInfo::kVideoTrack));
if (ret != 0) {
return InitPromise::CreateAndReject(DemuxerFailureReason::DEMUXER_ERROR, __func__);
}
/*
if (InitBufferedState() != NS_OK) {
return InitPromise::CreateAndReject(DemuxerFailureReason::WAITING_FOR_DATA, __func__);
}
*/
@@ -225,32 +244,45 @@ OggDemuxer::GetTrackCodecState(TrackInfo
}
case TrackInfo::kVideoTrack:
return mTheoraState;
default:
return 0;
}
}
+TrackInfo::TrackType
+OggDemuxer::GetCodecStateType(OggCodecState* aState) const
+{
+ switch (aState->GetType()) {
+ case OggCodecState::TYPE_THEORA:
+ return TrackInfo::kVideoTrack;
+ case OggCodecState::TYPE_OPUS:
+ case OggCodecState::TYPE_VORBIS:
+ return TrackInfo::kAudioTrack;
+ default:
+ return TrackInfo::kUndefinedTrack;
+ }
+}
+
uint32_t
OggDemuxer::GetNumberTracks(TrackInfo::TrackType aType) const
{
switch(aType) {
case TrackInfo::kAudioTrack:
return HasAudio() ? 1 : 0;
case TrackInfo::kVideoTrack:
return HasVideo() ? 1 : 0;
default:
return 0;
}
}
UniquePtr<TrackInfo>
-OggDemuxer::GetTrackInfo(TrackInfo::TrackType aType,
- size_t aTrackNumber) const
+OggDemuxer::GetTrackInfo(TrackInfo::TrackType aType, size_t aTrackNumber) const
{
switch(aType) {
case TrackInfo::kAudioTrack:
return mInfo.mAudio.Clone();
case TrackInfo::kVideoTrack:
return mInfo.mVideo.Clone();
default:
return nullptr;
@@ -265,56 +297,34 @@ OggDemuxer::GetTrackDemuxer(TrackInfo::T
}
RefPtr<OggTrackDemuxer> e = new OggTrackDemuxer(this, aType, aTrackNumber);
mDemuxers.AppendElement(e);
return e.forget();
}
nsresult
-OggDemuxer::Reset()
+OggDemuxer::Reset(TrackInfo::TrackType aType)
{
- nsresult res = NS_OK;
-
// Discard any previously buffered packets/pages.
- ogg_sync_reset(&mOggState);
- if (mVorbisState && NS_FAILED(mVorbisState->Reset())) {
- res = NS_ERROR_FAILURE;
- }
- if (mOpusState && NS_FAILED(mOpusState->Reset())) { // false?
- res = NS_ERROR_FAILURE;
- }
- if (mTheoraState && NS_FAILED(mTheoraState->Reset())) {
- res = NS_ERROR_FAILURE;
- }
-
- return res;
-}
-
-nsresult
-OggDemuxer::ResetTrackState(TrackInfo::TrackType aType)
-{
+ ogg_sync_reset(OggState(aType));
OggCodecState* trackState = GetTrackCodecState(aType);
if (trackState) {
return trackState->Reset();
}
return NS_OK;
}
-void
-OggDemuxer::Cleanup()
-{
- ogg_sync_clear(&mOggState);
-}
-
bool
-OggDemuxer::ReadHeaders(OggCodecState* aState, OggHeaders& aHeaders)
+OggDemuxer::ReadHeaders(TrackInfo::TrackType aType,
+ OggCodecState* aState,
+ OggHeaders& aHeaders)
{
while (!aState->DoneReadingHeaders()) {
- DemuxUntilPacketAvailable(aState);
+ DemuxUntilPacketAvailable(aType, aState);
ogg_packet* packet = aState->PacketOut();
if (!packet) {
OGG_DEBUG("Ran out of header packets early; deactivating stream %ld", aState->mSerial);
aState->Deactivate();
return false;
}
// Save a copy of the header packet for the decoder to use later;
@@ -324,16 +334,17 @@ OggDemuxer::ReadHeaders(OggCodecState* a
// Local OggCodecState needs to decode headers in order to process
// packet granulepos -> time mappings, etc.
if (!aState->DecodeHeader(packet)) {
OGG_DEBUG("Failed to decode ogg header packet; deactivating stream %ld", aState->mSerial);
aState->Deactivate();
return false;
}
}
+
return aState->Init();
}
void
OggDemuxer::BuildSerialList(nsTArray<uint32_t>& aTracks)
{
// Obtaining seek index information for currently active bitstreams.
if (HasVideo()) {
@@ -446,17 +457,20 @@ OggDemuxer::SetupTargetSkeleton()
// being set (if they exist).
if (mSkeletonState) {
OggHeaders headers;
if (!HasAudio() && !HasVideo()) {
// We have a skeleton track, but no audio or video, may as well disable
// the skeleton, we can't do anything useful with this media.
OGG_DEBUG("Deactivating skeleton stream %ld", mSkeletonState->mSerial);
mSkeletonState->Deactivate();
- } else if (ReadHeaders(mSkeletonState, headers) && mSkeletonState->HasIndex()) {
+ } else if (ReadHeaders(TrackInfo::kAudioTrack, mSkeletonState, headers) &&
+ mSkeletonState->HasIndex()) {
+      // We don't particularly care about which track we are currently using
+      // as both MediaResources point to the same content.
// Extract the duration info out of the index, so we don't need to seek to
// the end of resource to get it.
nsTArray<uint32_t> tracks;
BuildSerialList(tracks);
int64_t duration = 0;
if (NS_SUCCEEDED(mSkeletonState->GetDuration(tracks, duration))) {
OGG_DEBUG("Got duration from Skeleton index %lld", duration);
mInfo.mMetadataDuration.emplace(TimeUnit::FromMicroseconds(duration));
@@ -521,19 +535,17 @@ OggDemuxer::SetupMediaTracksInfo(const n
FillTags(&mInfo.mAudio, vorbisState->GetTags());
} else if (codecState->GetType() == OggCodecState::TYPE_OPUS) {
OpusState* opusState = static_cast<OpusState*>(codecState);
if (!(mOpusState && mOpusState->mSerial == opusState->mSerial)) {
continue;
}
if (msgInfo) {
- InitTrack(msgInfo,
- &mInfo.mAudio,
- mOpusState == opusState);
+ InitTrack(msgInfo, &mInfo.mAudio, mOpusState == opusState);
}
mInfo.mAudio.mRate = opusState->mRate;
mInfo.mAudio.mChannels = opusState->mChannels;
FillTags(&mInfo.mAudio, opusState->GetTags());
}
}
}
@@ -558,72 +570,81 @@ OggDemuxer::ReadMetadata()
// We read packets until all bitstreams have read all their header packets.
// We record the offset of the first non-header page so that we know
// what page to seek to when seeking to the media start.
// @FIXME we have to read all the header packets on all the streams
// and THEN we can run SetupTarget*
// @fixme fixme
- ogg_page page;
+ TrackInfo::TrackType tracks[2] =
+ { TrackInfo::kAudioTrack, TrackInfo::kVideoTrack };
+
nsTArray<OggCodecState*> bitstreams;
nsTArray<uint32_t> serials;
- bool readAllBOS = false;
- while (!readAllBOS) {
- if (!ReadOggPage(&page)) {
- // Some kind of error...
- OGG_DEBUG("OggDemuxer::ReadOggPage failed? leaving ReadMetadata...");
- break;
- }
- int serial = ogg_page_serialno(&page);
+ for (uint32_t i = 0; i < ArrayLength(tracks); i++) {
+ ogg_page page;
+ bool readAllBOS = false;
+ while (!readAllBOS) {
+ if (!ReadOggPage(tracks[i], &page)) {
+ // Some kind of error...
+ OGG_DEBUG("OggDemuxer::ReadOggPage failed? leaving ReadMetadata...");
+ return NS_ERROR_FAILURE;
+ }
+
+ int serial = ogg_page_serialno(&page);
- if (!ogg_page_bos(&page)) {
- // We've encountered a non Beginning Of Stream page. No more BOS pages
- // can follow in this Ogg segment, so there will be no other bitstreams
- // in the Ogg (unless it's invalid).
- readAllBOS = true;
- } else if (!mCodecStore.Contains(serial)) {
- // We've not encountered a stream with this serial number before. Create
- // an OggCodecState to demux it, and map that to the OggCodecState
- // in mCodecStates.
- OggCodecState* codecState = OggCodecState::Create(&page);
- mCodecStore.Add(serial, codecState);
- bitstreams.AppendElement(codecState);
- serials.AppendElement(serial);
- }
- if (NS_FAILED(DemuxOggPage(&page))) {
- return NS_ERROR_FAILURE;
+ if (!ogg_page_bos(&page)) {
+ // We've encountered a non Beginning Of Stream page. No more BOS pages
+ // can follow in this Ogg segment, so there will be no other bitstreams
+ // in the Ogg (unless it's invalid).
+ readAllBOS = true;
+ } else if (!mCodecStore.Contains(serial)) {
+ // We've not encountered a stream with this serial number before. Create
+ // an OggCodecState to demux it, and map that to the OggCodecState
+ // in mCodecStates.
+ OggCodecState* codecState = OggCodecState::Create(&page);
+ mCodecStore.Add(serial, codecState);
+ bitstreams.AppendElement(codecState);
+ serials.AppendElement(serial);
+ }
+ if (NS_FAILED(DemuxOggPage(tracks[i], &page))) {
+ return NS_ERROR_FAILURE;
+ }
}
}
// We've read all BOS pages, so we know the streams contained in the media.
// 1. Find the first encountered Theora/Vorbis/Opus bitstream, and configure
// it as the target A/V bitstream.
// 2. Deactivate the rest of bitstreams for now, until we have MediaInfo
// support multiple track infos.
for (uint32_t i = 0; i < bitstreams.Length(); ++i) {
OggCodecState* s = bitstreams[i];
if (s) {
OggHeaders headers;
- if (s->GetType() == OggCodecState::TYPE_THEORA && ReadHeaders(s, headers)) {
+ if (s->GetType() == OggCodecState::TYPE_THEORA &&
+ ReadHeaders(TrackInfo::kVideoTrack, s, headers)) {
if (!mTheoraState) {
TheoraState* theoraState = static_cast<TheoraState*>(s);
SetupTargetTheora(theoraState, headers);
} else {
s->Deactivate();
}
- } else if (s->GetType() == OggCodecState::TYPE_VORBIS && ReadHeaders(s, headers)) {
+ } else if (s->GetType() == OggCodecState::TYPE_VORBIS &&
+ ReadHeaders(TrackInfo::kAudioTrack, s, headers)) {
if (!mVorbisState) {
VorbisState* vorbisState = static_cast<VorbisState*>(s);
SetupTargetVorbis(vorbisState, headers);
} else {
s->Deactivate();
}
- } else if (s->GetType() == OggCodecState::TYPE_OPUS && ReadHeaders(s, headers)) {
+ } else if (s->GetType() == OggCodecState::TYPE_OPUS &&
+ ReadHeaders(TrackInfo::kAudioTrack, s, headers)) {
if (mOpusEnabled) {
if (!mOpusState) {
OpusState* opusState = static_cast<OpusState*>(s);
SetupTargetOpus(opusState, headers);
} else {
s->Deactivate();
}
} else {
@@ -631,42 +652,41 @@ OggDemuxer::ReadMetadata()
" See media.opus.enabled in about:config");
}
} else if (s->GetType() == OggCodecState::TYPE_SKELETON && !mSkeletonState) {
mSkeletonState = static_cast<SkeletonState*>(s);
} else {
// Deactivate any non-primary bitstreams.
s->Deactivate();
}
-
}
}
SetupTargetSkeleton();
SetupMediaTracksInfo(serials);
if (HasAudio() || HasVideo()) {
int64_t startTime = -1;
FindStartTime(startTime);
NS_ASSERTION(startTime >= 0, "Must have a non-negative start time");
OGG_DEBUG("Detected stream start time %lld", startTime);
if (startTime >= 0) {
mStartTime.emplace(startTime);
}
if (mInfo.mMetadataDuration.isNothing() &&
- mResource.GetLength() >= 0 && IsSeekable())
- {
+ Resource(TrackInfo::kAudioTrack)->GetLength() >= 0 &&
+ Resource(TrackInfo::kAudioTrack)->GetResource()->IsTransportSeekable()) {
// We didn't get a duration from the index or a Content-Duration header.
// Seek to the end of file to find the end time.
- int64_t length = mResource.GetLength();
+ int64_t length = Resource(TrackInfo::kAudioTrack)->GetLength();
NS_ASSERTION(length > 0, "Must have a content length to get end time");
- int64_t endTime = RangeEndTime(length);
+ int64_t endTime = RangeEndTime(TrackInfo::kAudioTrack, length);
if (endTime != -1) {
mInfo.mUnadjustedMetadataEndTime.emplace(TimeUnit::FromMicroseconds(endTime));
mInfo.mMetadataDuration.emplace(TimeUnit::FromMicroseconds(endTime - mStartTime.refOr(0)));
OGG_DEBUG("Got Ogg duration from seeking to end %lld", endTime);
}
}
if (mInfo.mMetadataDuration.isNothing()) {
@@ -707,17 +727,18 @@ OggDemuxer::ReadOggChain()
VorbisState* newVorbisState = nullptr;
nsAutoPtr<MetadataTags> tags;
if (HasVideo() || HasSkeleton() || !HasAudio()) {
return false;
}
ogg_page page;
- if (!ReadOggPage(&page) || !ogg_page_bos(&page)) {
+ if (!ReadOggPage(TrackInfo::kAudioTrack, &page) || !ogg_page_bos(&page)) {
+    // Chaining is only supported for audio-only Ogg files.
return false;
}
int serial = ogg_page_serialno(&page);
if (mCodecStore.Contains(serial)) {
return false;
}
@@ -748,17 +769,18 @@ OggDemuxer::ReadOggChain()
}
MessageField* msgInfo = nullptr;
if (mSkeletonState && mSkeletonState->mMsgFieldStore.Contains(serial)) {
mSkeletonState->mMsgFieldStore.Get(serial, &msgInfo);
}
OggHeaders vorbisHeaders;
- if ((newVorbisState && ReadHeaders(newVorbisState, vorbisHeaders)) &&
+ if ((newVorbisState &&
+ ReadHeaders(TrackInfo::kAudioTrack, newVorbisState, vorbisHeaders)) &&
(mVorbisState->mInfo.rate == newVorbisState->mInfo.rate) &&
(mVorbisState->mInfo.channels == newVorbisState->mInfo.channels)) {
SetupTargetVorbis(newVorbisState, vorbisHeaders);
LOG(LogLevel::Debug, ("New vorbis ogg link, serial=%d\n", mVorbisSerial));
if (msgInfo) {
InitTrack(msgInfo, &mInfo.mAudio, true);
@@ -767,17 +789,18 @@ OggDemuxer::ReadOggChain()
mInfo.mAudio.mRate = newVorbisState->mInfo.rate;
mInfo.mAudio.mChannels = newVorbisState->mInfo.channels;
chained = true;
tags = newVorbisState->GetTags();
}
OggHeaders opusHeaders;
- if ((newOpusState && ReadHeaders(newOpusState, opusHeaders)) &&
+ if ((newOpusState &&
+ ReadHeaders(TrackInfo::kAudioTrack, newOpusState, opusHeaders)) &&
(mOpusState->mRate == newOpusState->mRate) &&
(mOpusState->mChannels == newOpusState->mChannels)) {
SetupTargetOpus(newOpusState, opusHeaders);
if (msgInfo) {
InitTrack(msgInfo, &mInfo.mAudio, true);
}
@@ -802,58 +825,87 @@ OggDemuxer::ReadOggChain()
*/
}
return true;
}
return false;
}
+ogg_sync_state*
+OggDemuxer::OggState(TrackInfo::TrackType aType)
+{
+ if (aType == TrackInfo::kVideoTrack) {
+ return &mVideoOggState.mOggState.mState;
+ }
+ return &mAudioOggState.mOggState.mState;
+}
+
+MediaResourceIndex*
+OggDemuxer::Resource(TrackInfo::TrackType aType)
+{
+ if (aType == TrackInfo::kVideoTrack) {
+ return &mVideoOggState.mResource;
+ }
+ return &mAudioOggState.mResource;
+}
+
+MediaResourceIndex*
+OggDemuxer::CommonResource()
+{
+ return &mAudioOggState.mResource;
+}
+
bool
-OggDemuxer::ReadOggPage(ogg_page* aPage)
+OggDemuxer::ReadOggPage(TrackInfo::TrackType aType, ogg_page* aPage)
{
int ret = 0;
- while((ret = ogg_sync_pageseek(&mOggState, aPage)) <= 0) {
+ while((ret = ogg_sync_pageseek(OggState(aType), aPage)) <= 0) {
if (ret < 0) {
// Lost page sync, have to skip up to next page.
continue;
}
// Returns a buffer that can be written too
// with the given size. This buffer is stored
// in the ogg synchronisation structure.
- char* buffer = ogg_sync_buffer(&mOggState, 4096);
+ char* buffer = ogg_sync_buffer(OggState(aType), 4096);
NS_ASSERTION(buffer, "ogg_sync_buffer failed");
// Read from the resource into the buffer
uint32_t bytesRead = 0;
- nsresult rv = mResource.Read(buffer, 4096, &bytesRead);
+ nsresult rv = Resource(aType)->Read(buffer, 4096, &bytesRead);
if (NS_FAILED(rv) || !bytesRead) {
// End of file or error.
return false;
}
// Update the synchronisation layer with the number
// of bytes written to the buffer
- ret = ogg_sync_wrote(&mOggState, bytesRead);
+ ret = ogg_sync_wrote(OggState(aType), bytesRead);
NS_ENSURE_TRUE(ret == 0, false);
}
return true;
}
nsresult
-OggDemuxer::DemuxOggPage(ogg_page* aPage)
+OggDemuxer::DemuxOggPage(TrackInfo::TrackType aType, ogg_page* aPage)
{
int serial = ogg_page_serialno(aPage);
OggCodecState* codecState = mCodecStore.Get(serial);
if (codecState == nullptr) {
OGG_DEBUG("encountered packet for unrecognized codecState");
return NS_ERROR_FAILURE;
}
+ if (GetCodecStateType(codecState) != aType &&
+ codecState->GetType() != OggCodecState::TYPE_SKELETON) {
+ // Not a page we're interested in.
+ return NS_OK;
+ }
if (NS_FAILED(codecState->PageIn(aPage))) {
OGG_DEBUG("codecState->PageIn failed");
return NS_ERROR_FAILURE;
}
return NS_OK;
}
bool
@@ -876,59 +928,58 @@ OggDemuxer::GetNextPacket(TrackInfo::Tra
{
OggCodecState* state = GetTrackCodecState(aType);
ogg_packet* packet = nullptr;
do {
if (packet) {
OggCodecState::ReleasePacket(state->PacketOut());
}
- DemuxUntilPacketAvailable(state);
+ DemuxUntilPacketAvailable(aType, state);
packet = state->PacketPeek();
} while (packet && state->IsHeader(packet));
return packet;
}
void
-OggDemuxer::DemuxUntilPacketAvailable(OggCodecState* aState)
+OggDemuxer::DemuxUntilPacketAvailable(TrackInfo::TrackType aType,
+ OggCodecState* aState)
{
while (!aState->IsPacketReady()) {
OGG_DEBUG("no packet yet, reading some more");
ogg_page page;
- if (!ReadOggPage(&page)) {
+ if (!ReadOggPage(aType, &page)) {
OGG_DEBUG("no more pages to read in resource?");
return;
}
- DemuxOggPage(&page);
+ DemuxOggPage(aType, &page);
}
}
TimeIntervals
-OggDemuxer::GetBuffered()
+OggDemuxer::GetBuffered(TrackInfo::TrackType aType)
{
- if (!HaveStartTime()) {
+ if (!HaveStartTime(aType)) {
return TimeIntervals();
}
- {
- if (mIsChained) {
- return TimeIntervals::Invalid();
- }
+ if (mIsChained) {
+ return TimeIntervals::Invalid();
}
TimeIntervals buffered;
// HasAudio and HasVideo are not used here as they take a lock and cause
// a deadlock. Accessing mInfo doesn't require a lock - it doesn't change
// after metadata is read.
if (!mInfo.HasValidMedia()) {
// No need to search through the file if there are no audio or video tracks
return buffered;
}
- AutoPinned<MediaResource> resource(mResource.GetResource());
+ AutoPinned<MediaResource> resource(Resource(aType)->GetResource());
MediaByteRangeSet ranges;
nsresult res = resource->GetCachedRanges(ranges);
NS_ENSURE_SUCCESS(res, TimeIntervals::Invalid());
// Traverse across the buffered byte ranges, determining the time ranges
// they contain. MediaResource::GetNextCachedData(offset) returns -1 when
// offset is after the end of the media resource, or there's no more cached
// data after the offset. This loop will run until we've checked every
@@ -947,17 +998,17 @@ OggDemuxer::GetBuffered()
// Find the start time of the range. Read pages until we find one with a
// granulepos which we can convert into a timestamp to use as the time of
// the start of the buffered range.
ogg_sync_reset(&sync.mState);
while (startTime == -1) {
ogg_page page;
int32_t discard;
- PageSyncResult pageSyncResult = PageSync(&mResource,
+ PageSyncResult pageSyncResult = PageSync(Resource(aType),
&sync.mState,
true,
startOffset,
endOffset,
&page,
discard);
if (pageSyncResult == PAGE_SYNC_ERROR) {
return TimeIntervals::Invalid();
@@ -971,48 +1022,47 @@ OggDemuxer::GetBuffered()
if (granulepos == -1) {
// Page doesn't have an end time, advance to the next page
// until we find one.
startOffset += page.header_len + page.body_len;
continue;
}
uint32_t serial = ogg_page_serialno(&page);
- if (mVorbisState && serial == mVorbisSerial) {
+ if (aType == TrackInfo::kAudioTrack && mVorbisState &&
+ serial == mVorbisSerial) {
startTime = VorbisState::Time(&mVorbisInfo, granulepos);
NS_ASSERTION(startTime > 0, "Must have positive start time");
- }
- else if (mOpusState && serial == mOpusSerial) {
+ } else if (aType == TrackInfo::kAudioTrack && mOpusState &&
+ serial == mOpusSerial) {
startTime = OpusState::Time(mOpusPreSkip, granulepos);
NS_ASSERTION(startTime > 0, "Must have positive start time");
- }
- else if (mTheoraState && serial == mTheoraSerial) {
+ } else if (aType == TrackInfo::kVideoTrack && mTheoraState &&
+ serial == mTheoraSerial) {
startTime = TheoraState::Time(&mTheoraInfo, granulepos);
NS_ASSERTION(startTime > 0, "Must have positive start time");
- }
- else if (mCodecStore.Contains(serial)) {
+ } else if (mCodecStore.Contains(serial)) {
// Stream is not the theora or vorbis stream we're playing,
// but is one that we have header data for.
startOffset += page.header_len + page.body_len;
continue;
- }
- else {
+ } else {
// Page is for a stream we don't know about (possibly a chained
// ogg), return OK to abort the finding any further ranges. This
// prevents us searching through the rest of the media when we
// may not be able to extract timestamps from it.
SetChained();
return buffered;
}
}
if (startTime != -1) {
// We were able to find a start time for that range, see if we can
// find an end time.
- int64_t endTime = RangeEndTime(startOffset, endOffset, true);
+ int64_t endTime = RangeEndTime(aType, startOffset, endOffset, true);
if (endTime > startTime) {
buffered += TimeInterval(
TimeUnit::FromMicroseconds(startTime - StartTime()),
TimeUnit::FromMicroseconds(endTime - StartTime()));
}
}
}
@@ -1023,99 +1073,116 @@ void
OggDemuxer::FindStartTime(int64_t& aOutStartTime)
{
// Extract the start times of the bitstreams in order to calculate
// the duration.
int64_t videoStartTime = INT64_MAX;
int64_t audioStartTime = INT64_MAX;
if (HasVideo()) {
- ogg_packet* pkt = GetNextPacket(TrackInfo::kVideoTrack);
- if (pkt) {
- videoStartTime = mTheoraState->PacketStartTime(pkt);
+ FindStartTime(TrackInfo::kVideoTrack, videoStartTime);
+ if (videoStartTime != INT64_MAX) {
OGG_DEBUG("OggDemuxer::FindStartTime() video=%lld", videoStartTime);
+ mVideoOggState.mStartTime =
+ Some(TimeUnit::FromMicroseconds(videoStartTime));
}
}
if (HasAudio()) {
- OggCodecState* state = GetTrackCodecState(TrackInfo::kAudioTrack);
- ogg_packet* pkt = GetNextPacket(TrackInfo::kAudioTrack);
- if (pkt) {
- audioStartTime = state->PacketStartTime(pkt);
- OGG_DEBUG("OggReader::FindStartTime() audio=%lld", audioStartTime);
+ FindStartTime(TrackInfo::kAudioTrack, audioStartTime);
+ if (audioStartTime != INT64_MAX) {
+ OGG_DEBUG("OggDemuxer::FindStartTime() audio=%lld", audioStartTime);
+ mAudioOggState.mStartTime =
+ Some(TimeUnit::FromMicroseconds(audioStartTime));
}
}
int64_t startTime = std::min(videoStartTime, audioStartTime);
if (startTime != INT64_MAX) {
aOutStartTime = startTime;
}
}
+void
+OggDemuxer::FindStartTime(TrackInfo::TrackType aType, int64_t& aOutStartTime)
+{
+ int64_t startTime = INT64_MAX;
+
+ OggCodecState* state = GetTrackCodecState(aType);
+ ogg_packet* pkt = GetNextPacket(aType);
+ if (pkt) {
+ startTime = state->PacketStartTime(pkt);
+ }
+
+ if (startTime != INT64_MAX) {
+ aOutStartTime = startTime;
+ }
+}
+
nsresult
-OggDemuxer::SeekInternal(const TimeUnit& aTarget)
+OggDemuxer::SeekInternal(TrackInfo::TrackType aType, const TimeUnit& aTarget)
{
int64_t target = aTarget.ToMicroseconds();
OGG_DEBUG("About to seek to %lld", target);
nsresult res;
int64_t adjustedTarget = target;
- int64_t startTime = StartTime();
+ int64_t startTime = StartTime(aType);
int64_t endTime = mInfo.mMetadataDuration->ToMicroseconds();
- if (HasAudio() && mOpusState){
+ if (aType == TrackInfo::kAudioTrack && mOpusState){
adjustedTarget = std::max(startTime, target - OGG_SEEK_OPUS_PREROLL);
}
if (adjustedTarget == startTime) {
// We've seeked to the media start. Just seek to the offset of the first
// content page.
- res = mResource.Seek(nsISeekableStream::NS_SEEK_SET, 0);
+ res = Resource(aType)->Seek(nsISeekableStream::NS_SEEK_SET, 0);
NS_ENSURE_SUCCESS(res,res);
- res = Reset();
+ res = Reset(aType);
NS_ENSURE_SUCCESS(res,res);
} else {
// TODO: This may seek back unnecessarily far in the video, but we don't
// have a way of asking Skeleton to seek to a different target for each
// stream yet. Using adjustedTarget here is at least correct, if slow.
- IndexedSeekResult sres = SeekToKeyframeUsingIndex(adjustedTarget);
+ IndexedSeekResult sres = SeekToKeyframeUsingIndex(aType, adjustedTarget);
NS_ENSURE_TRUE(sres != SEEK_FATAL_ERROR, NS_ERROR_FAILURE);
if (sres == SEEK_INDEX_FAIL) {
// No index or other non-fatal index-related failure. Try to seek
// using a bisection search. Determine the already downloaded data
// in the media cache, so we can try to seek in the cached data first.
AutoTArray<SeekRange, 16> ranges;
- res = GetSeekRanges(ranges);
+ res = GetSeekRanges(aType, ranges);
NS_ENSURE_SUCCESS(res,res);
// Figure out if the seek target lies in a buffered range.
- SeekRange r = SelectSeekRange(ranges, target, startTime, endTime, true);
+ SeekRange r = SelectSeekRange(aType, ranges, target, startTime, endTime, true);
if (!r.IsNull()) {
// We know the buffered range in which the seek target lies, do a
// bisection search in that buffered range.
- res = SeekInBufferedRange(target, adjustedTarget, startTime, endTime, ranges, r);
+ res = SeekInBufferedRange(aType, target, adjustedTarget, startTime, endTime, ranges, r);
NS_ENSURE_SUCCESS(res,res);
} else {
// The target doesn't lie in a buffered range. Perform a bisection
// search over the whole media, using the known buffered ranges to
// reduce the search space.
- res = SeekInUnbuffered(target, startTime, endTime, ranges);
+ res = SeekInUnbuffered(aType, target, startTime, endTime, ranges);
NS_ENSURE_SUCCESS(res,res);
}
}
}
- if (HasVideo()) {
+ if (aType == TrackInfo::kVideoTrack) {
// Demux forwards until we find the next keyframe. This is required,
// as although the seek should finish on a page containing a keyframe,
// there may be non-keyframes in the page before the keyframe.
// When doing fastSeek we display the first frame after the seek, so
// we need to advance the decode to the keyframe otherwise we'll get
// visual artifacts in the first frame output after the seek.
while (true) {
- DemuxUntilPacketAvailable(mTheoraState);
+ DemuxUntilPacketAvailable(aType, mTheoraState);
ogg_packet* packet = mTheoraState->PacketPeek();
if (packet == nullptr) {
OGG_DEBUG("End of Theora stream reached before keyframe found in indexed seek");
break;
}
if (mTheoraState->IsKeyframe(packet)) {
OGG_DEBUG("Theora keyframe found after seek");
break;
@@ -1124,28 +1191,28 @@ OggDemuxer::SeekInternal(const TimeUnit&
ogg_packet* releaseMe = mTheoraState->PacketOut();
OggCodecState::ReleasePacket(releaseMe);
}
}
return NS_OK;
}
OggDemuxer::IndexedSeekResult
-OggDemuxer::RollbackIndexedSeek(int64_t aOffset)
+OggDemuxer::RollbackIndexedSeek(TrackInfo::TrackType aType, int64_t aOffset)
{
if (mSkeletonState) {
mSkeletonState->Deactivate();
}
- nsresult res = mResource.Seek(nsISeekableStream::NS_SEEK_SET, aOffset);
+ nsresult res = Resource(aType)->Seek(nsISeekableStream::NS_SEEK_SET, aOffset);
NS_ENSURE_SUCCESS(res, SEEK_FATAL_ERROR);
return SEEK_INDEX_FAIL;
}
OggDemuxer::IndexedSeekResult
-OggDemuxer::SeekToKeyframeUsingIndex(int64_t aTarget)
+OggDemuxer::SeekToKeyframeUsingIndex(TrackInfo::TrackType aType, int64_t aTarget)
{
if (!HasSkeleton() || !mSkeletonState->HasIndex()) {
return SEEK_INDEX_FAIL;
}
// We have an index from the Skeleton track, try to use it to seek.
AutoTArray<uint32_t, 2> tracks;
BuildSerialList(tracks);
SkeletonState::nsSeekTarget keyframe;
@@ -1153,65 +1220,65 @@ OggDemuxer::SeekToKeyframeUsingIndex(int
tracks,
keyframe)))
{
// Could not locate a keypoint for the target in the index.
return SEEK_INDEX_FAIL;
}
// Remember original resource read cursor position so we can rollback on failure.
- int64_t tell = mResource.Tell();
+ int64_t tell = Resource(aType)->Tell();
// Seek to the keypoint returned by the index.
- if (keyframe.mKeyPoint.mOffset > mResource.GetLength() ||
+ if (keyframe.mKeyPoint.mOffset > Resource(aType)->GetLength() ||
keyframe.mKeyPoint.mOffset < 0)
{
// Index must be invalid.
- return RollbackIndexedSeek(tell);
+ return RollbackIndexedSeek(aType, tell);
}
LOG(LogLevel::Debug, ("Seeking using index to keyframe at offset %lld\n",
keyframe.mKeyPoint.mOffset));
- nsresult res = mResource.Seek(nsISeekableStream::NS_SEEK_SET,
- keyframe.mKeyPoint.mOffset);
+ nsresult res = Resource(aType)->Seek(nsISeekableStream::NS_SEEK_SET,
+ keyframe.mKeyPoint.mOffset);
NS_ENSURE_SUCCESS(res, SEEK_FATAL_ERROR);
// We've moved the read set, so reset decode.
- res = Reset();
+ res = Reset(aType);
NS_ENSURE_SUCCESS(res, SEEK_FATAL_ERROR);
// Check that the page the index thinks is exactly here is actually exactly
// here. If not, the index is invalid.
ogg_page page;
int skippedBytes = 0;
- PageSyncResult syncres = PageSync(&mResource,
- &mOggState,
+ PageSyncResult syncres = PageSync(Resource(aType),
+ OggState(aType),
false,
keyframe.mKeyPoint.mOffset,
- mResource.GetLength(),
+ Resource(aType)->GetLength(),
&page,
skippedBytes);
NS_ENSURE_TRUE(syncres != PAGE_SYNC_ERROR, SEEK_FATAL_ERROR);
if (syncres != PAGE_SYNC_OK || skippedBytes != 0) {
LOG(LogLevel::Debug, ("Indexed-seek failure: Ogg Skeleton Index is invalid "
"or sync error after seek"));
- return RollbackIndexedSeek(tell);
+ return RollbackIndexedSeek(aType, tell);
}
uint32_t serial = ogg_page_serialno(&page);
if (serial != keyframe.mSerial) {
// Serialno of page at offset isn't what the index told us to expect.
// Assume the index is invalid.
- return RollbackIndexedSeek(tell);
+ return RollbackIndexedSeek(aType, tell);
}
OggCodecState* codecState = mCodecStore.Get(serial);
if (codecState && codecState->mActive &&
ogg_stream_pagein(&codecState->mState, &page) != 0)
{
// Couldn't insert page into the ogg resource, or somehow the resource
// is no longer active.
- return RollbackIndexedSeek(tell);
+ return RollbackIndexedSeek(aType, tell);
}
return SEEK_OK;
}
// Reads a page from the media resource.
OggDemuxer::PageSyncResult
OggDemuxer::PageSync(MediaResourceIndex* aResource,
ogg_sync_state* aState,
@@ -1300,17 +1367,17 @@ OggTrackDemuxer::GetInfo() const
RefPtr<OggTrackDemuxer::SeekPromise>
OggTrackDemuxer::Seek(TimeUnit aTime)
{
// Seeks to aTime. Upon success, SeekPromise will be resolved with the
// actual time seeked to. Typically the random access point time
mQueuedSample = nullptr;
TimeUnit seekTime = aTime;
- if (mParent->SeekInternal(aTime) == NS_OK) {
+ if (mParent->SeekInternal(mType, aTime) == NS_OK) {
RefPtr<MediaRawData> sample(NextSample());
// Check what time we actually seeked to.
if (sample != nullptr) {
seekTime = TimeUnit::FromMicroseconds(sample->mTime);
OGG_DEBUG("%p seeked to time %lld", this, seekTime.ToMicroseconds());
}
mQueuedSample = sample;
@@ -1367,22 +1434,22 @@ OggTrackDemuxer::GetSamples(int32_t aNum
} else {
return SamplesPromise::CreateAndResolve(samples, __func__);
}
}
void
OggTrackDemuxer::Reset()
{
- mParent->ResetTrackState(mType);
+ mParent->Reset(mType);
mQueuedSample = nullptr;
TimeIntervals buffered = GetBuffered();
if (buffered.Length()) {
OGG_DEBUG("Seek to start point: %f", buffered.Start(0).ToSeconds());
- mParent->SeekInternal(buffered.Start(0));
+ mParent->SeekInternal(mType, buffered.Start(0));
}
}
RefPtr<OggTrackDemuxer::SkipAccessPointPromise>
OggTrackDemuxer::SkipToNextRandomAccessPoint(TimeUnit aTimeThreshold)
{
uint32_t parsed = 0;
bool found = false;
@@ -1405,17 +1472,17 @@ OggTrackDemuxer::SkipToNextRandomAccessP
SkipFailureHolder failure(DemuxerFailureReason::END_OF_STREAM, parsed);
return SkipAccessPointPromise::CreateAndReject(Move(failure), __func__);
}
}
TimeIntervals
OggTrackDemuxer::GetBuffered()
{
- return mParent->GetBuffered();
+ return mParent->GetBuffered(mType);
}
void
OggTrackDemuxer::BreakCycles()
{
mParent = nullptr;
}
@@ -1431,50 +1498,51 @@ OggDemuxer::GetPageChecksum(ogg_page* pa
uint32_t c = p[0] +
(p[1] << 8) +
(p[2] << 16) +
(p[3] << 24);
return c;
}
int64_t
-OggDemuxer::RangeStartTime(int64_t aOffset)
+OggDemuxer::RangeStartTime(TrackInfo::TrackType aType, int64_t aOffset)
{
- int64_t position = mResource.Tell();
- nsresult res = mResource.Seek(nsISeekableStream::NS_SEEK_SET, aOffset);
+ int64_t position = Resource(aType)->Tell();
+ nsresult res = Resource(aType)->Seek(nsISeekableStream::NS_SEEK_SET, aOffset);
NS_ENSURE_SUCCESS(res, 0);
int64_t startTime = 0;
- FindStartTime(startTime); // @fixme
- res = mResource.Seek(nsISeekableStream::NS_SEEK_SET, position);
+ FindStartTime(aType, startTime);
+ res = Resource(aType)->Seek(nsISeekableStream::NS_SEEK_SET, position);
NS_ENSURE_SUCCESS(res, -1);
return startTime;
}
struct nsDemuxerAutoOggSyncState {
nsDemuxerAutoOggSyncState() {
ogg_sync_init(&mState);
}
~nsDemuxerAutoOggSyncState() {
ogg_sync_clear(&mState);
}
ogg_sync_state mState;
};
int64_t
-OggDemuxer::RangeEndTime(int64_t aEndOffset)
+OggDemuxer::RangeEndTime(TrackInfo::TrackType aType, int64_t aEndOffset)
{
- int64_t position = mResource.Tell();
- int64_t endTime = RangeEndTime(0, aEndOffset, false);
- nsresult res = mResource.Seek(nsISeekableStream::NS_SEEK_SET, position);
+ int64_t position = Resource(aType)->Tell();
+ int64_t endTime = RangeEndTime(aType, 0, aEndOffset, false);
+ nsresult res = Resource(aType)->Seek(nsISeekableStream::NS_SEEK_SET, position);
NS_ENSURE_SUCCESS(res, -1);
return endTime;
}
int64_t
-OggDemuxer::RangeEndTime(int64_t aStartOffset,
+OggDemuxer::RangeEndTime(TrackInfo::TrackType aType,
+ int64_t aStartOffset,
int64_t aEndOffset,
bool aCachedDataOnly)
{
nsDemuxerAutoOggSyncState sync;
// We need to find the last page which ends before aEndOffset that
// has a granulepos that we can convert to a timestamp. We do this by
// backing off from aEndOffset until we encounter a page on which we can
@@ -1506,39 +1574,39 @@ OggDemuxer::RangeEndTime(int64_t aStartO
checksumAfterSeek = 0;
ogg_sync_reset(&sync.mState);
readStartOffset = std::max(static_cast<int64_t>(0), readStartOffset - step);
// There's no point reading more than the maximum size of
// an Ogg page into data we've previously scanned. Any data
// between readLimitOffset and aEndOffset must be garbage
// and we can ignore it thereafter.
readLimitOffset = std::min(readLimitOffset,
- readStartOffset + maxOggPageSize);
+ readStartOffset + maxOggPageSize);
readHead = std::max(aStartOffset, readStartOffset);
}
int64_t limit = std::min(static_cast<int64_t>(UINT32_MAX),
- aEndOffset - readHead);
+ aEndOffset - readHead);
limit = std::max(static_cast<int64_t>(0), limit);
limit = std::min(limit, static_cast<int64_t>(step));
uint32_t bytesToRead = static_cast<uint32_t>(limit);
uint32_t bytesRead = 0;
char* buffer = ogg_sync_buffer(&sync.mState, bytesToRead);
NS_ASSERTION(buffer, "Must have buffer");
nsresult res;
if (aCachedDataOnly) {
- res = mResource.GetResource()->ReadFromCache(buffer, readHead, bytesToRead);
+ res = Resource(aType)->GetResource()->ReadFromCache(buffer, readHead, bytesToRead);
NS_ENSURE_SUCCESS(res, -1);
bytesRead = bytesToRead;
} else {
NS_ASSERTION(readHead < aEndOffset,
"resource pos must be before range end");
- res = mResource.Seek(nsISeekableStream::NS_SEEK_SET, readHead);
+ res = Resource(aType)->Seek(nsISeekableStream::NS_SEEK_SET, readHead);
NS_ENSURE_SUCCESS(res, -1);
- res = mResource.Read(buffer, bytesToRead, &bytesRead);
+ res = Resource(aType)->Read(buffer, bytesToRead, &bytesRead);
NS_ENSURE_SUCCESS(res, -1);
}
readHead += bytesRead;
if (readHead > readLimitOffset) {
mustBackOff = true;
}
// Update the synchronisation layer with the number
@@ -1591,59 +1659,60 @@ OggDemuxer::RangeEndTime(int64_t aStartO
endTime = t;
}
}
return endTime;
}
nsresult
-OggDemuxer::GetSeekRanges(nsTArray<SeekRange>& aRanges)
+OggDemuxer::GetSeekRanges(TrackInfo::TrackType aType,
+ nsTArray<SeekRange>& aRanges)
{
- AutoPinned<MediaResource> resource(mResource.GetResource());
+ AutoPinned<MediaResource> resource(Resource(aType)->GetResource());
MediaByteRangeSet cached;
nsresult res = resource->GetCachedRanges(cached);
NS_ENSURE_SUCCESS(res, res);
for (uint32_t index = 0; index < cached.Length(); index++) {
auto& range = cached[index];
int64_t startTime = -1;
int64_t endTime = -1;
- if (NS_FAILED(Reset())) {
+ if (NS_FAILED(Reset(aType))) {
return NS_ERROR_FAILURE;
}
int64_t startOffset = range.mStart;
int64_t endOffset = range.mEnd;
- startTime = RangeStartTime(startOffset);
+ startTime = RangeStartTime(aType, startOffset);
if (startTime != -1 &&
- ((endTime = RangeEndTime(endOffset)) != -1))
- {
+ ((endTime = RangeEndTime(aType, endOffset)) != -1)) {
NS_WARN_IF_FALSE(startTime < endTime,
"Start time must be before end time");
aRanges.AppendElement(SeekRange(startOffset,
endOffset,
startTime,
endTime));
}
}
- if (NS_FAILED(Reset())) {
+ if (NS_FAILED(Reset(aType))) {
return NS_ERROR_FAILURE;
}
return NS_OK;
}
OggDemuxer::SeekRange
-OggDemuxer::SelectSeekRange(const nsTArray<SeekRange>& ranges,
+OggDemuxer::SelectSeekRange(TrackInfo::TrackType aType,
+ const nsTArray<SeekRange>& ranges,
int64_t aTarget,
int64_t aStartTime,
int64_t aEndTime,
bool aExact)
{
int64_t so = 0;
- int64_t eo = mResource.GetLength();
+ int64_t eo = Resource(aType)->GetLength();
int64_t st = aStartTime;
int64_t et = aEndTime;
for (uint32_t i = 0; i < ranges.Length(); i++) {
const SeekRange& r = ranges[i];
if (r.mTimeStart < aTarget) {
so = r.mOffsetStart;
st = r.mTimeStart;
}
@@ -1660,63 +1729,66 @@ OggDemuxer::SelectSeekRange(const nsTArr
if (aExact || eo == -1) {
return SeekRange();
}
return SeekRange(so, eo, st, et);
}
nsresult
-OggDemuxer::SeekInBufferedRange(int64_t aTarget,
+OggDemuxer::SeekInBufferedRange(TrackInfo::TrackType aType,
+ int64_t aTarget,
int64_t aAdjustedTarget,
int64_t aStartTime,
int64_t aEndTime,
const nsTArray<SeekRange>& aRanges,
const SeekRange& aRange)
{
OGG_DEBUG("Seeking in buffered data to %lld using bisection search", aTarget);
- if (HasVideo() || aAdjustedTarget >= aTarget) {
+ if (aType == TrackInfo::kVideoTrack || aAdjustedTarget >= aTarget) {
// We know the exact byte range in which the target must lie. It must
// be buffered in the media cache. Seek there.
- nsresult res = SeekBisection(aTarget, aRange, 0);
- if (NS_FAILED(res) || !HasVideo()) {
+ nsresult res = SeekBisection(aType, aTarget, aRange, 0);
+ if (NS_FAILED(res) || aType != TrackInfo::kVideoTrack || !mTheoraState) {
return res;
}
// We have an active Theora bitstream. Peek the next Theora frame, and
// extract its keyframe's time.
- DemuxUntilPacketAvailable(mTheoraState);
+ DemuxUntilPacketAvailable(aType, mTheoraState);
ogg_packet* packet = mTheoraState->PacketPeek();
if (packet && !mTheoraState->IsKeyframe(packet)) {
// First post-seek frame isn't a keyframe, seek back to previous keyframe,
// otherwise we'll get visual artifacts.
NS_ASSERTION(packet->granulepos != -1, "Must have a granulepos");
int shift = mTheoraState->mInfo.keyframe_granule_shift;
int64_t keyframeGranulepos = (packet->granulepos >> shift) << shift;
int64_t keyframeTime = mTheoraState->StartTime(keyframeGranulepos);
SEEK_LOG(LogLevel::Debug, ("Keyframe for %lld is at %lld, seeking back to it",
frameTime, keyframeTime));
aAdjustedTarget = std::min(aAdjustedTarget, keyframeTime);
}
}
nsresult res = NS_OK;
if (aAdjustedTarget < aTarget) {
- SeekRange k = SelectSeekRange(aRanges,
+ SeekRange k = SelectSeekRange(aType,
+ aRanges,
aAdjustedTarget,
aStartTime,
aEndTime,
false);
- res = SeekBisection(aAdjustedTarget, k, OGG_SEEK_FUZZ_USECS);
+ res = SeekBisection(aType, aAdjustedTarget, k, OGG_SEEK_FUZZ_USECS);
}
return res;
}
nsresult
-OggDemuxer::SeekInUnbuffered(int64_t aTarget,
+OggDemuxer::SeekInUnbuffered(TrackInfo::TrackType aType,
+ int64_t aTarget,
int64_t aStartTime,
int64_t aEndTime,
const nsTArray<SeekRange>& aRanges)
{
OGG_DEBUG("Seeking in unbuffered data to %lld using bisection search", aTarget);
// If we've got an active Theora bitstream, determine the maximum possible
// time in usecs which a keyframe could be before a given interframe. We
@@ -1726,42 +1798,44 @@ OggDemuxer::SeekInUnbuffered(int64_t aTa
// bisections; one for the seek target frame, and another to find its
// keyframe. It's usually faster to just download this extra data, rather
// tham perform two bisections to find the seek target's keyframe. We
// don't do this offsetting when seeking in a buffered range,
// as the extra decoding causes a noticeable speed hit when all the data
// is buffered (compared to just doing a bisection to exactly find the
// keyframe).
int64_t keyframeOffsetMs = 0;
- if (HasVideo() && mTheoraState) {
+ if (aType == TrackInfo::kVideoTrack && mTheoraState) {
keyframeOffsetMs = mTheoraState->MaxKeyframeOffset();
}
// Add in the Opus pre-roll if necessary, as well.
- if (HasAudio() && mOpusState) {
+ if (aType == TrackInfo::kAudioTrack && mOpusState) {
keyframeOffsetMs = std::max(keyframeOffsetMs, OGG_SEEK_OPUS_PREROLL);
}
int64_t seekTarget = std::max(aStartTime, aTarget - keyframeOffsetMs);
// Minimize the bisection search space using the known timestamps from the
// buffered ranges.
- SeekRange k = SelectSeekRange(aRanges, seekTarget, aStartTime, aEndTime, false);
- return SeekBisection(seekTarget, k, OGG_SEEK_FUZZ_USECS);
+ SeekRange k =
+ SelectSeekRange(aType, aRanges, seekTarget, aStartTime, aEndTime, false);
+ return SeekBisection(aType, seekTarget, k, OGG_SEEK_FUZZ_USECS);
}
nsresult
-OggDemuxer::SeekBisection(int64_t aTarget,
+OggDemuxer::SeekBisection(TrackInfo::TrackType aType,
+ int64_t aTarget,
const SeekRange& aRange,
uint32_t aFuzz)
{
nsresult res;
- if (aTarget == aRange.mTimeStart) {
- if (NS_FAILED(Reset())) {
+ if (aTarget <= aRange.mTimeStart) {
+ if (NS_FAILED(Reset(aType))) {
return NS_ERROR_FAILURE;
}
- res = mResource.Seek(nsISeekableStream::NS_SEEK_SET, 0);
+ res = Resource(aType)->Seek(nsISeekableStream::NS_SEEK_SET, 0);
NS_ENSURE_SUCCESS(res,res);
return NS_OK;
}
// Bisection search, find start offset of last page with end time less than
// the seek target.
ogg_int64_t startOffset = aRange.mOffsetStart;
ogg_int64_t startTime = aRange.mTimeStart;
@@ -1794,17 +1868,17 @@ OggDemuxer::SeekBisection(int64_t aTarge
bool mustBackoff = false;
// Guess where we should bisect to, based on the bit rate and the time
// remaining in the interval. Loop until we can determine the time at
// the guess offset.
while (true) {
// Discard any previously buffered packets/pages.
- if (NS_FAILED(Reset())) {
+ if (NS_FAILED(Reset(aType))) {
return NS_ERROR_FAILURE;
}
interval = endOffset - startOffset - startLength;
if (interval == 0) {
// Our interval is empty, we've found the optimal seek point, as the
// page at the start offset is before the seek target, and the page
// at the end offset is after the seek target.
@@ -1857,18 +1931,18 @@ OggDemuxer::SeekBisection(int64_t aTarge
NS_ASSERTION(guess != previousGuess, "Guess should be different to previous");
previousGuess = guess;
hops++;
// Locate the next page after our seek guess, and then figure out the
// granule time of the audio and video bitstreams there. We can then
// make a bisection decision based on our location in the media.
- PageSyncResult pageSyncResult = PageSync(&mResource,
- &mOggState,
+ PageSyncResult pageSyncResult = PageSync(Resource(aType),
+ OggState(aType),
false,
guess,
endOffset,
&page,
skippedBytes);
NS_ENSURE_TRUE(pageSyncResult != PAGE_SYNC_ERROR, NS_ERROR_FAILURE);
if (pageSyncResult == PAGE_SYNC_END_OF_RANGE) {
@@ -1888,54 +1962,54 @@ OggDemuxer::SeekBisection(int64_t aTarge
// Read pages until we can determine the granule time of the audio and
// video bitstream.
ogg_int64_t audioTime = -1;
ogg_int64_t videoTime = -1;
do {
// Add the page to its codec state, determine its granule time.
uint32_t serial = ogg_page_serialno(&page);
OggCodecState* codecState = mCodecStore.Get(serial);
- if (codecState && codecState->mActive) {
- int ret = ogg_stream_pagein(&codecState->mState, &page);
- NS_ENSURE_TRUE(ret == 0, NS_ERROR_FAILURE);
- }
+ if (codecState && GetCodecStateType(codecState) == aType) {
+ if (codecState->mActive) {
+ int ret = ogg_stream_pagein(&codecState->mState, &page);
+ NS_ENSURE_TRUE(ret == 0, NS_ERROR_FAILURE);
+ }
+
+ ogg_int64_t granulepos = ogg_page_granulepos(&page);
- ogg_int64_t granulepos = ogg_page_granulepos(&page);
+ if (aType == TrackInfo::kAudioTrack &&
+ granulepos > 0 && audioTime == -1) {
+ if (mVorbisState && serial == mVorbisState->mSerial) {
+ audioTime = mVorbisState->Time(granulepos);
+ } else if (mOpusState && serial == mOpusState->mSerial) {
+ audioTime = mOpusState->Time(granulepos);
+ }
+ }
- if (HasAudio() && granulepos > 0 && audioTime == -1) {
- if (mVorbisState && serial == mVorbisState->mSerial) {
- audioTime = mVorbisState->Time(granulepos);
- } else if (mOpusState && serial == mOpusState->mSerial) {
- audioTime = mOpusState->Time(granulepos);
+ if (aType == TrackInfo::kVideoTrack &&
+ granulepos > 0 && mTheoraState && serial == mTheoraState->mSerial &&
+ videoTime == -1) {
+ videoTime = mTheoraState->Time(granulepos);
+ }
+
+ if (pageOffset + pageLength >= endOffset) {
+ // Hit end of readable data.
+ break;
}
}
-
- if (HasVideo() &&
- granulepos > 0 &&
- serial == mTheoraState->mSerial &&
- videoTime == -1) {
- videoTime = mTheoraState->Time(granulepos);
- }
-
- if (pageOffset + pageLength >= endOffset) {
- // Hit end of readable data.
+ if (!ReadOggPage(aType, &page)) {
break;
}
- if (!ReadOggPage(&page)) {
- break;
- }
-
- } while ((HasAudio() && audioTime == -1) ||
- (HasVideo() && videoTime == -1));
+ } while ((aType == TrackInfo::kAudioTrack && audioTime == -1) ||
+ (aType == TrackInfo::kVideoTrack && videoTime == -1));
- if ((HasAudio() && audioTime == -1) ||
- (HasVideo() && videoTime == -1))
- {
+ if ((aType == TrackInfo::kAudioTrack && audioTime == -1) ||
+ (aType == TrackInfo::kVideoTrack && videoTime == -1)) {
// We don't have timestamps for all active tracks...
if (pageOffset == startOffset + startLength &&
pageOffset + pageLength >= endOffset) {
// We read the entire interval without finding timestamps for all
// active tracks. We know the interval start offset is before the seek
// target, and the interval end is after the seek target, and we can't
// terminate inside the interval, so we terminate the seek at the
// start of the interval.
@@ -1946,40 +2020,40 @@ OggDemuxer::SeekBisection(int64_t aTarge
// We should backoff; cause the guess to back off from the end, so
// that we've got more room to capture.
mustBackoff = true;
continue;
}
// We've found appropriate time stamps here. Proceed to bisect
// the search space.
- granuleTime = std::max(audioTime, videoTime);
+ granuleTime = aType == TrackInfo::kAudioTrack ? audioTime : videoTime;
NS_ASSERTION(granuleTime > 0, "Must get a granuletime");
break;
} // End of "until we determine time at guess offset" loop.
if (interval == 0) {
// Seek termination condition; we've found the page boundary of the
// last page before the target, and the first page after the target.
SEEK_LOG(LogLevel::Debug, ("Terminating seek at offset=%lld", startOffset));
NS_ASSERTION(startTime < aTarget, "Start time must always be less than target");
- res = mResource.Seek(nsISeekableStream::NS_SEEK_SET, startOffset);
+ res = Resource(aType)->Seek(nsISeekableStream::NS_SEEK_SET, startOffset);
NS_ENSURE_SUCCESS(res,res);
- if (NS_FAILED(Reset())) {
+ if (NS_FAILED(Reset(aType))) {
return NS_ERROR_FAILURE;
}
break;
}
SEEK_LOG(LogLevel::Debug, ("Time at offset %lld is %lld", guess, granuleTime));
if (granuleTime < seekTarget && granuleTime > seekLowerBound) {
// We're within the fuzzy region in which we want to terminate the search.
- res = mResource.Seek(nsISeekableStream::NS_SEEK_SET, pageOffset);
+ res = Resource(aType)->Seek(nsISeekableStream::NS_SEEK_SET, pageOffset);
NS_ENSURE_SUCCESS(res,res);
- if (NS_FAILED(Reset())) {
+ if (NS_FAILED(Reset(aType))) {
return NS_ERROR_FAILURE;
}
SEEK_LOG(LogLevel::Debug, ("Terminating seek at offset=%lld", pageOffset));
break;
}
if (granuleTime >= seekTarget) {
// We've landed after the seek target.
--- a/dom/media/ogg/OggDemuxer.h
+++ b/dom/media/ogg/OggDemuxer.h
@@ -44,32 +44,33 @@ private:
nsAutoOggSyncState() {
ogg_sync_init(&mState);
}
~nsAutoOggSyncState() {
ogg_sync_clear(&mState);
}
ogg_sync_state mState;
};
- media::TimeIntervals GetBuffered();
+ media::TimeIntervals GetBuffered(TrackInfo::TrackType aType);
void FindStartTime(int64_t& aOutStartTime);
+ void FindStartTime(TrackInfo::TrackType, int64_t& aOutStartTime);
- nsresult SeekInternal(const media::TimeUnit& aTarget);
+ nsresult SeekInternal(TrackInfo::TrackType aType, const media::TimeUnit& aTarget);
// Seeks to the keyframe preceding the target time using available
// keyframe indexes.
enum IndexedSeekResult {
SEEK_OK, // Success.
SEEK_INDEX_FAIL, // Failure due to no index, or invalid index.
SEEK_FATAL_ERROR // Error returned by a stream operation.
};
- IndexedSeekResult SeekToKeyframeUsingIndex(int64_t aTarget);
+ IndexedSeekResult SeekToKeyframeUsingIndex(TrackInfo::TrackType aType, int64_t aTarget);
// Rolls back a seek-using-index attempt, returning a failure error code.
- IndexedSeekResult RollbackIndexedSeek(int64_t aOffset);
+ IndexedSeekResult RollbackIndexedSeek(TrackInfo::TrackType aType, int64_t aOffset);
// Represents a section of contiguous media, with a start and end offset,
// and the timestamps of the start and end of that range, that is cached.
// Used to denote the extremities of a range in which we can seek quickly
// (because it's cached).
class SeekRange {
public:
SeekRange()
@@ -95,54 +96,58 @@ private:
mTimeStart == 0 &&
mTimeEnd == 0;
}
int64_t mOffsetStart, mOffsetEnd; // in bytes.
int64_t mTimeStart, mTimeEnd; // in usecs.
};
- nsresult GetSeekRanges(nsTArray<SeekRange>& aRanges);
- SeekRange SelectSeekRange(const nsTArray<SeekRange>& ranges,
+ nsresult GetSeekRanges(TrackInfo::TrackType aType, nsTArray<SeekRange>& aRanges);
+ SeekRange SelectSeekRange(TrackInfo::TrackType aType,
+ const nsTArray<SeekRange>& ranges,
int64_t aTarget,
int64_t aStartTime,
int64_t aEndTime,
bool aExact);
// Seeks to aTarget usecs in the buffered range aRange using bisection search,
// or to the keyframe prior to aTarget if we have video. aAdjustedTarget is
// an adjusted version of the target used to account for Opus pre-roll, if
// necessary. aStartTime must be the presentation time at the start of media,
// and aEndTime the time at end of media. aRanges must be the time/byte ranges
// buffered in the media cache as per GetSeekRanges().
- nsresult SeekInBufferedRange(int64_t aTarget,
+ nsresult SeekInBufferedRange(TrackInfo::TrackType aType,
+ int64_t aTarget,
int64_t aAdjustedTarget,
int64_t aStartTime,
int64_t aEndTime,
const nsTArray<SeekRange>& aRanges,
const SeekRange& aRange);
// Seeks to before aTarget usecs in media using bisection search. If the media
// has video, this will seek to before the keyframe required to render the
// media at aTarget. Will use aRanges in order to narrow the bisection
// search space. aStartTime must be the presentation time at the start of
// media, and aEndTime the time at end of media. aRanges must be the time/byte
// ranges buffered in the media cache as per GetSeekRanges().
- nsresult SeekInUnbuffered(int64_t aTarget,
+ nsresult SeekInUnbuffered(TrackInfo::TrackType aType,
+ int64_t aTarget,
int64_t aStartTime,
int64_t aEndTime,
const nsTArray<SeekRange>& aRanges);
// Performs a seek bisection to move the media stream's read cursor to the
// last ogg page boundary which has end time before aTarget usecs on both the
// Theora and Vorbis bitstreams. Limits its search to data inside aRange;
// i.e. it will only read inside of the aRange's start and end offsets.
// aFuzz is the number of usecs of leniency we'll allow; we'll terminate the
// seek when we land in the range (aTime - aFuzz, aTime) usecs.
- nsresult SeekBisection(int64_t aTarget,
+ nsresult SeekBisection(TrackInfo::TrackType aType,
+ int64_t aTarget,
const SeekRange& aRange,
uint32_t aFuzz);
// Chunk size to read when reading Ogg files. Average Ogg page length
// is about 4300 bytes, so we read the file in chunks larger than that.
static const int PAGE_STEP = 8192;
enum PageSyncResult {
@@ -156,50 +161,47 @@ private:
int64_t aOffset,
int64_t aEndOffset,
ogg_page* aPage,
int& aSkippedBytes);
// Demux next Ogg packet
ogg_packet* GetNextPacket(TrackInfo::TrackType aType);
- nsresult ResetTrackState(TrackInfo::TrackType aType);
-
- nsresult Reset();
+ nsresult Reset(TrackInfo::TrackType aType);
static const nsString GetKind(const nsCString& aRole);
static void InitTrack(MessageField* aMsgInfo,
TrackInfo* aInfo,
bool aEnable);
// Really private!
~OggDemuxer();
- void Cleanup();
// Read enough of the file to identify track information and header
// packets necessary for decoding to begin.
nsresult ReadMetadata();
// Read a page of data from the Ogg file. Returns true if a page has been
// read, false if the page read failed or end of file reached.
- bool ReadOggPage(ogg_page* aPage);
+ bool ReadOggPage(TrackInfo::TrackType aType, ogg_page* aPage);
// Send a page off to the individual streams it belongs to.
// Reconstructed packets, if any are ready, will be available
// on the individual OggCodecStates.
- nsresult DemuxOggPage(ogg_page* aPage);
+ nsresult DemuxOggPage(TrackInfo::TrackType aType, ogg_page* aPage);
// Read data and demux until a packet is available on the given stream state
- void DemuxUntilPacketAvailable(OggCodecState* aState);
+ void DemuxUntilPacketAvailable(TrackInfo::TrackType aType, OggCodecState* aState);
// Reads and decodes header packets for aState, until either header decode
// fails, or is complete. Initializes the codec state before returning.
// Returns true if reading headers and initializtion of the stream
// succeeds.
- bool ReadHeaders(OggCodecState* aState, OggHeaders& aHeaders);
+ bool ReadHeaders(TrackInfo::TrackType aType, OggCodecState* aState, OggHeaders& aHeaders);
// Reads the next link in the chain.
bool ReadOggChain();
// Set this media as being a chain and notifies the state machine that the
// media is no longer seekable.
void SetChained();
@@ -215,33 +217,34 @@ private:
void SetupMediaTracksInfo(const nsTArray<uint32_t>& aSerials);
void FillTags(TrackInfo* aInfo, MetadataTags* aTags);
// Compute an ogg page's checksum
ogg_uint32_t GetPageChecksum(ogg_page* aPage);
// Get the end time of aEndOffset. This is the playback position we'd reach
// after playback finished at aEndOffset.
- int64_t RangeEndTime(int64_t aEndOffset);
+ int64_t RangeEndTime(TrackInfo::TrackType aType, int64_t aEndOffset);
// Get the end time of aEndOffset, without reading before aStartOffset.
// This is the playback position we'd reach after playback finished at
// aEndOffset. If bool aCachedDataOnly is true, then we'll only read
// from data which is cached in the media cached, otherwise we'll do
// regular blocking reads from the media stream. If bool aCachedDataOnly
// is true, this can safely be called on the main thread, otherwise it
// must be called on the state machine thread.
- int64_t RangeEndTime(int64_t aStartOffset,
+ int64_t RangeEndTime(TrackInfo::TrackType aType,
+ int64_t aStartOffset,
int64_t aEndOffset,
bool aCachedDataOnly);
// Get the start time of the range beginning at aOffset. This is the start
- // time of the first frame and or audio sample we'd be able to play if we
+ // time of the first aType sample we'd be able to play if we
// started playback at aOffset.
- int64_t RangeStartTime(int64_t aOffset);
+ int64_t RangeStartTime(TrackInfo::TrackType aType, int64_t aOffset);
MediaInfo mInfo;
nsTArray<RefPtr<OggTrackDemuxer>> mDemuxers;
// Map of codec-specific bitstream states.
OggCodecStore mCodecStore;
@@ -251,27 +254,39 @@ private:
// Decode state of the Vorbis bitstream we're decoding, if we have audio.
VorbisState* mVorbisState;
// Decode state of the Opus bitstream we're decoding, if we have one.
OpusState* mOpusState;
// Get the bitstream decode state for the given track type
OggCodecState* GetTrackCodecState(TrackInfo::TrackType aType) const;
+ TrackInfo::TrackType GetCodecStateType(OggCodecState* aState) const;
// Represents the user pref media.opus.enabled at the time our
// contructor was called. We can't check it dynamically because
// we're not on the main thread;
bool mOpusEnabled;
// Decode state of the Skeleton bitstream.
SkeletonState* mSkeletonState;
// Ogg decoding state.
- ogg_sync_state mOggState;
+ struct OggStateContext {
+ explicit OggStateContext(MediaResource* aResource) : mResource(aResource) {}
+ nsAutoOggSyncState mOggState;
+ MediaResourceIndex mResource;
+ Maybe<media::TimeUnit> mStartTime;
+ };
+
+ ogg_sync_state* OggState(TrackInfo::TrackType aType);
+ MediaResourceIndex* Resource(TrackInfo::TrackType aType);
+ MediaResourceIndex* CommonResource();
+ OggStateContext mAudioOggState;
+ OggStateContext mVideoOggState;
// Vorbis/Opus/Theora data used to compute timestamps. This is written on the
// decoder thread and read on the main thread. All reading on the main
// thread must be done after metadataloaded. We can't use the existing
// data in the codec states due to threading issues. You must check the
// associated mTheoraState or mVorbisState pointer is non-null before
// using this codec data.
uint32_t mVorbisSerial;
@@ -285,30 +300,30 @@ private:
// Booleans to indicate if we have audio and/or video data
bool HasVideo() const;
bool HasAudio() const;
bool HasSkeleton() const {
return mSkeletonState != 0 && mSkeletonState->mActive;
}
bool HaveStartTime () const;
+ bool HaveStartTime (TrackInfo::TrackType aType);
int64_t StartTime() const;
+ int64_t StartTime(TrackInfo::TrackType aType);
// The picture region inside Theora frame to be displayed, if we have
// a Theora video track.
nsIntRect mPicture;
// True if we are decoding a chained ogg.
bool mIsChained;
// Number of audio frames decoded so far.
int64_t mDecodedAudioFrames;
- MediaResourceIndex mResource;
-
friend class OggTrackDemuxer;
};
class OggTrackDemuxer : public MediaTrackDemuxer
{
public:
OggTrackDemuxer(OggDemuxer* aParent,
TrackInfo::TrackType aType,