Bug 1246051 - have MediaQueue<T>::Peek/PeekFront return a RefPtr<> to avoid dangling pointers per comment 0. r=gerald.
MozReview-Commit-ID: JhPVlqMWsTu
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -424,17 +424,17 @@ MediaDecoderStateMachine::GetDecodedAudi
}
void MediaDecoderStateMachine::DiscardStreamData()
{
MOZ_ASSERT(OnTaskQueue());
const auto clockTime = GetClock();
while (true) {
- const MediaData* a = AudioQueue().PeekFront();
+ RefPtr<MediaData> a = AudioQueue().PeekFront();
// If we discard audio samples fed to the stream immediately, we will
// keep decoding audio samples till the end and consume a lot of memory.
// Therefore we only discard those behind the stream clock to throttle
// the decoding speed.
// Note we don't discard a sample when |a->mTime == clockTime| because that
// will discard the 1st sample when clockTime is still 0.
if (a && a->mTime < clockTime) {
@@ -1972,17 +1972,17 @@ MediaDecoderStateMachine::SeekCompleted(
int64_t seekTime = mSeekTask->GetSeekJob().mTarget.GetTime().ToMicroseconds();
int64_t newCurrentTime = seekTime;
// Setup timestamp state.
RefPtr<MediaData> video = VideoQueue().PeekFront();
if (seekTime == Duration().ToMicroseconds()) {
newCurrentTime = seekTime;
} else if (HasAudio()) {
- MediaData* audio = AudioQueue().PeekFront();
+ RefPtr<MediaData> audio = AudioQueue().PeekFront();
// Though we adjust the newCurrentTime in audio-based, and supplemented
// by video. For better UX, should NOT bind the slide position to
// the first audio data timestamp directly.
// While seeking to a position where there's only either audio or video, or
// seeking to a position lies before audio or video, we need to check if
// seekTime is bounded in suitable duration. See Bug 1112438.
int64_t audioStart = audio ? audio->mTime : seekTime;
// We only pin the seek time to the video start time if the video frame
--- a/dom/media/MediaQueue.h
+++ b/dom/media/MediaQueue.h
@@ -60,22 +60,22 @@ public:
ReentrantMonitorAutoEnter mon(mReentrantMonitor);
RefPtr<T> rv = dont_AddRef(static_cast<T*>(nsDeque::PopFront()));
if (rv) {
mPopEvent.Notify(rv);
}
return rv.forget();
}
- inline T* Peek() {
+ inline RefPtr<T> Peek() {
ReentrantMonitorAutoEnter mon(mReentrantMonitor);
return static_cast<T*>(nsDeque::Peek());
}
- inline T* PeekFront() {
+ inline RefPtr<T> PeekFront() {
ReentrantMonitorAutoEnter mon(mReentrantMonitor);
return static_cast<T*>(nsDeque::PeekFront());
}
void Reset() {
ReentrantMonitorAutoEnter mon(mReentrantMonitor);
while (GetSize() > 0) {
RefPtr<T> x = dont_AddRef(static_cast<T*>(nsDeque::PopFront()));
@@ -104,18 +104,18 @@ public:
}
// Returns the approximate number of microseconds of items in the queue.
int64_t Duration() {
ReentrantMonitorAutoEnter mon(mReentrantMonitor);
if (GetSize() == 0) {
return 0;
}
- T* last = Peek();
- T* first = PeekFront();
+ T* last = static_cast<T*>(nsDeque::Peek());
+ T* first = static_cast<T*>(nsDeque::PeekFront());
return last->GetEndTime() - first->mTime;
}
void LockedForEach(nsDequeFunctor& aFunctor) const {
ReentrantMonitorAutoEnter mon(mReentrantMonitor);
ForEach(aFunctor);
}
--- a/dom/media/mediasink/DecodedAudioDataSink.cpp
+++ b/dom/media/mediasink/DecodedAudioDataSink.cpp
@@ -218,17 +218,17 @@ DecodedAudioDataSink::PopFrames(uint32_t
};
while (!mCurrentData) {
// No data in the queue. Return an empty chunk.
if (AudioQueue().GetSize() == 0) {
return MakeUnique<Chunk>();
}
- AudioData* a = AudioQueue().PeekFront()->As<AudioData>();
+ RefPtr<AudioData> a = AudioQueue().PeekFront()->As<AudioData>();
// Ignore the element with 0 frames and try next.
if (a->mFrames == 0) {
RefPtr<MediaData> releaseMe = AudioQueue().PopFront();
continue;
}
// Ignore invalid samples.
--- a/dom/media/mediasink/VideoSink.cpp
+++ b/dom/media/mediasink/VideoSink.cpp
@@ -378,17 +378,17 @@ VideoSink::UpdateRenderedVideoFrames()
// the current frame.
NS_ASSERTION(clockTime >= 0, "Should have positive clock time.");
int64_t remainingTime = -1;
if (VideoQueue().GetSize() > 0) {
RefPtr<MediaData> currentFrame = VideoQueue().PopFront();
int32_t framesRemoved = 0;
while (VideoQueue().GetSize() > 0) {
- MediaData* nextFrame = VideoQueue().PeekFront();
+ RefPtr<MediaData> nextFrame = VideoQueue().PeekFront();
if (nextFrame->mTime > clockTime) {
remainingTime = nextFrame->mTime - clockTime;
break;
}
++framesRemoved;
if (!currentFrame->As<VideoData>()->mSentToCompositor) {
mFrameStats.NotifyDecodedFrames(0, 0, 1);
VSINK_LOG_V("discarding video frame mTime=%lld clock_time=%lld",
--- a/dom/media/ogg/OggReader.cpp
+++ b/dom/media/ogg/OggReader.cpp
@@ -1353,17 +1353,17 @@ nsresult OggReader::SeekInBufferedRange(
bool skip = false;
eof = !DecodeVideoFrame(skip, 0);
if (mDecoder->IsOggDecoderShutdown()) {
return NS_ERROR_FAILURE;
}
} while (!eof &&
mVideoQueue.GetSize() == 0);
- VideoData* video = mVideoQueue.PeekFront();
+ RefPtr<VideoData> video = mVideoQueue.PeekFront();
if (video && !video->mKeyframe) {
// First decoded frame isn't a keyframe, seek back to previous keyframe,
// otherwise we'll get visual artifacts.
NS_ASSERTION(video->mTimecode != -1, "Must have a granulepos");
int shift = mTheoraState->mInfo.keyframe_granule_shift;
int64_t keyframeGranulepos = (video->mTimecode >> shift) << shift;
int64_t keyframeTime = mTheoraState->StartTime(keyframeGranulepos);
SEEK_LOG(LogLevel::Debug, ("Keyframe for %lld is at %lld, seeking back to it",
@@ -1485,17 +1485,17 @@ nsresult OggReader::SeekInternal(int64_t
// as although the seek should finish on a page containing a keyframe,
// there may be non-keyframes in the page before the keyframe.
// When doing fastSeek we display the first frame after the seek, so
// we need to advance the decode to the keyframe otherwise we'll get
// visual artifacts in the first frame output after the seek.
// First, we must check to see if there's already a keyframe in the frames
// that we may have already decoded, and discard frames up to the
// keyframe.
- VideoData* v;
+ RefPtr<VideoData> v;
while ((v = mVideoQueue.PeekFront()) && !v->mKeyframe) {
RefPtr<VideoData> releaseMe = mVideoQueue.PopFront();
}
if (mVideoQueue.GetSize() == 0) {
// We didn't find a keyframe in the frames already here, so decode
// forwards until we find a keyframe.
bool skip = true;
while (DecodeVideoFrame(skip, 0) && skip) {
@@ -1954,80 +1954,75 @@ media::TimeIntervals OggReader::GetBuffe
}
}
}
return buffered;
#endif
}
-VideoData* OggReader::FindStartTime(int64_t& aOutStartTime)
+void OggReader::FindStartTime(int64_t& aOutStartTime)
{
MOZ_ASSERT(OnTaskQueue());
// Extract the start times of the bitstreams in order to calculate
// the duration.
int64_t videoStartTime = INT64_MAX;
int64_t audioStartTime = INT64_MAX;
- VideoData* videoData = nullptr;
if (HasVideo()) {
- videoData = SyncDecodeToFirstVideoData();
+ RefPtr<VideoData> videoData = SyncDecodeToFirstVideoData();
if (videoData) {
videoStartTime = videoData->mTime;
LOG(LogLevel::Debug, ("OggReader::FindStartTime() video=%lld", videoStartTime));
}
}
if (HasAudio()) {
- AudioData* audioData = SyncDecodeToFirstAudioData();
+ RefPtr<AudioData> audioData = SyncDecodeToFirstAudioData();
if (audioData) {
audioStartTime = audioData->mTime;
LOG(LogLevel::Debug, ("OggReader::FindStartTime() audio=%lld", audioStartTime));
}
}
int64_t startTime = std::min(videoStartTime, audioStartTime);
if (startTime != INT64_MAX) {
aOutStartTime = startTime;
}
-
- return videoData;
}
-AudioData* OggReader::SyncDecodeToFirstAudioData()
+RefPtr<AudioData> OggReader::SyncDecodeToFirstAudioData()
{
bool eof = false;
while (!eof && AudioQueue().GetSize() == 0) {
if (mDecoder->IsOggDecoderShutdown()) {
return nullptr;
}
eof = !DecodeAudioData();
}
if (eof) {
AudioQueue().Finish();
}
- AudioData* d = nullptr;
- return (d = AudioQueue().PeekFront()) ? d : nullptr;
+ return AudioQueue().PeekFront();
}
-VideoData* OggReader::SyncDecodeToFirstVideoData()
+RefPtr<VideoData> OggReader::SyncDecodeToFirstVideoData()
{
bool eof = false;
while (!eof && VideoQueue().GetSize() == 0) {
if (mDecoder->IsOggDecoderShutdown()) {
return nullptr;
}
bool keyframeSkip = false;
eof = !DecodeVideoFrame(keyframeSkip, 0);
}
if (eof) {
VideoQueue().Finish();
}
- VideoData* d = nullptr;
- return (d = VideoQueue().PeekFront()) ? d : nullptr;
+ return VideoQueue().PeekFront();
}
OggCodecStore::OggCodecStore()
: mMonitor("CodecStore")
{
}
void OggCodecStore::Add(uint32_t serial, OggCodecState* codecState)
--- a/dom/media/ogg/OggReader.h
+++ b/dom/media/ogg/OggReader.h
@@ -72,19 +72,18 @@ private:
bool HasVideo() {
return mTheoraState != 0 && mTheoraState->mActive;
}
// TODO: DEPRECATED. This uses synchronous decoding.
- // Stores the presentation time of the first frame we'd be able to play if
- // we started playback at the current position. Returns the first video
- // frame, if we have video.
+ // Stores the presentation time of the first frame we'd be able to play if
+ // we started playback at the current position.
- VideoData* FindStartTime(int64_t& aOutStartTime);
- AudioData* SyncDecodeToFirstAudioData();
- VideoData* SyncDecodeToFirstVideoData();
+ void FindStartTime(int64_t& aOutStartTime);
+ RefPtr<AudioData> SyncDecodeToFirstAudioData();
+ RefPtr<VideoData> SyncDecodeToFirstVideoData();
// This monitor should be taken when reading or writing to mIsChained.
ReentrantMonitor mMonitor;
// Specialized Reset() method to signal if the seek is
// to the start of the stream.
nsresult ResetDecode(bool start);