Bug 1362910. P2 - fix callers.
Since RefPtr<T>&& can't be converted to T* implicitly, we need to change
T* to RefPtr<T> in resolve/reject callbacks to make it compile again.
We should review these changes later for opportunities to optimize away
unnecessary AddRef/Release pairs.
MozReview-Commit-ID: 22rHQ8dhxJv
--- a/dom/media/MediaDecoderReaderWrapper.cpp
+++ b/dom/media/MediaDecoderReaderWrapper.cpp
@@ -47,19 +47,19 @@ MediaDecoderReaderWrapper::RequestAudioD
{
MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
MOZ_ASSERT(!mShutdown);
int64_t startTime = StartTime().ToMicroseconds();
return InvokeAsync(mReader->OwnerThread(), mReader.get(),
__func__, &MediaDecoderReader::RequestAudioData)
->Then(mOwnerThread, __func__,
- [startTime] (AudioData* aAudio) {
+ [startTime] (RefPtr<AudioData> aAudio) {
aAudio->AdjustForStartTime(startTime);
- return AudioDataPromise::CreateAndResolve(aAudio, __func__);
+ return AudioDataPromise::CreateAndResolve(aAudio.forget(), __func__);
},
[] (const MediaResult& aError) {
return AudioDataPromise::CreateAndReject(aError, __func__);
});
}
RefPtr<MediaDecoderReaderWrapper::VideoDataPromise>
MediaDecoderReaderWrapper::RequestVideoData(bool aSkipToNextKeyframe,
@@ -73,19 +73,19 @@ MediaDecoderReaderWrapper::RequestVideoD
}
int64_t startTime = StartTime().ToMicroseconds();
return InvokeAsync(
mReader->OwnerThread(), mReader.get(), __func__,
&MediaDecoderReader::RequestVideoData,
aSkipToNextKeyframe, aTimeThreshold)
->Then(mOwnerThread, __func__,
- [startTime] (VideoData* aVideo) {
+ [startTime] (RefPtr<VideoData> aVideo) {
aVideo->AdjustForStartTime(startTime);
- return VideoDataPromise::CreateAndResolve(aVideo, __func__);
+ return VideoDataPromise::CreateAndResolve(aVideo.forget(), __func__);
},
[] (const MediaResult& aError) {
return VideoDataPromise::CreateAndReject(aError, __func__);
});
}
RefPtr<MediaDecoderReader::SeekPromise>
MediaDecoderReaderWrapper::Seek(const SeekTarget& aTarget)
@@ -133,28 +133,28 @@ MediaDecoderReaderWrapper::Shutdown()
{
MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
mShutdown = true;
return InvokeAsync(mReader->OwnerThread(), mReader.get(), __func__,
&MediaDecoderReader::Shutdown);
}
RefPtr<MediaDecoderReaderWrapper::MetadataPromise>
-MediaDecoderReaderWrapper::OnMetadataRead(MetadataHolder* aMetadata)
+MediaDecoderReaderWrapper::OnMetadataRead(RefPtr<MetadataHolder> aMetadata)
{
MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
if (mShutdown) {
return MetadataPromise::CreateAndReject(
NS_ERROR_DOM_MEDIA_ABORT_ERR, __func__);
}
if (mStartTime.isNothing()) {
mStartTime.emplace(aMetadata->mInfo.mStartTime);
}
- return MetadataPromise::CreateAndResolve(aMetadata, __func__);
+ return MetadataPromise::CreateAndResolve(aMetadata.forget(), __func__);
}
RefPtr<MediaDecoderReaderWrapper::MetadataPromise>
MediaDecoderReaderWrapper::OnMetadataNotRead(const MediaResult& aError)
{
return MetadataPromise::CreateAndReject(aError, __func__);
}
--- a/dom/media/MediaDecoderReaderWrapper.h
+++ b/dom/media/MediaDecoderReaderWrapper.h
@@ -85,17 +85,17 @@ public:
}
void SetCDMProxy(CDMProxy* aProxy) { mReader->SetCDMProxy(aProxy); }
void SetVideoBlankDecode(bool aIsBlankDecode);
private:
~MediaDecoderReaderWrapper();
- RefPtr<MetadataPromise> OnMetadataRead(MetadataHolder* aMetadata);
+ RefPtr<MetadataPromise> OnMetadataRead(RefPtr<MetadataHolder> aMetadata);
RefPtr<MetadataPromise> OnMetadataNotRead(const MediaResult& aError);
const RefPtr<AbstractThread> mOwnerThread;
const RefPtr<MediaDecoderReader> mReader;
bool mShutdown = false;
Maybe<media::TimeUnit> mStartTime;
};
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -355,17 +355,17 @@ public:
// Set mode to METADATA since we are about to read metadata.
Resource()->SetReadMode(MediaCacheStream::MODE_METADATA);
// We disconnect mMetadataRequest in Exit() so it is fine to capture
// a raw pointer here.
Reader()->ReadMetadata()
->Then(OwnerThread(), __func__,
- [this] (MetadataHolder* aMetadata) {
+ [this] (RefPtr<MetadataHolder> aMetadata) {
OnMetadataRead(aMetadata);
},
[this] (const MediaResult& aError) {
OnMetadataNotRead(aError);
})
->Track(mMetadataRequest);
}
@@ -3229,17 +3229,17 @@ MediaDecoderStateMachine::RequestAudioDa
MOZ_ASSERT(!IsRequestingAudioData());
MOZ_ASSERT(!IsWaitingAudioData());
LOGV("Queueing audio task - queued=%" PRIuSIZE ", decoder-queued=%" PRIuSIZE,
AudioQueue().GetSize(), mReader->SizeOfAudioQueueInFrames());
RefPtr<MediaDecoderStateMachine> self = this;
mReader->RequestAudioData()->Then(
OwnerThread(), __func__,
- [this, self] (AudioData* aAudio) {
+ [this, self] (RefPtr<AudioData> aAudio) {
MOZ_ASSERT(aAudio);
mAudioDataRequest.Complete();
// audio->GetEndTime() is not always mono-increasing in chained ogg.
mDecodedAudioEndTime = std::max(
aAudio->GetEndTime(), mDecodedAudioEndTime);
LOGV("OnAudioDecoded [%" PRId64 ",%" PRId64 "]",
aAudio->mTime.ToMicroseconds(),
aAudio->GetEndTime().ToMicroseconds());
@@ -3276,17 +3276,17 @@ MediaDecoderStateMachine::RequestVideoDa
", skip=%i, time=%" PRId64,
VideoQueue().GetSize(), mReader->SizeOfVideoQueueInFrames(),
aSkipToNextKeyframe, aCurrentTime.ToMicroseconds());
TimeStamp videoDecodeStartTime = TimeStamp::Now();
RefPtr<MediaDecoderStateMachine> self = this;
mReader->RequestVideoData(aSkipToNextKeyframe, aCurrentTime)->Then(
OwnerThread(), __func__,
- [this, self, videoDecodeStartTime] (VideoData* aVideo) {
+ [this, self, videoDecodeStartTime] (RefPtr<VideoData> aVideo) {
MOZ_ASSERT(aVideo);
mVideoDataRequest.Complete();
// Handle abnormal or negative timestamps.
mDecodedVideoEndTime = std::max(
mDecodedVideoEndTime, aVideo->GetEndTime());
LOGV("OnVideoDecoded [%" PRId64 ",%" PRId64 "]",
aVideo->mTime.ToMicroseconds(),
aVideo->GetEndTime().ToMicroseconds());
--- a/dom/media/MediaFormatReader.cpp
+++ b/dom/media/MediaFormatReader.cpp
@@ -289,17 +289,17 @@ LocalAllocPolicy::ProcessRequest()
return;
}
RefPtr<AutoDeallocToken> token = new AutoDeallocToken(this);
RefPtr<LocalAllocPolicy> self = this;
GlobalAllocPolicy::Instance(mTrack).Alloc()->Then(
mOwnerThread, __func__,
- [self, token](Token* aToken) {
+ [self, token](RefPtr<Token> aToken) {
self->mTokenRequest.Complete();
token->Append(aToken);
self->mPendingPromise.Resolve(token, __func__);
},
[self, token]() {
self->mTokenRequest.Complete();
self->mPendingPromise.Reject(true, __func__);
})->Track(mTokenRequest);
@@ -583,19 +583,19 @@ private:
void
MediaFormatReader::DecoderFactory::RunStage(Data& aData)
{
switch (aData.mStage) {
case Stage::None: {
MOZ_ASSERT(!aData.mToken);
aData.mPolicy->Alloc()->Then(
mOwner->OwnerThread(), __func__,
- [this, &aData] (Token* aToken) {
+ [this, &aData] (RefPtr<Token> aToken) {
aData.mTokenRequest.Complete();
- aData.mToken = aToken;
+ aData.mToken = aToken.forget();
aData.mStage = Stage::CreateDecoder;
RunStage(aData);
},
[&aData] () {
aData.mTokenRequest.Complete();
aData.mStage = Stage::None;
})->Track(aData.mTokenRequest);
aData.mStage = Stage::WaitForToken;
--- a/dom/media/android/AndroidMediaReader.cpp
+++ b/dom/media/android/AndroidMediaReader.cpp
@@ -314,17 +314,17 @@ AndroidMediaReader::Seek(const SeekTarge
// a sync point, whereas for video there are only keyframes once every few
// seconds. So if we have both audio and video, we must seek the video
// stream to the preceeding keyframe first, get the stream time, and then
// seek the audio stream to match the video stream's time. Otherwise, the
// audio and video streams won't be in sync after the seek.
mVideoSeekTimeUs = aTarget.GetTime().ToMicroseconds();
RefPtr<AndroidMediaReader> self = this;
- DecodeToFirstVideoData()->Then(OwnerThread(), __func__, [self] (MediaData* v) {
+ DecodeToFirstVideoData()->Then(OwnerThread(), __func__, [self] (RefPtr<MediaData> v) {
self->mSeekRequest.Complete();
self->mAudioSeekTimeUs = v->mTime.ToMicroseconds();
self->mSeekPromise.Resolve(media::TimeUnit::FromMicroseconds(self->mAudioSeekTimeUs), __func__);
}, [self, aTarget] () {
self->mSeekRequest.Complete();
self->mAudioSeekTimeUs = aTarget.GetTime().ToMicroseconds();
self->mSeekPromise.Resolve(aTarget.GetTime(), __func__);
})->Track(mSeekRequest);
--- a/dom/media/platforms/agnostic/eme/EMEDecoderModule.cpp
+++ b/dom/media/platforms/agnostic/eme/EMEDecoderModule.cpp
@@ -54,34 +54,34 @@ public:
MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
MOZ_RELEASE_ASSERT(mDecrypts.Count() == 0,
"Can only process one sample at a time");
RefPtr<DecodePromise> p = mDecodePromise.Ensure(__func__);
RefPtr<EMEDecryptor> self = this;
mSamplesWaitingForKey->WaitIfKeyNotUsable(aSample)
->Then(mTaskQueue, __func__,
- [self, this](MediaRawData* aSample) {
+ [self, this](RefPtr<MediaRawData> aSample) {
mKeyRequest.Complete();
ThrottleDecode(aSample);
},
[self, this]() {
mKeyRequest.Complete();
})
->Track(mKeyRequest);
return p;
}
void ThrottleDecode(MediaRawData* aSample)
{
RefPtr<EMEDecryptor> self = this;
mThroughputLimiter.Throttle(aSample)
->Then(mTaskQueue, __func__,
- [self, this] (MediaRawData* aSample) {
+ [self, this] (RefPtr<MediaRawData> aSample) {
mThrottleRequest.Complete();
AttemptDecode(aSample);
},
[self, this]() {
mThrottleRequest.Complete();
})
->Track(mThrottleRequest);
}
@@ -265,17 +265,17 @@ EMEMediaDataDecoderProxy::EMEMediaDataDe
RefPtr<MediaDataDecoder::DecodePromise>
EMEMediaDataDecoderProxy::Decode(MediaRawData* aSample)
{
RefPtr<DecodePromise> p = mDecodePromise.Ensure(__func__);
RefPtr<EMEMediaDataDecoderProxy> self = this;
mSamplesWaitingForKey->WaitIfKeyNotUsable(aSample)
->Then(mTaskQueue, __func__,
- [self, this](MediaRawData* aSample) {
+ [self, this](RefPtr<MediaRawData> aSample) {
mKeyRequest.Complete();
nsAutoPtr<MediaRawDataWriter> writer(aSample->CreateWriter());
mProxy->GetSessionIdsForKeyId(aSample->mCrypto.mKeyId,
writer->mCrypto.mSessionIds);
MediaDataDecoderProxy::Decode(aSample)
->Then(mTaskQueue, __func__,
[self, this](const DecodedData& aResults) {
--- a/dom/media/webaudio/MediaBufferDecoder.cpp
+++ b/dom/media/webaudio/MediaBufferDecoder.cpp
@@ -109,20 +109,20 @@ private:
nsCOMPtr<nsIRunnable> event =
new ReportResultTask(mDecodeJob, &WebAudioDecodeJob::OnFailure, aErrorCode);
mMainThread->Dispatch(event.forget());
}
}
void Decode();
- void OnMetadataRead(MetadataHolder* aMetadata);
+ void OnMetadataRead(RefPtr<MetadataHolder> aMetadata);
void OnMetadataNotRead(const MediaResult& aError);
void RequestSample();
- void SampleDecoded(AudioData* aData);
+ void SampleDecoded(RefPtr<AudioData> aData);
void SampleNotDecoded(const MediaResult& aError);
void FinishDecode();
void AllocateBuffer();
void CallbackTheResult();
void Cleanup()
{
MOZ_ASSERT(NS_IsMainThread());
@@ -264,17 +264,17 @@ MediaDecodeTask::Decode()
mDecoderReader->SetIgnoreAudioOutputFormat();
mDecoderReader->AsyncReadMetadata()->Then(mDecoderReader->OwnerThread(), __func__, this,
&MediaDecodeTask::OnMetadataRead,
&MediaDecodeTask::OnMetadataNotRead);
}
void
-MediaDecodeTask::OnMetadataRead(MetadataHolder* aMetadata)
+MediaDecodeTask::OnMetadataRead(RefPtr<MetadataHolder> aMetadata)
{
mMediaInfo = aMetadata->mInfo;
if (!mMediaInfo.HasAudio()) {
mDecoderReader->Shutdown();
ReportFailureOnMainThread(WebAudioDecodeJob::NoAudio);
return;
}
@@ -311,17 +311,17 @@ void
MediaDecodeTask::RequestSample()
{
mDecoderReader->RequestAudioData()->Then(mDecoderReader->OwnerThread(), __func__, this,
&MediaDecodeTask::SampleDecoded,
&MediaDecodeTask::SampleNotDecoded);
}
void
-MediaDecodeTask::SampleDecoded(AudioData* aData)
+MediaDecodeTask::SampleDecoded(RefPtr<AudioData> aData)
{
MOZ_ASSERT(!NS_IsMainThread());
mAudioQueue.Push(aData);
if (!mFirstFrameDecoded) {
mDecoderReader->ReadUpdatedMetadata(&mMediaInfo);
mFirstFrameDecoded = true;
}
RequestSample();