Bug 1350814 - Replace use of int64_t for microseconds with TimeUnit in AudioSink.
MozReview-Commit-ID: 3diOpJu7g8i
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -2695,17 +2695,18 @@ MediaDecoderStateMachine::AudioAudibleCh
media::MediaSink*
MediaDecoderStateMachine::CreateAudioSink()
{
RefPtr<MediaDecoderStateMachine> self = this;
auto audioSinkCreator = [self] () {
MOZ_ASSERT(self->OnTaskQueue());
AudioSink* audioSink = new AudioSink(
- self->mTaskQueue, self->mAudioQueue, self->GetMediaTime(),
+ self->mTaskQueue, self->mAudioQueue,
+ TimeUnit::FromMicroseconds(self->GetMediaTime()),
self->Info().mAudio, self->mAudioChannel);
self->mAudibleListener = audioSink->AudibleEvent().Connect(
self->mTaskQueue, self.get(),
&MediaDecoderStateMachine::AudioAudibleChanged);
return audioSink;
};
return new AudioSinkWrapper(mTaskQueue, audioSinkCreator);
--- a/dom/media/mediasink/AudioSink.cpp
+++ b/dom/media/mediasink/AudioSink.cpp
@@ -28,32 +28,30 @@ namespace media {
// The amount of audio frames that is used to fuzz rounding errors.
static const int64_t AUDIO_FUZZ_FRAMES = 1;
// Amount of audio frames we will be processing ahead of use
static const int32_t LOW_AUDIO_USECS = 300000;
AudioSink::AudioSink(AbstractThread* aThread,
MediaQueue<AudioData>& aAudioQueue,
- int64_t aStartTime,
+ TimeUnit aStartTime,
const AudioInfo& aInfo,
dom::AudioChannel aChannel)
: mStartTime(aStartTime)
- , mLastGoodPosition(0)
, mInfo(aInfo)
, mChannel(aChannel)
, mPlaying(true)
, mMonitor("AudioSink")
, mWritten(0)
, mErrored(false)
, mPlaybackComplete(false)
, mOwnerThread(aThread)
, mProcessedQueueLength(0)
, mFramesParsed(0)
- , mLastEndTime(0)
, mIsAudioDataAudible(false)
, mAudioQueue(aAudioQueue)
{
bool resampling = MediaPrefs::AudioSinkResampling();
if (resampling) {
mOutputRate = MediaPrefs::AudioSinkResampleRate();
} else if (mInfo.mRate == 44100 || mInfo.mRate == 48000) {
@@ -98,22 +96,23 @@ AudioSink::Init(const PlaybackParams& aP
RefPtr<GenericPromise> p = mEndPromise.Ensure(__func__);
nsresult rv = InitializeAudioStream(aParams);
if (NS_FAILED(rv)) {
mEndPromise.Reject(rv, __func__);
}
return p;
}
-int64_t
+TimeUnit
AudioSink::GetPosition()
{
- int64_t pos;
+ int64_t tmp;
if (mAudioStream &&
- (pos = mAudioStream->GetPosition()) >= 0) {
+ (tmp = mAudioStream->GetPosition()) >= 0) {
+ TimeUnit pos = TimeUnit::FromMicroseconds(tmp);
NS_ASSERTION(pos >= mLastGoodPosition,
"AudioStream position shouldn't go backward");
// Update the last good position when we got a good one.
if (pos >= mLastGoodPosition) {
mLastGoodPosition = pos;
}
}
@@ -216,32 +215,32 @@ AudioSink::InitializeAudioStream(const P
mAudioStream->SetVolume(aParams.mVolume);
mAudioStream->SetPlaybackRate(aParams.mPlaybackRate);
mAudioStream->SetPreservesPitch(aParams.mPreservesPitch);
mAudioStream->Start();
return NS_OK;
}
-int64_t
+TimeUnit
AudioSink::GetEndTime() const
{
int64_t written;
{
MonitorAutoLock mon(mMonitor);
written = mWritten;
}
- CheckedInt64 playedUsecs = FramesToUsecs(written, mOutputRate) + mStartTime;
- if (!playedUsecs.isValid()) {
+ TimeUnit played = FramesToTimeUnit(written, mOutputRate) + mStartTime;
+ if (!played.IsValid()) {
NS_WARNING("Int overflow calculating audio end time");
- return 0;
+ return TimeUnit::Zero();
}
// As we may be resampling, rounding errors may occur. Ensure we never get
// past the original end time.
- return std::min<int64_t>(mLastEndTime, playedUsecs.value());
+ return std::min(mLastEndTime, played);
}
UniquePtr<AudioStream::Chunk>
AudioSink::PopFrames(uint32_t aFrames)
{
class Chunk : public AudioStream::Chunk {
public:
Chunk(AudioData* aBuffer, uint32_t aFrames, AudioDataValue* aData)
@@ -402,18 +401,18 @@ AudioSink::NotifyAudioNeeded()
AudioConfig(data->mChannels, data->mRate),
AudioConfig(mOutputChannels, mOutputRate));
}
// See if there's a gap in the audio. If there is, push silence into the
// audio hardware, so we can play across the gap.
// Calculate the timestamp of the next chunk of audio in numbers of
// samples.
- CheckedInt64 sampleTime = UsecsToFrames(data->mTime - mStartTime,
- data->mRate);
+ CheckedInt64 sampleTime = TimeUnitToFrames(
+ TimeUnit::FromMicroseconds(data->mTime) - mStartTime, data->mRate);
// Calculate the number of frames that have been pushed onto the audio hardware.
CheckedInt64 missingFrames = sampleTime - mFramesParsed;
if (!missingFrames.isValid()) {
NS_WARNING("Int overflow in AudioSink");
mErrored = true;
return;
}
@@ -445,17 +444,17 @@ AudioSink::NotifyAudioNeeded()
mErrored = true;
return;
}
RefPtr<AudioData> silence = CreateAudioFromBuffer(Move(silenceData), data);
PushProcessedAudio(silence);
}
}
- mLastEndTime = data->GetEndTime();
+ mLastEndTime = TimeUnit::FromMicroseconds(data->GetEndTime());
mFramesParsed += data->mFrames;
if (mConverter->InputConfig() != mConverter->OutputConfig()) {
// We must ensure that the size in the buffer contains exactly the number
// of frames, in case one of the audio producer over allocated the buffer.
AlignedAudioBuffer buffer(Move(data->mAudioData));
buffer.SetLength(size_t(data->mFrames) * data->mChannels);
--- a/dom/media/mediasink/AudioSink.h
+++ b/dom/media/mediasink/AudioSink.h
@@ -27,32 +27,32 @@ class AudioConverter;
namespace media {
class AudioSink : private AudioStream::DataSource {
using PlaybackParams = MediaSink::PlaybackParams;
public:
AudioSink(AbstractThread* aThread,
MediaQueue<AudioData>& aAudioQueue,
- int64_t aStartTime,
+ TimeUnit aStartTime,
const AudioInfo& aInfo,
dom::AudioChannel aChannel);
~AudioSink();
// Return a promise which will be resolved when AudioSink
// finishes playing, or rejected if any error.
RefPtr<GenericPromise> Init(const PlaybackParams& aParams);
/*
* All public functions are not thread-safe.
* Called on the task queue of MDSM only.
*/
- int64_t GetPosition();
- int64_t GetEndTime() const;
+ TimeUnit GetPosition();
+ TimeUnit GetEndTime() const;
// Check whether we've pushed more frames to the audio hardware than it has
// played.
bool HasUnplayedFrames();
// Shut down the AudioSink's resources.
void Shutdown();
@@ -75,25 +75,25 @@ private:
bool Ended() const override;
void Drained() override;
void CheckIsAudible(const AudioData* aData);
// The audio stream resource. Used on the task queue of MDSM only.
RefPtr<AudioStream> mAudioStream;
- // The presentation time of the first audio frame that was played in
- // microseconds. We can add this to the audio stream position to determine
+ // The presentation time of the first audio frame that was played.
+ // We can add this to the audio stream position to determine
// the current audio time.
- const int64_t mStartTime;
+ const TimeUnit mStartTime;
// Keep the last good position returned from the audio stream. Used to ensure
// position returned by GetPosition() is mono-increasing in spite of audio
// stream error. Used on the task queue of MDSM only.
- int64_t mLastGoodPosition;
+ TimeUnit mLastGoodPosition;
const AudioInfo mInfo;
const dom::AudioChannel mChannel;
// Used on the task queue of MDSM only.
bool mPlaying;
@@ -144,17 +144,17 @@ private:
MediaEventListener mAudioQueueListener;
MediaEventListener mAudioQueueFinishListener;
MediaEventListener mProcessedQueueListener;
// Number of frames processed from mAudioQueue. Used to determine gaps in
// the input stream. It indicates the time in frames since playback started
// at the current input framerate.
int64_t mFramesParsed;
Maybe<RefPtr<AudioData>> mLastProcessedPacket;
- int64_t mLastEndTime;
+ TimeUnit mLastEndTime;
// Never modifed after construction.
uint32_t mOutputRate;
uint32_t mOutputChannels;
// True when audio is producing audible sound, false when audio is silent.
bool mIsAudioDataAudible;
MediaEventProducer<bool> mAudibleEvent;
--- a/dom/media/mediasink/AudioSinkWrapper.cpp
+++ b/dom/media/mediasink/AudioSinkWrapper.cpp
@@ -53,17 +53,17 @@ AudioSinkWrapper::OnEnded(TrackType aTyp
}
int64_t
AudioSinkWrapper::GetEndTime(TrackType aType) const
{
AssertOwnerThread();
MOZ_ASSERT(mIsStarted, "Must be called after playback starts.");
if (aType == TrackInfo::kAudioTrack && mAudioSink) {
- return mAudioSink->GetEndTime();
+ return mAudioSink->GetEndTime().ToMicroseconds();
}
return 0;
}
int64_t
AudioSinkWrapper::GetVideoPosition(TimeStamp aNow) const
{
AssertOwnerThread();
@@ -80,17 +80,17 @@ AudioSinkWrapper::GetPosition(TimeStamp*
AssertOwnerThread();
MOZ_ASSERT(mIsStarted, "Must be called after playback starts.");
int64_t pos = -1;
TimeStamp t = TimeStamp::Now();
if (!mAudioEnded) {
// Rely on the audio sink to report playback position when it is not ended.
- pos = mAudioSink->GetPosition();
+ pos = mAudioSink->GetPosition().ToMicroseconds();
} else if (!mPlayStartTime.IsNull()) {
// Calculate playback position using system clock if we are still playing.
pos = GetVideoPosition(t);
} else {
// Return how long we've played if we are not playing.
pos = mPlayDuration;
}