Bug 1404997 - P7. Simplify played time calculations. r?pehrsons
Now that the graph rate matches the one out of NetEQ, we can remove an unnecessary conversion.
Additionally, move a member from the base class to the only subclass where it's used.
MozReview-Commit-ID: II5mdcl0vhK
--- a/dom/media/MediaStreamGraph.cpp
+++ b/dom/media/MediaStreamGraph.cpp
@@ -181,31 +181,32 @@ MediaStreamGraphImpl::ExtractPendingInpu
bool finished;
{
MutexAutoLock lock(aStream->mMutex);
if (aStream->mPullEnabled && !aStream->mFinished &&
!aStream->mListeners.IsEmpty()) {
// Compute how much stream time we'll need assuming we don't block
// the stream at all.
StreamTime t = aStream->GraphTimeToStreamTime(aDesiredUpToTime);
+ StreamTime current = aStream->mTracks.GetEnd();
LOG(LogLevel::Verbose,
("Calling NotifyPull aStream=%p t=%f current end=%f",
aStream,
MediaTimeToSeconds(t),
- MediaTimeToSeconds(aStream->mTracks.GetEnd())));
- if (t > aStream->mTracks.GetEnd()) {
+ MediaTimeToSeconds(current)));
+ if (t > current) {
*aEnsureNextIteration = true;
#ifdef DEBUG
if (aStream->mListeners.Length() == 0) {
LOG(
LogLevel::Error,
("No listeners in NotifyPull aStream=%p desired=%f current end=%f",
aStream,
MediaTimeToSeconds(t),
- MediaTimeToSeconds(aStream->mTracks.GetEnd())));
+ MediaTimeToSeconds(current)));
aStream->DumpTrackInfo();
}
#endif
for (uint32_t j = 0; j < aStream->mListeners.Length(); ++j) {
MediaStreamListener* l = aStream->mListeners[j];
{
MutexAutoUnlock unlock(aStream->mMutex);
l->NotifyPull(this, t);
--- a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
@@ -2070,17 +2070,16 @@ private:
};
class GenericReceiveListener : public MediaStreamListener
{
public:
explicit GenericReceiveListener(dom::MediaStreamTrack* track)
: track_(track)
, played_ticks_(0)
- , last_log_(0)
, principal_handle_(PRINCIPAL_HANDLE_NONE)
, listening_(false)
, maybe_track_needs_unmute_(true)
{
MOZ_ASSERT(track->GetInputStream()->AsSourceStream());
}
virtual ~GenericReceiveListener()
@@ -2179,17 +2178,16 @@ public:
void SetPrincipalHandle_msg(const PrincipalHandle& principal_handle)
{
principal_handle_ = principal_handle;
}
protected:
RefPtr<dom::MediaStreamTrack> track_;
TrackTicks played_ticks_;
- TrackTicks last_log_; // played_ticks_ when we last logged
PrincipalHandle principal_handle_;
bool listening_;
Atomic<bool> maybe_track_needs_unmute_;
};
MediaPipelineReceive::MediaPipelineReceive(const std::string& pc,
nsCOMPtr<nsIEventTarget> main_thread,
nsCOMPtr<nsIEventTarget> sts_thread,
@@ -2204,16 +2202,17 @@ MediaPipelineReceive::~MediaPipelineRece
class MediaPipelineReceiveAudio::PipelineListener
: public GenericReceiveListener
{
public:
PipelineListener(dom::MediaStreamTrack* track,
const RefPtr<MediaSessionConduit>& conduit)
: GenericReceiveListener(track)
, conduit_(conduit)
+ , last_log_(0)
{
}
~PipelineListener()
{
if (!NS_IsMainThread()) {
// release conduit on mainthread. Must use forget()!
nsresult rv =
@@ -2234,21 +2233,23 @@ public:
track_->GetInputStream()->AsSourceStream();
MOZ_ASSERT(source);
if (!source) {
CSFLogError(LOGTAG, "NotifyPull() called from a non-SourceMediaStream");
return;
}
TrackRate rate = graph->GraphRate();
- uint32_t samples_per_10ms = rate/100;
+ uint32_t samples_per_10ms = rate / 100;
+ // Determine how many frames we need.
+ // As we get frames from conduit_ at the same rate as the graph's rate,
+ // the number of frames needed is determined directly.
+ TrackTicks framesNeeded = desired_time - played_ticks_;
- // This comparison is done in total time to avoid accumulated roundoff errors.
- while (source->TicksToTimeRoundDown(rate,
- played_ticks_) < desired_time) {
+ while (framesNeeded >= 0) {
int16_t scratch_buffer[AUDIO_SAMPLE_BUFFER_MAX_BYTES / sizeof(int16_t)];
int samples_length;
// This fetches 10ms of data, either mono or stereo
MediaConduitErrorCode err =
static_cast<AudioSessionConduit*>(conduit_.get())
->GetAudioFrame(scratch_buffer,
@@ -2302,16 +2303,17 @@ public:
outputChannels.AppendElements(channels);
segment.AppendFrames(
samples.forget(), outputChannels, frames, principal_handle_);
// Handle track not actually added yet or removed/finished
if (source->AppendToTrack(track_->GetInputTrackId(), &segment)) {
+ framesNeeded -= frames;
played_ticks_ += frames;
if (MOZ_LOG_TEST(AudioLogModule(), LogLevel::Debug)) {
if (played_ticks_ > last_log_ + rate) { // ~ 1 second
MOZ_LOG(
AudioLogModule(),
LogLevel::Debug,
("%p: Inserting %zu samples into track %d, total = %" PRIu64,
(void*)this,
@@ -2327,16 +2329,17 @@ public:
// buffer - but don't i-loop!
return;
}
}
}
private:
RefPtr<MediaSessionConduit> conduit_;
+ TrackTicks last_log_; // played_ticks_ when we last logged
};
MediaPipelineReceiveAudio::MediaPipelineReceiveAudio(
const std::string& pc,
nsCOMPtr<nsIEventTarget> main_thread,
nsCOMPtr<nsIEventTarget> sts_thread,
RefPtr<AudioSessionConduit> conduit,
dom::MediaStreamTrack* aTrack)