Bug 1178738 - Have SpeechSynthesis::mSpeechQueue not contain the currently spoken utterance. r?smaug
MozReview-Commit-ID: CyXGDbenWtq
--- a/dom/media/webspeech/synth/SpeechSynthesis.cpp
+++ b/dom/media/webspeech/synth/SpeechSynthesis.cpp
@@ -75,54 +75,61 @@ SpeechSynthesis::~SpeechSynthesis()
}
JSObject*
SpeechSynthesis::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto)
{
return SpeechSynthesisBinding::Wrap(aCx, this, aGivenProto);
}
+SpeechSynthesisUtterance*
+SpeechSynthesis::CurrentUtterance() const
+{
+ return mCurrentTask ? mCurrentTask->mUtterance.get() : nullptr;
+}
+
bool
SpeechSynthesis::Pending() const
{
- switch (mSpeechQueue.Length()) {
- case 0:
- return false;
-
- case 1:
- return mSpeechQueue.ElementAt(0)->GetState() == SpeechSynthesisUtterance::STATE_PENDING;
-
- default:
+ if (mSpeechQueue.Length() > 0) {
return true;
}
+
+ SpeechSynthesisUtterance* utterance = CurrentUtterance();
+ if (utterance && utterance->GetState() == SpeechSynthesisUtterance::STATE_PENDING) {
+ return true;
+ }
+
+ return false;
}
bool
SpeechSynthesis::Speaking() const
{
- if (!mSpeechQueue.IsEmpty() &&
- mSpeechQueue.ElementAt(0)->GetState() == SpeechSynthesisUtterance::STATE_SPEAKING) {
+ SpeechSynthesisUtterance* utterance = CurrentUtterance();
+ if (utterance && utterance->GetState() == SpeechSynthesisUtterance::STATE_SPEAKING) {
return true;
}
// Returns global speaking state if global queue is enabled. Or false.
return nsSynthVoiceRegistry::GetInstance()->IsSpeaking();
}
bool
SpeechSynthesis::Paused() const
{
+ SpeechSynthesisUtterance* utterance = CurrentUtterance();
return mHoldQueue || (mCurrentTask && mCurrentTask->IsPrePaused()) ||
- (!mSpeechQueue.IsEmpty() && mSpeechQueue.ElementAt(0)->IsPaused());
+ (utterance && utterance->IsPaused());
}
bool
SpeechSynthesis::HasEmptyQueue() const
{
- return mSpeechQueue.Length() == 0;
+ return !mCurrentTask && mSpeechQueue.Length() == 0;
}
bool SpeechSynthesis::HasVoices() const
{
uint32_t voiceCount = mVoiceCache.Count();
if (voiceCount == 0) {
nsresult rv = nsSynthVoiceRegistry::GetInstance()->GetVoiceCount(&voiceCount);
if(NS_WARN_IF(NS_FAILED(rv))) {
@@ -157,16 +164,17 @@ SpeechSynthesis::AdvanceQueue()
LOG(LogLevel::Debug,
("SpeechSynthesis::AdvanceQueue length=%d", mSpeechQueue.Length()));
if (mSpeechQueue.IsEmpty()) {
return;
}
RefPtr<SpeechSynthesisUtterance> utterance = mSpeechQueue.ElementAt(0);
+ mSpeechQueue.RemoveElementAt(0);
nsAutoString docLang;
nsCOMPtr<nsPIDOMWindowInner> window = GetOwner();
nsIDocument* doc = window ? window->GetExtantDoc() : nullptr;
if (doc) {
Element* elm = doc->GetHtmlElement();
@@ -183,39 +191,32 @@ SpeechSynthesis::AdvanceQueue()
}
return;
}
void
SpeechSynthesis::Cancel()
{
- if (!mSpeechQueue.IsEmpty() &&
- mSpeechQueue.ElementAt(0)->GetState() == SpeechSynthesisUtterance::STATE_SPEAKING) {
- // Remove all queued utterances except for current one, we will remove it
- // in OnEnd
- mSpeechQueue.RemoveElementsAt(1, mSpeechQueue.Length() - 1);
- } else {
- mSpeechQueue.Clear();
- }
+ mSpeechQueue.Clear();
if (mCurrentTask) {
mCurrentTask->Cancel();
}
}
void
SpeechSynthesis::Pause()
{
if (Paused()) {
return;
}
- if (mCurrentTask && !mSpeechQueue.IsEmpty() &&
- mSpeechQueue.ElementAt(0)->GetState() != SpeechSynthesisUtterance::STATE_ENDED) {
+ SpeechSynthesisUtterance* utterance = CurrentUtterance();
+ if (utterance && utterance->GetState() != SpeechSynthesisUtterance::STATE_ENDED) {
mCurrentTask->Pause();
} else {
mHoldQueue = true;
}
}
void
SpeechSynthesis::Resume()
@@ -232,20 +233,16 @@ SpeechSynthesis::Resume()
}
}
void
SpeechSynthesis::OnEnd(const nsSpeechTask* aTask)
{
MOZ_ASSERT(mCurrentTask == aTask);
- if (!mSpeechQueue.IsEmpty()) {
- mSpeechQueue.RemoveElementAt(0);
- }
-
mCurrentTask = nullptr;
AdvanceQueue();
}
void
SpeechSynthesis::GetVoices(nsTArray< RefPtr<SpeechSynthesisVoice> >& aResult)
{
aResult.Clear();
--- a/dom/media/webspeech/synth/SpeechSynthesis.h
+++ b/dom/media/webspeech/synth/SpeechSynthesis.h
@@ -39,32 +39,35 @@ public:
JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override;
bool Pending() const;
bool Speaking() const;
bool Paused() const;
+ /* Returns true if the speech queue is empty and no utterance is currently being spoken. */
bool HasEmptyQueue() const;
void Speak(SpeechSynthesisUtterance& aUtterance);
void Cancel();
void Pause();
void Resume();
void OnEnd(const nsSpeechTask* aTask);
void GetVoices(nsTArray< RefPtr<SpeechSynthesisVoice> >& aResult);
void ForceEnd();
+ SpeechSynthesisUtterance* CurrentUtterance() const;
+
IMPL_EVENT_HANDLER(voiceschanged)
private:
virtual ~SpeechSynthesis();
void AdvanceQueue();
bool HasVoices() const;
--- a/dom/media/webspeech/synth/nsSpeechTask.h
+++ b/dom/media/webspeech/synth/nsSpeechTask.h
@@ -19,16 +19,17 @@ class SpeechSynthesisUtterance;
class SpeechSynthesis;
class SynthStreamListener;
class nsSpeechTask : public nsISpeechTask
, public nsIAudioChannelAgentCallback
, public nsSupportsWeakReference
{
friend class SynthStreamListener;
+ friend class SpeechSynthesis;
public:
NS_DECL_CYCLE_COLLECTING_ISUPPORTS
NS_DECL_CYCLE_COLLECTION_CLASS_AMBIGUOUS(nsSpeechTask, nsISpeechTask)
NS_DECL_NSISPEECHTASK
NS_DECL_NSIAUDIOCHANNELAGENTCALLBACK