--- a/dom/media/webspeech/synth/SpeechSynthesisUtterance.cpp
+++ b/dom/media/webspeech/synth/SpeechSynthesisUtterance.cpp
@@ -153,24 +153,26 @@ void
SpeechSynthesisUtterance::GetChosenVoiceURI(nsString& aResult) const
{
aResult = mChosenVoiceURI;
}
void
SpeechSynthesisUtterance::DispatchSpeechSynthesisEvent(const nsAString& aEventType,
uint32_t aCharIndex,
+ Nullable<uint32_t> aCharLength,
float aElapsedTime,
const nsAString& aName)
{
SpeechSynthesisEventInit init;
init.mBubbles = false;
init.mCancelable = false;
init.mUtterance = this;
init.mCharIndex = aCharIndex;
+ init.mCharLength = aCharLength;
init.mElapsedTime = aElapsedTime;
init.mName = aName;
RefPtr<SpeechSynthesisEvent> event =
SpeechSynthesisEvent::Constructor(this, aEventType, init);
DispatchTrustedEvent(event);
}
--- a/dom/media/webspeech/synth/SpeechSynthesisUtterance.h
+++ b/dom/media/webspeech/synth/SpeechSynthesisUtterance.h
@@ -92,16 +92,17 @@ public:
IMPL_EVENT_HANDLER(mark)
IMPL_EVENT_HANDLER(boundary)
private:
virtual ~SpeechSynthesisUtterance();
void DispatchSpeechSynthesisEvent(const nsAString& aEventType,
uint32_t aCharIndex,
+ Nullable<uint32_t> aCharLength,
float aElapsedTime, const nsAString& aName);
nsString mText;
nsString mLang;
float mVolume;
--- a/dom/media/webspeech/synth/cocoa/OSXSpeechSynthesizerService.mm
+++ b/dom/media/webspeech/synth/cocoa/OSXSpeechSynthesizerService.mm
@@ -38,17 +38,17 @@ public:
mStartingTime = TimeStamp::Now();
}
NS_DECL_CYCLE_COLLECTING_ISUPPORTS
NS_DECL_CYCLE_COLLECTION_CLASS_AMBIGUOUS(SpeechTaskCallback, nsISpeechTaskCallback)
NS_DECL_NSISPEECHTASKCALLBACK
- void OnWillSpeakWord(uint32_t aIndex);
+ void OnWillSpeakWord(uint32_t aIndex, uint32_t aLength);
void OnError(uint32_t aIndex);
void OnDidFinishSpeaking();
private:
virtual ~SpeechTaskCallback()
{
[mSpeechSynthesizer release];
}
@@ -132,24 +132,25 @@ SpeechTaskCallback::OnVolumeChanged(floa
float
SpeechTaskCallback::GetTimeDurationFromStart()
{
TimeDuration duration = TimeStamp::Now() - mStartingTime;
return duration.ToMilliseconds();
}
void
-SpeechTaskCallback::OnWillSpeakWord(uint32_t aIndex)
+SpeechTaskCallback::OnWillSpeakWord(uint32_t aIndex, uint32_t aLength)
{
mCurrentIndex = aIndex < mOffsets.Length() ? mOffsets[aIndex] : mCurrentIndex;
if (!mTask) {
return;
}
mTask->DispatchBoundary(NS_LITERAL_STRING("word"),
- GetTimeDurationFromStart(), mCurrentIndex);
+ GetTimeDurationFromStart(),
+ mCurrentIndex, aLength, 1);
}
void
SpeechTaskCallback::OnError(uint32_t aIndex)
{
if (!mTask) {
return;
}
@@ -180,17 +181,17 @@ SpeechTaskCallback::OnDidFinishSpeaking(
[super init];
mCallback = aCallback;
return self;
}
- (void)speechSynthesizer:(NSSpeechSynthesizer *)aSender
willSpeakWord:(NSRange)aRange ofString:(NSString*)aString
{
- mCallback->OnWillSpeakWord(aRange.location);
+ mCallback->OnWillSpeakWord(aRange.location, aRange.length);
}
- (void)speechSynthesizer:(NSSpeechSynthesizer *)aSender
didFinishSpeaking:(BOOL)aFinishedSpeaking
{
mCallback->OnDidFinishSpeaking();
}
--- a/dom/media/webspeech/synth/ipc/PSpeechSynthesisRequest.ipdl
+++ b/dom/media/webspeech/synth/ipc/PSpeechSynthesisRequest.ipdl
@@ -32,15 +32,16 @@ async protocol PSpeechSynthesisRequest
async OnEnd(bool aIsError, float aElapsedTime, uint32_t aCharIndex);
async OnStart(nsString aUri);
async OnPause(float aElapsedTime, uint32_t aCharIndex);
async OnResume(float aElapsedTime, uint32_t aCharIndex);
- async OnBoundary(nsString aName, float aElapsedTime, uint32_t aCharIndex);
+ async OnBoundary(nsString aName, float aElapsedTime, uint32_t aCharIndex,
+ uint32_t aCharLength, uint8_t argc);
async OnMark(nsString aName, float aElapsedTime, uint32_t aCharIndex);
};
} // namespace dom
} // namespace mozilla
--- a/dom/media/webspeech/synth/ipc/SpeechSynthesisChild.cpp
+++ b/dom/media/webspeech/synth/ipc/SpeechSynthesisChild.cpp
@@ -126,19 +126,21 @@ SpeechSynthesisRequestChild::RecvOnResum
{
mTask->DispatchResumeImpl(aElapsedTime, aCharIndex);
return IPC_OK();
}
mozilla::ipc::IPCResult
SpeechSynthesisRequestChild::RecvOnBoundary(const nsString& aName,
const float& aElapsedTime,
- const uint32_t& aCharIndex)
+ const uint32_t& aCharIndex,
+ const uint32_t& aCharLength,
+ const uint8_t& argc)
{
- mTask->DispatchBoundaryImpl(aName, aElapsedTime, aCharIndex);
+ mTask->DispatchBoundaryImpl(aName, aElapsedTime, aCharIndex, aCharLength, argc);
return IPC_OK();
}
mozilla::ipc::IPCResult
SpeechSynthesisRequestChild::RecvOnMark(const nsString& aName,
const float& aElapsedTime,
const uint32_t& aCharIndex)
{
--- a/dom/media/webspeech/synth/ipc/SpeechSynthesisChild.h
+++ b/dom/media/webspeech/synth/ipc/SpeechSynthesisChild.h
@@ -58,17 +58,19 @@ protected:
const float& aElapsedTime,
const uint32_t& aCharIndex) override;
mozilla::ipc::IPCResult RecvOnPause(const float& aElapsedTime, const uint32_t& aCharIndex) override;
mozilla::ipc::IPCResult RecvOnResume(const float& aElapsedTime, const uint32_t& aCharIndex) override;
mozilla::ipc::IPCResult RecvOnBoundary(const nsString& aName, const float& aElapsedTime,
- const uint32_t& aCharIndex) override;
+ const uint32_t& aCharIndex,
+ const uint32_t& aCharLength,
+ const uint8_t& argc) override;
mozilla::ipc::IPCResult RecvOnMark(const nsString& aName, const float& aElapsedTime,
const uint32_t& aCharIndex) override;
RefPtr<SpeechTaskChild> mTask;
};
class SpeechTaskChild : public nsSpeechTask
--- a/dom/media/webspeech/synth/ipc/SpeechSynthesisParent.cpp
+++ b/dom/media/webspeech/synth/ipc/SpeechSynthesisParent.cpp
@@ -203,20 +203,22 @@ SpeechTaskParent::DispatchErrorImpl(floa
return NS_ERROR_FAILURE;
}
return NS_OK;
}
nsresult
SpeechTaskParent::DispatchBoundaryImpl(const nsAString& aName,
- float aElapsedTime, uint32_t aCharIndex)
+ float aElapsedTime, uint32_t aCharIndex,
+ uint32_t aCharLength, uint8_t argc)
{
MOZ_ASSERT(mActor);
- if(NS_WARN_IF(!(mActor->SendOnBoundary(nsString(aName), aElapsedTime, aCharIndex)))) {
+ if(NS_WARN_IF(!(mActor->SendOnBoundary(nsString(aName), aElapsedTime,
+ aCharIndex, aCharLength, argc)))) {
return NS_ERROR_FAILURE;
}
return NS_OK;
}
nsresult
SpeechTaskParent::DispatchMarkImpl(const nsAString& aName,
--- a/dom/media/webspeech/synth/ipc/SpeechSynthesisParent.h
+++ b/dom/media/webspeech/synth/ipc/SpeechSynthesisParent.h
@@ -88,17 +88,18 @@ public:
nsresult DispatchPauseImpl(float aElapsedTime, uint32_t aCharIndex);
nsresult DispatchResumeImpl(float aElapsedTime, uint32_t aCharIndex);
nsresult DispatchErrorImpl(float aElapsedTime, uint32_t aCharIndex);
nsresult DispatchBoundaryImpl(const nsAString& aName,
- float aElapsedTime, uint32_t aCharIndex);
+ float aElapsedTime, uint32_t aCharIndex,
+ uint32_t aCharLength, uint8_t argc);
nsresult DispatchMarkImpl(const nsAString& aName,
float aElapsedTime, uint32_t aCharIndex);
private:
SpeechSynthesisRequestParent* mActor;
};
--- a/dom/media/webspeech/synth/nsISpeechService.idl
+++ b/dom/media/webspeech/synth/nsISpeechService.idl
@@ -108,19 +108,21 @@ interface nsISpeechTask : nsISupports
void dispatchError(in float aElapsedTime, in unsigned long aCharIndex);
/**
* Dispatch boundary event.
*
* @param aName name of boundary, 'word' or 'sentence'
* @param aElapsedTime time in seconds since speech has started.
* @param aCharIndex offset of spoken characters.
+ * @param aCharLength optional length, in characters, of the text to be spoken for this boundary event.
*/
- void dispatchBoundary(in DOMString aName, in float aElapsedTime,
- in unsigned long aCharIndex);
+ [optional_argc] void dispatchBoundary(in DOMString aName, in float aElapsedTime,
+ in unsigned long aCharIndex,
+ [optional] in unsigned long aCharLength);
/**
* Dispatch mark event.
*
* @param aName mark identifier.
* @param aElapsedTime time in seconds since speech has started.
* @param aCharIndex offset of spoken characters.
*/
--- a/dom/media/webspeech/synth/nsSpeechTask.cpp
+++ b/dom/media/webspeech/synth/nsSpeechTask.cpp
@@ -373,18 +373,18 @@ nsSpeechTask::DispatchStartImpl(const ns
if(NS_WARN_IF(!(mUtterance->mState == SpeechSynthesisUtterance::STATE_PENDING))) {
return NS_ERROR_NOT_AVAILABLE;
}
CreateAudioChannelAgent();
mUtterance->mState = SpeechSynthesisUtterance::STATE_SPEAKING;
mUtterance->mChosenVoiceURI = aUri;
- mUtterance->DispatchSpeechSynthesisEvent(NS_LITERAL_STRING("start"), 0, 0,
- EmptyString());
+ mUtterance->DispatchSpeechSynthesisEvent(NS_LITERAL_STRING("start"), 0,
+ nullptr, 0, EmptyString());
return NS_OK;
}
NS_IMETHODIMP
nsSpeechTask::DispatchEnd(float aElapsedTime, uint32_t aCharIndex)
{
if (!mIndirectAudio) {
@@ -428,17 +428,17 @@ nsSpeechTask::DispatchEndImpl(float aEla
mSpeechSynthesis->OnEnd(this);
}
if (utterance->mState == SpeechSynthesisUtterance::STATE_PENDING) {
utterance->mState = SpeechSynthesisUtterance::STATE_NONE;
} else {
utterance->mState = SpeechSynthesisUtterance::STATE_ENDED;
utterance->DispatchSpeechSynthesisEvent(NS_LITERAL_STRING("end"),
- aCharIndex, aElapsedTime,
+ aCharIndex, nullptr, aElapsedTime,
EmptyString());
}
return NS_OK;
}
NS_IMETHODIMP
nsSpeechTask::DispatchPause(float aElapsedTime, uint32_t aCharIndex)
@@ -461,17 +461,17 @@ nsSpeechTask::DispatchPauseImpl(float aE
}
if(NS_WARN_IF(mUtterance->mState == SpeechSynthesisUtterance::STATE_ENDED)) {
return NS_ERROR_NOT_AVAILABLE;
}
mUtterance->mPaused = true;
if (mUtterance->mState == SpeechSynthesisUtterance::STATE_SPEAKING) {
mUtterance->DispatchSpeechSynthesisEvent(NS_LITERAL_STRING("pause"),
- aCharIndex, aElapsedTime,
+ aCharIndex, nullptr, aElapsedTime,
EmptyString());
}
return NS_OK;
}
NS_IMETHODIMP
nsSpeechTask::DispatchResume(float aElapsedTime, uint32_t aCharIndex)
{
@@ -493,17 +493,17 @@ nsSpeechTask::DispatchResumeImpl(float a
}
if(NS_WARN_IF(mUtterance->mState == SpeechSynthesisUtterance::STATE_ENDED)) {
return NS_ERROR_NOT_AVAILABLE;
}
mUtterance->mPaused = false;
if (mUtterance->mState == SpeechSynthesisUtterance::STATE_SPEAKING) {
mUtterance->DispatchSpeechSynthesisEvent(NS_LITERAL_STRING("resume"),
- aCharIndex, aElapsedTime,
+ aCharIndex, nullptr, aElapsedTime,
EmptyString());
}
return NS_OK;
}
NS_IMETHODIMP
nsSpeechTask::DispatchError(float aElapsedTime, uint32_t aCharIndex)
@@ -531,45 +531,48 @@ nsSpeechTask::DispatchErrorImpl(float aE
}
if (mSpeechSynthesis) {
mSpeechSynthesis->OnEnd(this);
}
mUtterance->mState = SpeechSynthesisUtterance::STATE_ENDED;
mUtterance->DispatchSpeechSynthesisEvent(NS_LITERAL_STRING("error"),
- aCharIndex, aElapsedTime,
+ aCharIndex, nullptr, aElapsedTime,
EmptyString());
return NS_OK;
}
NS_IMETHODIMP
nsSpeechTask::DispatchBoundary(const nsAString& aName,
- float aElapsedTime, uint32_t aCharIndex)
+ float aElapsedTime, uint32_t aCharIndex,
+ uint32_t aCharLength, uint8_t argc)
{
if (!mIndirectAudio) {
NS_WARNING("Can't call DispatchBoundary() from a direct audio speech service");
return NS_ERROR_FAILURE;
}
- return DispatchBoundaryImpl(aName, aElapsedTime, aCharIndex);
+ return DispatchBoundaryImpl(aName, aElapsedTime, aCharIndex, aCharLength, argc);
}
nsresult
nsSpeechTask::DispatchBoundaryImpl(const nsAString& aName,
- float aElapsedTime, uint32_t aCharIndex)
+ float aElapsedTime, uint32_t aCharIndex,
+ uint32_t aCharLength, uint8_t argc)
{
MOZ_ASSERT(mUtterance);
if(NS_WARN_IF(!(mUtterance->mState == SpeechSynthesisUtterance::STATE_SPEAKING))) {
return NS_ERROR_NOT_AVAILABLE;
}
- mUtterance->DispatchSpeechSynthesisEvent(NS_LITERAL_STRING("boundary"),
- aCharIndex, aElapsedTime,
- aName);
+ mUtterance->DispatchSpeechSynthesisEvent(NS_LITERAL_STRING("boundary"),
+ aCharIndex,
+ argc ? static_cast<Nullable<uint32_t> >(aCharLength) : nullptr,
+ aElapsedTime, aName);
return NS_OK;
}
NS_IMETHODIMP
nsSpeechTask::DispatchMark(const nsAString& aName,
float aElapsedTime, uint32_t aCharIndex)
{
if (!mIndirectAudio) {
@@ -585,17 +588,17 @@ nsSpeechTask::DispatchMarkImpl(const nsA
float aElapsedTime, uint32_t aCharIndex)
{
MOZ_ASSERT(mUtterance);
if(NS_WARN_IF(!(mUtterance->mState == SpeechSynthesisUtterance::STATE_SPEAKING))) {
return NS_ERROR_NOT_AVAILABLE;
}
mUtterance->DispatchSpeechSynthesisEvent(NS_LITERAL_STRING("mark"),
- aCharIndex, aElapsedTime,
+ aCharIndex, nullptr, aElapsedTime,
aName);
return NS_OK;
}
void
nsSpeechTask::Pause()
{
MOZ_ASSERT(XRE_IsParentProcess());
--- a/dom/media/webspeech/synth/nsSpeechTask.h
+++ b/dom/media/webspeech/synth/nsSpeechTask.h
@@ -81,17 +81,19 @@ protected:
virtual nsresult DispatchPauseImpl(float aElapsedTime, uint32_t aCharIndex);
virtual nsresult DispatchResumeImpl(float aElapsedTime, uint32_t aCharIndex);
virtual nsresult DispatchErrorImpl(float aElapsedTime, uint32_t aCharIndex);
virtual nsresult DispatchBoundaryImpl(const nsAString& aName,
float aElapsedTime,
- uint32_t aCharIndex);
+ uint32_t aCharIndex,
+ uint32_t aCharLength,
+ uint8_t argc);
virtual nsresult DispatchMarkImpl(const nsAString& aName,
float aElapsedTime, uint32_t aCharIndex);
RefPtr<SpeechSynthesisUtterance> mUtterance;
float mVolume;
--- a/dom/media/webspeech/synth/windows/SapiService.cpp
+++ b/dom/media/webspeech/synth/windows/SapiService.cpp
@@ -134,27 +134,29 @@ SapiCallback::OnSpeechEvent(const SPEVEN
mCurrentIndex = mSpeakTextLen;
}
mTask->DispatchEnd(GetTickCount() - mStartingTime, mCurrentIndex);
mTask = nullptr;
break;
case SPEI_TTS_BOOKMARK:
mCurrentIndex = static_cast<ULONG>(speechEvent.lParam) - mTextOffset;
mTask->DispatchBoundary(NS_LITERAL_STRING("mark"),
- GetTickCount() - mStartingTime, mCurrentIndex);
+ GetTickCount() - mStartingTime, mCurrentIndex, 0, 0);
break;
case SPEI_WORD_BOUNDARY:
mCurrentIndex = static_cast<ULONG>(speechEvent.lParam) - mTextOffset;
mTask->DispatchBoundary(NS_LITERAL_STRING("word"),
- GetTickCount() - mStartingTime, mCurrentIndex);
+ GetTickCount() - mStartingTime, mCurrentIndex,
+ static_cast<ULONG>(speechEvent.wParam), 1);
break;
case SPEI_SENTENCE_BOUNDARY:
mCurrentIndex = static_cast<ULONG>(speechEvent.lParam) - mTextOffset;
mTask->DispatchBoundary(NS_LITERAL_STRING("sentence"),
- GetTickCount() - mStartingTime, mCurrentIndex);
+ GetTickCount() - mStartingTime, mCurrentIndex,
+ static_cast<ULONG>(speechEvent.wParam), 1);
break;
default:
break;
}
}
// static
void __stdcall
--- a/dom/webidl/SpeechSynthesisEvent.webidl
+++ b/dom/webidl/SpeechSynthesisEvent.webidl
@@ -11,19 +11,21 @@
*/
[Constructor(DOMString type, SpeechSynthesisEventInit eventInitDict),
Pref="media.webspeech.synth.enabled"]
interface SpeechSynthesisEvent : Event
{
readonly attribute SpeechSynthesisUtterance utterance;
readonly attribute unsigned long charIndex;
+ readonly attribute unsigned long? charLength;
readonly attribute float elapsedTime;
readonly attribute DOMString? name;
};
dictionary SpeechSynthesisEventInit : EventInit
{
required SpeechSynthesisUtterance utterance;
unsigned long charIndex = 0;
+ unsigned long? charLength = null;
float elapsedTime = 0;
DOMString name = "";
};