HTML

<h1>Test the Speech Recognition API</h1>

<form>
  <p>This normally works fine in Google Chrome. To check Speech Recognition API support in other browsers,
    <a href="https://caniuse.com/?search=Speech%20Recognition" target="_blank">click here</a>.<br><br>
  </p>
  <div class="controls">
    <label>Phrase test
      <input id="phrase" type="text" placeholder="type the phrase you want to test with voice recognition (not mandatory)">
    </label>
    <div class="clearfix"></div>
  </div>

  <div class="controls">
    <label>Choose the language
      <select>
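        <!-- options are populated at runtime from the languages returned by speechSynthesis.getVoices() -->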
      </select>
    </label>
    <div class="clearfix"></div>
  </div>

  <div class="controls">
    <p>Press the button, then say the phrase to test the recognition.</p>
    <button id="record" type="submit">Start new test</button>
    <div class="clearfix"></div>
  </div>

  <div class="controls">
    <p class="phrase">Phrase...</p>
    <div class="clearfix"></div>
    <p class="result">Right or wrong?</p>
    <div class="clearfix"></div>
    <p class="output">...diagnostic messages</p>
  </div>

</form>

CSS

body,
html {
  margin: 0;
}

html {
  height: 100%;
}

body {
  height: inherit;
  overflow: hidden;
}

h1,
p {
  font-family: sans-serif;
  text-align: center;
}

div p {
  padding: 20px;
  background-color: rgba(0, 0, 0, 0.2);
}

.txt,
form > div {
  margin-bottom: 10px;
  overflow: auto;
}
.clearfix {
  clear: both;
}
#phrase {
  width: 60%;
}

.controls {
  text-align: center;
  margin-top: 10px;
}
.controls button {
  padding: 10px;
}

.phrase {
  font-weight: bold;
}

.output {
  font-style: italic;
}

JS

// Feature detection: Chrome still exposes these constructors behind a webkit
// prefix; reading them off window avoids a ReferenceError where neither exists.
var SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
var SpeechGrammarList = window.SpeechGrammarList || window.webkitSpeechGrammarList;
var SpeechRecognitionEvent =
  window.SpeechRecognitionEvent || window.webkitSpeechRecognitionEvent;

/**
 * Wraps speechSynthesis.getVoices() in a Promise so it also works on Chrome,
 * where the voice list loads asynchronously and is announced by the
 * voiceschanged event.
 * Solution proposed by Flavio Copes on the JSBootcamp (https://2020.thejsbootcamp.com/)
 * @returns {Promise<SpeechSynthesisVoice[]>}
 */
const getVoices = () => {
  return new Promise((resolve) => {
    let voices = speechSynthesis.getVoices();
    if (voices.length) {
      resolve(voices);
      return;
    }
    speechSynthesis.onvoiceschanged = () => {
      voices = speechSynthesis.getVoices();
      resolve(voices);
    };
  });
};
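
// Example usage (a sketch): log the language tags of the available voices.
//   getVoices().then((voices) => console.log(voices.map((v) => v.lang)));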

/**
 * Resolves the list of voices (see getVoices above) and passes it to the
 * given callback.
 * Solution proposed by Flavio Copes on the JSBootcamp (https://2020.thejsbootcamp.com/)
 * @param callback called with the array of SpeechSynthesisVoice objects
 * @returns {Promise<void>}
 */
const prepareVoicesList = async (callback) => {
  // Copy the resolved voices into a fresh array before handing them over
  const voices = [...(await getVoices())];
  if (callback) {
    callback(voices);
  }
};

function testSpeechRecognition() {
  "use strict";

  var default_lang = "fr-FR";

  var inputForm = document.querySelector("form");
  var phrasePara = inputForm.querySelector(".phrase");
  var resultPara = inputForm.querySelector(".result");
  var diagnosticPara = inputForm.querySelector(".output");
  var testBtn = inputForm.querySelector("button");
  var inputTxt = inputForm.querySelector("input");
  var langSelect = inputForm.querySelector("select");

  function populateLangList(prmvoices) {
    // Collect the distinct language tags of the available voices
    let lstvoices = [];
    prmvoices
      .map((item) => item.lang)
      .forEach((item) => {
        if (!lstvoices.includes(item)) {
          lstvoices.push(item);
        }
      });
    if (lstvoices.length === 0) {
      // fallback list in case the languages are unavailable
      lstvoices = ["fr-FR", "en-US", "en-GB"];
    }
    // empty the select field of all the children if they exist
    while (langSelect.firstChild) {
      langSelect.removeChild(langSelect.firstChild);
    }
    lstvoices.forEach((voice) => {
      let option = document.createElement("option");
      option.textContent = voice;
      if (voice === default_lang) {
        option.setAttribute("selected", "selected");
      }
      option.setAttribute("value", voice);
      langSelect.appendChild(option);
    });
  }

  function prepareApp(prmvoices) {
    populateLangList(prmvoices);

    testBtn.addEventListener("click", testSpeech);
  }

  // Bootstrap: resolve the voices, then build the language list and wire up the button
  prepareVoicesList(prepareApp);

  function testSpeech(evt) {
    evt.preventDefault();
    testBtn.disabled = true;
    testBtn.textContent = "Test in progress";

    // Normalize case so the comparison with the returned transcript is case-insensitive
    let phrase = String(inputTxt.value).trim().toLowerCase();
    phrasePara.textContent = phrase;
    resultPara.textContent = "Right or wrong?";
    resultPara.style.background = "rgba(0,0,0,0.2)";
    diagnosticPara.textContent = "...diagnostic messages";

    var grammar =
      "#JSGF V1.0; grammar phrase; public <phrase> = " + phrase + ";";
    var recognition = new SpeechRecognition();
    var speechRecognitionList = new SpeechGrammarList();
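    // The second argument of addFromString is the grammar's weight, from 0.0 to 1.0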
    speechRecognitionList.addFromString(grammar, 1);
    recognition.grammars = speechRecognitionList;
    console.log("langSelect.value => ", langSelect.value);
    recognition.lang = langSelect.value;
    recognition.interimResults = false; // deliver only final results
    recognition.maxAlternatives = 1; // keep only the single best transcript

    recognition.start();

    recognition.onresult = function (event) {
      console.log("onresult fired");
      // The SpeechRecognitionEvent results property returns a SpeechRecognitionResultList object
      // The SpeechRecognitionResultList object contains SpeechRecognitionResult objects.
      // It has a getter so it can be accessed like an array
      // The first [0] returns the SpeechRecognitionResult at position 0.
      // Each SpeechRecognitionResult object contains SpeechRecognitionAlternative objects that contain individual results.
      // These also have getters so they can be accessed like arrays.
      // The second [0] returns the SpeechRecognitionAlternative at position 0.
      // We then return the transcript property of the SpeechRecognitionAlternative object
      let result = event.results[0][0];
      var speechResult = result.transcript.toLowerCase();
      diagnosticPara.textContent = "Speech received: " + speechResult + ".";
      console.log("Speech received: " + speechResult);
      let confidence = Math.ceil(result.confidence * 100); // confidence is 0–1; show it as a percentage
      if (speechResult === phrase) {
        resultPara.textContent = `I heard the correct phrase! (confidence: ${confidence}%)`;
        resultPara.style.background = "lime";
      } else if (confidence >= 90) {
        resultPara.textContent = `That doesn't sound too bad. (confidence: ${confidence}%)`;
        resultPara.style.background = "orange";
      } else if (confidence > 80) {
        resultPara.textContent = `That sounds weird, but why not... (confidence: ${confidence}%)`;
        resultPara.style.background = "purple";
      } else {
        resultPara.textContent = `That didn't sound right. (confidence: ${confidence}%)`;
        resultPara.style.background = "red";
      }
    };

    recognition.onspeechend = function () {
      recognition.stop();
      testBtn.disabled = false;
      testBtn.textContent = "Start new test";
    };

    recognition.onerror = function (event) {
      testBtn.disabled = false;
      testBtn.textContent = "Start new test";
      diagnosticPara.textContent =
        "Error occurred in recognition: " + event.error;
    };

    recognition.onaudiostart = function (event) {
      //Fired when the user agent has started to capture audio.
      console.log("SpeechRecognition.onaudiostart");
    };

    recognition.onaudioend = function (event) {
      //Fired when the user agent has finished capturing audio.
      console.log("SpeechRecognition.onaudioend");
    };

    recognition.onend = function (event) {
      //Fired when the speech recognition service has disconnected.
      console.log("SpeechRecognition.onend");
    };

    recognition.onnomatch = function (event) {
      //Fired when the speech recognition service returns a final result with no significant recognition. This may involve some degree of recognition, which doesn't meet or exceed the confidence threshold.
      console.log("SpeechRecognition.onnomatch");
    };

    recognition.onsoundstart = function (event) {
      //Fired when any sound — recognisable speech or not — has been detected.
      console.log("SpeechRecognition.onsoundstart");
    };

    recognition.onsoundend = function (event) {
      //Fired when any sound — recognisable speech or not — has stopped being detected.
      console.log("SpeechRecognition.onsoundend");
    };

    recognition.onspeechstart = function (event) {
      //Fired when sound that is recognised by the speech recognition service as speech has been detected.
      console.log("SpeechRecognition.onspeechstart");
    };
    recognition.onstart = function (event) {
      //Fired when the speech recognition service has begun listening to incoming audio with intent to recognize grammars associated with the current SpeechRecognition.
      console.log("SpeechRecognition.onstart");
    };
  }
}
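
// Note: SpeechRecognition needs microphone permission and, in Chrome, a secure
// context (https or localhost); failures are reported through the onerror handler.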

testSpeechRecognition();