This document is a tutorial on using speech-to-text in React Native.
First I will show a simple implementation, and then customize it to work on both iOS and Android at the same time.
Simple implement:
Step 1: Navigate to your project folder using the terminal/command line.
Step 2: Import react-native-voice to your project by command:
npm install --save react-native-voice
Step 3: Link library:
react-native link react-native-voice
Step 4: In project, create file to handle voice, in this case I will use name VoiceButton.js
Step 5: Implement simple component with button with click function
import React, { Component } from 'react';
import {
StyleSheet,
Text,
Image,
View,
TouchableOpacity
} from 'react-native';
export default class VoiceButton extends Component {
constructor(props){
super(props);
this.state={
result: "",
listening: false
};
}
shouldComponentUpdate(nextState){
if (this.state.listening!=nextState.listening)
{
return true;
}
returnfalse;
}
render(){
return (
{
}} disabled={this.state.listening}>
);
}
}
// Styles for the voice button. `buttonEnabled` / `buttonDisabled` are
// meant to be swapped on the touchable depending on `state.listening`
// (the disabled look is simply a half-transparent icon).
const styles = StyleSheet.create({
container: {
flex: 1,
alignItems: 'center',
justifyContent: 'center',
},
// Shared sizing for the mic icon image.
buttonCommon:{
resizeMode: 'contain',
width:30,
height: 30,
},
buttonEnabled: {
opacity: 1.0,
},
buttonDisabled: {
opacity: 0.5,
},
});
Step 6: Implement voice detection:
Import Voice from react-native-voice
import Voice from 'react-native-voice';
Add voice handle in constructor function:
Voice.onSpeechEnd = this.onSpeechEnd.bind(this);
Voice.onSpeechResults=this.onSpeechResults.bind(this);
Add two functions that handle voice:
// Called by react-native-voice with recognition results; e.value is an
// array of transcripts (this naive version stores the whole array —
// the platform differences are handled later in the tutorial).
onSpeechResults(e) {
this.setState({result:e.value});
}
// Fired when the engine stops listening; intentionally a no-op in this
// simple version.
onSpeechEnd(e){
}
Add function to start/stop recording
async startRecording()
{
try{
await Voice.start('ja'); // I use japanese in voice detected, but you can set language you want
}catch (e) {
console.error(e);
}
this.setState({ listening:true});
}
// Stop the speech recognizer; Voice.onSpeechEnd fires afterwards.
// NOTE(review): `listening` is deliberately not reset here — the
// tutorial's later onSpeechEnd handler takes care of it.
async stopRecording()
{
try{
await Voice.stop();
} catch (e) {
console.error(e);
}
}
Modify render function for start recording when press button
render() {
return (
{
this.startRecording();
}} disabled={this.state.listening}>
);
Now you can get text from what you speak from this.state.result
But there are some problems here:
1. Both platforms return an array, but on Android it is an array of possible transcriptions, while on iOS it contains just one text.
2. The onSpeechResults function is called many times on iOS, once for each word you say. On Android, it is called just once, and then recording finishes.
3. Android automatically detects when you stop speaking and stops recording for you.
But on iOS, listening continues until you call the stop function or 60 seconds have passed.
Example: when I say: “I will go home at night”
Android: “I will go home at night, I will go home at nine, …”
iOS: “I” then “I will” then “I will go” … then “I will go home at night”
After that we will fix those:
1 will be fixed by using the first value in array:
onSpeechResults(e) {
this.setState({result:e.value});
}
=>
onSpeechResults(e) {
this.setState({result:e.value[0]});
}
Issue 2 is fixed by handling iOS and Android separately, since they behave differently:
import {
StyleSheet,
Text,
Image,
View,
TouchableOpacity
} from 'react-native';
=>
import {
StyleSheet,
Text,
Image,
View,
TouchableOpacity,
Platform
} from 'react-native';
and
onSpeechResults(e) {
this.setState({result:e.value});
}
onSpeechEnd(e){
}
=>
onSpeechResults(e) {
if (Platform.OS==='ios')
{
this.setState({result:e.value[0]});
}
else
{
// do what you want with the result (for android)
}
}
onSpeechEnd(e){
if (Platform.OS==='ios')
{
if (this.state.result!=null&&this.state.result!='')
{
// do what you want with the result (for iOS)
}
}
}
Issue 3 is fixed by adding a timer that stops listening on iOS:
import {
StyleSheet,
Text,
Image,
View,
TouchableOpacity
} from 'react-native';
=>
import {
StyleSheet,
Text,
Image,
View,
TouchableOpacity,
Platform
} from 'react-native';
let timer = null;
and
onSpeechResults(e) {
this.setState({result:e.value});
}
onSpeechEnd(e){
}
=>
onSpeechResults(e) {
if (Platform.OS==='ios')
{
if (timer!==null)
{
clearTimeout(timer);
}
timer=setTimeout(()=>{
this.stopRecording();
},2000);
}
else
{
this.setState({ listening:false});
}
}
onSpeechEnd(e){
if (Platform.OS==='ios')
{
timer=null;
this.setState({ listening:false});
}
}
import {
StyleSheet,
Text,
Image,
View,
TouchableOpacity,
Platform
} from 'react-native';
import Voice from 'react-native-voice';
let timer = null;
export default class VoiceButton extends Component {
constructor(props){
super(props);
this.state={
result: "",
listening: false
};
Voice.onSpeechEnd = this.onSpeechEnd.bind(this);
Voice.onSpeechResults=this.onSpeechResults.bind(this);
}
onSpeechResults(e) {
if (Platform.OS==='ios')
{
this.setState({result:e.value[0]});
if (timer!==null)
{
clearTimeout(timer);
}
timer=setTimeout(()=>{
this.stopRecording();
},2000);
}
else
{
this.setState({ listening:false});
// Do what you want with the result (for android)
}
}
onSpeechEnd(e){
if (Platform.OS==='ios')
{
timer=null;
this.setState({ listening:false});
if (this.state.result!=null&&this.state.result!='')
{
// do what you want with the result (for iOS)
}
}
}
shouldComponentUpdate(nextState){
if (this.state.listening!=nextState.listening)
{
return true;
}
returnfalse;
}
async startRecording()
{
try{
await Voice.start('ja'); // I use japanese in voice detected, but you can set language you want
}catch (e) {
console.error(e);
}
this.setState({ listening:true});
}
async stopRecording()
{
try{
await Voice.stop();
} catch (e) {
console.error(e);
}
}
componentWillUnmount() {
Voice.destroy().then(Voice.removeAllListeners);
}
render() {
return (
{
}} disabled={this.state.listening}>
);
}
}
// Styles for the voice button. `buttonEnabled` / `buttonDisabled` are
// meant to be swapped on the touchable depending on `state.listening`
// (the disabled look is simply a half-transparent icon).
const styles = StyleSheet.create({
container: {
flex: 1,
alignItems: 'center',
justifyContent: 'center',
},
// Shared sizing for the mic icon image.
buttonCommon:{
resizeMode: 'contain',
width:30,
height: 30,
},
buttonEnabled: {
opacity: 1.0,
},
buttonDisabled: {
opacity: 0.5,
},
});
That's all for my tutorial — happy coding!
Source reference:
https://github.com/wenkesj/react-native-voice
Would you like to work with us at NeosCorp? Please contact us here!
http://neoscorp.vn/vi/contact
You need to login in order to like this post: click here
YOU MIGHT ALSO LIKE