
I have been referring to the post Android Speech Recognition as a service on Android 4.1 & 4.2 to try to implement speech recognition inside a Service.

I think I have it mostly right: when I run it on my device, I get the "Ready for Speech" toast message that I raise in the onReadyForSpeech() callback.

According to Hoan Nguyen, who answered that post, we can start speaking as soon as onReadyForSpeech() is called.

My problem is that I don't know how to capture what is being spoken and convert it to text, or where in the code to do so.

Does anyone know how to do this? I know it's a very basic question, but this is my first time working with speech recognition, so please bear with me.

Any help would be greatly appreciated. Thanks in advance :)

public class MyService extends Service 
    { 
     protected AudioManager mAudioManager; 
     protected SpeechRecognizer mSpeechRecognizer; 
     protected Intent mSpeechRecognizerIntent; 
     protected final Messenger mServerMessenger = new Messenger(new IncomingHandler(this)); 

     protected boolean mIsListening; 
     protected volatile boolean mIsCountDownOn; 

     static final int MSG_RECOGNIZER_START_LISTENING = 1; 
     static final int MSG_RECOGNIZER_CANCEL = 2; 

    @Override 
    public void onCreate() 
    { 
     super.onCreate(); 
     mAudioManager = (AudioManager) getSystemService(Context.AUDIO_SERVICE); 
     mSpeechRecognizer = SpeechRecognizer.createSpeechRecognizer(this); 
     mSpeechRecognizer.setRecognitionListener(new SpeechRecognitionListener()); 
     mSpeechRecognizerIntent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH); 
     mSpeechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL, 
             RecognizerIntent.LANGUAGE_MODEL_FREE_FORM); 
     mSpeechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_CALLING_PACKAGE, 
             this.getPackageName()); 

     // NOTE: this direct call duplicates the MSG_RECOGNIZER_START_LISTENING 
     // message sent from onStartCommand() and never sets mIsListening, 
     // so the recognizer may end up being started twice. 
     mSpeechRecognizer.startListening(mSpeechRecognizerIntent); 
     //Toast.makeText(this, "onCreate", Toast.LENGTH_SHORT).show(); 
     Log.d("onCreate", "Entered"); 
    } 


    protected static class IncomingHandler extends Handler 
    { 
     private WeakReference<MyService> mtarget; 

     IncomingHandler(MyService target) 
     { 
      mtarget = new WeakReference<MyService>(target); 

      Log.d("IncomingHandler","Entered"); 
     } 


     @Override 
     public void handleMessage(Message msg) 
     { 
      Log.d("handleMessage","Entered"); 

      final MyService target = mtarget.get(); 
      // guard against the service having already been destroyed 
      if (target == null) 
      { 
       return; 
      } 

      switch (msg.what) 
      { 
       case MSG_RECOGNIZER_START_LISTENING: 

        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN) 
        { 
         // turn off beep sound 
         target.mAudioManager.setStreamMute(AudioManager.STREAM_SYSTEM, true); 
        } 
        if (!target.mIsListening) 
        { 
         target.mSpeechRecognizer.startListening(target.mSpeechRecognizerIntent); 
         target.mIsListening = true; 
         Log.d("TAG", "message start listening"); 
         //$NON-NLS-1$ 
        } 
        break; 

       case MSG_RECOGNIZER_CANCEL: 
         target.mSpeechRecognizer.cancel(); 
         target.mIsListening = false; 
         Log.d("TAG", "message canceled recognizer"); //$NON-NLS-1$ 
         break; 
      } 
     } 
    } 

    // Count down timer for Jelly Bean work around 
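    // (On Jelly Bean, SpeechRecognizer can stop listening after a few seconds 
    // of silence without firing any callback, so this timer cancels and 
    // restarts the recognizer to keep it listening.) 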
    protected CountDownTimer mNoSpeechCountDown = new CountDownTimer(5000, 5000) 
    { 

     @Override 
     public void onTick(long millisUntilFinished) 
     { 
       Log.d("onTick", "Entered"); 
     } 

     @Override 
     public void onFinish() 
     { 
      Log.d("onFinish","Entered"); 

      mIsCountDownOn = false; 
      Message message = Message.obtain(null, MSG_RECOGNIZER_CANCEL); 
      try 
      { 
       mServerMessenger.send(message); 
       message = Message.obtain(null, MSG_RECOGNIZER_START_LISTENING); 
       mServerMessenger.send(message); 
      } 
      catch (RemoteException e) 
      { 
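        // ignored: the Messenger targets a Handler in this same process, 
        // so a RemoteException is not expected here 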

      } 
     } 
    }; 

    @Override 
    public int onStartCommand(Intent intent, int flags, int startId) { 
     //mSpeechRecognizer.startListening(mSpeechRecognizerIntent); 

     try 
     { 
      Message msg = Message.obtain(null, MSG_RECOGNIZER_START_LISTENING); 
      mServerMessenger.send(msg); 
     } 
     catch (RemoteException e) 
     { 
      Log.d("msg",""+e); 
     } 
     return START_NOT_STICKY; 
     //return super.onStartCommand(intent, flags, startId); 
    } 

    @Override 
    public void onDestroy() 
    { 
     super.onDestroy(); 

     if (mIsCountDownOn) 
     { 
      mNoSpeechCountDown.cancel(); 
     } 
     if (mSpeechRecognizer != null) 
     { 
      mSpeechRecognizer.destroy(); 
     } 

     Log.d("onDestroy","Entered"); 
    } 

    protected class SpeechRecognitionListener implements RecognitionListener 
    { 

     private static final String TAG = "Speech---->"; 

     @Override 
     public void onBeginningOfSpeech() 
     { 
      // speech input will be processed, so there is no need for count down anymore 
      if (mIsCountDownOn) 
      { 
       mIsCountDownOn = false; 
       mNoSpeechCountDown.cancel(); 
      }    
      //Log.d(TAG, "onBeginingOfSpeech"); //$NON-NLS-1$ 
      Log.d("onBeginningOfSpeech","Entered"); 
     } 

     @Override 
     public void onBufferReceived(byte[] buffer) 
     { 
       Log.d("onBufferReceived", "Entered"); 
     } 

     @Override 
     public void onEndOfSpeech() 
     { 
      //Log.d(TAG, "onEndOfSpeech"); //$NON-NLS-1$ 
      Log.d("onEndOfSpeech","Entered"); 
     } 

     @Override 
     public void onError(int error) 
     { 
      if (mIsCountDownOn) 
      { 
       mIsCountDownOn = false; 
       mNoSpeechCountDown.cancel(); 
      } 
      mIsListening = false; 
      Message message = Message.obtain(null, MSG_RECOGNIZER_START_LISTENING); 
      try 
      { 
        mServerMessenger.send(message); 
      } 
      catch (RemoteException e) 
      { 
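       // ignored: in-process Messenger, a RemoteException is not expected 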

      } 
      //Log.d(TAG, "error = " + error); //$NON-NLS-1$ 
      Log.d("onError","Entered"); 
     } 

     @Override 
     public void onEvent(int eventType, Bundle params) 
     { 

     } 

     @Override 
     public void onPartialResults(Bundle partialResults) 
     { 

     } 

     @Override 
     public void onReadyForSpeech(Bundle params) 
     { 
      if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN) 
      { 
       mIsCountDownOn = true; 
       mNoSpeechCountDown.start(); 
       mAudioManager.setStreamMute(AudioManager.STREAM_SYSTEM, false); 
      } 
      //Log.d("TAG", "onReadyForSpeech"); 
      Toast.makeText(getApplicationContext(), "Ready for Speech", Toast.LENGTH_SHORT).show(); 
      Log.d("onReadyForSpeech","Entered");//$NON-NLS-1$ 
     } 

     @Override 
     public void onResults(Bundle results) 
     { 
      //Log.d(TAG, "onResults"); //$NON-NLS-1$ 

     } 

     @Override 
     public void onRmsChanged(float rmsdB) 
     { 

     } 



    } 

    @Override 
    public IBinder onBind(Intent intent) { 
     return null; 
    } 
} 

Answer


You get it in onResults(Bundle results); there you can retrieve what the user said as an ArrayList:

ArrayList<String> matches = result.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION); 

It works, and thanks a lot. I just knew you would be the one to answer this question. You can't see it, but there is a big smile on my face right now :P – ik024


For now I have just copied your code without understanding most of it, but I'd really like to understand everything you did. Could you help me by pointing me to some links that explain it? – ik024


Just read about RecognitionListener, RecognizerIntent and SpeechRecognizer at http://developer.android.com/reference/android/speech/package-summary.html –