
I am working on my speech recognition project (iOS, Swift 3). The recording code runs well, but I don't know how to write the code to store the audio in Firebase.

Can you help me with this?

import UIKit 
import AVFoundation 
import Firebase 
import Speech 
import FirebaseStorage 


class SpeechViewController: UIViewController, AVAudioPlayerDelegate, AVAudioRecorderDelegate {

    var usersStorageRef: FIRStorageReference!
    var audioPlayer: AVAudioPlayer?
    var audioRecorder: AVAudioRecorder?

    private let speechRecognizer = SFSpeechRecognizer(locale: Locale(identifier: "en-US"))!
    private var speechRecognitionRequest: SFSpeechAudioBufferRecognitionRequest?
    private var speechRecognitionTask: SFSpeechRecognitionTask?
    private let audioEngine = AVAudioEngine()




    @IBOutlet weak var myTextView: UITextView!
    @IBOutlet weak var StopButton: UIButton!
    @IBOutlet weak var StartButton: UIButton!

override func viewDidLoad() { 
    super.viewDidLoad() 
    authorizeSR() 

    let storage = FIRStorage.storage() 

    let storageRef = storage.reference(forURL: "gs://login-92e0b.appspot.com") 
    //let localFile = URL(string: "sound.caf")! 


    usersStorageRef = storageRef.child("users") 


    StopButton.isEnabled = false 

    // Record to sound.caf in the app's documents directory.
    let fileMgr = FileManager.default

    let dirPaths = fileMgr.urls(for: .documentDirectory, in: .userDomainMask)

    let soundFileURL = dirPaths[0].appendingPathComponent("sound.caf")

    let recordSettings =
        [AVEncoderAudioQualityKey: AVAudioQuality.min.rawValue,
         AVEncoderBitRateKey: 16,
         AVNumberOfChannelsKey: 2,
         AVSampleRateKey: 44100.0] as [String : Any]

    let audioSession = AVAudioSession.sharedInstance()

    do {
        try audioSession.setCategory(AVAudioSessionCategoryPlayAndRecord)
    } catch let error as NSError {
        print("audioSession error: \(error.localizedDescription)")
    }

    do {
        try audioRecorder = AVAudioRecorder(url: soundFileURL,
                                            settings: recordSettings as [String : AnyObject])
        audioRecorder?.prepareToRecord()
    } catch let error as NSError {
        print("audioRecorder error: \(error.localizedDescription)")
    }


    // Do any additional setup after loading the view. 
} 
    func authorizeSR() {
        SFSpeechRecognizer.requestAuthorization { authStatus in

            // Button state must be updated on the main queue.
            OperationQueue.main.addOperation {
                switch authStatus {
                case .authorized:
                    self.StartButton.isEnabled = true

                case .denied:
                    self.StartButton.isEnabled = false
                    self.StartButton.setTitle("Speech recognition access denied by user", for: .disabled)

                case .restricted:
                    self.StartButton.isEnabled = false
                    self.StartButton.setTitle("Speech recognition restricted on device", for: .disabled)

                case .notDetermined:
                    self.StartButton.isEnabled = false
                    self.StartButton.setTitle("Speech recognition not authorized", for: .disabled)
                }
            }
        }
    }


override func didReceiveMemoryWarning() { 
    super.didReceiveMemoryWarning() 
    // Dispose of any resources that can be recreated. 
} 


@IBAction func StartTranscribing(_ sender: Any) {
    // Start the file recording if it is not already running.
    if audioRecorder?.isRecording == false {
        audioRecorder?.record()
    }
    StopButton.isEnabled = true
    StartButton.isEnabled = false

    // Note: try! will crash if the session fails to start.
    try! startSession()
}
    func startSession() throws {

        // Cancel any task left over from a previous session.
        if let recognitionTask = speechRecognitionTask {
            recognitionTask.cancel()
            self.speechRecognitionTask = nil
        }

        let audioSession = AVAudioSession.sharedInstance()
        try audioSession.setCategory(AVAudioSessionCategoryRecord)

        speechRecognitionRequest = SFSpeechAudioBufferRecognitionRequest()

        guard let recognitionRequest = speechRecognitionRequest else {
            fatalError("SFSpeechAudioBufferRecognitionRequest object creation failed")
        }

        guard let inputNode = audioEngine.inputNode else {
            fatalError("Audio engine has no input node")
        }

        recognitionRequest.shouldReportPartialResults = true

        speechRecognitionTask = speechRecognizer.recognitionTask(with: recognitionRequest) { result, error in

            var finished = false

            // Show each partial transcription as it arrives.
            if let result = result {
                self.myTextView.text = result.bestTranscription.formattedString
                finished = result.isFinal
            }

            if error != nil || finished {
                self.audioEngine.stop()
                inputNode.removeTap(onBus: 0)

                self.speechRecognitionRequest = nil
                self.speechRecognitionTask = nil

                self.StartButton.isEnabled = true
            }
        }

        // Feed microphone buffers into the recognition request.
        let recordingFormat = inputNode.outputFormat(forBus: 0)
        inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer: AVAudioPCMBuffer, when: AVAudioTime) in
            self.speechRecognitionRequest?.append(buffer)
        }

        audioEngine.prepare()
        try audioEngine.start()
    }


@IBAction func StopTranscribing(_ sender: Any) {
    if audioEngine.isRunning {
        audioEngine.stop()
        speechRecognitionRequest?.endAudio()
    }

    StopButton.isEnabled = false
    StartButton.isEnabled = true

    if audioRecorder?.isRecording == true {
        audioRecorder?.stop()
    } else {
        audioPlayer?.stop()
    }
    displayAlertMessage(messageToDisplay: "SUCCESSFUL")
}
func displayAlertMessage(messageToDisplay: String) {
    let alertController = UIAlertController(title: "Alert", message: messageToDisplay, preferredStyle: .alert)

    let OKAction = UIAlertAction(title: "OK", style: .default) { (action: UIAlertAction!) in

        // Code in this block will trigger when the OK button is tapped.
        let storyBoard: UIStoryboard = UIStoryboard(name: "Main", bundle: nil)
        let newView1 = storyBoard.instantiateViewController(withIdentifier: "loggedVC") as! loggedViewController

        self.present(newView1, animated: true, completion: nil)

        print("Ok button tapped")
    }
    alertController.addAction(OKAction)

    self.present(alertController, animated: true, completion: nil)
}




func audioPlayerDidFinishPlaying(_ player: AVAudioPlayer, successfully flag: Bool) {
    StartButton.isEnabled = true
    StopButton.isEnabled = false
}

func audioPlayerDecodeErrorDidOccur(_ player: AVAudioPlayer, error: Error?) { 
    print("Audio Play Decode Error") 
} 

func audioRecorderDidFinishRecording(_ recorder: AVAudioRecorder, successfully flag: Bool) { 
} 

func audioRecorderEncodeErrorDidOccur(_ recorder: AVAudioRecorder, error: Error?) { 
    print("Audio Record Encode Error") 
} 
} 

Comments are not for extended discussion; this conversation has been [moved to chat](http://chat.stackoverflow.com/rooms/138404/discussion-on-question-by-kalpana-anandan-ios-swift-3-for-speech-recognition). –

Answer


func uploadAudioFireBase() {

    let urlnew = URL(fileURLWithPath: Bundle.main.path(forResource: "Song", ofType: "mp3")!)

    do {
        let audioData = try Data(contentsOf: urlnew)
        let filePath = FIRAuth.auth()!.currentUser!.uid + "/youraudio.mp3"
        if audioData.count > 0 {
            self.uploadRecordToFireBaseStorage(audioData: audioData, to: filePath)
        }
    }
    catch {
        print("Cancel")
    }
}

/// Upload the audio data to Firebase Storage at the given path.
func uploadRecordToFireBaseStorage(audioData: Data, to filePath: String) {

    let storageRef = FIRStorage.storage().reference()

    storageRef.child(filePath).put(audioData, metadata: nil) { (metaData, error) in
        if let error = error {
            print("Error uploading: \(error)")
            print("Upload Failed")
            return
        }
        else {
            print("Success")
        }
    }
}
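One way to confirm that the upload actually completed is to read the download URL off the returned metadata in the success branch. This is a minimal sketch against the same Firebase 3.x (FIRStorage) API generation the answer uses, where FIRStorageMetadata exposed downloadURL(); the function name and its parameters are illustrative assumptions, not part of the answer:

func uploadAndConfirm(audioData: Data, to filePath: String) {
    let storageRef = FIRStorage.storage().reference()
    storageRef.child(filePath).put(audioData, metadata: nil) { (metaData, error) in
        if let error = error {
            print("Error uploading: \(error)")
            return
        }
        // Printing the download URL is a quick proof the file reached Storage.
        if let url = metaData?.downloadURL() {
            print("Stored at: \(url)")
        }
    }
}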

You can pass your file path instead of the bundle path. –
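A minimal sketch of what this comment suggests: reading the question's sound.caf out of the documents directory instead of a bundled MP3. The file name comes from the question's viewDidLoad; the function name and the guard are assumptions for illustration:

func uploadRecordedAudio() {
    // Locate the recording written by the question's AVAudioRecorder.
    let docs = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
    let soundFileURL = docs.appendingPathComponent("sound.caf")

    do {
        let audioData = try Data(contentsOf: soundFileURL)
        // Avoid the force-unwrap on currentUser used in the answer.
        guard let uid = FIRAuth.auth()?.currentUser?.uid else {
            print("No signed-in user; upload skipped")
            return
        }
        // Reuse the answer's helper with the recording's data.
        uploadRecordToFireBaseStorage(audioData: audioData, to: uid + "/sound.caf")
    } catch {
        print("Could not read recording: \(error)")
    }
}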


I used your code and it runs well without any errors, but it does not save to Firebase .... –
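If the code runs cleanly but nothing appears in Storage, two silent failure points are worth ruling out (both are assumptions about this project, not facts from the thread): no signed-in user when the path is built, and an upload error reported to the completion handler that nobody reads. A small diagnostic sketch:

func uploadWithDiagnostics(audioData: Data) {
    // 1. With no authenticated user, the default Storage rules reject the
    //    write, and the answer's force-unwrap of currentUser would crash.
    guard let uid = FIRAuth.auth()?.currentUser?.uid else {
        print("Not signed in - sign in before uploading")
        return
    }

    let ref = FIRStorage.storage().reference().child(uid + "/youraudio.mp3")
    let task = ref.put(audioData, metadata: nil) { (metaData, error) in
        // 2. Rules or network failures surface here, not as thrown errors.
        if let error = error {
            print("Upload failed: \(error.localizedDescription)")
        } else {
            print("Upload finished at: \(metaData?.path ?? "unknown path")")
        }
    }

    // 3. Progress events show whether any bytes are actually moving.
    task.observe(.progress) { snapshot in
        print("Uploaded \(snapshot.progress?.completedUnitCount ?? 0) bytes")
    }
}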