Commit cbe2f15d authored by lmj_521aiau@163.com

no message

parent 1d38dd26
@@ -72,6 +72,9 @@ class SHAVAudioManager: NSObject, SFSpeechRecognizerDelegate {
     private var recorderTimer: SHTimer?
     var resultTxts: [String] = []
+    var bestRestlt: String = ""
+    var startTime: CFTimeInterval = CACurrentMediaTime()
+    var recoderResiltCallBack: ((String) -> Void)?
     private func configRecorder(){
@@ -104,21 +107,24 @@ class SHAVAudioManager: NSObject, SFSpeechRecognizerDelegate {
             // Used to check whether recognition has finished
             var isFinal = false
             // If result is not nil,
-            var ss = ""
-            if result != nil {
-                ss = result?.bestTranscription.formattedString ?? ""
-                print("result?.bestTranscription.formattedString ==== \(ss)")
+            if result != nil {
+                self.bestRestlt = result?.bestTranscription.formattedString ?? ""
+                // print("result?.bestTranscription.formattedString ==== \(self.bestRestlt)")
+                self.resultRecoderTxt(self.resultTxts, self.bestRestlt)
                 // If the result is final, set isFinal to true
                 isFinal = (result?.isFinal)!
             }
             // If an error occurred, or the result is final, stop the audioEngine recording and end the recognitionRequest and recognitionTask
             if error != nil || isFinal {
-                if ss.length > 0 {
-                    self.resultTxts.append(ss)
-                    print("self.resultTxts ==== \(self.resultTxts)")
+                if self.bestRestlt.length > 0 {
+                    self.resultTxts.append(self.bestRestlt)
+                    self.bestRestlt = ""
+                    // print("self.resultTxts ==== \(self.resultTxts)")
+                    self.resultRecoderTxt(self.resultTxts)
+                    self.configSpeechTask()
                 }
             }
         })
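Review note on the hunk above: the handler now accumulates the best transcription in `bestRestlt`, forwards every partial result through `resultRecoderTxt`, and re-arms the task via `configSpeechTask()` once a phrase is final. That is the standard `SFSpeechRecognizer` streaming pattern: partial results arrive repeatedly, and `isFinal` (or an error) signals teardown. A minimal self-contained sketch of that pattern, with illustrative names that are not this project's:

```swift
import AVFoundation
import Speech

// Minimal sketch of streaming recognition: feed microphone buffers to a
// recognition request and collect partial transcriptions until isFinal.
// Assumes speech/microphone authorization has already been granted.
final class SpeechStreamSketch {
    private let recognizer = SFSpeechRecognizer(locale: Locale(identifier: "zh-CN"))
    private let audioEngine = AVAudioEngine()
    private var task: SFSpeechRecognitionTask?

    func start(onText: @escaping (String, Bool) -> Void) throws {
        let request = SFSpeechAudioBufferRecognitionRequest()
        request.shouldReportPartialResults = true   // deliver interim results

        // Tap the microphone and append each buffer to the request.
        let input = audioEngine.inputNode
        let format = input.outputFormat(forBus: 0)
        input.installTap(onBus: 0, bufferSize: 1024, format: format) { buffer, _ in
            request.append(buffer)
        }
        audioEngine.prepare()
        try audioEngine.start()

        task = recognizer?.recognitionTask(with: request) { [weak self] result, error in
            if let result = result {
                // Partials keep superseding each other; isFinal marks the last.
                onText(result.bestTranscription.formattedString, result.isFinal)
            }
            if error != nil || result?.isFinal == true {
                self?.audioEngine.stop()
                input.removeTap(onBus: 0)
                request.endAudio()
                self?.task = nil
            }
        }
    }
}
```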
@@ -143,51 +149,64 @@ class SHAVAudioManager: NSObject, SFSpeechRecognizerDelegate {
         self.monitor?.updateMeters()
         // Channel 0 level: -160.0 means no sound at all, 0 is maximum volume
         let decibels = (self.monitor?.peakPower(forChannel: 0))!
-        print("decibels == \(decibels)")
-        if decibels > -24 {
+        // print("decibels == \(decibels)")
+        if decibels > -44 {
             if recognitionTask?.isCancelled == true {
                 start()
             }
         }else{
             // recorderTimer?.invalidate()
             // audioEngine.stop()
             // recognitionRequest = nil
+            recognitionTask = nil
             recorderDataSourceHandler()
+            let endTime: CFTimeInterval = CACurrentMediaTime()
+            if endTime - startTime > 3 {
+                pause()
+                go_on()
+            }
         }
     }
     func recorderDataSourceHandler(){
         // start()
         // recorderTimer?.invalidate()
         // monitor?.stop()
         // monitor?.deleteRecording()
         // recorder?.stop()
         // recorder?.deleteRecording()
     }
     func start(){
         // if recognitionTask?.state{
         //
         // }
         self.configRecorder()
         self.configSpeechTask()
         // self.recorder?.record()
         self.monitor?.record()
+        startTime = CACurrentMediaTime()
         recorderTimer = SHTimer.scheduledTimer(timeInterval: 0.1, target: self, selector: #selector(updateMeters), userInfo: nil, repeats: true)
     }
+    func go_on(){
+        startTime = CACurrentMediaTime()
+        recorderTimer = SHTimer.scheduledTimer(timeInterval: 0.1, target: self, selector: #selector(updateMeters), userInfo: nil, repeats: true)
+    }
-    func pause(){
-        // monitor?.stop()
-        // monitor?.deleteRecording()
-        // recorder?.stop()
-        // recorder?.deleteRecording()
+    private func pause(){
         recorderTimer?.invalidate()
         recognitionTask?.cancel()
     }
+    func manualPause(){
+        recorderTimer?.invalidate()
+        recognitionTask?.cancel()
+        self.resultTxts.append("\n")
+    }
     func stop(){
-        // monitor?.stop()
-        // monitor?.deleteRecording()
-        // recorder?.stop()
-        // recorder?.deleteRecording()
-        recorderTimer?.invalidate()
+        pause()
+        monitor?.stop()
+        recognitionTask = nil
+        audioEngine.stop()
     }
+    func resultRecoderTxt(_ results: [String], _ processStr: String = ""){
+        let recorderResult = results.reduce("", {
+            if $0.length == 0{
+                return $0 + $1
+            }else{
+                return $0 + " " + $1
+            }}) + " " + processStr
+        print("recorderResult ===== \(recorderResult)")
+        recoderResiltCallBack?(recorderResult)
+    }
 }
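The metering logic above is the commit's silence detector: a 0.1 s timer calls `updateMeters()`, reads `peakPower(forChannel: 0)` (-160 dB is silence, 0 dB is full scale), and after the threshold change treats anything below -44 dB as quiet; once quiet persists past 3 seconds it pauses and re-arms recognition. A standalone sketch of the same idea, assuming a plain `Timer` and `AVAudioRecorder` in place of the project's `SHTimer` and `monitor`:

```swift
import AVFoundation
import QuartzCore

// Sketch of timer-driven silence detection: poll the recorder's peak power
// and report a pause after a sustained dip below the threshold.
final class SilenceDetectorSketch {
    private let recorder: AVAudioRecorder
    private var timer: Timer?
    private var lastLoudTime: CFTimeInterval = CACurrentMediaTime()
    var onSilence: (() -> Void)?

    init(recorder: AVAudioRecorder) {
        self.recorder = recorder
        recorder.isMeteringEnabled = true   // required for peakPower to update
    }

    func start() {
        lastLoudTime = CACurrentMediaTime()
        timer = Timer.scheduledTimer(withTimeInterval: 0.1, repeats: true) { [weak self] _ in
            guard let self = self else { return }
            self.recorder.updateMeters()
            // peakPower ranges from -160 dB (silence) to 0 dB (full scale).
            let decibels = self.recorder.peakPower(forChannel: 0)
            if decibels > -44 {
                self.lastLoudTime = CACurrentMediaTime()
            } else if CACurrentMediaTime() - self.lastLoudTime > 3 {
                // Over 3 seconds of quiet: report a pause, then keep polling.
                self.lastLoudTime = CACurrentMediaTime()
                self.onSilence?()
            }
        }
    }

    func stop() {
        timer?.invalidate()
        timer = nil
    }
}
```

The move from -24 dB to -44 dB means much quieter input still counts as speech, so soft speakers are less likely to be cut off as silence.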
@@ -56,7 +56,7 @@ class SHRecordListViewController: SHBaseViewController {
     override func viewDidLoad() {
         super.viewDidLoad()
-        // SHAVAudioManager.shared.start()
+        SHAVAudioManager.shared.start()
         let url = FileManager.default.url(forUbiquityContainerIdentifier: nil)
         print(url as Any)
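Usage note: with `start()` now called directly in `viewDidLoad`, a caller would normally also set the manager's result callback first, or the recognized text goes nowhere. A hedged usage sketch, assuming only the `SHAVAudioManager` API visible earlier in this commit:

```swift
// Hedged sketch: wire the shared manager's callback before starting it.
override func viewDidLoad() {
    super.viewDidLoad()
    SHAVAudioManager.shared.recoderResiltCallBack = { text in
        // `text` is the space-joined history plus the in-progress phrase.
        print("recognized so far: \(text)")
    }
    SHAVAudioManager.shared.start()
}
```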
@@ -53,29 +53,29 @@ class SHRecordViewController: SHBaseViewController{
     var image_file_paths: [String] = []
     var image_indexs: [NSInteger] = []
-    var session: AVAudioSession {
-        let session: AVAudioSession = AVAudioSession.sharedInstance()
-        do {
-            try session.setCategory(AVAudioSession.Category.playAndRecord, options: .defaultToSpeaker)
-        }catch{
-            print("session config failed")
-        }
-        return session
-    }
-    lazy var recorder: AVAudioRecorder? = self.getRecorder()
+    // var session: AVAudioSession {
+    //     let session: AVAudioSession = AVAudioSession.sharedInstance()
+    //     do {
+    //         try session.setCategory(AVAudioSession.Category.playAndRecord, options: .defaultToSpeaker)
+    //     }catch{
+    //         print("session config failed")
+    //     }
+    //     return session
+    // }
+    // lazy var recorder: AVAudioRecorder? = self.getRecorder()
     var recorder_mp3: SHMp3RecordManager = SHMp3RecordManager.shared()
-    // Create the speech recognizer, specifying the locale that determines the transcription language; zh-CN (Simplified Chinese) is pinned here rather than the current region
-    // private let speechRecognizer = SFSpeechRecognizer(locale: Locale.autoupdatingCurrent)
-    private let speechRecognizer = SFSpeechRecognizer(locale: Locale(identifier: "zh-CN"))
-    // The recognition request gives the recognizer an audio input source, here the speech provided in the audio buffer
-    private var recognitionRequest: SFSpeechAudioBufferRecognitionRequest?
-    // The recognition task tracks progress; it can be used to cancel or stop the current recognition
-    private var recognitionTask: SFSpeechRecognitionTask?
-    // The audio engine supplies the recording input
-    private var audioEngine = AVAudioEngine()
+    // // Create the speech recognizer, specifying the locale that determines the transcription language; zh-CN (Simplified Chinese) is pinned here rather than the current region
+    // // private let speechRecognizer = SFSpeechRecognizer(locale: Locale.autoupdatingCurrent)
+    // private let speechRecognizer = SFSpeechRecognizer(locale: Locale(identifier: "zh-CN"))
+    //
+    // // The recognition request gives the recognizer an audio input source, here the speech provided in the audio buffer
+    // private var recognitionRequest: SFSpeechAudioBufferRecognitionRequest?
+    // // The recognition task tracks progress; it can be used to cancel or stop the current recognition
+    // private var recognitionTask: SFSpeechRecognitionTask?
+    // // The audio engine supplies the recording input
+    // private var audioEngine = AVAudioEngine()
     // Text data
     private var recognitionTaskText: [String] = []
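For reference, the session setup that the now commented-out property performed is the usual `AVAudioSession` play-and-record configuration. A minimal sketch of the same setup (with activation added, which the original computed property skipped):

```swift
import AVFoundation

// Sketch: configure the shared audio session for simultaneous playback and
// recording, route output to the speaker, and activate it.
func configurePlayAndRecordSession() -> AVAudioSession {
    let session = AVAudioSession.sharedInstance()
    do {
        try session.setCategory(.playAndRecord, options: .defaultToSpeaker)
        try session.setActive(true)   // the original property never activated
    } catch {
        print("session config failed: \(error)")
    }
    return session
}
```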