Short video platform source code: an iOS WeChat-style voice input animation
Below is the relevant code for implementing a WeChat-style voice input animation on iOS, as used in a short video platform. The recognizer is wrapped in a small PBSpeechRecognizer class that reports recognition results and the current input level through a delegate.
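Before any of this runs, the app needs user consent for both speech recognition and the microphone. As a reminder (not part of the original post), Info.plist should contain NSSpeechRecognitionUsageDescription and NSMicrophoneUsageDescription entries, and authorization should be requested once before the first call to startR. A minimal sketch:

```objc
// A minimal sketch (not from the original post): request speech-recognition
// authorization once before the first startR call. Info.plist must also contain
// NSSpeechRecognitionUsageDescription and NSMicrophoneUsageDescription.
#import <Speech/Speech.h>

- (void)requestSpeechAuthorizationIfNeeded
{
    if (@available(iOS 10.0, *)) {
        [SFSpeechRecognizer requestAuthorization:^(SFSpeechRecognizerAuthorizationStatus status) {
            dispatch_async(dispatch_get_main_queue(), ^{
                BOOL authorized = (status == SFSpeechRecognizerAuthorizationStatusAuthorized);
                NSLog(@"speech recognition authorized: %d", authorized);
            });
        }];
    }
}
```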
```objc
//
//  PBSpeechRecognizer.h
//  ParkBest
//
//  Created by summerxx27 on 2018/10/30.
//  Copyright © 2018 summerxx27. All rights reserved.
//

#import <Foundation/Foundation.h>

NS_ASSUME_NONNULL_BEGIN

@protocol PBSpeechRecognizerProtocol <NSObject>
@optional
- (void)recognitionSuccess:(NSString *)result;
- (void)recognitionFail:(NSString *)result;
- (void)level:(float)value;
@end

@interface PBSpeechRecognizer : NSObject

@property (nonatomic, weak) id<PBSpeechRecognizerProtocol> delegate;

- (void)startR;
- (void)stopR;

@end

NS_ASSUME_NONNULL_END
```
```objc
//
//  PBSpeechRecognizer.m
//  ParkBest
//
//  Created by summerxx27 on 2018/10/30.
//  Copyright © 2018 summerxx27. All rights reserved.
//

#import "PBSpeechRecognizer.h"
#import <Speech/Speech.h>

API_AVAILABLE(ios(10.0))
@interface PBSpeechRecognizer ()

@property (nonatomic, strong) AVAudioEngine *audioEngine;
@property (nonatomic, strong) SFSpeechRecognizer *speechRecognizer;
@property (nonatomic, strong) SFSpeechAudioBufferRecognitionRequest *recognitionRequest;
@property (nonatomic, strong) AVAudioRecorder *recorder;
@property (nonatomic, strong) NSTimer *levelTimer;

@end

@implementation PBSpeechRecognizer

- (void)startR
{
    if (!self.speechRecognizer) {
        // Set the recognition language
        NSLocale *locale = [NSLocale localeWithLocaleIdentifier:@"zh-CN"];
        if (@available(iOS 10.0, *)) {
            self.speechRecognizer = [[SFSpeechRecognizer alloc] initWithLocale:locale];
        } else {
            // Fallback on earlier versions
        }
    }
    if (!self.audioEngine) {
        self.audioEngine = [[AVAudioEngine alloc] init];
    }

    AVAudioSession *audioSession = [AVAudioSession sharedInstance];
    if (@available(iOS 10.0, *)) {
        [audioSession setCategory:AVAudioSessionCategoryRecord
                             mode:AVAudioSessionModeMeasurement
                          options:AVAudioSessionCategoryOptionDuckOthers
                            error:nil];
    } else {
        // Fallback on earlier versions
    }
    [audioSession setActive:YES
                withOptions:AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation
                      error:nil];

    if (self.recognitionRequest) {
        [self.recognitionRequest endAudio];
        self.recognitionRequest = nil;
    }
    if (@available(iOS 10.0, *)) {
        self.recognitionRequest = [[SFSpeechAudioBufferRecognitionRequest alloc] init];
    } else {
        // Fallback on earlier versions
    }
    // Report partial results so the transcription updates in real time
    self.recognitionRequest.shouldReportPartialResults = YES;

    if (@available(iOS 10.0, *)) {
        [self.speechRecognizer recognitionTaskWithRequest:self.recognitionRequest
                                            resultHandler:^(SFSpeechRecognitionResult * _Nullable result, NSError * _Nullable error) {
            if (result.isFinal) {
                NSLog(@"is final: %d result: %@", result.isFinal, result.bestTranscription.formattedString);
                if ([self.delegate respondsToSelector:@selector(recognitionSuccess:)]) {
                    [self.delegate recognitionSuccess:result.bestTranscription.formattedString];
                }
            } else {
                if ([self.delegate respondsToSelector:@selector(recognitionFail:)]) {
                    // [self.delegate recognitionFail:error.domain];
                }
            }
        }];
    } else {
        // Fallback on earlier versions
    }

    AVAudioFormat *recordingFormat = [[self.audioEngine inputNode] outputFormatForBus:0];
    [[self.audioEngine inputNode] installTapOnBus:0
                                       bufferSize:1024
                                           format:recordingFormat
                                            block:^(AVAudioPCMBuffer * _Nonnull buffer, AVAudioTime * _Nonnull when) {
        [self.recognitionRequest appendAudioPCMBuffer:buffer];
    }];
    [self.audioEngine prepare];
    [self.audioEngine startAndReturnError:nil];

    // Level metering: record to /dev/null so no audio file is actually kept
    [[AVAudioSession sharedInstance] setCategory:AVAudioSessionCategoryPlayAndRecord error:nil];
    NSURL *url = [NSURL fileURLWithPath:@"/dev/null"];
    NSDictionary *settings = @{AVSampleRateKey: @(44100.0),
                               AVFormatIDKey: @(kAudioFormatAppleLossless),
                               AVNumberOfChannelsKey: @(2),
                               AVEncoderAudioQualityKey: @(AVAudioQualityMax)};
    NSError *error;
    _recorder = [[AVAudioRecorder alloc] initWithURL:url settings:settings error:&error];
    if (_recorder) {
        [_recorder prepareToRecord];
        _recorder.meteringEnabled = YES;
        [_recorder record];
        _levelTimer = [NSTimer scheduledTimerWithTimeInterval:1
                                                       target:self
                                                     selector:@selector(levelTimerCallback:)
                                                     userInfo:nil
                                                      repeats:YES];
    } else {
        NSLog(@"%@", [error description]);
    }
}

/// After voice input starts, a timer periodically samples the input level
- (void)levelTimerCallback:(NSTimer *)timer
{
    [_recorder updateMeters];

    float level;                // The linear 0.0 .. 1.0 value we need.
    float minDecibels = -80.0f; // Or use -60 dB, which I measured in a silent room.
    float decibels = [_recorder averagePowerForChannel:0];

    if (decibels < minDecibels) {
        level = 0.0f;
    } else if (decibels >= 0.0f) {
        level = 1.0f;
    } else {
        float root = 2.0f;
        float minAmp = powf(10.0f, 0.05f * minDecibels);
        float inverseAmpRange = 1.0f / (1.0f - minAmp);
        float amp = powf(10.0f, 0.05f * decibels);
        float adjAmp = (amp - minAmp) * inverseAmpRange;
        level = powf(adjAmp, 1.0f / root);
    }

    // level is in [0, 1]; scale it to [0, 120] and report it through the delegate
    if ([self.delegate respondsToSelector:@selector(level:)]) {
        [self.delegate level:120 * level];
    }
}

- (void)stopR
{
    [_levelTimer invalidate];
    [_recorder stop]; // also stop the metering recorder
    [[self.audioEngine inputNode] removeTapOnBus:0];
    [self.audioEngine stop];
    [self.recognitionRequest endAudio];
    self.recognitionRequest = nil;
}

@end
```
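As a quick sanity check of the metering math: with minDecibels = -80, minAmp = 10^(0.05 × -80) = 0.0001. At an input of -40 dB, amp = 10^(0.05 × -40) = 0.01, adjAmp ≈ (0.01 - 0.0001) / (1 - 0.0001) ≈ 0.0099, and level = sqrt(0.0099) ≈ 0.1, so the delegate receives roughly 120 × 0.1 ≈ 12.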
Switch the image dynamically according to the reported value, or skip the images entirely and draw the small bars next to the microphone yourself (a drawing sketch follows the code below).
```objc
- (void)level:(float)value
{
    if (value > 0 && value <= 10) {
        _voiceView.image = [UIImage imageNamed:@"v_1"];
    } else if (value > 10 && value <= 20) {
        _voiceView.image = [UIImage imageNamed:@"v_2"];
    } else if (value > 20 && value <= 25) {
        _voiceView.image = [UIImage imageNamed:@"v_3"];
    } else if (value > 25 && value <= 35) {
        _voiceView.image = [UIImage imageNamed:@"v_4"];
    } else if (value > 35 && value <= 45) {
        _voiceView.image = [UIImage imageNamed:@"v_5"];
    } else if (value > 45) {
        _voiceView.image = [UIImage imageNamed:@"v_6"];
    }
}
```
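If you prefer not to ship six images, the bars can also be drawn directly. A minimal sketch, assuming a custom view; the class name and bar geometry below are illustrative, not from the original project:

```objc
// A minimal sketch of drawing the level bars instead of swapping images.
// The class name and geometry are illustrative, not from the original project.
#import <UIKit/UIKit.h>

@interface PBVoiceLevelView : UIView
@property (nonatomic, assign) float level; // expected range 0 ~ 120, as delivered by level:
@end

@implementation PBVoiceLevelView

- (void)setLevel:(float)level
{
    _level = level;
    [self setNeedsDisplay];
}

- (void)drawRect:(CGRect)rect
{
    CGContextRef context = UIGraphicsGetCurrentContext();
    CGContextSetFillColorWithColor(context, [UIColor whiteColor].CGColor);

    // Draw up to 6 short horizontal bars; more bars light up as the level rises.
    NSInteger barCount = MIN(6, (NSInteger)(self.level / 20.0f) + 1);
    CGFloat barWidth = rect.size.width * 0.4f;
    CGFloat barHeight = 3.0f;
    CGFloat spacing = 6.0f;
    for (NSInteger i = 0; i < barCount; i++) {
        CGRect bar = CGRectMake(rect.size.width - barWidth,
                                rect.size.height - (i + 1) * (barHeight + spacing),
                                barWidth,
                                barHeight);
        CGContextFillRect(context, bar);
    }
}

@end
```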
Here is the long-press handler:
```objc
- (void)longPress:(UILongPressGestureRecognizer *)gestureRecognizer
{
    CGPoint point = [gestureRecognizer locationInView:self.view];
    if (gestureRecognizer.state == UIGestureRecognizerStateBegan) {
        [self startRecording];
    } else if (gestureRecognizer.state == UIGestureRecognizerStateEnded) {
        [self stopRecording];
    } else if (gestureRecognizer.state == UIGestureRecognizerStateChanged) {
        NSLog(@"y ========== %f", point.y);
        // When the finger slides up past a certain y value, cancel the speech
        // recognition; a simple threshold check is enough here.
        if (point.y < 513) {
            _cancel = @"yes";
            NSLog(@"voice cancel");
        }
    } else if (gestureRecognizer.state == UIGestureRecognizerStateFailed) {
    } else if (gestureRecognizer.state == UIGestureRecognizerStateCancelled) {
    }
}
```
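startRecording and stopRecording above are the controller's own helpers. A minimal sketch of how they might wire the PBSpeechRecognizer and the long-press gesture together; the property names and the record button below are assumptions, not from the original project:

```objc
// A minimal wiring sketch (property names and the record button are assumptions).
// The controller is assumed to conform to PBSpeechRecognizerProtocol and to
// implement level: as shown earlier.
- (void)viewDidLoad
{
    [super viewDidLoad];
    self.speechRecognizer = [[PBSpeechRecognizer alloc] init];
    self.speechRecognizer.delegate = self;

    UILongPressGestureRecognizer *longPress =
        [[UILongPressGestureRecognizer alloc] initWithTarget:self action:@selector(longPress:)];
    [self.recordButton addGestureRecognizer:longPress];
}

- (void)startRecording
{
    _cancel = nil;                  // reset the cancel flag for a fresh gesture
    [self.speechRecognizer startR]; // starts recognition and level metering
}

- (void)stopRecording
{
    [self.speechRecognizer stopR];
    if ([_cancel isEqualToString:@"yes"]) {
        // The finger slid up far enough: discard this recognition result.
        NSLog(@"recording cancelled by user");
    }
}

- (void)recognitionSuccess:(NSString *)result
{
    NSLog(@"recognized text: %@", result);
}
```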
That covers the code for implementing a WeChat-style voice input animation on iOS for a short video platform; more on this topic will follow in later articles.