SpeechAnalyzer
/// Asynchronous, throwing convenience initializer that creates a `SpeechAnalyzer`
/// reading from an audio file.
///
/// - Parameters:
///   - inputAudioFile: The `AVAudioFile` to analyze.
///   - modules: The speech modules (`any SpeechModule`) to run during analysis.
///   - options: Optional analyzer options; defaults to `nil`.
///   - analysisContext: Contextual information for the analysis; defaults to a fresh `AnalysisContext`.
///   - finishAfterFile: Whether analysis finishes once the file is consumed; defaults to `false`.
///   - volatileRangeChangedHandler: Optional callback invoked with the volatile `CMTimeRange`
///     and flags for whether its start and/or end changed. Marked `sending`, so the closure
///     is transferred across the isolation boundary (Swift 6 ownership spelling).
///     Defaults to `nil`.
/// - Throws: Errors raised while setting up the analyzer. (Specific error types are not
///   visible in this declaration — consult the framework documentation.)
public convenience init(inputAudioFile: AVFAudio.AVAudioFile, modules: [any Speech.SpeechModule], options: Speech.SpeechAnalyzer.Options? = nil, analysisContext: Speech.AnalysisContext = .init(), finishAfterFile: Swift.Bool = false, volatileRangeChangedHandler: sending ((_ range: CoreMedia.CMTimeRange, _ changedStart: Swift.Bool, _ changedEnd: Swift.Bool) -> Swift.Void)? = nil) async throws
SpeechAnalyzer
/// Variant of the same `SpeechAnalyzer` file-based convenience initializer with the
/// handler parameter spelled `__owned` instead of `sending` — the pre-Swift-6
/// ownership annotation. Parameters and behavior otherwise match the `sending`
/// declaration above it in this listing.
///
/// - Parameters:
///   - inputAudioFile: The `AVAudioFile` to analyze.
///   - modules: The speech modules (`any SpeechModule`) to run during analysis.
///   - options: Optional analyzer options; defaults to `nil`.
///   - analysisContext: Contextual information for the analysis; defaults to a fresh `AnalysisContext`.
///   - finishAfterFile: Whether analysis finishes once the file is consumed; defaults to `false`.
///   - volatileRangeChangedHandler: Optional `__owned` callback receiving the volatile
///     `CMTimeRange` and start/end change flags. Defaults to `nil`.
/// - Throws: Errors raised while setting up the analyzer. (Specific error types are not
///   visible in this declaration.)
public convenience init(inputAudioFile: AVFAudio.AVAudioFile, modules: [any Speech.SpeechModule], options: Speech.SpeechAnalyzer.Options? = nil, analysisContext: Speech.AnalysisContext = .init(), finishAfterFile: Swift.Bool = false, volatileRangeChangedHandler: __owned ((_ range: CoreMedia.CMTimeRange, _ changedStart: Swift.Bool, _ changedEnd: Swift.Bool) -> Swift.Void)? = nil) async throws
SFSpeechLanguageModelConfiguration
/// Designated-style initializer for `SFSpeechLanguageModelConfiguration`.
///
/// @param languageModel  Non-nullable file URL of the custom language model.
/// @param vocabulary     Optional file URL of a custom vocabulary; may be nil.
/// @param weight         Optional weighting value (`NSNumber`); may be nil.
///                       NOTE(review): the semantics/range of this weight are not
///                       visible in this declaration — confirm against the
///                       framework documentation.
/// @return A non-null configuration instance.
- (nonnull instancetype)initWithLanguageModel:(nonnull NSURL *)languageModel vocabulary:(NSURL * _Nullable)vocabulary weight:(NSNumber * _Nullable)weight;