API Differences in Speech in iOS 26.0

Finding out what changed between Apple SDK releases can be difficult. These pages are generated automatically and list the additions to the Speech framework in the iOS 26.0 SDK.

Protocols

LocaleDependentSpeechModule
SpeechModule
SpeechModuleResult
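
Only the protocol names appear in this diff, not their requirements. As a rough sketch of the intended shape, assuming a module such as SpeechTranscriber vends its output through a results async sequence whose elements carry the isFinal flag listed under Properties & Constants below (results and text are assumed names, not part of this diff):

import Foundation
import Speech

// Hypothetical sketch: collect only finalized results from a transcriber
// module. `results` and `text` are assumptions; only the protocol names and
// `isFinal` appear in this diff.
func collectFinalText(from transcriber: SpeechTranscriber) async throws -> [AttributedString] {
    var transcript: [AttributedString] = []
    for try await result in transcriber.results {
        if result.isFinal {
            transcript.append(result.text)
        }
    }
    return transcript
}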

Classes

AnalysisContext
AssetInstallationRequest
AssetInventory
DictationTranscriber
SpeechDetector
SpeechTranscriber
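
These classes form the new analysis pipeline: a SpeechAnalyzer drives modules such as SpeechTranscriber, DictationTranscriber, or SpeechDetector, while AssetInventory and AssetInstallationRequest manage the on-device model assets those modules depend on. A minimal wiring sketch, assuming a modules-based SpeechAnalyzer initializer and an AssetInventory.assetInstallationRequest(supporting:) method (neither appears in this diff):

import Speech

// Minimal sketch under assumed APIs: make sure the module's model assets are
// installed, then attach the module to an analyzer.
func makeAnalyzer(with transcriber: SpeechTranscriber) async throws -> SpeechAnalyzer {
    // Assumed: AssetInventory returns an installation request only when the
    // module's assets still need to be downloaded.
    if let request = try await AssetInventory.assetInstallationRequest(supporting: [transcriber]) {
        try await request.downloadAndInstall()
    }
    // Assumed modules-based initializer; this diff only lists the new
    // file-based convenience initializer (see Initializers below).
    return SpeechAnalyzer(modules: [transcriber])
}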

Structs

AnalyzerInput
Foundation.AttributeScopes
SpeechAttributes
SpeechAnalyzer
Options
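
AnalyzerInput wraps a chunk of audio for analysis, Options configures a SpeechAnalyzer session, and SpeechAttributes extends Foundation's attribute scopes with speech metadata such as audio time ranges. A rough sketch of feeding buffers to an analyzer, assuming an AnalyzerInput(buffer:) initializer and a start(inputSequence:) method (only the type names appear in this diff):

import AVFAudio
import Speech

// Hypothetical sketch: wrap PCM buffers in AnalyzerInput values and hand the
// resulting stream to an analyzer. AnalyzerInput(buffer:) and
// start(inputSequence:) are assumptions, not part of this diff.
func analyze(buffers: [AVAudioPCMBuffer], with analyzer: SpeechAnalyzer) async throws {
    let (inputSequence, inputBuilder) = AsyncStream.makeStream(of: AnalyzerInput.self)
    for buffer in buffers {
        inputBuilder.yield(AnalyzerInput(buffer: buffer))
    }
    inputBuilder.finish()
    try await analyzer.start(inputSequence: inputSequence)
}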

Enums

SpeechModels

Typealiases

ReportingOption
public typealias AllCases = [Speech.DictationTranscriber.ReportingOption]
ReportingOption
public typealias AllCases = [Speech.SpeechTranscriber.ReportingOption]
ResultAttributeOption
public typealias AllCases = [Speech.DictationTranscriber.ResultAttributeOption]
ResultAttributeOption
public typealias AllCases = [Speech.SpeechTranscriber.ResultAttributeOption]
SensitivityLevel
public typealias AllCases = [Speech.SpeechDetector.SensitivityLevel]
SensitivityLevel
public typealias RawValue = Swift.Int
TranscriptionOption
public typealias AllCases = [Speech.DictationTranscriber.TranscriptionOption]
TranscriptionOption
public typealias AllCases = [Speech.SpeechTranscriber.TranscriptionOption]
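
The AllCases aliases mean the option and sensitivity types are CaseIterable, and the RawValue alias backs SensitivityLevel with an Int. A configuration sketch, assuming a SpeechTranscriber initializer that takes the three option collections and case names such as volatileResults and audioTimeRange (none of which appear in this diff):

import Foundation
import Speech

// Because the option types are CaseIterable, their cases can be enumerated.
let everyReportingOption = SpeechTranscriber.ReportingOption.allCases

// Hypothetical configuration; the initializer and the specific case names
// are assumptions made for illustration.
let transcriber = SpeechTranscriber(
    locale: Locale(identifier: "en_US"),
    transcriptionOptions: [],
    reportingOptions: [.volatileResults],
    attributeOptions: [.audioTimeRange]
)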

Functions & Methods

Foundation.AttributedString
public func rangeOfAudioTimeRangeAttributes(intersecting timeRange: CoreMedia.CMTimeRange) -> Swift.Range<Foundation.AttributedString.Index>?
SpeechAnalyzer
final public func analyzeSequence(from audioFile: AVFAudio.AVAudioFile) async throws -> CoreMedia.CMTime?
SpeechAnalyzer
final public func start(inputAudioFile audioFile: AVFAudio.AVAudioFile, finishAfterFile: Swift.Bool = false) async throws
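
The two SpeechAnalyzer methods let an analyzer consume an AVAudioFile directly, and the AttributedString method locates the part of a transcript whose audio time-range attributes intersect a given time range. A short sketch using the signatures above (the analyzer is assumed to have been configured with modules beforehand):

import AVFAudio
import CoreMedia
import Foundation
import Speech

// Analyze an entire audio file; analyzeSequence(from:) returns the last
// audio timecode that was consumed, if any.
func transcribe(file: AVAudioFile, with analyzer: SpeechAnalyzer) async throws {
    let lastTime = try await analyzer.analyzeSequence(from: file)
    print("Analyzed audio up to", lastTime ?? .zero)
}

// Or start autonomous analysis of the file, finishing once it has been read.
func startTranscribing(file: AVAudioFile, with analyzer: SpeechAnalyzer) async throws {
    try await analyzer.start(inputAudioFile: file, finishAfterFile: true)
}

// Find the portion of a transcript that overlaps the first five seconds.
func openingRange(of transcript: AttributedString) -> Range<AttributedString.Index>? {
    let firstFiveSeconds = CMTimeRange(start: .zero, duration: CMTime(seconds: 5, preferredTimescale: 600))
    return transcript.rangeOfAudioTimeRangeAttributes(intersecting: firstFiveSeconds)
}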

Properties & Constants

SFSpeechError.Code
public static var assetLocaleNotAllocated: Speech.SFSpeechError.Code
SFSpeechError.Code
public static var audioDisordered: Speech.SFSpeechError.Code
SFSpeechError.Code
public static var cannotAllocateUnsupportedLocale: Speech.SFSpeechError.Code
SFSpeechError.Code
public static var incompatibleAudioFormats: Speech.SFSpeechError.Code
SFSpeechError.Code
public static var insufficientResources: Speech.SFSpeechError.Code
SFSpeechError.Code
public static var moduleOutputFailed: Speech.SFSpeechError.Code
SFSpeechError.Code
public static var noModel: Speech.SFSpeechError.Code
SFSpeechError.Code
public static var tooManyAssetLocalesAllocated: Speech.SFSpeechError.Code
SFSpeechError.Code
public static var unexpectedAudioFormat: Speech.SFSpeechError.Code
SpeechModuleResult
public var isFinal: Swift.Bool
@_semantics("defaultActor") nonisolated final public var unownedExecutor: _Concurrency.UnownedSerialExecutor
SFSpeechLanguageModelConfiguration
@property(nonatomic, copy, readonly, nullable) NSNumber *weight;
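
The new SFSpeechError codes cover asset allocation, audio-format, and module failures. A sketch of inspecting them after a failed call, assuming SFSpeechError follows the usual bridged-NSError pattern with a code property (only the Code constants themselves appear in this diff):

import Speech

// Sketch: map the new error codes to rough diagnostic messages.
func describe(_ error: Error) -> String {
    guard let speechError = error as? SFSpeechError else {
        return "Not a Speech error: \(error)"
    }
    switch speechError.code {
    case .noModel:
        return "No speech model is available for the requested locale."
    case .cannotAllocateUnsupportedLocale, .assetLocaleNotAllocated, .tooManyAssetLocalesAllocated:
        return "Model assets for this locale could not be allocated."
    case .incompatibleAudioFormats, .unexpectedAudioFormat, .audioDisordered:
        return "The supplied audio could not be processed."
    case .insufficientResources, .moduleOutputFailed:
        return "Analysis could not be completed on this device."
    default:
        return "Speech error: \(speechError.localizedDescription)"
    }
}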

Conformances

AssetInventory.Status
Swift.Hashable
SpeechDetector.SensitivityLevel
Swift.RawRepresentable
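
Combined with the RawValue alias above, the RawRepresentable conformance lets a sensitivity level round-trip through a stored integer, and Hashable lets AssetInventory.Status act as a set member or dictionary key. For example (the raw value 1 is arbitrary):

import Speech

// SensitivityLevel(rawValue:) returns nil for integers that do not map to a
// known level.
let restoredLevel = SpeechDetector.SensitivityLevel(rawValue: 1)

// Status values can now be used as dictionary keys; the cases themselves are
// not part of this diff.
var statusCounts: [AssetInventory.Status: Int] = [:]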

Initializers

SpeechAnalyzer
public convenience init(inputAudioFile: AVFAudio.AVAudioFile, modules: [any Speech.SpeechModule], options: Speech.SpeechAnalyzer.Options? = nil, analysisContext: Speech.AnalysisContext = .init(), finishAfterFile: Swift.Bool = false, volatileRangeChangedHandler: sending ((_ range: CoreMedia.CMTimeRange, _ changedStart: Swift.Bool, _ changedEnd: Swift.Bool) -> Swift.Void)? = nil) async throws
SpeechAnalyzer
public convenience init(inputAudioFile: AVFAudio.AVAudioFile, modules: [any Speech.SpeechModule], options: Speech.SpeechAnalyzer.Options? = nil, analysisContext: Speech.AnalysisContext = .init(), finishAfterFile: Swift.Bool = false, volatileRangeChangedHandler: __owned ((_ range: CoreMedia.CMTimeRange, _ changedStart: Swift.Bool, _ changedEnd: Swift.Bool) -> Swift.Void)? = nil) async throws
SFSpeechLanguageModelConfiguration
- (nonnull instancetype)initWithLanguageModel:(nonnull NSURL *)languageModel vocabulary:(NSURL * _Nullable)vocabulary weight:(NSNumber * _Nullable)weight;
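
The SpeechAnalyzer convenience initializer builds a file-based session in one step, with an optional handler that reports changes to the volatile (not yet finalized) audio time range. Below is a sketch using that Swift signature with an already-configured transcriber module, followed by the assumed Swift bridging of the new Objective-C SFSpeechLanguageModelConfiguration initializer (the model path is a placeholder):

import AVFAudio
import Foundation
import Speech

// Sketch: create a file-based analyzer; options and analysisContext keep
// their default values from the signature above.
func makeFileAnalyzer(for url: URL, transcriber: SpeechTranscriber) async throws -> SpeechAnalyzer {
    let audioFile = try AVAudioFile(forReading: url)
    return try await SpeechAnalyzer(
        inputAudioFile: audioFile,
        modules: [transcriber],
        finishAfterFile: true,
        volatileRangeChangedHandler: { range, _, _ in
            print("Volatile range is now \(range)")
        }
    )
}

// Assumed Swift bridging of -initWithLanguageModel:vocabulary:weight:; the
// file URL is a placeholder, and weight biases the custom language model.
let configuration = SFSpeechLanguageModelConfiguration(
    languageModel: URL(fileURLWithPath: "/path/to/model.bin"),
    vocabulary: nil,
    weight: NSNumber(value: 0.8)
)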