
Speech iOS xcode15.0 b1


# Speech.framework

https://github.com/xamarin/xamarin-macios/pull/19075

```diff
diff -ruN /Applications/Xcode_14.3.1.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFAnalysisContext_Private.h /Applications/Xcode_15.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFAnalysisContext_Private.h
--- /Applications/Xcode_14.3.1.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFAnalysisContext_Private.h	1969-12-31 19:00:00
+++ /Applications/Xcode_15.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFAnalysisContext_Private.h	2023-05-19 21:38:42
@@ -0,0 +1,66 @@
+//
+//  SFAnalysisContext_Private.h
+//  Speech
+//
+//  Copyright © 2023 Apple Inc. All rights reserved.
+//
+
+#import <Foundation/Foundation.h>
+
+NS_ASSUME_NONNULL_BEGIN
+
+@class AnalysisContext;
+
+extern NSString *const SFAnalysisContextTagLeftContext;
+extern NSString *const SFAnalysisContextTagRightContext;
+extern NSString *const SFAnalysisContextTagSelectedText;
+extern NSString *const SFAnalysisContextTagGeoLMRegionID;
+extern NSString *const SFAnalysisContextTagContextualNamedEntities;
+/*
+ */
+
+NS_SWIFT_SENDABLE
+API_AVAILABLE(macos(14), ios(17), macCatalyst(17))
+@interface _SFContextualNamedEntity : NSObject
+// Wrapper around ContextualNamedEntity.
+
+- (instancetype)initWithPersonalizationPortraitName:(NSString *)name
+                                              score:(double)score
+                                           category:(NSUInteger)category
+                                           language:(NSString *)language;
+
+- (instancetype)initWithPeopleSuggesterRecipientDisplayName:(NSString *)displayName;
+
+- (instancetype)init NS_UNAVAILABLE;
+@end
+
+
+API_AVAILABLE(macos(14), ios(17), macCatalyst(17))
+@interface _SFAnalysisContext : NSObject
+// Wrapper around AnalysisContext.
+
+- (instancetype)initWithAnalysisContext:(AnalysisContext *)analysisContext;
+
+- (instancetype)init NS_UNAVAILABLE;
+
+/// Returns nil if the key is not found.
+- (NSArray<NSString *> *_Nullable)contextualStringsForKey:(NSString *)key;
+/// @param contextualStrings Pass nil to remove the key and associated value.
+- (void)setContextualStrings:(NSArray<NSString *>*_Nullable)contextualStrings forKey:(NSString *)key;
+
+/// Returns nil if the key is not found.
+- (id _Nullable)userDataForKey:(NSString *)key;
+/**
+    The user data should be `Sendable`. In particular, it should be a property list type, that is,
+    a type returned by `StringProtocol.propertyList()`.
+    @param userData Pass nil to remove the key and associated value.
+ */
+///
+- (void)setUserData:(id _Nullable)userData forKey:(NSString *)key;
+
+@property (nonatomic, copy, nullable) NSString *geoLMRegionID;
+@property (nonatomic, copy, nullable) NSArray<_SFContextualNamedEntity *> *contextualNamedEntities;
+
+@end
+
+NS_ASSUME_NONNULL_END
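```

The context object behaves like a keyed store. A minimal Objective-C sketch of how the contextual-string accessors might be exercised (private API; obtaining the `_SFAnalysisContext` instance, e.g. via `-[_SFSpeechAnalyzer getContextWithCompletion:]`, is assumed and not shown):

```objc
#import <Speech/Speech.h>

// Hypothetical usage of a private API.
static void ConfigureContext(_SFAnalysisContext *context) {
    // Attach strings under one of the predefined tags.
    [context setContextualStrings:@[@"draft reply", @"meeting notes"]
                           forKey:SFAnalysisContextTagLeftContext];

    // Passing nil removes the key and its associated value, per the header.
    [context setContextualStrings:nil forKey:SFAnalysisContextTagSelectedText];

    // Returns nil if the key is not found.
    NSArray<NSString *> *strings =
        [context contextualStringsForKey:SFAnalysisContextTagLeftContext];
    NSLog(@"left context: %@", strings);
}
```

```diff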
diff -ruN /Applications/Xcode_14.3.1.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFCommandRecognizerArgumentPresence.h /Applications/Xcode_15.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFCommandRecognizerArgumentPresence.h
--- /Applications/Xcode_14.3.1.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFCommandRecognizerArgumentPresence.h	1969-12-31 19:00:00
+++ /Applications/Xcode_15.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFCommandRecognizerArgumentPresence.h	2023-05-19 21:38:42
@@ -0,0 +1,17 @@
+//
+//  SFCommandRecognizerArgumentPresence.h
+//  Speech
+//
+//  Copyright © 2023 Apple Inc. All rights reserved.
+//
+
+#import <Foundation/Foundation.h>
+#import <stdint.h>
+
+NS_SWIFT_SENDABLE
+typedef NS_ENUM(int8_t, SFCommandRecognizerArgumentPresence) {
+    SFCommandRecognizerArgumentPresencePresentAndDelimited = 0,
+    SFCommandRecognizerArgumentPresencePresentMaybeIncomplete = 1,
+    SFCommandRecognizerArgumentPresenceMissingMaybeExpected = 2,
+    SFCommandRecognizerArgumentPresenceMissing = 3
+} API_AVAILABLE(macos(14), ios(17), macCatalyst(17));
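```

The enum distinguishes how a recognized command's argument appeared in the utterance. A small sketch mapping each case to a debug string (the helper function is hypothetical; the values come from the header above):

```objc
#import <Speech/Speech.h>

static NSString *PresenceDescription(SFCommandRecognizerArgumentPresence presence) {
    switch (presence) {
        case SFCommandRecognizerArgumentPresencePresentAndDelimited:
            return @"present and delimited";
        case SFCommandRecognizerArgumentPresencePresentMaybeIncomplete:
            return @"present, maybe incomplete";
        case SFCommandRecognizerArgumentPresenceMissingMaybeExpected:
            return @"missing, maybe expected";
        case SFCommandRecognizerArgumentPresenceMissing:
            return @"missing";
    }
    return @"unknown";
}
```

```diff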
diff -ruN /Applications/Xcode_14.3.1.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFEARResultType_Private.h /Applications/Xcode_15.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFEARResultType_Private.h
--- /Applications/Xcode_14.3.1.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFEARResultType_Private.h	1969-12-31 19:00:00
+++ /Applications/Xcode_15.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFEARResultType_Private.h	2023-05-19 21:38:41
@@ -0,0 +1,27 @@
+//
+//  SFEARResultType_Private.h
+//  Speech
+//
+//  Copyright © 2023 Apple Inc. All rights reserved.
+//
+
+#import <Foundation/Foundation.h>
+
+NS_SWIFT_SENDABLE
+typedef NS_ENUM(NSUInteger, _SFEARResultType) {
+    /// An EAR partial (or preliminary) result, without alternatives or confidences.
+    _SFEARResultTypePartial = 0,
+
+    /// A result candidate for an incomplete utterance.
+    _SFEARResultTypeCandidate = 1,
+
+    /// The final (generally non-volatile) result for a span of audio or utterance, but not the final (terminal) result of the recognition. Accompanied by a change to the transcriber's volatile range. (The EAR package's `isFinal` flag is false.)
+    _SFEARResultTypeFinal = 2,
+
+    /// The final, non-volatile and terminal, result for the recognition. Accompanied by a change to the transcriber's volatile range. (The EAR package's `isFinal` flag is true.)
+    _SFEARResultTypeFinalAndTerminal = 3,
+    
+    /// An otherwise-empty result indicating that recognition has paused.
+    _SFEARResultTypePauseConfirmation = 4
+
+} API_AVAILABLE(macos(14), ios(17), macCatalyst(17));
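```

The doc comments distinguish a result that is final for a span of audio from one that terminates the whole recognition. Hypothetical helpers a consumer might use to branch on that distinction:

```objc
#import <Speech/Speech.h>

static BOOL ResultEndsRecognition(_SFEARResultType type) {
    // Only FinalAndTerminal ends the recognition; Final merely closes a span.
    return type == _SFEARResultTypeFinalAndTerminal;
}

static BOOL ResultMayStillChange(_SFEARResultType type) {
    // Partials and candidates are volatile; finals generally are not.
    return type == _SFEARResultTypePartial || type == _SFEARResultTypeCandidate;
}
```

```diff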
diff -ruN /Applications/Xcode_14.3.1.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFErrors.h /Applications/Xcode_15.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFErrors.h
--- /Applications/Xcode_14.3.1.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFErrors.h	1969-12-31 19:00:00
+++ /Applications/Xcode_15.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFErrors.h	2023-05-19 21:38:40
@@ -0,0 +1,52 @@
+//
+//  SFErrors.h
+//  SpeechRecognition
+//
+//  Created by Donovan Voss on 10/25/22.
+//  Copyright © 2022 Apple, Inc. All rights reserved.
+//
+
+#import <Foundation/Foundation.h>
+
+NS_ASSUME_NONNULL_BEGIN
+
+extern NSErrorDomain const SFSpeechErrorDomain;
+typedef NS_ERROR_ENUM (SFSpeechErrorDomain, SFSpeechErrorCode) {
+    /** Error may include `NSUnderlyingErrorKey` in `userInfo`.*/
+    SFSpeechErrorCodeInternalServiceError,
+
+    /** Audio input timestamp overlaps or precedes prior audio input. */
+    SFSpeechErrorCodeAudioDisordered,
+    
+    /**
+     Audio input is in unexpected format.
+     
+     The back end may be able to cope by loading matching models on demand, but this is still an error the client really ought to resolve on its end.
+     */
+    SFSpeechErrorCodeUnexpectedAudioFormat,
+    
+    /** Selected locale/options does not have an appropriate model available or downloadable. */
+    SFSpeechErrorCodeNoModel,
+    
+    /** The selected modules do not have an audio format in common. */
+    SFSpeechErrorCodeIncompatibleAudioFormats,
+  
+    /** Querying the SpeechRecognizerWorker's JIT LME profile failed. */
+    SFSpeechErrorCodeInvalidJitProfile,
+    
+    // MARK: CustomLM data related errors
+    /** templates were malformed **/
+    SFSpeechErrorCodeUndefinedTemplateClassName,
+
+    /** A custom language model file was malformed **/
+    SFSpeechErrorCodeMalformedSupplementalModel,
+    
+    /** An abstract base class method was called **/
+    SFSpeechErrorCodeUnimplementedFunctionality,
+    
+    // MARK: Objective-C output wrapper related errors
+    /** Module's result task failed  **/
+    SFSpeechErrorCodeModuleOutputFailed
+};
+
+NS_ASSUME_NONNULL_END
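```

A sketch of how a client might branch on the new error domain in a completion handler (the handler itself is hypothetical; the domain and codes are from the header above):

```objc
#import <Speech/Speech.h>

static void HandleSpeechError(NSError *_Nullable error) {
    if (error == nil || ![error.domain isEqualToString:SFSpeechErrorDomain])
        return;
    switch ((SFSpeechErrorCode)error.code) {
        case SFSpeechErrorCodeInternalServiceError:
            // May carry NSUnderlyingErrorKey in userInfo, per the header.
            NSLog(@"internal error: %@", error.userInfo[NSUnderlyingErrorKey]);
            break;
        case SFSpeechErrorCodeNoModel:
            NSLog(@"no model available or downloadable for this locale/options");
            break;
        default:
            NSLog(@"speech error %ld: %@", (long)error.code, error);
            break;
    }
}
```

```diff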
diff -ruN /Applications/Xcode_14.3.1.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFInputSequencer_Private.h /Applications/Xcode_15.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFInputSequencer_Private.h
--- /Applications/Xcode_14.3.1.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFInputSequencer_Private.h	1969-12-31 19:00:00
+++ /Applications/Xcode_15.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFInputSequencer_Private.h	2023-05-19 21:38:41
@@ -0,0 +1,20 @@
+//
+//  SFInputSequencer_Private.h
+//  Speech
+//
+//  Created by Festus Ojo on 1/10/23.
+//  Copyright © 2023 Apple Inc. All rights reserved.
+//
+
+#import <Foundation/Foundation.h>
+#import <AVFoundation/AVFoundation.h>
+
+@class SpeechAnalyzerClientInputSequencerInternal;
+
+@interface _SFInputSequencer : NSObject
+-(instancetype _Nonnull)init;
+-(void)addAudio:(AVAudioPCMBuffer *_Nonnull)audioBuffer;
+-(void)finishAudio;
+
+@property (nonatomic, readonly, strong, nonnull) SpeechAnalyzerClientInputSequencerInternal *underlyingObject;
+@end
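```

The sequencer is a push-style audio input. A minimal sketch, assuming the `AVAudioPCMBuffer` values come from elsewhere (e.g. an `AVAudioEngine` input-node tap):

```objc
#import <AVFoundation/AVFoundation.h>
#import <Speech/Speech.h>

static void FeedAudio(_SFInputSequencer *sequencer,
                      AVAudioPCMBuffer *buffer,
                      BOOL isLastBuffer) {
    [sequencer addAudio:buffer];  // append the next buffer in stream order
    if (isLastBuffer)
        [sequencer finishAudio];  // no more audio will be supplied
}
```

```diff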
diff -ruN /Applications/Xcode_14.3.1.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechAnalyzerOutputWrapper_Private.h /Applications/Xcode_15.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechAnalyzerOutputWrapper_Private.h
--- /Applications/Xcode_14.3.1.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechAnalyzerOutputWrapper_Private.h	1969-12-31 19:00:00
+++ /Applications/Xcode_15.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechAnalyzerOutputWrapper_Private.h	2023-05-19 21:38:40
@@ -0,0 +1,13 @@
+//
+//  SFSpeechAnalyzerOutputWrapper_Private.h
+//  Speech
+//
+//  Created by Festus Ojo on 12/22/22.
+//  Copyright © 2022 Apple Inc. All rights reserved.
+//
+
+#import <Foundation/Foundation.h>
+
+NS_ASSUME_NONNULL_BEGIN
+
+NS_ASSUME_NONNULL_END
diff -ruN /Applications/Xcode_14.3.1.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechAnalyzer_Private.h /Applications/Xcode_15.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechAnalyzer_Private.h
--- /Applications/Xcode_14.3.1.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechAnalyzer_Private.h	1969-12-31 19:00:00
+++ /Applications/Xcode_15.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechAnalyzer_Private.h	2023-05-19 21:38:41
@@ -0,0 +1,408 @@
+//
+//  SFSpeechAnalyzer_Private.h
+//  Speech
+//
+//  Copyright © 2023 Apple Inc. All rights reserved.
+//
+
+#import <Foundation/Foundation.h>
+
+#import <AVFoundation/AVFoundation.h>
+#import <CoreMedia/CoreMedia.h>
+#import <Speech/SFCommandRecognizerArgumentPresence.h>
+#import <Speech/SFEARResultType_Private.h>
+#import <Speech/SFSpeechAnalyzerOutputWrapper_Private.h>
+#import <Speech/SFSpeechRecognitionTaskHint.h>
+#import <Speech/SFTranscriptionOptions_Private.h>
+#import <Speech/SFTranscriptionResultAttributeOptions_Private.h>
+
+NS_ASSUME_NONNULL_BEGIN
+
+@class EARVoiceCommandActiveSet;
+@class _SFAnalysisContext;
+@class _SFContextualNamedEntity;
+@class _SFAnalyzerTranscriptionSegment;
+@class _SFCommandRecognizerArgument;
+@class _SFCommandRecognizerInterpretation;
+@class _STCommandRecognizerResult;
+@class _SFEndpointingResult;
+@class _SFInputSequencer;
+@class _SFModelDownloadRequest;
+@class _SFSpeechAnalyzerCommandRecognizerOptions;
+@class _SFSpeechAnalyzerOptions;
+@class _SFSpeechAnalyzerOptionsLoggingInfo;
+@class _SFSpeechAnalyzerOptionsPowerContext;
+@class _SFSpeechAnalyzerTranscriberOptions;
+@class _SFTranscriberModelOptions;
+@class _STTranscriberMultisegmentResult;
+@class _SFTranscriberResult;
+@class _SFToken;
+@protocol _SFSpeechAnalyzerTranscriberResultDelegate;
+@protocol _SFSpeechAnalyzerEndpointingResultDelegate;
+
+
+API_AVAILABLE(macos(14), ios(17), macCatalyst(17))
+@interface _SFSpeechAnalyzer : NSObject
+
++ (nullable _SFModelDownloadRequest *)modelDownloadRequestForClientIdentifier:(NSString *)clientIdentifier
+        transcriberOptions:(_SFSpeechAnalyzerTranscriberOptions *)transcriberOptions;
+
+/**
+    @param transcriberOptions `transcriptionOptions` should not contain
+    `_SFTranscriptionOptionsNormalizedTranscription` or `_SFTranscriptionOptionsContextualizedTranscription`.
+    Both types of output will be provided.
+ */
+- (instancetype)initWithClientIdentifier:(NSString *)clientIdentifier
+        inputSequence:(_SFInputSequencer *)inputSequence
+        audioFormat:(AVAudioFormat *)audioFormat
+        transcriberResultDelegate:(id<_SFSpeechAnalyzerTranscriberResultDelegate>)transcriberResultDelegate
+        endpointingResultDelegate:(id<_SFSpeechAnalyzerEndpointingResultDelegate>)endpointingResultDelegate
+        queue:(NSOperationQueue *)queue
+        transcriberOptions:(_SFSpeechAnalyzerTranscriberOptions *)transcriberOptions
+        commandRecognizerOptions:(_SFSpeechAnalyzerCommandRecognizerOptions *)commandRecognizerOptions
+        options:(_SFSpeechAnalyzerOptions *_Nullable)options
+        restrictedLogging:(BOOL)restrictedLogging
+        geoLMRegionID:(NSString *_Nullable)geoLMRegionID
+        contextualNamedEntities:(NSArray <_SFContextualNamedEntity *> *_Nullable)contextualNamedEntities
+        didChangeVolatileRange:(void (^ _Nullable)(CMTimeRange, BOOL, BOOL))didChangeVolatileRange;
+
+// TODO: Remove once Core Speech updated to use initializer with contextualNamedEntities and w/o personalizedLMPath.
+- (instancetype)initWithClientIdentifier:(NSString *)clientIdentifier
+        inputSequence:(_SFInputSequencer *)inputSequence
+        audioFormat:(AVAudioFormat *)audioFormat
+        transcriberResultDelegate:(id<_SFSpeechAnalyzerTranscriberResultDelegate>)transcriberResultDelegate
+        endpointingResultDelegate:(id<_SFSpeechAnalyzerEndpointingResultDelegate>)endpointingResultDelegate
+        queue:(NSOperationQueue *)queue
+        transcriberOptions:(_SFSpeechAnalyzerTranscriberOptions *)transcriberOptions
+        commandRecognizerOptions:(_SFSpeechAnalyzerCommandRecognizerOptions *)commandRecognizerOptions
+        options:(_SFSpeechAnalyzerOptions *_Nullable)options
+        restrictedLogging:(BOOL)restrictedLogging
+        geoLMRegionID:(NSString *_Nullable)geoLMRegionID
+        personalizedLMPath:(NSString *_Nullable)personalizedLMPath
+        didChangeVolatileRange:(void (^ _Nullable)(CMTimeRange, BOOL, BOOL))didChangeVolatileRange;
+
+// TODO: Remove once Core Speech updated to use initializer w/o personalizedLMPath.
+- (instancetype)initWithClientIdentifier:(NSString *)clientIdentifier
+        inputSequence:(_SFInputSequencer *)inputSequence
+        audioFormat:(AVAudioFormat *)audioFormat
+        transcriberResultDelegate:(id<_SFSpeechAnalyzerTranscriberResultDelegate>)transcriberResultDelegate
+        endpointingResultDelegate:(id<_SFSpeechAnalyzerEndpointingResultDelegate>)endpointingResultDelegate
+        queue:(NSOperationQueue *)queue
+        transcriberOptions:(_SFSpeechAnalyzerTranscriberOptions *)transcriberOptions
+        commandRecognizerOptions:(_SFSpeechAnalyzerCommandRecognizerOptions *)commandRecognizerOptions
+        options:(_SFSpeechAnalyzerOptions *_Nullable)options
+        restrictedLogging:(BOOL)restrictedLogging
+        geoLMRegionID:(NSString *_Nullable)geoLMRegionID
+        contextualNamedEntities:(NSArray <_SFContextualNamedEntity *> *_Nullable)contextualNamedEntities
+        personalizedLMPath:(NSString *_Nullable)personalizedLMPath
+        didChangeVolatileRange:(void (^ _Nullable)(CMTimeRange, BOOL, BOOL))didChangeVolatileRange;
+
+- (instancetype)init NS_UNAVAILABLE;
+
+// The input sequence cannot be changed after the analyzer is created.
+
+@property (readonly) _SFInputSequencer *inputSequence;
+
+// Does not include bestAudioFormat and bestAvailableAudioFormat, because ESConnection
+// expects to be able to use 8k and 16k sampling rates.
+
+// No getter for volatileRange.
+
+// TODO: inputAudioFormat getter?
+
+- (void)cancelInputTask;
+/// The time value will be invalid if there is no start time. Use `CMTIME_IS_INVALID` to check.
+- (void)getNextBufferStartTimeWithCompletion:(void (^)(CMTime))completion;
+
+- (void)setDidChangeVolatileRange:(void (^ _Nullable)(CMTimeRange, BOOL, BOOL))handler completion:(void (^)(void))completion;
+
+- (void)cancelPendingResultsAndPauseWithCompletion:(void (^)(NSError *_Nullable))completion;
+
+- (void)resumeWithCompletion:(void (^)(NSError *_Nullable))completion;
+
+- (void)finalizeWithCompletion:(void (^)(NSError *_Nullable))completion;
+
+/// @param time If invalid, it's the same as omitting it.
+- (void)finalizeThrough:(CMTime)time completion:(void (^)(NSError *_Nullable))completion;
+
+- (void)finalizeAndFinishWithCompletion:(void (^)(NSError *_Nullable))completion;
+
+/// @param time If invalid, it's the same as omitting it.
+- (void)finalizeAndFinishThrough:(CMTime)time completion:(void (^)(NSError *_Nullable))completion;
+
+- (void)finalizeAndFinishThroughEndOfInputWithCompletion:(void (^)(NSError *_Nullable))completion;
+
+/// @param times An array of CMTime values.
+- (void)requestResultAtEndpointTimes:(NSArray<NSValue *> *)times;
+
+- (void)getModelInfoTasksWithCompletion:(void (^)(NSSet<NSString *> *))completion;
+- (void)getModelInfoLanguageWithCompletion:(void (^)(NSString *))completion;
+- (void)getRecognitionStatisticsWithCompletion:(void (^)(NSDictionary *))completion;
+- (void)getRecognitionUtterenceStatisticsWithCompletion:(void (^)(NSDictionary *))completion;
+
+/**
+    Each time this is called, a new object is returned. However, different `_SFAnalysisContext` objects
+    may or may not refer to the same `AnalysisContext` object, and there's no way to tell. This will be fixed later.
+*/
+- (void)getContextWithCompletion:(void (^)(_SFAnalysisContext *))completion;
+
+- (void)prepareToAnalyzeReportingInto:(NSProgress *_Nullable)progress
+        completion:(void (^)(NSError *_Nullable))completion;
+
+@end
+
+
+API_AVAILABLE(macos(14), ios(17), macCatalyst(17))
+NS_SWIFT_SENDABLE
+@protocol _SFSpeechAnalyzerTranscriberResultDelegate
+- (void)speechAnalyzer:(_SFSpeechAnalyzer *)speechAnalyzer
+        didProduceTranscriberResult:(_SFTranscriberResult *)transcriberResult;
+- (void)speechAnalyzer:(_SFSpeechAnalyzer *)speechAnalyzer
+        didStopTranscriptionWithError:(NSError *)error;
+// TODO: Can remove @optional once Core Speech updated to implement delegate.
+@optional
+- (void)speechAnalyzerDidProduceAllTranscriberResults:(_SFSpeechAnalyzer *)speechAnalyzer;
+@end
+
+
+API_AVAILABLE(macos(14), ios(17), macCatalyst(17))
+NS_SWIFT_SENDABLE
+@protocol _SFSpeechAnalyzerEndpointingResultDelegate
+- (void)speechAnalyzer:(_SFSpeechAnalyzer *)speechAnalyzer
+        didProduceEndpointingResult:(_SFEndpointingResult *)endpointingResult;
+- (void)speechAnalyzer:(_SFSpeechAnalyzer *)speechAnalyzer
+        didStopEndpointingWithError:(NSError *)error;
+@end
+
+
+API_AVAILABLE(macos(14), ios(17), macCatalyst(17))
+@interface _SFModelDownloadRequest : NSObject
+// Wrapper for ModelDownloadRequest.
+
+@property (nonatomic, readonly) NSProgress *progress;
+
+- (void)downloadWithCompletion:(void (^)(NSError *_Nullable))completion;
+
+@end
+
+
+API_AVAILABLE(macos(14), ios(17), macCatalyst(17))
+@interface _SFSpeechAnalyzerTranscriberOptions : NSObject
+
+@property (nonatomic, copy) NSLocale *locale;
+@property (nonatomic) SFSpeechRecognitionTaskHint taskHint;
+@property (nonatomic, copy, nullable) _SFTranscriberModelOptions *modelOptions;
+@property (nonatomic) _SFTranscriptionOptions transcriptionOptions;
+@property (nonatomic) _SFTranscriptionResultAttributeOptions attributeOptions;
+
+// TODO: add reportingOptions?
+
+- (instancetype)init;
+
+@end
+
+
+API_AVAILABLE(macos(14), ios(17), macCatalyst(17))
+@interface _SFSpeechAnalyzerCommandRecognizerOptions : NSObject
+
+@property (nonatomic, copy) EARVoiceCommandActiveSet *voiceCommandActiveSet;
+
+- (instancetype)initWithVoiceCommandActiveSet:(EARVoiceCommandActiveSet *)voiceCommandActiveSet;
+
+@end
+
+
+API_AVAILABLE(macos(14), ios(17), macCatalyst(17))
+@interface _SFSpeechAnalyzerOptions : NSObject<NSCopying>
+// corresponds to AnalysisOptions
+
+// This is similar to _SFAnalysisOptions, but that's used for XPC, and is not a part of the API.
+
+@property (nonatomic, readonly) BOOL highPriority;
+@property (nonatomic, readonly, copy, nullable) _SFSpeechAnalyzerOptionsLoggingInfo *loggingInfo;
+@property (nonatomic, readonly, copy, nullable) _SFSpeechAnalyzerOptionsPowerContext *powerContext;
+
+- (instancetype)initWithHighPriority:(BOOL)highPriority
+        loggingInfo:(_SFSpeechAnalyzerOptionsLoggingInfo *_Nullable)loggingInfo
+        powerContext:(_SFSpeechAnalyzerOptionsPowerContext *_Nullable)powerContext;
+
+@end
+
+API_AVAILABLE(macos(14), ios(17), macCatalyst(17))
+@interface _SFSpeechAnalyzerOptionsLoggingInfo : NSObject<NSCopying>
+
+@property (nonatomic, readonly, copy) NSUUID *asrID;
+@property (nonatomic, readonly, copy) NSUUID *requestID;
+
+- (instancetype)initWithAsrID:(NSUUID *)asrID
+        requestID:(NSUUID *)requestID;
+
+@end
+
+API_AVAILABLE(macos(14), ios(17), macCatalyst(17))
+@interface _SFSpeechAnalyzerOptionsPowerContext : NSObject<NSCopying>
+
+@property (nonatomic, readonly, copy) NSString *ane;
+@property (nonatomic, readonly, copy) NSString *cpu;
+@property (nonatomic, readonly, copy) NSString *gpu;
+
+- (instancetype)initWithAne:(NSString *)ane
+        cpu:(NSString *)cpu
+        gpu:(NSString *)gpu;
+
+@end
+
+
+@interface _SFTranscriberModelOptions : NSObject<NSCopying>
+// corresponds to Transcriber.ModelOptions
+
+@property (nonatomic, readonly, copy, nullable) NSURL *supplementalModelURL;
+@property (nonatomic, readonly) BOOL farField;
+@property (nonatomic, readonly, copy, nullable) NSURL *modelOverrideURL;
+@property (nonatomic, readonly, copy, nullable) NSString *taskForMemoryLock;
+@property (nonatomic, readonly, copy) NSArray<NSURL *> *speechProfileURLs;
+
+- (instancetype)initWithSupplementalModelURL:(NSURL *_Nullable)supplementalModelURL
+        farField:(BOOL)farField
+        modelOverrideURL:(NSURL *_Nullable)modelOverrideURL
+        speechProfileURLs:(NSArray<NSURL *> *)speechProfileURLs
+        taskForMemoryLock:(NSString *_Nullable)taskForMemoryLock;
+
+// TODO: Remove once CESA provides speechProfileURLs parameter.
+- (instancetype)initWithSupplementalModelURL:(NSURL *_Nullable)supplementalModelURL
+        farField:(BOOL)farField
+        modelOverrideURL:(NSURL *_Nullable)modelOverrideURL
+        taskForMemoryLock:(NSString *_Nullable)taskForMemoryLock;
+
+@end
+
+
+API_AVAILABLE(macos(14), ios(17), macCatalyst(17))
+@interface _SFTranscriberResult : NSObject
+@property (nonatomic, readonly) CMTimeRange range;
+@property (nonatomic, readonly, copy) _STTranscriberMultisegmentResult *normalizedTranscriberMultisegmentResult;
+@property (nonatomic, readonly, copy, nullable) _STCommandRecognizerResult *normalizedCommandRecognizerResult;
+@property (nonatomic, readonly, copy) _STTranscriberMultisegmentResult *contextualizedTranscriberMultisegmentResult;
+@property (nonatomic, readonly, copy, nullable) _STCommandRecognizerResult *contextualizedCommandRecognizerResult;
+
+- (instancetype)initWithRange:(CMTimeRange)range
+        normalizedTranscriberMultisegmentResult:(_STTranscriberMultisegmentResult *)normalizedTranscriberMultisegmentResult
+        normalizedCommandRecognizerResult:(nullable _STCommandRecognizerResult *)normalizedCommandRecognizerResult
+        contextualizedTranscriberMultisegmentResult:(_STTranscriberMultisegmentResult *)contextualizedTranscriberMultisegmentResult
+        contextualizedCommandRecognizerResult:(nullable _STCommandRecognizerResult *)contextualizedCommandRecognizerResult;
+@end
+
+
+API_AVAILABLE(macos(14), ios(17), macCatalyst(17))
+@interface _STTranscriberMultisegmentResult : NSObject<NSCopying>
+// corresponds to Transcriber.MultisegmentResult, except for the range property.
+
+@property (nonatomic, readonly, copy) NSArray<_SFAnalyzerTranscriptionSegment *> *segments;
+@property (nonatomic, readonly, copy) NSArray<NSArray<_SFToken *> *> *transcriptions;
+@property (nonatomic, readonly) _SFEARResultType earResultType;
+@property (nonatomic, readonly, copy) NSArray<NSIndexPath *> *nBestChoices;
+@property (nonatomic, readonly) CMTimeRange recognitionAudioRange;
+
+- (instancetype)initWithSegments:(NSArray<_SFAnalyzerTranscriptionSegment *> *)segments
+        transcriptions:(NSArray<NSArray<_SFToken *> *> *)transcriptions
+        earResultType:(_SFEARResultType)earResultType
+        nBestChoices:(NSArray<NSIndexPath *> *)nBestChoices
+        recognitionAudioRange:(CMTimeRange)recognitionAudioRange;
+
+@end
+
+
+API_AVAILABLE(macos(14), ios(17), macCatalyst(17))
+@interface _SFAnalyzerTranscriptionSegment : NSObject
+// corresponds to TranscriptionSegment
+// Similar to _SFTranscriptionSegment, but in a different format.
+
+@property (nonatomic, readonly, copy) NSArray<_SFToken *> *text;
+@property (nonatomic, readonly, copy) NSArray<NSArray<_SFToken *> *> *alternatives;
+
+- (instancetype)initWithText:(NSArray<_SFToken *> *)text
+        alternatives:(NSArray<NSArray<_SFToken *> *> *)alternatives;
+
+@end
+
+
+API_AVAILABLE(macos(14), ios(17), macCatalyst(17))
+@interface _STCommandRecognizerResult : NSObject<NSCopying>
+// Corresponds to CommandRecognizer.Result, except for the range property.
+
+- (instancetype)initWithTranscriptionCommands:(NSArray<NSArray<_SFCommandRecognizerInterpretation *> *> *)transcriptionCommands;
+
+@property (nonatomic, readonly) NSArray<NSArray<_SFCommandRecognizerInterpretation *> *> *transcriptionCommands;
+
+@end
+
+
+API_AVAILABLE(macos(14), ios(17), macCatalyst(17))
+@interface _SFCommandRecognizerInterpretation : NSObject
+// corresponds to CommandRecognizer.Interpretation
+
+@property (nonatomic, readonly, copy) NSString *commandIdentifier;
+@property (nonatomic, readonly, copy) NSSet<NSString *> *suiteIdentifiers;
+@property (nonatomic, readonly) NSRange range;
+@property (nonatomic, readonly, copy) NSIndexSet *verbIndexes;
+@property (nonatomic, readonly, copy) NSArray<_SFCommandRecognizerArgument *> *arguments;
+
+- (instancetype)initWithCommandIdentifier:(NSString *)commandIdentifier
+        suiteIdentifiers:(NSSet<NSString *> *)suiteIdentifiers
+        range:(NSRange)range
+        verbIndexes:(NSIndexSet *)verbIndexes
+        arguments:(NSArray<_SFCommandRecognizerArgument *> *)arguments;
+
+@end
+
+
+API_AVAILABLE(macos(14), ios(17), macCatalyst(17))
+@interface _SFCommandRecognizerArgument : NSObject
+// corresponds to CommandRecognizer.Argument
+
+@property (nonatomic, readonly) SFCommandRecognizerArgumentPresence presence;
+@property (nonatomic, readonly, copy) NSIndexSet *indexes;
+@property (nonatomic, readonly, copy) NSIndexSet *adpositionIndexes;
+
+- (instancetype)initWithPresence:(SFCommandRecognizerArgumentPresence)presence
+        indexes:(NSIndexSet *)indexes
+        adpositionIndexes:(NSIndexSet *)adpositionIndexes;
+
+@end
+
+
+API_AVAILABLE(macos(14), ios(17), macCatalyst(17))
+@interface _SFEndpointingResult : NSObject
+// corresponds to EndpointDetector.ModuleOutput
+
+@property (nonatomic, readonly) CMTimeRange range;
+@property (nonatomic, readonly) NSInteger wordCount;
+@property (nonatomic, readonly) double eosLikelihood;
+@property (nonatomic, readonly, copy) NSArray<NSNumber *> *pauseCounts;
+@property (nonatomic, readonly) double silencePosterior;
+
+- (instancetype)initWithRange:(CMTimeRange)range
+        wordCount:(NSInteger)wordCount
+        eosLikelihood:(double)eosLikelihood
+        pauseCounts:(NSArray<NSNumber *> *)pauseCounts
+        silencePosterior:(double)silencePosterior;
+
+@end
+
+
+API_AVAILABLE(macos(14), ios(17), macCatalyst(17))
+@interface _SFToken : NSObject<NSCopying>
+
+@property(nonatomic, readonly, copy) NSString *text;
+@property(nonatomic, readonly) double confidence;
+@property(nonatomic, readonly) double startTime;
+@property(nonatomic, readonly) double duration;
+
+- (instancetype)initWithText:(NSString *)text
+                    confidence:(double)confidence
+                    startTime:(double)startTime
+                    duration:(double)duration;
+
+@end
+
+NS_ASSUME_NONNULL_END
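```

A minimal sketch of a transcriber-result delegate; the class name is hypothetical, and only the protocol and its selectors come from the header above:

```objc
#import <CoreMedia/CoreMedia.h>
#import <Speech/Speech.h>

@interface MyTranscriberDelegate : NSObject <_SFSpeechAnalyzerTranscriberResultDelegate>
@end

@implementation MyTranscriberDelegate

- (void)speechAnalyzer:(_SFSpeechAnalyzer *)speechAnalyzer
        didProduceTranscriberResult:(_SFTranscriberResult *)transcriberResult {
    // Each result covers a time range and carries both normalized and
    // contextualized multisegment transcriptions.
    NSLog(@"result covers %.2fs starting at %.2fs",
          CMTimeGetSeconds(transcriberResult.range.duration),
          CMTimeGetSeconds(transcriberResult.range.start));
}

- (void)speechAnalyzer:(_SFSpeechAnalyzer *)speechAnalyzer
        didStopTranscriptionWithError:(NSError *)error {
    NSLog(@"transcription stopped: %@", error);
}

@end
```

```diff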
diff -ruN /Applications/Xcode_14.3.1.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechLanguageModel.h /Applications/Xcode_15.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechLanguageModel.h
--- /Applications/Xcode_14.3.1.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechLanguageModel.h	1969-12-31 19:00:00
+++ /Applications/Xcode_15.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechLanguageModel.h	2023-05-19 21:38:41
@@ -0,0 +1,35 @@
+//
+//  SFSpeechLanguageModel.h
+//  Speech
+//
+//  Created by Ethan Peters on 8/1/22.
+//  Copyright © 2022 Apple, Inc. All rights reserved.
+//
+
+#import <Foundation/Foundation.h>
+
+NS_ASSUME_NONNULL_BEGIN
+
+API_AVAILABLE(ios(17), macos(14))
+NS_SWIFT_SENDABLE
+NS_SWIFT_NAME(SFSpeechLanguageModel.Configuration)
+@interface SFSpeechLanguageModelConfiguration : NSObject <NSCopying>
+
+@property (nonatomic, readonly, copy) NSURL *languageModel;
+@property (nonatomic, readonly, nullable, copy) NSURL *vocabulary;
+
+- (instancetype)initWithLanguageModel:(NSURL *)languageModel;
+- (instancetype)initWithLanguageModel:(NSURL *)languageModel vocabulary:(NSURL * __nullable)vocabulary;
+
+@end
+
+API_AVAILABLE(ios(17), macos(14))
+@interface SFSpeechLanguageModel : NSObject
+  
++ (void)prepareCustomLanguageModelForUrl:(NSURL *)asset clientIdentifier:(NSString *)clientIdentifier configuration:(SFSpeechLanguageModelConfiguration *)configuration completion:(void(^)(NSError * __nullable error))completion;
+
++ (void)prepareCustomLanguageModelForUrl:(NSURL *)asset clientIdentifier:(NSString *)clientIdentifier configuration:(SFSpeechLanguageModelConfiguration *)configuration ignoresCache:(BOOL)ignoresCache completion:(void(^)(NSError * __nullable error))completion;
+
+@end
+
+NS_ASSUME_NONNULL_END
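```

Usage follows a prepare-then-recognize pattern. A sketch of preparing a custom language model (file URLs and the client identifier are placeholders):

```objc
#import <Speech/Speech.h>

NSURL *assetURL = [NSURL fileURLWithPath:@"/path/to/CustomLMData.bin"];
NSURL *modelURL = [NSURL fileURLWithPath:@"/path/to/CustomLM"];
SFSpeechLanguageModelConfiguration *config =
    [[SFSpeechLanguageModelConfiguration alloc] initWithLanguageModel:modelURL];

[SFSpeechLanguageModel prepareCustomLanguageModelForUrl:assetURL
                                       clientIdentifier:@"com.example.app"
                                          configuration:config
                                             completion:^(NSError *_Nullable error) {
    if (error != nil)
        NSLog(@"failed to prepare custom language model: %@", error);
}];
```

```diff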
diff -ruN /Applications/Xcode_14.3.1.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionRequest.h /Applications/Xcode_15.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionRequest.h
--- /Applications/Xcode_14.3.1.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionRequest.h	2023-03-09 23:53:09
+++ /Applications/Xcode_15.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionRequest.h	2023-05-19 21:38:42
@@ -8,6 +8,7 @@
 #import <Foundation/Foundation.h>
 
 #import <Speech/SFSpeechRecognitionTaskHint.h>
+#import <Speech/SFSpeechLanguageModel.h>
 
 NS_ASSUME_NONNULL_BEGIN
 
@@ -35,6 +36,8 @@
 
 // If true, punctuations will be automatically included in the recognition results
 @property (nonatomic) BOOL addsPunctuation API_AVAILABLE(ios(16), macos(13));
+
+@property (nonatomic, copy, nullable) SFSpeechLanguageModelConfiguration *customizedLanguageModel API_AVAILABLE(ios(17), macos(14));
 
 @end
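```

Once prepared, the configuration is attached to a recognition request. A sketch reusing the `config` object from the previous example (`SFSpeechURLRecognitionRequest` is existing public API; the audio URL is a placeholder):

```objc
#import <Speech/Speech.h>

SFSpeechURLRecognitionRequest *request = [[SFSpeechURLRecognitionRequest alloc]
    initWithURL:[NSURL fileURLWithPath:@"/path/to/audio.wav"]];
// Assumption: customized language models are applied with on-device recognition.
request.requiresOnDeviceRecognition = YES;
request.customizedLanguageModel = config;
```

```diff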
 
diff -ruN /Applications/Xcode_14.3.1.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFTranscriptionOptions_Private.h /Applications/Xcode_15.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFTranscriptionOptions_Private.h
--- /Applications/Xcode_14.3.1.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFTranscriptionOptions_Private.h	1969-12-31 19:00:00
+++ /Applications/Xcode_15.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFTranscriptionOptions_Private.h	2023-05-19 21:38:42
@@ -0,0 +1,17 @@
+//
+//  SFTranscriptionOptions_Private.h
+//  Speech
+//
+//  Copyright © 2023 Apple Inc. All rights reserved.
+//
+
+#import <Foundation/Foundation.h>
+
+NS_SWIFT_SENDABLE
+typedef NS_OPTIONS(NSUInteger, _SFTranscriptionOptions) {
+    _SFTranscriptionOptionsNormalizedTranscription = 1UL << 0,
+    _SFTranscriptionOptionsContextualizedTranscription = 1UL << 1,
+    _SFTranscriptionOptionsPunctuation = 1UL << 2,
+    _SFTranscriptionOptionsEmoji = 1UL << 3,
+    _SFTranscriptionOptionsEtiquetteReplacements = 1UL << 4,
+} API_AVAILABLE(macos(14), ios(17), macCatalyst(17));
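```

As an `NS_OPTIONS` type, the flags combine with bitwise OR, for example (hypothetical use of a private type):

```objc
#import <Speech/Speech.h>

_SFTranscriptionOptions options =
    _SFTranscriptionOptionsPunctuation | _SFTranscriptionOptionsEtiquetteReplacements;
```

```diff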
diff -ruN /Applications/Xcode_14.3.1.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFTranscriptionResultAttributeOptions_Private.h /Applications/Xcode_15.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFTranscriptionResultAttributeOptions_Private.h
--- /Applications/Xcode_14.3.1.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFTranscriptionResultAttributeOptions_Private.h	1969-12-31 19:00:00
+++ /Applications/Xcode_15.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFTranscriptionResultAttributeOptions_Private.h	2023-05-19 21:38:42
@@ -0,0 +1,14 @@
+//
+//  SFTranscriptionResultAttributeOptions_Private.h
+//  Speech
+//
+//  Copyright © 2023 Apple Inc. All rights reserved.
+//
+
+#import <Foundation/Foundation.h>
+
+NS_SWIFT_SENDABLE
+typedef NS_OPTIONS(NSUInteger, _SFTranscriptionResultAttributeOptions) {
+    _SFTranscriptionResultAttributeOptionsConfidence = 1UL << 0,
+    _SFTranscriptionResultAttributeOptionsCmTime = 1UL << 1,
+} API_AVAILABLE(macos(14), ios(17), macCatalyst(17));
diff -ruN /Applications/Xcode_14.3.1.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/Speech.h /Applications/Xcode_15.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/Speech.h
--- /Applications/Xcode_14.3.1.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/Speech.h	2023-03-04 19:41:10
+++ /Applications/Xcode_15.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/Speech.h	2023-05-19 21:38:41
@@ -1,19 +1,28 @@
 //
 //  Speech.h
 //
-//  Copyright (c) 2016 Apple, Inc. All rights reserved.
+//  Copyright © 2016 Apple Inc. All rights reserved.
 //
 
 #import <Foundation/Foundation.h>
 
-#import <Speech/SFVoiceAnalytics.h>
+#import <Speech/SFErrors.h>
+#import <Speech/SFSpeechLanguageModel.h>
 #import <Speech/SFSpeechRecognitionMetadata.h>
-#import <Speech/SFSpeechRecognitionResult.h>
 #import <Speech/SFSpeechRecognitionRequest.h>
+#import <Speech/SFSpeechRecognitionResult.h>
 #import <Speech/SFSpeechRecognitionTask.h>
 #import <Speech/SFSpeechRecognitionTaskHint.h>
 #import <Speech/SFSpeechRecognizer.h>
-#import <Speech/SFTranscriptionSegment.h>
 #import <Speech/SFTranscription.h>
+#import <Speech/SFTranscriptionSegment.h>
+#import <Speech/SFVoiceAnalytics.h>
 
-
+// For interoperability within Apple Inc.
+#import <Speech/SFAnalysisContext_Private.h>
+#import <Speech/SFEARResultType_Private.h>
+#import <Speech/SFInputSequencer_Private.h>
+#import <Speech/SFSpeechAnalyzerOutputWrapper_Private.h>
+#import <Speech/SFSpeechAnalyzer_Private.h>
+#import <Speech/SFTranscriptionOptions_Private.h>
+#import <Speech/SFTranscriptionResultAttributeOptions_Private.h>
```