# Speech iOS xcode16.0 b1
# Speech.framework https://github.com/xamarin/xamarin-macios/pull/21077
diff -ruN /Applications/Xcode_15.4.0.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/PhoneticEmbedderEnums.h /Applications/Xcode_16.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/PhoneticEmbedderEnums.h
--- /Applications/Xcode_15.4.0.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/PhoneticEmbedderEnums.h 1970-01-01 01:00:00
+++ /Applications/Xcode_16.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/PhoneticEmbedderEnums.h 2024-05-30 10:54:54
@@ -0,0 +1,21 @@
+//
+// PhoneticEmbedderEnums.h
+// Speech
+//
+// Created by Festus Ojo on 1/19/24.
+// Copyright © 2024 Apple Inc. All rights reserved.
+//
+
+#import <Foundation/Foundation.h>
+
+__attribute__ ((deprecated ("Use PhoneticEmbedder.InputFormat instead.")))
+typedef NS_ENUM(NSInteger, PhoneticEncoderType) {
+ PhoneticEncoderTypeGrapheme = 0,
+ PhoneticEncoderTypePhoneme = 1,
+} API_AVAILABLE(macos(15.0), ios(18.0), watchos(11.0), tvos(18), visionos(2.0));
+
+__attribute__ ((deprecated ("Use PhoneticEmbedder.LoadingOption instead.")))
+typedef NS_ENUM(NSInteger, PhoneticEmbedderInitFlag) {
+ PhoneticEmbedderInitFlagAll = 0,
+ PhoneticEmbedderInitFlagEmbedder = 1,
+} API_AVAILABLE(macos(15.0), ios(18.0), watchos(11.0), tvos(18), visionos(2.0));
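
Note that both enums ship already deprecated, pointing at `PhoneticEmbedder.InputFormat` and `PhoneticEmbedder.LoadingOption`, neither of which appears in the public headers in this diff. A minimal sketch (the function name is hypothetical) of what referencing one of them looks like; any use compiles with a `-Wdeprecated-declarations` warning on the new SDKs:

```objc
#import <Speech/Speech.h>

static void logEncoderType(void) {
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
    // Deprecated at introduction; the suggested replacement is not public here.
    PhoneticEncoderType type = PhoneticEncoderTypeGrapheme;
#pragma clang diagnostic pop
    NSLog(@"encoder type: %ld", (long)type);
}
```
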
diff -ruN /Applications/Xcode_15.4.0.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFErrors.h /Applications/Xcode_16.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFErrors.h
--- /Applications/Xcode_15.4.0.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFErrors.h 2024-04-13 15:41:12
+++ /Applications/Xcode_16.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFErrors.h 2024-05-30 10:54:54
@@ -11,12 +11,14 @@
NS_ASSUME_NONNULL_BEGIN
extern NSErrorDomain const SFSpeechErrorDomain
-API_AVAILABLE(macos(14), ios(17));
+API_AVAILABLE(macos(14), ios(17), tvos(18));
typedef NS_ERROR_ENUM (SFSpeechErrorDomain, SFSpeechErrorCode) {
/** Error may include `NSUnderlyingErrorKey` in `userInfo`.*/
SFSpeechErrorCodeInternalServiceError = 1,
-
+ /** Failed to read audio file **/
+ SFSpeechErrorCodeAudioReadFailed = 2,
+
// MARK: CustomLM data related errors
/** Templates were malformed **/
@@ -24,6 +26,6 @@
/** A custom language model file was malformed **/
SFSpeechErrorCodeMalformedSupplementalModel = 8,
-} API_AVAILABLE(macos(14), ios(17));
+} API_AVAILABLE(macos(14), ios(17), tvos(18));
NS_ASSUME_NONNULL_END
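
The error domain gains tvOS 18 availability and a new code, `SFSpeechErrorCodeAudioReadFailed = 2`. A minimal sketch (the handler function is hypothetical) of detecting the new code when a file-based recognition request fails:

```objc
#import <Speech/Speech.h>

static void handleRecognitionError(NSError * _Nullable error) {
    if (error == nil) {
        return;
    }
    // New in this diff: reading the audio file failed.
    if ([error.domain isEqualToString:SFSpeechErrorDomain] &&
        error.code == SFSpeechErrorCodeAudioReadFailed) {
        NSLog(@"The audio file could not be read: %@", error);
    }
}
```
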
diff -ruN /Applications/Xcode_15.4.0.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechLanguageModel.h /Applications/Xcode_16.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechLanguageModel.h
--- /Applications/Xcode_15.4.0.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechLanguageModel.h 2024-04-13 15:41:13
+++ /Applications/Xcode_16.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechLanguageModel.h 2024-05-30 10:54:54
@@ -10,7 +10,7 @@
NS_ASSUME_NONNULL_BEGIN
-API_AVAILABLE(ios(17), macos(14))
+API_AVAILABLE(ios(17), macos(14), tvos(18))
NS_SWIFT_SENDABLE
NS_SWIFT_NAME(SFSpeechLanguageModel.Configuration)
@interface SFSpeechLanguageModelConfiguration : NSObject <NSCopying>
@@ -23,7 +23,7 @@
@end
-API_AVAILABLE(ios(17), macos(14))
+API_AVAILABLE(ios(17), macos(14), tvos(18))
@interface SFSpeechLanguageModel : NSObject
+ (void)prepareCustomLanguageModelForUrl:(NSURL *)asset clientIdentifier:(NSString *)clientIdentifier configuration:(SFSpeechLanguageModelConfiguration *)configuration completion:(void(^)(NSError * __nullable error))completion;
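
Only the availability changed here (tvOS 18 added); the preparation API itself is unchanged. A sketch of calling it, assuming the existing `initWithLanguageModel:` initializer (not shown in this diff) and placeholder URLs and client identifier:

```objc
#import <Speech/Speech.h>

static void prepareModel(NSURL *assetUrl, NSURL *modelUrl) {
    // initWithLanguageModel: comes from the existing header, unchanged here.
    SFSpeechLanguageModelConfiguration *config =
        [[SFSpeechLanguageModelConfiguration alloc] initWithLanguageModel:modelUrl];
    [SFSpeechLanguageModel prepareCustomLanguageModelForUrl:assetUrl
                                           clientIdentifier:@"com.example.app"
                                              configuration:config
                                                 completion:^(NSError * _Nullable error) {
        if (error != nil) {
            NSLog(@"Custom LM preparation failed: %@", error);
        }
    }];
}
```
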
diff -ruN /Applications/Xcode_15.4.0.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionMetadata.h /Applications/Xcode_16.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionMetadata.h
--- /Applications/Xcode_15.4.0.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionMetadata.h 2024-04-13 15:41:13
+++ /Applications/Xcode_16.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionMetadata.h 2024-05-30 10:54:54
@@ -10,7 +10,7 @@
@class SFVoiceAnalytics;
-API_AVAILABLE(ios(14.5), macos(11.3))
+API_AVAILABLE(ios(14.5), macos(11.3), tvos(18))
@interface SFSpeechRecognitionMetadata : NSObject <NSCopying, NSSecureCoding>
// Measures the number of words spoken per minute
diff -ruN /Applications/Xcode_15.4.0.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionRequest.h /Applications/Xcode_16.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionRequest.h
--- /Applications/Xcode_15.4.0.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionRequest.h 2024-04-13 15:41:13
+++ /Applications/Xcode_16.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionRequest.h 2024-05-30 10:54:54
@@ -13,7 +13,7 @@
NS_ASSUME_NONNULL_BEGIN
// A request for a speech recognition from an audio source
-API_AVAILABLE(ios(10.0), macos(10.15))
+API_AVAILABLE(ios(10.0), macos(10.15), tvos(18))
@interface SFSpeechRecognitionRequest : NSObject
@property (nonatomic) SFSpeechRecognitionTaskHint taskHint;
@@ -32,17 +32,17 @@
// This will reduce accuracy but enables certain applications where it is
// inappropriate to transmit user speech to a remote service.
// Default is false
-@property (nonatomic) BOOL requiresOnDeviceRecognition API_AVAILABLE(ios(13), macos(10.15));
+@property (nonatomic) BOOL requiresOnDeviceRecognition API_AVAILABLE(ios(13), macos(10.15), tvos(18));
// If true, punctuations will be automatically included in the recognition results
-@property (nonatomic) BOOL addsPunctuation API_AVAILABLE(ios(16), macos(13));
+@property (nonatomic) BOOL addsPunctuation API_AVAILABLE(ios(16), macos(13), tvos(18));
-@property (nonatomic, copy, nullable) SFSpeechLanguageModelConfiguration *customizedLanguageModel API_AVAILABLE(ios(17), macos(14));
+@property (nonatomic, copy, nullable) SFSpeechLanguageModelConfiguration *customizedLanguageModel API_AVAILABLE(ios(17), macos(14), tvos(18));
@end
// A request to recognize speech from a recorded audio file
-API_AVAILABLE(ios(10.0), macos(10.15))
+API_AVAILABLE(ios(10.0), macos(10.15), tvos(18))
@interface SFSpeechURLRecognitionRequest : SFSpeechRecognitionRequest
- (instancetype)init NS_UNAVAILABLE;
@@ -55,7 +55,7 @@
@end
// A request to recognize speech from arbitrary audio buffers
-API_AVAILABLE(ios(10.0), macos(10.15))
+API_AVAILABLE(ios(10.0), macos(10.15), tvos(18))
@interface SFSpeechAudioBufferRecognitionRequest : SFSpeechRecognitionRequest
// Preferred audio format for optimal speech recognition
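
A minimal sketch combining the request properties that gained tvOS 18 availability above; the helper function and file URL are placeholders:

```objc
#import <Speech/Speech.h>

static SFSpeechURLRecognitionRequest *makeRequest(NSURL *audioUrl) {
    SFSpeechURLRecognitionRequest *request =
        [[SFSpeechURLRecognitionRequest alloc] initWithURL:audioUrl];
    request.requiresOnDeviceRecognition = YES; // keep audio on device
    request.addsPunctuation = YES;             // punctuate results automatically
    return request;
}
```
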
diff -ruN /Applications/Xcode_15.4.0.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionResult.h /Applications/Xcode_16.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionResult.h
--- /Applications/Xcode_15.4.0.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionResult.h 2024-04-13 15:41:13
+++ /Applications/Xcode_16.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionResult.h 2024-05-30 10:54:54
@@ -12,7 +12,7 @@
@class SFSpeechRecognitionMetadata;
// A recognized utterance, corresponding to a segment of recorded audio with speech and containing one or more transcriptions hypotheses
-API_AVAILABLE(ios(10.0), macos(10.15))
+API_AVAILABLE(ios(10.0), macos(10.15), tvos(18))
@interface SFSpeechRecognitionResult : NSObject <NSCopying, NSSecureCoding>
@property (nonatomic, readonly, copy) SFTranscription *bestTranscription;
@@ -23,7 +23,7 @@
// True if the hypotheses will not change; speech processing is complete.
@property (nonatomic, readonly, getter=isFinal) BOOL final;
-@property (nonatomic, nullable, readonly) SFSpeechRecognitionMetadata *speechRecognitionMetadata API_AVAILABLE(ios(14.0), macos(11.0));
+@property (nonatomic, nullable, readonly) SFSpeechRecognitionMetadata *speechRecognitionMetadata API_AVAILABLE(ios(14.0), macos(11.0), tvos(18));
@end
diff -ruN /Applications/Xcode_15.4.0.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionTask.h /Applications/Xcode_16.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionTask.h
--- /Applications/Xcode_15.4.0.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionTask.h 2024-04-13 15:41:13
+++ /Applications/Xcode_16.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionTask.h 2024-05-30 10:54:54
@@ -18,9 +18,9 @@
SFSpeechRecognitionTaskStateFinishing = 2, // No more audio is being recorded, but more recognition results may arrive
SFSpeechRecognitionTaskStateCanceling = 3, // No more recognition results will arrive, but recording may not have stopped yet
SFSpeechRecognitionTaskStateCompleted = 4, // No more results will arrive, and recording is stopped.
-} API_AVAILABLE(ios(10.0), macos(10.15));
+} API_AVAILABLE(ios(10.0), macos(10.15), tvos(18));
-API_AVAILABLE(ios(10.0), macos(10.15))
+API_AVAILABLE(ios(10.0), macos(10.15), tvos(18))
@interface SFSpeechRecognitionTask : NSObject
@property (nonatomic, readonly) SFSpeechRecognitionTaskState state;
@@ -41,7 +41,7 @@
@end
// Recognition result receiver, to be used for complex or multi-utterance speech recognition requests
-API_AVAILABLE(ios(10.0), macos(10.15))
+API_AVAILABLE(ios(10.0), macos(10.15), tvos(18))
@protocol SFSpeechRecognitionTaskDelegate <NSObject>
@optional
@@ -64,6 +64,9 @@
// Called when recognition of all requested utterances is finished.
// If successfully is false, the error property of the task will contain error information
- (void)speechRecognitionTask:(SFSpeechRecognitionTask *)task didFinishSuccessfully:(BOOL)successfully;
+
+// Returns amount of audio processed by the task
+- (void)speechRecognitionTask:(SFSpeechRecognitionTask *)task didProcessAudioDuration:(NSTimeInterval)duration API_AVAILABLE(ios(10.0), macos(10.15), tvos(18));
@end
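
A sketch of adopting the new optional delegate callback added above; the delegate class name is hypothetical:

```objc
#import <Speech/Speech.h>

@interface ExampleTaskDelegate : NSObject <SFSpeechRecognitionTaskDelegate>
@end

@implementation ExampleTaskDelegate
// New in this diff: reports how much audio the task has processed.
- (void)speechRecognitionTask:(SFSpeechRecognitionTask *)task
      didProcessAudioDuration:(NSTimeInterval)duration {
    NSLog(@"Processed %.2f seconds of audio", duration);
}
@end
```
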
diff -ruN /Applications/Xcode_15.4.0.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionTaskHint.h /Applications/Xcode_16.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionTaskHint.h
--- /Applications/Xcode_15.4.0.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionTaskHint.h 2024-04-13 15:41:13
+++ /Applications/Xcode_16.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognitionTaskHint.h 2024-05-30 10:54:54
@@ -11,4 +11,4 @@
SFSpeechRecognitionTaskHintDictation = 1, // General dictation/keyboard-style
SFSpeechRecognitionTaskHintSearch = 2, // Search-style requests
SFSpeechRecognitionTaskHintConfirmation = 3, // Short, confirmation-style requests ("Yes", "No", "Maybe")
-} API_AVAILABLE(ios(10.0), macos(10.15));
+} API_AVAILABLE(ios(10.0), macos(10.15), tvos(18));
diff -ruN /Applications/Xcode_15.4.0.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognizer.h /Applications/Xcode_16.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognizer.h
--- /Applications/Xcode_15.4.0.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognizer.h 2024-04-13 15:41:13
+++ /Applications/Xcode_16.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFSpeechRecognizer.h 2024-05-30 10:54:54
@@ -24,9 +24,9 @@
SFSpeechRecognizerAuthorizationStatusDenied,
SFSpeechRecognizerAuthorizationStatusRestricted,
SFSpeechRecognizerAuthorizationStatusAuthorized,
-} API_AVAILABLE(ios(10.0), macos(10.15));
+} API_AVAILABLE(ios(10.0), macos(10.15), tvos(18));
-API_AVAILABLE(ios(10.0), macos(10.15))
+API_AVAILABLE(ios(10.0), macos(10.15), tvos(18))
@interface SFSpeechRecognizer : NSObject
// Locales which support speech recognition.
@@ -47,7 +47,7 @@
@property (nonatomic, readonly, copy) NSLocale *locale;
// True if this recognition can handle requests with requiresOnDeviceRecognition set to true
-@property (nonatomic) BOOL supportsOnDeviceRecognition API_AVAILABLE(ios(13));
+@property (nonatomic) BOOL supportsOnDeviceRecognition API_AVAILABLE(ios(13), tvos(18));
@property (nonatomic, weak) id<SFSpeechRecognizerDelegate> delegate;
@@ -71,7 +71,7 @@
@end
-API_AVAILABLE(ios(10.0), macos(10.15))
+API_AVAILABLE(ios(10.0), macos(10.15), tvos(18))
@protocol SFSpeechRecognizerDelegate <NSObject>
@optional
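
With the recognizer and its authorization enum now available on tvOS 18, the usual authorization flow applies there too. A sketch, with a placeholder locale:

```objc
#import <Speech/Speech.h>

static void startIfAuthorized(void) {
    [SFSpeechRecognizer requestAuthorization:^(SFSpeechRecognizerAuthorizationStatus status) {
        if (status != SFSpeechRecognizerAuthorizationStatusAuthorized) {
            return;
        }
        SFSpeechRecognizer *recognizer = [[SFSpeechRecognizer alloc]
            initWithLocale:[NSLocale localeWithLocaleIdentifier:@"en-US"]];
        // supportsOnDeviceRecognition gained tvOS 18 availability above.
        if (recognizer.supportsOnDeviceRecognition) {
            NSLog(@"On-device recognition is available");
        }
    }];
}
```
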
diff -ruN /Applications/Xcode_15.4.0.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFTranscription.h /Applications/Xcode_16.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFTranscription.h
--- /Applications/Xcode_15.4.0.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFTranscription.h 2024-04-13 15:41:13
+++ /Applications/Xcode_16.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFTranscription.h 2024-05-30 10:54:54
@@ -10,7 +10,7 @@
@class SFTranscriptionSegment;
// A hypothesized text form of a speech recording
-API_AVAILABLE(ios(10.0), macos(10.15))
+API_AVAILABLE(ios(10.0), macos(10.15), tvos(18))
@interface SFTranscription : NSObject <NSCopying, NSSecureCoding>
// Contains the entire recognition, formatted into a single user-displayable string
diff -ruN /Applications/Xcode_15.4.0.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFTranscriptionSegment.h /Applications/Xcode_16.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFTranscriptionSegment.h
--- /Applications/Xcode_15.4.0.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFTranscriptionSegment.h 2024-04-13 15:41:13
+++ /Applications/Xcode_16.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFTranscriptionSegment.h 2024-05-30 10:54:54
@@ -10,7 +10,7 @@
@class SFVoiceAnalytics;
// Substrings of a hypothesized transcription
-API_AVAILABLE(ios(10.0), macos(10.15))
+API_AVAILABLE(ios(10.0), macos(10.15), tvos(18))
@interface SFTranscriptionSegment : NSObject <NSCopying, NSSecureCoding>
@property (nonatomic, readonly, copy) NSString *substring;
diff -ruN /Applications/Xcode_15.4.0.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFVoiceAnalytics.h /Applications/Xcode_16.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFVoiceAnalytics.h
--- /Applications/Xcode_15.4.0.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFVoiceAnalytics.h 2024-04-13 15:41:13
+++ /Applications/Xcode_16.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/SFVoiceAnalytics.h 2024-05-30 10:54:54
@@ -7,7 +7,7 @@
NS_ASSUME_NONNULL_BEGIN
// An acoustic feature
-API_AVAILABLE(ios(13), macos(10.15))
+API_AVAILABLE(ios(13), macos(10.15), tvos(18))
@interface SFAcousticFeature : NSObject <NSCopying, NSSecureCoding>
// Array of feature values per audio frame, corresponding to a segment of recorded audio
@@ -19,7 +19,7 @@
@end
// Voice analytics corresponding to a segment of recorded audio
-API_AVAILABLE(ios(13), macos(10.15))
+API_AVAILABLE(ios(13), macos(10.15), tvos(18))
@interface SFVoiceAnalytics : NSObject <NSCopying, NSSecureCoding>
// Jitter measures vocal stability and is measured as an absolute difference between consecutive periods, divided by the average period. It is expressed as a percentage
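
A sketch of reading voice analytics from a final result's metadata; the property names used outside this diff (`voiceAnalytics`, `jitter`, `acousticFeatureValuePerFrame`) come from the existing Speech API and are unchanged here:

```objc
#import <Speech/Speech.h>

static void logJitter(SFSpeechRecognitionResult *result) {
    // Both properties are nullable; messaging nil simply yields an empty count.
    SFVoiceAnalytics *analytics = result.speechRecognitionMetadata.voiceAnalytics;
    NSArray<NSNumber *> *values = analytics.jitter.acousticFeatureValuePerFrame;
    NSLog(@"Jitter samples: %lu", (unsigned long)values.count);
}
```
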
diff -ruN /Applications/Xcode_15.4.0.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/Speech.h /Applications/Xcode_16.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/Speech.h
--- /Applications/Xcode_15.4.0.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/Speech.h 2024-04-13 15:41:12
+++ /Applications/Xcode_16.0.0-beta.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk/System/Library/Frameworks/Speech.framework/Headers/Speech.h 2024-05-30 10:54:54
@@ -17,3 +17,4 @@
#import <Speech/SFTranscription.h>
#import <Speech/SFTranscriptionSegment.h>
#import <Speech/SFVoiceAnalytics.h>
+#import <Speech/PhoneticEmbedderEnums.h>