Skip to content

SensorKit macOS xcode15.1 b2

Alex Soto edited this page Oct 26, 2023 · 1 revision

# SensorKit.framework

diff -ruN /Applications/Xcode_15.0.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/SensorKit.framework/Headers/SRSensors.h /Applications/Xcode_15.1.0-beta2.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/SensorKit.framework/Headers/SRSensors.h
--- /Applications/Xcode_15.0.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/SensorKit.framework/Headers/SRSensors.h	2023-08-06 07:29:58
+++ /Applications/Xcode_15.1.0-beta2.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/SensorKit.framework/Headers/SRSensors.h	2023-10-13 17:02:15
@@ -189,7 +189,7 @@
  * @/textblock
 * This stream does not store any raw audio nor any audio or data from other parties.
  *
- * Fetches from this stream return objects of type SFSpeechRecognitionResult as defined in the Speech framework.
+ * Fetches from this stream return objects of type \c SRSpeechMetrics
  */
 SR_EXTERN SRSensor const SRSensorSiriSpeechMetrics API_AVAILABLE(ios(15.0)) API_UNAVAILABLE(watchos, macos, visionos) API_UNAVAILABLE(tvos);
 
@@ -208,7 +208,7 @@
  *
 * This stream does not store any raw audio nor any audio or data from other parties.
  *
- * Fetches from this stream return objects of type SFSpeechRecognitionResult as defined in the Speech framework.
+ * Fetches from this stream return objects of type \c SRSpeechMetrics
  */
 SR_EXTERN SRSensor const SRSensorTelephonySpeechMetrics API_AVAILABLE(ios(15.0)) API_UNAVAILABLE(watchos, macos, visionos) API_UNAVAILABLE(tvos);
 
diff -ruN /Applications/Xcode_15.0.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/SensorKit.framework/Headers/SRSpeechMetrics.h /Applications/Xcode_15.1.0-beta2.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/SensorKit.framework/Headers/SRSpeechMetrics.h
--- /Applications/Xcode_15.0.0.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/SensorKit.framework/Headers/SRSpeechMetrics.h	2023-08-06 07:29:58
+++ /Applications/Xcode_15.1.0-beta2.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/SensorKit.framework/Headers/SRSpeechMetrics.h	2023-10-13 17:02:15
@@ -137,6 +137,16 @@
  */
 @property (nonatomic, readonly, strong) NSDate *timestamp;
 
+/*!
+ * @property timeSinceAudioStart
+ * @brief The number of seconds since the start of the audio stream
+ *
+ * @discussion
+ * When an audio stream like a phone call starts, \c SRSpeechMetrics samples are collected
+ * periodically. This field can be used to determine where each sample falls in the audio stream
+ */
+@property (nonatomic, readonly, assign) NSTimeInterval timeSinceAudioStart API_AVAILABLE(ios(17.2)) API_UNAVAILABLE(watchos) API_UNAVAILABLE(tvos, macos);
+
 @property (nonatomic, nullable, readonly, strong) SRAudioLevel *audioLevel;
 @property (nonatomic, nullable, readonly, strong) SFSpeechRecognitionResult *speechRecognition;
 @property (nonatomic, nullable, readonly, strong) SNClassificationResult *soundClassification;
Clone this wiki locally