diff --git a/examples/audio_player_interaction/.metadata b/examples/audio_player_interaction/.metadata index 56bfc2c4..87b4b9ab 100644 --- a/examples/audio_player_interaction/.metadata +++ b/examples/audio_player_interaction/.metadata @@ -4,7 +4,27 @@ # This file should be version controlled and should not be manually edited. version: - revision: f4abaa0735eba4dfd8f33f73363911d63931fe03 - channel: stable + revision: "68bfaea224880b488c617afe30ab12091ea8fa4e" + channel: "stable" project_type: app + +# Tracks metadata for the flutter migrate command +migration: + platforms: + - platform: root + create_revision: 68bfaea224880b488c617afe30ab12091ea8fa4e + base_revision: 68bfaea224880b488c617afe30ab12091ea8fa4e + - platform: macos + create_revision: 68bfaea224880b488c617afe30ab12091ea8fa4e + base_revision: 68bfaea224880b488c617afe30ab12091ea8fa4e + + # User provided section + + # List of Local paths (relative to this file) that should be + # ignored by the migrate tool. + # + # Files that are not part of the templates will be ignored by default. + unmanaged_files: + - 'lib/main.dart' + - 'ios/Runner.xcodeproj/project.pbxproj' diff --git a/examples/audio_player_interaction/ios/Podfile b/examples/audio_player_interaction/ios/Podfile index 54e2d561..6f9ba5a5 100644 --- a/examples/audio_player_interaction/ios/Podfile +++ b/examples/audio_player_interaction/ios/Podfile @@ -1,5 +1,5 @@ # Uncomment this line to define a global platform for your project -# platform :ios, '11.0' +# platform :ios, '12.0' # CocoaPods analytics sends network stats synchronously affecting flutter build latency. ENV['COCOAPODS_DISABLE_STATS'] = 'true' diff --git a/examples/audio_player_interaction/ios/Podfile.lock b/examples/audio_player_interaction/ios/Podfile.lock index 0c3a5f89..0af54890 100644 --- a/examples/audio_player_interaction/ios/Podfile.lock +++ b/examples/audio_player_interaction/ios/Podfile.lock @@ -7,6 +7,7 @@ PODS: - FlutterMacOS - speech_to_text (0.0.1): - Flutter + - FlutterMacOS - Try - Try (2.1.1) @@ -14,7 +15,7 @@ DEPENDENCIES: - audioplayers_darwin (from `.symlinks/plugins/audioplayers_darwin/ios`) - Flutter (from `Flutter`) - path_provider_foundation (from `.symlinks/plugins/path_provider_foundation/darwin`) - - speech_to_text (from `.symlinks/plugins/speech_to_text/ios`) + - speech_to_text (from `.symlinks/plugins/speech_to_text/darwin`) SPEC REPOS: trunk: @@ -28,15 +29,15 @@ EXTERNAL SOURCES: path_provider_foundation: :path: ".symlinks/plugins/path_provider_foundation/darwin" speech_to_text: - :path: ".symlinks/plugins/speech_to_text/ios" + :path: ".symlinks/plugins/speech_to_text/darwin" SPEC CHECKSUMS: audioplayers_darwin: 877d9a4d06331c5c374595e46e16453ac7eafa40 - Flutter: f04841e97a9d0b0a8025694d0796dd46242b2854 + Flutter: e0871f40cf51350855a761d2e70bf5af5b9b5de7 path_provider_foundation: 3784922295ac71e43754bd15e0653ccfd36a147c - speech_to_text: b43a7d99aef037bd758ed8e45d79bbac035d2dfe + speech_to_text: 627d3fd2194770b51abb324ba45c2d39398f24a8 Try: 5ef669ae832617b3cee58cb2c6f99fb767a4ff96 -PODFILE CHECKSUM: c527ae5525e255413e90864467a7354714ff7b65 +PODFILE CHECKSUM: 7f3058696676819c0a0b2776032327aec03da512 -COCOAPODS: 1.12.1 +COCOAPODS: 1.15.2 diff --git a/examples/audio_player_interaction/ios/Runner.xcodeproj/project.pbxproj b/examples/audio_player_interaction/ios/Runner.xcodeproj/project.pbxproj index 8b83fac2..14ac55fc 100644 --- a/examples/audio_player_interaction/ios/Runner.xcodeproj/project.pbxproj +++ b/examples/audio_player_interaction/ios/Runner.xcodeproj/project.pbxproj @@ -155,7 
+155,7 @@ 97C146E61CF9000F007C117D /* Project object */ = { isa = PBXProject; attributes = { - LastUpgradeCheck = 1430; + LastUpgradeCheck = 1510; ORGANIZATIONNAME = ""; TargetAttributes = { 97C146ED1CF9000F007C117D = { @@ -342,7 +342,7 @@ GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; GCC_WARN_UNUSED_FUNCTION = YES; GCC_WARN_UNUSED_VARIABLE = YES; - IPHONEOS_DEPLOYMENT_TARGET = 11.0; + IPHONEOS_DEPLOYMENT_TARGET = 12.0; MTL_ENABLE_DEBUG_INFO = NO; SDKROOT = iphoneos; SUPPORTED_PLATFORMS = iphoneos; @@ -420,7 +420,7 @@ GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; GCC_WARN_UNUSED_FUNCTION = YES; GCC_WARN_UNUSED_VARIABLE = YES; - IPHONEOS_DEPLOYMENT_TARGET = 11.0; + IPHONEOS_DEPLOYMENT_TARGET = 12.0; MTL_ENABLE_DEBUG_INFO = YES; ONLY_ACTIVE_ARCH = YES; SDKROOT = iphoneos; @@ -469,7 +469,7 @@ GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; GCC_WARN_UNUSED_FUNCTION = YES; GCC_WARN_UNUSED_VARIABLE = YES; - IPHONEOS_DEPLOYMENT_TARGET = 11.0; + IPHONEOS_DEPLOYMENT_TARGET = 12.0; MTL_ENABLE_DEBUG_INFO = NO; SDKROOT = iphoneos; SUPPORTED_PLATFORMS = iphoneos; diff --git a/examples/audio_player_interaction/ios/Runner.xcodeproj/xcshareddata/xcschemes/Runner.xcscheme b/examples/audio_player_interaction/ios/Runner.xcodeproj/xcshareddata/xcschemes/Runner.xcscheme index b52b2e69..e67b2808 100644 --- a/examples/audio_player_interaction/ios/Runner.xcodeproj/xcshareddata/xcschemes/Runner.xcscheme +++ b/examples/audio_player_interaction/ios/Runner.xcodeproj/xcshareddata/xcschemes/Runner.xcscheme @@ -1,6 +1,6 @@ <?xml version="1.0" encoding="UTF-8"?> <Scheme - LastUpgradeVersion = "1430" + LastUpgradeVersion = "1510" version = "1.3"> <BuildAction diff --git a/examples/audio_player_interaction/macos/Podfile b/examples/audio_player_interaction/macos/Podfile new file mode 100644 --- /dev/null +++ b/examples/audio_player_interaction/macos/Podfile @@ -0,0 +1,43 @@ +platform :osx, '10.14' + +# CocoaPods analytics sends network stats synchronously affecting flutter build latency. +ENV['COCOAPODS_DISABLE_STATS'] = 'true' + +project 'Runner', { + 'Debug' => :debug, + 'Profile' => :release, + 'Release' => :release, +} + +def flutter_root + generated_xcode_build_settings_path = File.expand_path(File.join('..', 'Flutter', 'ephemeral', 'Flutter-Generated.xcconfig'), __FILE__) + unless File.exist?(generated_xcode_build_settings_path) + raise "#{generated_xcode_build_settings_path} must exist. If you're running pod install manually, make sure \"flutter pub get\" is executed first" + end + + File.foreach(generated_xcode_build_settings_path) do |line| + matches = line.match(/FLUTTER_ROOT\=(.*)/) + return matches[1].strip if matches + end + raise "FLUTTER_ROOT not found in #{generated_xcode_build_settings_path}. Try deleting Flutter-Generated.xcconfig, then run \"flutter pub get\"" +end + +require File.expand_path(File.join('packages', 'flutter_tools', 'bin', 'podhelper'), flutter_root) + +flutter_macos_podfile_setup + +target 'Runner' do + use_frameworks! + use_modular_headers! + + flutter_install_all_macos_pods File.dirname(File.realpath(__FILE__)) + target 'RunnerTests' do + inherit!
:search_paths + end +end + +post_install do |installer| + installer.pods_project.targets.each do |target| + flutter_additional_macos_build_settings(target) + end +end diff --git a/examples/audio_player_interaction/macos/Podfile.lock b/examples/audio_player_interaction/macos/Podfile.lock new file mode 100644 index 00000000..1c9f719e --- /dev/null +++ b/examples/audio_player_interaction/macos/Podfile.lock @@ -0,0 +1,43 @@ +PODS: + - audioplayers_darwin (0.0.1): + - FlutterMacOS + - FlutterMacOS (1.0.0) + - path_provider_foundation (0.0.1): + - Flutter + - FlutterMacOS + - speech_to_text (0.0.1): + - Flutter + - FlutterMacOS + - Try + - Try (2.1.1) + +DEPENDENCIES: + - audioplayers_darwin (from `Flutter/ephemeral/.symlinks/plugins/audioplayers_darwin/macos`) + - FlutterMacOS (from `Flutter/ephemeral`) + - path_provider_foundation (from `Flutter/ephemeral/.symlinks/plugins/path_provider_foundation/darwin`) + - speech_to_text (from `Flutter/ephemeral/.symlinks/plugins/speech_to_text/darwin`) + +SPEC REPOS: + trunk: + - Try + +EXTERNAL SOURCES: + audioplayers_darwin: + :path: Flutter/ephemeral/.symlinks/plugins/audioplayers_darwin/macos + FlutterMacOS: + :path: Flutter/ephemeral + path_provider_foundation: + :path: Flutter/ephemeral/.symlinks/plugins/path_provider_foundation/darwin + speech_to_text: + :path: Flutter/ephemeral/.symlinks/plugins/speech_to_text/darwin + +SPEC CHECKSUMS: + audioplayers_darwin: dcad41de4fbd0099cb3749f7ab3b0cb8f70b810c + FlutterMacOS: 8f6f14fa908a6fb3fba0cd85dbd81ec4b251fb24 + path_provider_foundation: 3784922295ac71e43754bd15e0653ccfd36a147c + speech_to_text: 627d3fd2194770b51abb324ba45c2d39398f24a8 + Try: 5ef669ae832617b3cee58cb2c6f99fb767a4ff96 + +PODFILE CHECKSUM: 9ebaf0ce3d369aaa26a9ea0e159195ed94724cf3 + +COCOAPODS: 1.15.2 diff --git a/examples/audio_player_interaction/macos/Runner.xcodeproj/project.pbxproj b/examples/audio_player_interaction/macos/Runner.xcodeproj/project.pbxproj new file mode 100644 index 00000000..0a9f4bec --- /dev/null +++ b/examples/audio_player_interaction/macos/Runner.xcodeproj/project.pbxproj @@ -0,0 +1,803 @@ +// !$*UTF8*$! 
+{ + archiveVersion = 1; + classes = { + }; + objectVersion = 54; + objects = { + +/* Begin PBXAggregateTarget section */ + 33CC111A2044C6BA0003C045 /* Flutter Assemble */ = { + isa = PBXAggregateTarget; + buildConfigurationList = 33CC111B2044C6BA0003C045 /* Build configuration list for PBXAggregateTarget "Flutter Assemble" */; + buildPhases = ( + 33CC111E2044C6BF0003C045 /* ShellScript */, + ); + dependencies = ( + ); + name = "Flutter Assemble"; + productName = FLX; + }; +/* End PBXAggregateTarget section */ + +/* Begin PBXBuildFile section */ + 2CEEC6BEE4BE6F65A50427AB /* Pods_RunnerTests.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 614BF7986401FFA61E16D6C8 /* Pods_RunnerTests.framework */; }; + 331C80D8294CF71000263BE5 /* RunnerTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 331C80D7294CF71000263BE5 /* RunnerTests.swift */; }; + 335BBD1B22A9A15E00E9071D /* GeneratedPluginRegistrant.swift in Sources */ = {isa = PBXBuildFile; fileRef = 335BBD1A22A9A15E00E9071D /* GeneratedPluginRegistrant.swift */; }; + 33CC10F12044A3C60003C045 /* AppDelegate.swift in Sources */ = {isa = PBXBuildFile; fileRef = 33CC10F02044A3C60003C045 /* AppDelegate.swift */; }; + 33CC10F32044A3C60003C045 /* Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 33CC10F22044A3C60003C045 /* Assets.xcassets */; }; + 33CC10F62044A3C60003C045 /* MainMenu.xib in Resources */ = {isa = PBXBuildFile; fileRef = 33CC10F42044A3C60003C045 /* MainMenu.xib */; }; + 33CC11132044BFA00003C045 /* MainFlutterWindow.swift in Sources */ = {isa = PBXBuildFile; fileRef = 33CC11122044BFA00003C045 /* MainFlutterWindow.swift */; }; + B1A9CC94BA946458355C65E6 /* Pods_Runner.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = BCC0C40FAC833E0143A83B88 /* Pods_Runner.framework */; }; +/* End PBXBuildFile section */ + +/* Begin PBXContainerItemProxy section */ + 331C80D9294CF71000263BE5 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 33CC10E52044A3C60003C045 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 33CC10EC2044A3C60003C045; + remoteInfo = Runner; + }; + 33CC111F2044C79F0003C045 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 33CC10E52044A3C60003C045 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 33CC111A2044C6BA0003C045; + remoteInfo = FLX; + }; +/* End PBXContainerItemProxy section */ + +/* Begin PBXCopyFilesBuildPhase section */ + 33CC110E2044A8840003C045 /* Bundle Framework */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = ""; + dstSubfolderSpec = 10; + files = ( + ); + name = "Bundle Framework"; + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXCopyFilesBuildPhase section */ + +/* Begin PBXFileReference section */ + 1D820DFBD5F4C991A82CDA4E /* Pods-Runner.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-Runner.release.xcconfig"; path = "Target Support Files/Pods-Runner/Pods-Runner.release.xcconfig"; sourceTree = "<group>"; }; + 331C80D5294CF71000263BE5 /* RunnerTests.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = RunnerTests.xctest; sourceTree = BUILT_PRODUCTS_DIR; }; + 331C80D7294CF71000263BE5 /* RunnerTests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = RunnerTests.swift; sourceTree = "<group>"; }; + 333000ED22D3DE5D00554162 /* Warnings.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path =
Warnings.xcconfig; sourceTree = "<group>"; }; + 335BBD1A22A9A15E00E9071D /* GeneratedPluginRegistrant.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = GeneratedPluginRegistrant.swift; sourceTree = "<group>"; }; + 33CC10ED2044A3C60003C045 /* audio_player_interaction.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = audio_player_interaction.app; sourceTree = BUILT_PRODUCTS_DIR; }; + 33CC10F02044A3C60003C045 /* AppDelegate.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = AppDelegate.swift; sourceTree = "<group>"; }; + 33CC10F22044A3C60003C045 /* Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; name = Assets.xcassets; path = Runner/Assets.xcassets; sourceTree = "<group>"; }; + 33CC10F52044A3C60003C045 /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.xib; name = Base; path = Base.lproj/MainMenu.xib; sourceTree = "<group>"; }; + 33CC10F72044A3C60003C045 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; name = Info.plist; path = Runner/Info.plist; sourceTree = "<group>"; }; + 33CC11122044BFA00003C045 /* MainFlutterWindow.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = MainFlutterWindow.swift; sourceTree = "<group>"; }; + 33CEB47222A05771004F2AC0 /* Flutter-Debug.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = "Flutter-Debug.xcconfig"; sourceTree = "<group>"; }; + 33CEB47422A05771004F2AC0 /* Flutter-Release.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = "Flutter-Release.xcconfig"; sourceTree = "<group>"; }; + 33CEB47722A0578A004F2AC0 /* Flutter-Generated.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; name = "Flutter-Generated.xcconfig"; path = "ephemeral/Flutter-Generated.xcconfig"; sourceTree = "<group>"; }; + 33E51913231747F40026EE4D /* DebugProfile.entitlements */ = {isa = PBXFileReference; lastKnownFileType = text.plist.entitlements; path = DebugProfile.entitlements; sourceTree = "<group>"; }; + 33E51914231749380026EE4D /* Release.entitlements */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.entitlements; path = Release.entitlements; sourceTree = "<group>"; }; + 33E5194F232828860026EE4D /* AppInfo.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = AppInfo.xcconfig; sourceTree = "<group>"; }; + 524A6E47A5FD65FC97C4B86C /* Pods-RunnerTests.profile.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-RunnerTests.profile.xcconfig"; path = "Target Support Files/Pods-RunnerTests/Pods-RunnerTests.profile.xcconfig"; sourceTree = "<group>"; }; + 600836A9D977B61D9FC24614 /* Pods-RunnerTests.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-RunnerTests.debug.xcconfig"; path = "Target Support Files/Pods-RunnerTests/Pods-RunnerTests.debug.xcconfig"; sourceTree = "<group>"; }; + 614BF7986401FFA61E16D6C8 /* Pods_RunnerTests.framework */ = {isa = PBXFileReference; explicitFileType = wrapper.framework; includeInIndex = 0; path = Pods_RunnerTests.framework; sourceTree = BUILT_PRODUCTS_DIR; }; + 7AFA3C8E1D35360C0083082E /* Release.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = Release.xcconfig; sourceTree = "<group>"; }; + 8E8162639F1DC8BDFD050C0A /* Pods-RunnerTests.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name =
"Pods-RunnerTests.release.xcconfig"; path = "Target Support Files/Pods-RunnerTests/Pods-RunnerTests.release.xcconfig"; sourceTree = "<group>"; }; + 9740EEB21CF90195004384FC /* Debug.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = Debug.xcconfig; sourceTree = "<group>"; }; + A2FD30BA1930BE1F8BB57651 /* Pods-Runner.profile.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-Runner.profile.xcconfig"; path = "Target Support Files/Pods-Runner/Pods-Runner.profile.xcconfig"; sourceTree = "<group>"; }; + BCC0C40FAC833E0143A83B88 /* Pods_Runner.framework */ = {isa = PBXFileReference; explicitFileType = wrapper.framework; includeInIndex = 0; path = Pods_Runner.framework; sourceTree = BUILT_PRODUCTS_DIR; }; + CF6DC46E13D58920FA975903 /* Pods-Runner.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-Runner.debug.xcconfig"; path = "Target Support Files/Pods-Runner/Pods-Runner.debug.xcconfig"; sourceTree = "<group>"; }; +/* End PBXFileReference section */ + +/* Begin PBXFrameworksBuildPhase section */ + 331C80D2294CF70F00263BE5 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 2CEEC6BEE4BE6F65A50427AB /* Pods_RunnerTests.framework in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 33CC10EA2044A3C60003C045 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + B1A9CC94BA946458355C65E6 /* Pods_Runner.framework in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXFrameworksBuildPhase section */ + +/* Begin PBXGroup section */ + 331C80D6294CF71000263BE5 /* RunnerTests */ = { + isa = PBXGroup; + children = ( + 331C80D7294CF71000263BE5 /* RunnerTests.swift */, + ); + path = RunnerTests; + sourceTree = "<group>"; + }; + 33BA886A226E78AF003329D5 /* Configs */ = { + isa = PBXGroup; + children = ( + 33E5194F232828860026EE4D /* AppInfo.xcconfig */, + 9740EEB21CF90195004384FC /* Debug.xcconfig */, + 7AFA3C8E1D35360C0083082E /* Release.xcconfig */, + 333000ED22D3DE5D00554162 /* Warnings.xcconfig */, + ); + path = Configs; + sourceTree = "<group>"; + }; + 33CC10E42044A3C60003C045 = { + isa = PBXGroup; + children = ( + 33FAB671232836740065AC1E /* Runner */, + 33CEB47122A05771004F2AC0 /* Flutter */, + 331C80D6294CF71000263BE5 /* RunnerTests */, + 33CC10EE2044A3C60003C045 /* Products */, + D73912EC22F37F3D000D13A0 /* Frameworks */, + C44C8BF173719EFDD80C1463 /* Pods */, + ); + sourceTree = "<group>"; + }; + 33CC10EE2044A3C60003C045 /* Products */ = { + isa = PBXGroup; + children = ( + 33CC10ED2044A3C60003C045 /* audio_player_interaction.app */, + 331C80D5294CF71000263BE5 /* RunnerTests.xctest */, + ); + name = Products; + sourceTree = "<group>"; + }; + 33CC11242044D66E0003C045 /* Resources */ = { + isa = PBXGroup; + children = ( + 33CC10F22044A3C60003C045 /* Assets.xcassets */, + 33CC10F42044A3C60003C045 /* MainMenu.xib */, + 33CC10F72044A3C60003C045 /* Info.plist */, + ); + name = Resources; + path = ..; + sourceTree = "<group>"; + }; + 33CEB47122A05771004F2AC0 /* Flutter */ = { + isa = PBXGroup; + children = ( + 335BBD1A22A9A15E00E9071D /* GeneratedPluginRegistrant.swift */, + 33CEB47222A05771004F2AC0 /* Flutter-Debug.xcconfig */, + 33CEB47422A05771004F2AC0 /* Flutter-Release.xcconfig */, + 33CEB47722A0578A004F2AC0 /* Flutter-Generated.xcconfig */, + ); + path = Flutter; + sourceTree = "<group>"; + }; + 33FAB671232836740065AC1E /* Runner */ = { + isa = PBXGroup; + children = ( +
33CC10F02044A3C60003C045 /* AppDelegate.swift */, + 33CC11122044BFA00003C045 /* MainFlutterWindow.swift */, + 33E51913231747F40026EE4D /* DebugProfile.entitlements */, + 33E51914231749380026EE4D /* Release.entitlements */, + 33CC11242044D66E0003C045 /* Resources */, + 33BA886A226E78AF003329D5 /* Configs */, + ); + path = Runner; + sourceTree = "<group>"; + }; + C44C8BF173719EFDD80C1463 /* Pods */ = { + isa = PBXGroup; + children = ( + CF6DC46E13D58920FA975903 /* Pods-Runner.debug.xcconfig */, + 1D820DFBD5F4C991A82CDA4E /* Pods-Runner.release.xcconfig */, + A2FD30BA1930BE1F8BB57651 /* Pods-Runner.profile.xcconfig */, + 600836A9D977B61D9FC24614 /* Pods-RunnerTests.debug.xcconfig */, + 8E8162639F1DC8BDFD050C0A /* Pods-RunnerTests.release.xcconfig */, + 524A6E47A5FD65FC97C4B86C /* Pods-RunnerTests.profile.xcconfig */, + ); + path = Pods; + sourceTree = "<group>"; + }; + D73912EC22F37F3D000D13A0 /* Frameworks */ = { + isa = PBXGroup; + children = ( + BCC0C40FAC833E0143A83B88 /* Pods_Runner.framework */, + 614BF7986401FFA61E16D6C8 /* Pods_RunnerTests.framework */, + ); + name = Frameworks; + sourceTree = "<group>"; + }; +/* End PBXGroup section */ + +/* Begin PBXNativeTarget section */ + 331C80D4294CF70F00263BE5 /* RunnerTests */ = { + isa = PBXNativeTarget; + buildConfigurationList = 331C80DE294CF71000263BE5 /* Build configuration list for PBXNativeTarget "RunnerTests" */; + buildPhases = ( + 606087F40819902F660219E1 /* [CP] Check Pods Manifest.lock */, + 331C80D1294CF70F00263BE5 /* Sources */, + 331C80D2294CF70F00263BE5 /* Frameworks */, + 331C80D3294CF70F00263BE5 /* Resources */, + ); + buildRules = ( + ); + dependencies = ( + 331C80DA294CF71000263BE5 /* PBXTargetDependency */, + ); + name = RunnerTests; + productName = RunnerTests; + productReference = 331C80D5294CF71000263BE5 /* RunnerTests.xctest */; + productType = "com.apple.product-type.bundle.unit-test"; + }; + 33CC10EC2044A3C60003C045 /* Runner */ = { + isa = PBXNativeTarget; + buildConfigurationList = 33CC10FB2044A3C60003C045 /* Build configuration list for PBXNativeTarget "Runner" */; + buildPhases = ( + 6626714A5E90C2AA2176B076 /* [CP] Check Pods Manifest.lock */, + 33CC10E92044A3C60003C045 /* Sources */, + 33CC10EA2044A3C60003C045 /* Frameworks */, + 33CC10EB2044A3C60003C045 /* Resources */, + 33CC110E2044A8840003C045 /* Bundle Framework */, + 3399D490228B24CF009A79C7 /* ShellScript */, + A10F8989352E99DAE63B1E57 /* [CP] Embed Pods Frameworks */, + ); + buildRules = ( + ); + dependencies = ( + 33CC11202044C79F0003C045 /* PBXTargetDependency */, + ); + name = Runner; + productName = Runner; + productReference = 33CC10ED2044A3C60003C045 /* audio_player_interaction.app */; + productType = "com.apple.product-type.application"; + }; +/* End PBXNativeTarget section */ + +/* Begin PBXProject section */ + 33CC10E52044A3C60003C045 /* Project object */ = { + isa = PBXProject; + attributes = { + BuildIndependentTargetsInParallel = YES; + LastSwiftUpdateCheck = 0920; + LastUpgradeCheck = 1510; + ORGANIZATIONNAME = ""; + TargetAttributes = { + 331C80D4294CF70F00263BE5 = { + CreatedOnToolsVersion = 14.0; + TestTargetID = 33CC10EC2044A3C60003C045; + }; + 33CC10EC2044A3C60003C045 = { + CreatedOnToolsVersion = 9.2; + LastSwiftMigration = 1100; + ProvisioningStyle = Automatic; + SystemCapabilities = { + com.apple.Sandbox = { + enabled = 1; + }; + }; + }; + 33CC111A2044C6BA0003C045 = { + CreatedOnToolsVersion = 9.2; + ProvisioningStyle = Manual; + }; + }; + }; + buildConfigurationList = 33CC10E82044A3C60003C045 /* Build configuration list for PBXProject "Runner" */; +
compatibilityVersion = "Xcode 9.3"; + developmentRegion = en; + hasScannedForEncodings = 0; + knownRegions = ( + en, + Base, + ); + mainGroup = 33CC10E42044A3C60003C045; + productRefGroup = 33CC10EE2044A3C60003C045 /* Products */; + projectDirPath = ""; + projectRoot = ""; + targets = ( + 33CC10EC2044A3C60003C045 /* Runner */, + 331C80D4294CF70F00263BE5 /* RunnerTests */, + 33CC111A2044C6BA0003C045 /* Flutter Assemble */, + ); + }; +/* End PBXProject section */ + +/* Begin PBXResourcesBuildPhase section */ + 331C80D3294CF70F00263BE5 /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 33CC10EB2044A3C60003C045 /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 33CC10F32044A3C60003C045 /* Assets.xcassets in Resources */, + 33CC10F62044A3C60003C045 /* MainMenu.xib in Resources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXResourcesBuildPhase section */ + +/* Begin PBXShellScriptBuildPhase section */ + 3399D490228B24CF009A79C7 /* ShellScript */ = { + isa = PBXShellScriptBuildPhase; + alwaysOutOfDate = 1; + buildActionMask = 2147483647; + files = ( + ); + inputFileListPaths = ( + ); + inputPaths = ( + ); + outputFileListPaths = ( + ); + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "echo \"$PRODUCT_NAME.app\" > \"$PROJECT_DIR\"/Flutter/ephemeral/.app_filename && \"$FLUTTER_ROOT\"/packages/flutter_tools/bin/macos_assemble.sh embed\n"; + }; + 33CC111E2044C6BF0003C045 /* ShellScript */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputFileListPaths = ( + Flutter/ephemeral/FlutterInputs.xcfilelist, + ); + inputPaths = ( + Flutter/ephemeral/tripwire, + ); + outputFileListPaths = ( + Flutter/ephemeral/FlutterOutputs.xcfilelist, + ); + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "\"$FLUTTER_ROOT\"/packages/flutter_tools/bin/macos_assemble.sh && touch Flutter/ephemeral/tripwire"; + }; + 606087F40819902F660219E1 /* [CP] Check Pods Manifest.lock */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputFileListPaths = ( + ); + inputPaths = ( + "${PODS_PODFILE_DIR_PATH}/Podfile.lock", + "${PODS_ROOT}/Manifest.lock", + ); + name = "[CP] Check Pods Manifest.lock"; + outputFileListPaths = ( + ); + outputPaths = ( + "$(DERIVED_FILE_DIR)/Pods-RunnerTests-checkManifestLockResult.txt", + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "diff \"${PODS_PODFILE_DIR_PATH}/Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [ $? != 0 ] ; then\n # print error to STDERR\n echo \"error: The sandbox is not in sync with the Podfile.lock. 
Run 'pod install' or update your CocoaPods installation.\" >&2\n exit 1\nfi\n# This output is used by Xcode 'outputs' to avoid re-running this script phase.\necho \"SUCCESS\" > \"${SCRIPT_OUTPUT_FILE_0}\"\n"; + showEnvVarsInLog = 0; + }; + 6626714A5E90C2AA2176B076 /* [CP] Check Pods Manifest.lock */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputFileListPaths = ( + ); + inputPaths = ( + "${PODS_PODFILE_DIR_PATH}/Podfile.lock", + "${PODS_ROOT}/Manifest.lock", + ); + name = "[CP] Check Pods Manifest.lock"; + outputFileListPaths = ( + ); + outputPaths = ( + "$(DERIVED_FILE_DIR)/Pods-Runner-checkManifestLockResult.txt", + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "diff \"${PODS_PODFILE_DIR_PATH}/Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [ $? != 0 ] ; then\n # print error to STDERR\n echo \"error: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\" >&2\n exit 1\nfi\n# This output is used by Xcode 'outputs' to avoid re-running this script phase.\necho \"SUCCESS\" > \"${SCRIPT_OUTPUT_FILE_0}\"\n"; + showEnvVarsInLog = 0; + }; + A10F8989352E99DAE63B1E57 /* [CP] Embed Pods Frameworks */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputFileListPaths = ( + "${PODS_ROOT}/Target Support Files/Pods-Runner/Pods-Runner-frameworks-${CONFIGURATION}-input-files.xcfilelist", + ); + name = "[CP] Embed Pods Frameworks"; + outputFileListPaths = ( + "${PODS_ROOT}/Target Support Files/Pods-Runner/Pods-Runner-frameworks-${CONFIGURATION}-output-files.xcfilelist", + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "\"${PODS_ROOT}/Target Support Files/Pods-Runner/Pods-Runner-frameworks.sh\"\n"; + showEnvVarsInLog = 0; + }; +/* End PBXShellScriptBuildPhase section */ + +/* Begin PBXSourcesBuildPhase section */ + 331C80D1294CF70F00263BE5 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 331C80D8294CF71000263BE5 /* RunnerTests.swift in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 33CC10E92044A3C60003C045 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 33CC11132044BFA00003C045 /* MainFlutterWindow.swift in Sources */, + 33CC10F12044A3C60003C045 /* AppDelegate.swift in Sources */, + 335BBD1B22A9A15E00E9071D /* GeneratedPluginRegistrant.swift in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXSourcesBuildPhase section */ + +/* Begin PBXTargetDependency section */ + 331C80DA294CF71000263BE5 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 33CC10EC2044A3C60003C045 /* Runner */; + targetProxy = 331C80D9294CF71000263BE5 /* PBXContainerItemProxy */; + }; + 33CC11202044C79F0003C045 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 33CC111A2044C6BA0003C045 /* Flutter Assemble */; + targetProxy = 33CC111F2044C79F0003C045 /* PBXContainerItemProxy */; + }; +/* End PBXTargetDependency section */ + +/* Begin PBXVariantGroup section */ + 33CC10F42044A3C60003C045 /* MainMenu.xib */ = { + isa = PBXVariantGroup; + children = ( + 33CC10F52044A3C60003C045 /* Base */, + ); + name = MainMenu.xib; + path = Runner; + sourceTree = ""; + }; +/* End PBXVariantGroup section */ + +/* Begin XCBuildConfiguration section */ + 331C80DB294CF71000263BE5 /* Debug */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 
600836A9D977B61D9FC24614 /* Pods-RunnerTests.debug.xcconfig */; + buildSettings = { + BUNDLE_LOADER = "$(TEST_HOST)"; + CURRENT_PROJECT_VERSION = 1; + GENERATE_INFOPLIST_FILE = YES; + MARKETING_VERSION = 1.0; + PRODUCT_BUNDLE_IDENTIFIER = com.csdcorp.app.audioPlayerInteraction.RunnerTests; + PRODUCT_NAME = "$(TARGET_NAME)"; + SWIFT_VERSION = 5.0; + TEST_HOST = "$(BUILT_PRODUCTS_DIR)/audio_player_interaction.app/$(BUNDLE_EXECUTABLE_FOLDER_PATH)/audio_player_interaction"; + }; + name = Debug; + }; + 331C80DC294CF71000263BE5 /* Release */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 8E8162639F1DC8BDFD050C0A /* Pods-RunnerTests.release.xcconfig */; + buildSettings = { + BUNDLE_LOADER = "$(TEST_HOST)"; + CURRENT_PROJECT_VERSION = 1; + GENERATE_INFOPLIST_FILE = YES; + MARKETING_VERSION = 1.0; + PRODUCT_BUNDLE_IDENTIFIER = com.csdcorp.app.audioPlayerInteraction.RunnerTests; + PRODUCT_NAME = "$(TARGET_NAME)"; + SWIFT_VERSION = 5.0; + TEST_HOST = "$(BUILT_PRODUCTS_DIR)/audio_player_interaction.app/$(BUNDLE_EXECUTABLE_FOLDER_PATH)/audio_player_interaction"; + }; + name = Release; + }; + 331C80DD294CF71000263BE5 /* Profile */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 524A6E47A5FD65FC97C4B86C /* Pods-RunnerTests.profile.xcconfig */; + buildSettings = { + BUNDLE_LOADER = "$(TEST_HOST)"; + CURRENT_PROJECT_VERSION = 1; + GENERATE_INFOPLIST_FILE = YES; + MARKETING_VERSION = 1.0; + PRODUCT_BUNDLE_IDENTIFIER = com.csdcorp.app.audioPlayerInteraction.RunnerTests; + PRODUCT_NAME = "$(TARGET_NAME)"; + SWIFT_VERSION = 5.0; + TEST_HOST = "$(BUILT_PRODUCTS_DIR)/audio_player_interaction.app/$(BUNDLE_EXECUTABLE_FOLDER_PATH)/audio_player_interaction"; + }; + name = Profile; + }; + 338D0CE9231458BD00FA5F75 /* Profile */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 7AFA3C8E1D35360C0083082E /* Release.xcconfig */; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + ASSETCATALOG_COMPILER_GENERATE_SWIFT_ASSET_SYMBOL_EXTENSIONS = YES; + CLANG_ANALYZER_NONNULL = YES; + CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++14"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; + CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CODE_SIGN_IDENTITY = "-"; + COPY_PHASE_STRIP = NO; + DEAD_CODE_STRIPPING = YES; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + ENABLE_NS_ASSERTIONS = NO; + ENABLE_STRICT_OBJC_MSGSEND = YES; + ENABLE_USER_SCRIPT_SANDBOXING = NO; + GCC_C_LANGUAGE_STANDARD = gnu11; + GCC_NO_COMMON_BLOCKS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + MACOSX_DEPLOYMENT_TARGET = 10.14; + MTL_ENABLE_DEBUG_INFO = NO; + SDKROOT = macosx; + SWIFT_COMPILATION_MODE = wholemodule; + SWIFT_OPTIMIZATION_LEVEL = "-O"; + }; + name = Profile; + }; + 
338D0CEA231458BD00FA5F75 /* Profile */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 33E5194F232828860026EE4D /* AppInfo.xcconfig */; + buildSettings = { + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + CLANG_ENABLE_MODULES = YES; + CODE_SIGN_ENTITLEMENTS = Runner/DebugProfile.entitlements; + CODE_SIGN_STYLE = Automatic; + COMBINE_HIDPI_IMAGES = YES; + INFOPLIST_FILE = Runner/Info.plist; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/../Frameworks", + ); + MACOSX_DEPLOYMENT_TARGET = 10.15; + PROVISIONING_PROFILE_SPECIFIER = ""; + SWIFT_VERSION = 5.0; + }; + name = Profile; + }; + 338D0CEB231458BD00FA5F75 /* Profile */ = { + isa = XCBuildConfiguration; + buildSettings = { + CODE_SIGN_STYLE = Manual; + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Profile; + }; + 33CC10F92044A3C60003C045 /* Debug */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 9740EEB21CF90195004384FC /* Debug.xcconfig */; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + ASSETCATALOG_COMPILER_GENERATE_SWIFT_ASSET_SYMBOL_EXTENSIONS = YES; + CLANG_ANALYZER_NONNULL = YES; + CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++14"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; + CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CODE_SIGN_IDENTITY = "-"; + COPY_PHASE_STRIP = NO; + DEAD_CODE_STRIPPING = YES; + DEBUG_INFORMATION_FORMAT = dwarf; + ENABLE_STRICT_OBJC_MSGSEND = YES; + ENABLE_TESTABILITY = YES; + ENABLE_USER_SCRIPT_SANDBOXING = NO; + GCC_C_LANGUAGE_STANDARD = gnu11; + GCC_DYNAMIC_NO_PIC = NO; + GCC_NO_COMMON_BLOCKS = YES; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + MACOSX_DEPLOYMENT_TARGET = 10.14; + MTL_ENABLE_DEBUG_INFO = YES; + ONLY_ACTIVE_ARCH = YES; + SDKROOT = macosx; + SWIFT_ACTIVE_COMPILATION_CONDITIONS = DEBUG; + SWIFT_OPTIMIZATION_LEVEL = "-Onone"; + }; + name = Debug; + }; + 33CC10FA2044A3C60003C045 /* Release */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 7AFA3C8E1D35360C0083082E /* Release.xcconfig */; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + ASSETCATALOG_COMPILER_GENERATE_SWIFT_ASSET_SYMBOL_EXTENSIONS = YES; + CLANG_ANALYZER_NONNULL = YES; + CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++14"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + 
CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; + CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CODE_SIGN_IDENTITY = "-"; + COPY_PHASE_STRIP = NO; + DEAD_CODE_STRIPPING = YES; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + ENABLE_NS_ASSERTIONS = NO; + ENABLE_STRICT_OBJC_MSGSEND = YES; + ENABLE_USER_SCRIPT_SANDBOXING = NO; + GCC_C_LANGUAGE_STANDARD = gnu11; + GCC_NO_COMMON_BLOCKS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + MACOSX_DEPLOYMENT_TARGET = 10.14; + MTL_ENABLE_DEBUG_INFO = NO; + SDKROOT = macosx; + SWIFT_COMPILATION_MODE = wholemodule; + SWIFT_OPTIMIZATION_LEVEL = "-O"; + }; + name = Release; + }; + 33CC10FC2044A3C60003C045 /* Debug */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 33E5194F232828860026EE4D /* AppInfo.xcconfig */; + buildSettings = { + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + CLANG_ENABLE_MODULES = YES; + CODE_SIGN_ENTITLEMENTS = Runner/DebugProfile.entitlements; + CODE_SIGN_STYLE = Automatic; + COMBINE_HIDPI_IMAGES = YES; + INFOPLIST_FILE = Runner/Info.plist; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/../Frameworks", + ); + MACOSX_DEPLOYMENT_TARGET = 10.15; + PROVISIONING_PROFILE_SPECIFIER = ""; + SWIFT_OPTIMIZATION_LEVEL = "-Onone"; + SWIFT_VERSION = 5.0; + }; + name = Debug; + }; + 33CC10FD2044A3C60003C045 /* Release */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 33E5194F232828860026EE4D /* AppInfo.xcconfig */; + buildSettings = { + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + CLANG_ENABLE_MODULES = YES; + CODE_SIGN_ENTITLEMENTS = Runner/Release.entitlements; + CODE_SIGN_STYLE = Automatic; + COMBINE_HIDPI_IMAGES = YES; + INFOPLIST_FILE = Runner/Info.plist; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/../Frameworks", + ); + MACOSX_DEPLOYMENT_TARGET = 10.15; + PROVISIONING_PROFILE_SPECIFIER = ""; + SWIFT_VERSION = 5.0; + }; + name = Release; + }; + 33CC111C2044C6BA0003C045 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + CODE_SIGN_STYLE = Manual; + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 33CC111D2044C6BA0003C045 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + CODE_SIGN_STYLE = Automatic; + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; +/* End XCBuildConfiguration section */ + +/* Begin XCConfigurationList section */ + 331C80DE294CF71000263BE5 /* Build configuration list for PBXNativeTarget "RunnerTests" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 331C80DB294CF71000263BE5 /* Debug */, + 331C80DC294CF71000263BE5 /* Release */, + 331C80DD294CF71000263BE5 /* Profile */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 33CC10E82044A3C60003C045 /* Build configuration list for PBXProject "Runner" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 33CC10F92044A3C60003C045 /* Debug */, + 33CC10FA2044A3C60003C045 /* Release */, + 338D0CE9231458BD00FA5F75 /* Profile */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 33CC10FB2044A3C60003C045 /* Build configuration list for 
PBXNativeTarget "Runner" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 33CC10FC2044A3C60003C045 /* Debug */, + 33CC10FD2044A3C60003C045 /* Release */, + 338D0CEA231458BD00FA5F75 /* Profile */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 33CC111B2044C6BA0003C045 /* Build configuration list for PBXAggregateTarget "Flutter Assemble" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 33CC111C2044C6BA0003C045 /* Debug */, + 33CC111D2044C6BA0003C045 /* Release */, + 338D0CEB231458BD00FA5F75 /* Profile */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; +/* End XCConfigurationList section */ + }; + rootObject = 33CC10E52044A3C60003C045 /* Project object */; +} diff --git a/examples/audio_player_interaction/macos/Runner.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist b/examples/audio_player_interaction/macos/Runner.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist new file mode 100644 index 00000000..18d98100 --- /dev/null +++ b/examples/audio_player_interaction/macos/Runner.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist @@ -0,0 +1,8 @@ + + + + + IDEDidComputeMac32BitWarning + + + diff --git a/examples/audio_player_interaction/macos/Runner.xcodeproj/xcshareddata/xcschemes/Runner.xcscheme b/examples/audio_player_interaction/macos/Runner.xcodeproj/xcshareddata/xcschemes/Runner.xcscheme new file mode 100644 index 00000000..dce5d08a --- /dev/null +++ b/examples/audio_player_interaction/macos/Runner.xcodeproj/xcshareddata/xcschemes/Runner.xcscheme @@ -0,0 +1,98 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/examples/audio_player_interaction/macos/Runner.xcworkspace/contents.xcworkspacedata b/examples/audio_player_interaction/macos/Runner.xcworkspace/contents.xcworkspacedata new file mode 100644 index 00000000..21a3cc14 --- /dev/null +++ b/examples/audio_player_interaction/macos/Runner.xcworkspace/contents.xcworkspacedata @@ -0,0 +1,10 @@ + + + + + + + diff --git a/examples/audio_player_interaction/macos/Runner.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist b/examples/audio_player_interaction/macos/Runner.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist new file mode 100644 index 00000000..18d98100 --- /dev/null +++ b/examples/audio_player_interaction/macos/Runner.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist @@ -0,0 +1,8 @@ + + + + + IDEDidComputeMac32BitWarning + + + diff --git a/examples/audio_player_interaction/macos/Runner/AppDelegate.swift b/examples/audio_player_interaction/macos/Runner/AppDelegate.swift new file mode 100644 index 00000000..d53ef643 --- /dev/null +++ b/examples/audio_player_interaction/macos/Runner/AppDelegate.swift @@ -0,0 +1,9 @@ +import Cocoa +import FlutterMacOS + +@NSApplicationMain +class AppDelegate: FlutterAppDelegate { + override func applicationShouldTerminateAfterLastWindowClosed(_ sender: NSApplication) -> Bool { + return true + } +} diff --git a/examples/audio_player_interaction/macos/Runner/Assets.xcassets/AppIcon.appiconset/Contents.json b/examples/audio_player_interaction/macos/Runner/Assets.xcassets/AppIcon.appiconset/Contents.json new file mode 100644 index 00000000..a2ec33f1 --- /dev/null +++ b/examples/audio_player_interaction/macos/Runner/Assets.xcassets/AppIcon.appiconset/Contents.json @@ -0,0 +1,68 @@ +{ + "images" : [ + { + "size" : "16x16", + "idiom" : "mac", + "filename" : "app_icon_16.png", + "scale" : "1x" + }, + { + "size" : 
"16x16", + "idiom" : "mac", + "filename" : "app_icon_32.png", + "scale" : "2x" + }, + { + "size" : "32x32", + "idiom" : "mac", + "filename" : "app_icon_32.png", + "scale" : "1x" + }, + { + "size" : "32x32", + "idiom" : "mac", + "filename" : "app_icon_64.png", + "scale" : "2x" + }, + { + "size" : "128x128", + "idiom" : "mac", + "filename" : "app_icon_128.png", + "scale" : "1x" + }, + { + "size" : "128x128", + "idiom" : "mac", + "filename" : "app_icon_256.png", + "scale" : "2x" + }, + { + "size" : "256x256", + "idiom" : "mac", + "filename" : "app_icon_256.png", + "scale" : "1x" + }, + { + "size" : "256x256", + "idiom" : "mac", + "filename" : "app_icon_512.png", + "scale" : "2x" + }, + { + "size" : "512x512", + "idiom" : "mac", + "filename" : "app_icon_512.png", + "scale" : "1x" + }, + { + "size" : "512x512", + "idiom" : "mac", + "filename" : "app_icon_1024.png", + "scale" : "2x" + } + ], + "info" : { + "version" : 1, + "author" : "xcode" + } +} diff --git a/examples/audio_player_interaction/macos/Runner/Assets.xcassets/AppIcon.appiconset/app_icon_1024.png b/examples/audio_player_interaction/macos/Runner/Assets.xcassets/AppIcon.appiconset/app_icon_1024.png new file mode 100644 index 00000000..82b6f9d9 Binary files /dev/null and b/examples/audio_player_interaction/macos/Runner/Assets.xcassets/AppIcon.appiconset/app_icon_1024.png differ diff --git a/examples/audio_player_interaction/macos/Runner/Assets.xcassets/AppIcon.appiconset/app_icon_128.png b/examples/audio_player_interaction/macos/Runner/Assets.xcassets/AppIcon.appiconset/app_icon_128.png new file mode 100644 index 00000000..13b35eba Binary files /dev/null and b/examples/audio_player_interaction/macos/Runner/Assets.xcassets/AppIcon.appiconset/app_icon_128.png differ diff --git a/examples/audio_player_interaction/macos/Runner/Assets.xcassets/AppIcon.appiconset/app_icon_16.png b/examples/audio_player_interaction/macos/Runner/Assets.xcassets/AppIcon.appiconset/app_icon_16.png new file mode 100644 index 00000000..0a3f5fa4 Binary files /dev/null and b/examples/audio_player_interaction/macos/Runner/Assets.xcassets/AppIcon.appiconset/app_icon_16.png differ diff --git a/examples/audio_player_interaction/macos/Runner/Assets.xcassets/AppIcon.appiconset/app_icon_256.png b/examples/audio_player_interaction/macos/Runner/Assets.xcassets/AppIcon.appiconset/app_icon_256.png new file mode 100644 index 00000000..bdb57226 Binary files /dev/null and b/examples/audio_player_interaction/macos/Runner/Assets.xcassets/AppIcon.appiconset/app_icon_256.png differ diff --git a/examples/audio_player_interaction/macos/Runner/Assets.xcassets/AppIcon.appiconset/app_icon_32.png b/examples/audio_player_interaction/macos/Runner/Assets.xcassets/AppIcon.appiconset/app_icon_32.png new file mode 100644 index 00000000..f083318e Binary files /dev/null and b/examples/audio_player_interaction/macos/Runner/Assets.xcassets/AppIcon.appiconset/app_icon_32.png differ diff --git a/examples/audio_player_interaction/macos/Runner/Assets.xcassets/AppIcon.appiconset/app_icon_512.png b/examples/audio_player_interaction/macos/Runner/Assets.xcassets/AppIcon.appiconset/app_icon_512.png new file mode 100644 index 00000000..326c0e72 Binary files /dev/null and b/examples/audio_player_interaction/macos/Runner/Assets.xcassets/AppIcon.appiconset/app_icon_512.png differ diff --git a/examples/audio_player_interaction/macos/Runner/Assets.xcassets/AppIcon.appiconset/app_icon_64.png b/examples/audio_player_interaction/macos/Runner/Assets.xcassets/AppIcon.appiconset/app_icon_64.png new file mode 100644 
index 00000000..2f1632cf Binary files /dev/null and b/examples/audio_player_interaction/macos/Runner/Assets.xcassets/AppIcon.appiconset/app_icon_64.png differ diff --git a/examples/audio_player_interaction/macos/Runner/Base.lproj/MainMenu.xib b/examples/audio_player_interaction/macos/Runner/Base.lproj/MainMenu.xib new file mode 100644 index 00000000..80e867a4 --- /dev/null +++ b/examples/audio_player_interaction/macos/Runner/Base.lproj/MainMenu.xib @@ -0,0 +1,343 @@ +[343 lines of the standard Flutter-generated MainMenu.xib XML (main menu and window nib for the Runner app); not recoverable here] diff --git a/examples/audio_player_interaction/macos/Runner/Configs/AppInfo.xcconfig b/examples/audio_player_interaction/macos/Runner/Configs/AppInfo.xcconfig new file mode 100644 index 00000000..7b6c05a9 --- /dev/null +++ b/examples/audio_player_interaction/macos/Runner/Configs/AppInfo.xcconfig @@ -0,0 +1,14 @@ +// Application-level settings for the Runner target. +// +// This may be replaced with something auto-generated from metadata (e.g., pubspec.yaml) in the +// future. If not, the values below would default to using the project name when this becomes a +// 'flutter create' template. + +// The application's name. By default this is also the title of the Flutter window. +PRODUCT_NAME = audio_player_interaction + +// The application's bundle identifier +PRODUCT_BUNDLE_IDENTIFIER = com.csdcorp.app.audioPlayerInteraction + +// The copyright displayed in application information +PRODUCT_COPYRIGHT = Copyright © 2024 com.csdcorp.app. All rights reserved.
diff --git a/examples/audio_player_interaction/macos/Runner/Configs/Debug.xcconfig b/examples/audio_player_interaction/macos/Runner/Configs/Debug.xcconfig new file mode 100644 index 00000000..36b0fd94 --- /dev/null +++ b/examples/audio_player_interaction/macos/Runner/Configs/Debug.xcconfig @@ -0,0 +1,2 @@ +#include "../../Flutter/Flutter-Debug.xcconfig" +#include "Warnings.xcconfig" diff --git a/examples/audio_player_interaction/macos/Runner/Configs/Release.xcconfig b/examples/audio_player_interaction/macos/Runner/Configs/Release.xcconfig new file mode 100644 index 00000000..dff4f495 --- /dev/null +++ b/examples/audio_player_interaction/macos/Runner/Configs/Release.xcconfig @@ -0,0 +1,2 @@ +#include "../../Flutter/Flutter-Release.xcconfig" +#include "Warnings.xcconfig" diff --git a/examples/audio_player_interaction/macos/Runner/Configs/Warnings.xcconfig b/examples/audio_player_interaction/macos/Runner/Configs/Warnings.xcconfig new file mode 100644 index 00000000..42bcbf47 --- /dev/null +++ b/examples/audio_player_interaction/macos/Runner/Configs/Warnings.xcconfig @@ -0,0 +1,13 @@ +WARNING_CFLAGS = -Wall -Wconditional-uninitialized -Wnullable-to-nonnull-conversion -Wmissing-method-return-type -Woverlength-strings +GCC_WARN_UNDECLARED_SELECTOR = YES +CLANG_UNDEFINED_BEHAVIOR_SANITIZER_NULLABILITY = YES +CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE +CLANG_WARN__DUPLICATE_METHOD_MATCH = YES +CLANG_WARN_PRAGMA_PACK = YES +CLANG_WARN_STRICT_PROTOTYPES = YES +CLANG_WARN_COMMA = YES +GCC_WARN_STRICT_SELECTOR_MATCH = YES +CLANG_WARN_OBJC_REPEATED_USE_OF_WEAK = YES +CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES +GCC_WARN_SHADOW = YES +CLANG_WARN_UNREACHABLE_CODE = YES diff --git a/examples/audio_player_interaction/macos/Runner/DebugProfile.entitlements b/examples/audio_player_interaction/macos/Runner/DebugProfile.entitlements new file mode 100644 index 00000000..aa7828c7 --- /dev/null +++ b/examples/audio_player_interaction/macos/Runner/DebugProfile.entitlements @@ -0,0 +1,14 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> +<plist version="1.0"> +<dict> + <key>com.apple.security.app-sandbox</key> + <true/> + <key>com.apple.security.cs.allow-jit</key> + <true/> + <key>com.apple.security.device.audio-input</key> + <true/> + <key>com.apple.security.network.server</key> + <true/> +</dict> +</plist> diff --git a/examples/audio_player_interaction/macos/Runner/Info.plist b/examples/audio_player_interaction/macos/Runner/Info.plist new file mode 100644 index 00000000..f673fb89 --- /dev/null +++ b/examples/audio_player_interaction/macos/Runner/Info.plist @@ -0,0 +1,36 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> +<plist version="1.0"> +<dict> + <key>CFBundleDevelopmentRegion</key> + <string>$(DEVELOPMENT_LANGUAGE)</string> + <key>CFBundleExecutable</key> + <string>$(EXECUTABLE_NAME)</string> + <key>CFBundleIconFile</key> + <string></string> + <key>CFBundleIdentifier</key> + <string>$(PRODUCT_BUNDLE_IDENTIFIER)</string> + <key>CFBundleInfoDictionaryVersion</key> + <string>6.0</string> + <key>CFBundleName</key> + <string>$(PRODUCT_NAME)</string> + <key>CFBundlePackageType</key> + <string>APPL</string> + <key>CFBundleShortVersionString</key> + <string>$(FLUTTER_BUILD_NAME)</string> + <key>CFBundleVersion</key> + <string>$(FLUTTER_BUILD_NUMBER)</string> + <key>LSMinimumSystemVersion</key> + <string>$(MACOSX_DEPLOYMENT_TARGET)</string> + <key>NSHumanReadableCopyright</key> + <string>$(PRODUCT_COPYRIGHT)</string> + <key>NSMainNibFile</key> + <string>MainMenu</string> + <key>NSPrincipalClass</key> + <string>NSApplication</string> + <key>NSMicrophoneUsageDescription</key> + <string>The app uses the microphone to allow you to use your voice to respond to notifications and prompts without having to touch the screen.</string> + <key>NSSpeechRecognitionUsageDescription</key> + <string>The app uses speech recognition to listen for and understand your voice responses to notifications and prompts.</string>
+</dict> +</plist> diff --git a/examples/audio_player_interaction/macos/Runner/MainFlutterWindow.swift b/examples/audio_player_interaction/macos/Runner/MainFlutterWindow.swift new file mode 100644 index 00000000..3cc05eb2 --- /dev/null +++ b/examples/audio_player_interaction/macos/Runner/MainFlutterWindow.swift @@ -0,0 +1,15 @@ +import Cocoa +import FlutterMacOS + +class MainFlutterWindow: NSWindow { + override func awakeFromNib() { + let flutterViewController = FlutterViewController() + let windowFrame = self.frame + self.contentViewController = flutterViewController + self.setFrame(windowFrame, display: true) + + RegisterGeneratedPlugins(registry: flutterViewController) + + super.awakeFromNib() + } +} diff --git a/examples/audio_player_interaction/macos/Runner/Release.entitlements b/examples/audio_player_interaction/macos/Runner/Release.entitlements new file mode 100644 index 00000000..f29279b3 --- /dev/null +++ b/examples/audio_player_interaction/macos/Runner/Release.entitlements @@ -0,0 +1,10 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> +<plist version="1.0"> +<dict> + <key>com.apple.security.app-sandbox</key> + <true/> + <key>com.apple.security.device.audio-input</key> + <true/> +</dict> +</plist> diff --git a/examples/audio_player_interaction/macos/RunnerTests/RunnerTests.swift b/examples/audio_player_interaction/macos/RunnerTests/RunnerTests.swift new file mode 100644 index 00000000..5418c9f5 --- /dev/null +++ b/examples/audio_player_interaction/macos/RunnerTests/RunnerTests.swift @@ -0,0 +1,12 @@ +import FlutterMacOS +import Cocoa +import XCTest + +class RunnerTests: XCTestCase { + + func testExample() { + // If you add code to the Runner application, consider adding tests here. + // See https://developer.apple.com/documentation/xctest for more information about using XCTest. + } + +} diff --git a/examples/audio_player_interaction/pubspec.lock b/examples/audio_player_interaction/pubspec.lock index efed5058..d97f8a8b 100644 --- a/examples/audio_player_interaction/pubspec.lock +++ b/examples/audio_player_interaction/pubspec.lock @@ -13,58 +13,58 @@ packages: dependency: "direct main" description: name: audioplayers - sha256: c05c6147124cd63e725e861335a8b4d57300b80e6e92cea7c145c739223bbaef + sha256: "752039d6aa752597c98ec212e9759519061759e402e7da59a511f39d43aa07d2" url: "https://pub.dev" source: hosted - version: "5.2.1" + version: "6.0.0" audioplayers_android: dependency: transitive description: name: audioplayers_android - sha256: b00e1a0e11365d88576320ec2d8c192bc21f1afb6c0e5995d1c57ae63156acb5 + sha256: de576b890befe27175c2f511ba8b742bec83765fa97c3ce4282bba46212f58e4 url: "https://pub.dev" source: hosted - version: "4.0.3" + version: "5.0.0" audioplayers_darwin: dependency: transitive description: name: audioplayers_darwin - sha256: "3034e99a6df8d101da0f5082dcca0a2a99db62ab1d4ddb3277bed3f6f81afe08" + sha256: e507887f3ff18d8e5a10a668d7bedc28206b12e10b98347797257c6ae1019c3b url: "https://pub.dev" source: hosted - version: "5.0.2" + version: "6.0.0" audioplayers_linux: dependency: transitive description: name: audioplayers_linux - sha256: "60787e73fefc4d2e0b9c02c69885402177e818e4e27ef087074cf27c02246c9e" + sha256: "3d3d244c90436115417f170426ce768856d8fe4dfc5ed66a049d2890acfa82f9" url: "https://pub.dev" source: hosted - version: "3.1.0" + version: "4.0.0" audioplayers_platform_interface: dependency: transitive description: name: audioplayers_platform_interface - sha256: "365c547f1bb9e77d94dd1687903a668d8f7ac3409e48e6e6a3668a1ac2982adb" + sha256: "6834dd48dfb7bc6c2404998ebdd161f79cd3774a7e6779e1348d54a3bfdcfaa5" url: "https://pub.dev" source: hosted - version: "6.1.0" + version: "7.0.0" audioplayers_web: dependency:
transitive description: name: audioplayers_web - sha256: "22cd0173e54d92bd9b2c80b1204eb1eb159ece87475ab58c9788a70ec43c2a62" + sha256: db8fc420dadf80da18e2286c18e746fb4c3b2c5adbf0c963299dde046828886d url: "https://pub.dev" source: hosted - version: "4.1.0" + version: "5.0.0" audioplayers_windows: dependency: transitive description: name: audioplayers_windows - sha256: "9536812c9103563644ada2ef45ae523806b0745f7a78e89d1b5fb1951de90e1a" + sha256: "8605762dddba992138d476f6a0c3afd9df30ac5b96039929063eceed416795c2" url: "https://pub.dev" source: hosted - version: "3.1.0" + version: "4.0.0" boolean_selector: dependency: transitive description: @@ -156,10 +156,10 @@ packages: dependency: transitive description: name: http - sha256: a2bbf9d017fcced29139daa8ed2bba4ece450ab222871df93ca9eec6f80c34ba + sha256: "761a297c042deedc1ffbb156d6e2af13886bb305c2a343a4d972504cd67dd938" url: "https://pub.dev" source: hosted - version: "1.2.0" + version: "1.2.1" http_parser: dependency: transitive description: @@ -172,10 +172,10 @@ packages: dependency: transitive description: name: js - sha256: f2c445dce49627136094980615a031419f7f3eb393237e4ecd97ac15dea343f3 + sha256: c1b2e9b5ea78c45e1a0788d29606ba27dc5f71f019f32ca5140f61ef071838cf url: "https://pub.dev" source: hosted - version: "0.6.7" + version: "0.7.1" json_annotation: dependency: transitive description: @@ -184,38 +184,62 @@ packages: url: "https://pub.dev" source: hosted version: "4.8.1" + leak_tracker: + dependency: transitive + description: + name: leak_tracker + sha256: "78eb209deea09858f5269f5a5b02be4049535f568c07b275096836f01ea323fa" + url: "https://pub.dev" + source: hosted + version: "10.0.0" + leak_tracker_flutter_testing: + dependency: transitive + description: + name: leak_tracker_flutter_testing + sha256: b46c5e37c19120a8a01918cfaf293547f47269f7cb4b0058f21531c2465d6ef0 + url: "https://pub.dev" + source: hosted + version: "2.0.1" + leak_tracker_testing: + dependency: transitive + description: + name: leak_tracker_testing + sha256: a597f72a664dbd293f3bfc51f9ba69816f84dcd403cdac7066cb3f6003f3ab47 + url: "https://pub.dev" + source: hosted + version: "2.0.1" matcher: dependency: transitive description: name: matcher - sha256: "1803e76e6653768d64ed8ff2e1e67bea3ad4b923eb5c56a295c3e634bad5960e" + sha256: d2323aa2060500f906aa31a895b4030b6da3ebdcc5619d14ce1aada65cd161cb url: "https://pub.dev" source: hosted - version: "0.12.16" + version: "0.12.16+1" material_color_utilities: dependency: transitive description: name: material_color_utilities - sha256: "9528f2f296073ff54cb9fee677df673ace1218163c3bc7628093e7eed5203d41" + sha256: "0e0a020085b65b6083975e499759762399b4475f766c21668c4ecca34ea74e5a" url: "https://pub.dev" source: hosted - version: "0.5.0" + version: "0.8.0" meta: dependency: transitive description: name: meta - sha256: a6e590c838b18133bb482a2745ad77c5bb7715fb0451209e1a7567d416678b8e + sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 url: "https://pub.dev" source: hosted - version: "1.10.0" + version: "1.11.0" path: dependency: transitive description: name: path - sha256: "8829d8a55c13fc0e37127c29fedf290c102f4e40ae94ada574091fe0ff96c917" + sha256: "087ce49c3f0dc39180befefc60fdb4acd8f8620e5682fe2476afd0b3688bb4af" url: "https://pub.dev" source: hosted - version: "1.8.3" + version: "1.9.0" path_provider: dependency: transitive description: @@ -307,15 +331,7 @@ packages: path: "../../speech_to_text" relative: true source: path - version: "6.6.0-dev" - speech_to_text_macos: - dependency: transitive - description: - name: 
speech_to_text_macos - sha256: e685750f7542fcaa087a5396ee471e727ec648bf681f4da83c84d086322173f6 - url: "https://pub.dev" - source: hosted - version: "1.1.0" + version: "6.6.2" speech_to_text_platform_interface: dependency: transitive description: @@ -404,14 +420,22 @@ packages: url: "https://pub.dev" source: hosted version: "2.1.4" + vm_service: + dependency: transitive + description: + name: vm_service + sha256: b3d56ff4341b8f182b96aceb2fa20e3dcb336b9f867bc0eafc0de10f1048e957 + url: "https://pub.dev" + source: hosted + version: "13.0.0" web: dependency: transitive description: name: web - sha256: afe077240a270dcfd2aafe77602b4113645af95d0ad31128cc02bce5ac5d5152 + sha256: "97da13628db363c635202ad97068d47c5b8aa555808e7a9411963c533b449b27" url: "https://pub.dev" source: hosted - version: "0.3.0" + version: "0.5.1" win32: dependency: transitive description: @@ -429,5 +453,5 @@ packages: source: hosted version: "1.0.4" sdks: - dart: ">=3.2.0 <4.0.0" - flutter: ">=3.10.0" + dart: ">=3.3.0 <4.0.0" + flutter: ">=3.19.0" diff --git a/examples/audio_player_interaction/pubspec.yaml b/examples/audio_player_interaction/pubspec.yaml index dc7bee51..3a97c2c1 100644 --- a/examples/audio_player_interaction/pubspec.yaml +++ b/examples/audio_player_interaction/pubspec.yaml @@ -23,7 +23,7 @@ environment: dependencies: flutter: sdk: flutter - audioplayers: ^5.2.1 + audioplayers: ^6.0.0 # speech_to_text: ^4.2.1 speech_to_text: path: ../../speech_to_text diff --git a/speech_to_text/README.md b/speech_to_text/README.md index 4392b3f4..d685951e 100644 --- a/speech_to_text/README.md +++ b/speech_to_text/README.md @@ -4,17 +4,17 @@ A library that exposes device specific speech recognition capability. -This plugin contains a set of classes that make it easy to use the speech recognition -capabilities of the underlying platform in Flutter. It supports Android, iOS and web. The +This plugin contains a set of classes that make it easy to use the speech recognition +capabilities of the underlying platform in Flutter. It supports Android, iOS and web. The target use cases for this library are commands and short phrases, not continuous spoken -conversion or always on listening. +conversion or always on listening. ## Platform Support - -| Support | Android | iOS | MacOS | Web* | Linux | Windows | -| :-----: | :-----: | :-: | :---: | :-: | :---: | :-----: | -| build | ✅ | ✅ | ✅ | ✅ | ✘ | ✘ | -| speech | ✅ | ✅ | ✘ | ✅ | ✘ | ✘ | + +| Support | Android | iOS | MacOS | Web\* | Linux | Windows | +| :-----: | :-----: | :-: | :---: | :---: | :---: | :-----: | +| build | ✅ | ✅ | ✅ | ✅ | ✘ | ✘ | +| speech | ✅ | ✅ | ✅ | ✅ | ✘ | ✘ | _build: means you can build and run with the plugin on that platform_ @@ -25,17 +25,18 @@ _speech: means most speech recognition features work. Platforms with build but n ## Recent Updates 6.6.0 `listen` now uses 'SpeechListenOptions' to specify the options for the current listen session, including new -options for controlling haptics and punctuation during recognition on iOS. +options for controlling haptics and punctuation during recognition on iOS. -6.5.0 New `initialize` option to improve support for some mobile browsers, `SpeechToText.webDoNotAggregate`. Test the browser user agent to see if it should be used. +6.5.0 New `initialize` option to improve support for some mobile browsers, `SpeechToText.webDoNotAggregate`. Test the browser user agent to see if it should be used. -*Note*: Feedback from any test devices is welcome. +_Note_: Feedback from any test devices is welcome. 
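+
+For example, the two options above might be wired up like this. This is a minimal sketch, not the plugin's documented example: it assumes the `options` parameter on `initialize`, the `listenOptions` parameter on `listen`, and the `SpeechListenOptions` field names `autoPunctuation` and `enableHapticFeedback`, so check the API docs for your version before relying on it:
+
+```dart
+import 'package:flutter/foundation.dart' show debugPrint, kIsWeb;
+import 'package:speech_to_text/speech_to_text.dart';
+
+Future<void> startListening(SpeechToText speech) async {
+  // 6.5.0: on browsers that already aggregate partial results themselves
+  // (some mobile browsers), pass webDoNotAggregate at initialize time.
+  final available = await speech.initialize(
+    options: kIsWeb ? [SpeechToText.webDoNotAggregate] : [],
+  );
+  if (!available) return;
+  // 6.6.0: per-session settings are grouped in SpeechListenOptions.
+  await speech.listen(
+    onResult: (result) => debugPrint(result.recognizedWords),
+    listenOptions: SpeechListenOptions(
+      autoPunctuation: true, // punctuation during recognition on iOS 16+
+      enableHapticFeedback: true, // allow haptics while listening
+    ),
+  );
+}
+```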
## Using -To recognize text from the microphone import the package and call the plugin, like so: +To recognize text from the microphone import the package and call the plugin, like so: + +### Minimal -### Minimal ```dart import 'package:speech_to_text/speech_to_text.dart' as stt; @@ -52,6 +53,7 @@ import 'package:speech_to_text/speech_to_text.dart' as stt; ``` ### Complete Flutter example + ```dart import 'package:flutter/material.dart'; import 'package:speech_to_text/speech_recognition_result.dart'; @@ -169,56 +171,66 @@ class _MyHomePageState extends State<MyHomePage> { ### Example Apps -In the example directory you'll find a few different example apps that demonstrate how to use the -plugin. +In the example directory you'll find a few different example apps that demonstrate how to use the +plugin. #### Basic example ([example/lib/main.dart](https://github.com/csdcorp/speech_to_text/blob/main/speech_to_text/example/lib/main.dart)) -This shows how to initialize and use the plugin and allows many of the options to be set through -a simple UI. This is probably the first example to look at to understand how to use the plugin. + +This shows how to initialize and use the plugin and allows many of the options to be set through +a simple UI. This is probably the first example to look at to understand how to use the plugin. #### Provider example ([example/lib/provider_example.dart](https://github.com/csdcorp/speech_to_text/blob/main/speech_to_text/example/lib/provider_example.dart)) -If you are using the (Provider)[https://pub.dev/packages/provider] package in Flutter then this example shows how -to use the plugin as a provider throught the `SpeechToTextProvider` class. + +If you are using the [Provider](https://pub.dev/packages/provider) package in Flutter then this example shows how +to use the plugin as a provider through the `SpeechToTextProvider` class. #### Plugin stress test ([example/lib/stress.dart](https://github.com/csdcorp/speech_to_text/blob/main/speech_to_text/example/lib/stress.dart)) -The plugin opens and closes several platform resources as it is used. To help ensure that the plugin -does not leak resources this stress test loops through various operations to make it easier to -track resource usage. This is mostly an internal development tool so not as useful for reference -purposes. -#### Audio player interaction ([examples/audio_player_interaction/lib/main.dart](https://github.com/csdcorp/speech_to_text/blob/main/examples/audio_player_interaction/lib/main.dart)) -A common use case is to have this plugin and an audio playback plugin working together. This example shows one -way to make them work well together. You can find this in +The plugin opens and closes several platform resources as it is used. To help ensure that the plugin +does not leak resources this stress test loops through various operations to make it easier to +track resource usage. This is mostly an internal development tool so not as useful for reference +purposes. +#### Audio player interaction ([examples/audio_player_interaction/lib/main.dart](https://github.com/csdcorp/speech_to_text/blob/main/examples/audio_player_interaction/lib/main.dart)) +A common use case is to have this plugin and an audio playback plugin working together. This example shows one +way to make them work well together. You can find it in the `examples/audio_player_interaction` directory. ### Initialize once -The `initialize` method only needs to be called once per application session. After that `listen`, -`start`, `stop`, and `cancel` can be used to interact with the plugin.
Subsequent calls to `initialize` + +The `initialize` method only needs to be called once per application session. After that `listen`, +`start`, `stop`, and `cancel` can be used to interact with the plugin. Subsequent calls to `initialize` are ignored which is safe but does mean that the `onStatus` and `onError` callbacks cannot be reset after -the first call to `initialize`. For that reason there should be only one instance of the plugin per -application. The `SpeechToTextProvider` is one way to create a single instance and easily reuse it in -multiple widgets. +the first call to `initialize`. For that reason there should be only one instance of the plugin per +application. The `SpeechToTextProvider` is one way to create a single instance and easily reuse it in +multiple widgets. ## Permissions -Applications using this plugin require user permissions. -### iOS +Applications using this plugin require user permissions. + +### iOS & macOS Add the following keys to your _Info.plist_ file, located in `/ios/Runner/Info.plist` (and, for macOS, in `/macos/Runner/Info.plist`): -* `NSSpeechRecognitionUsageDescription` - describe why your app uses speech recognition. This is called _Privacy - Speech Recognition Usage Description_ in the visual editor. -* `NSMicrophoneUsageDescription` - describe why your app needs access to the microphone. This is called _Privacy - Microphone Usage Description_ in the visual editor. +- `NSSpeechRecognitionUsageDescription` - describe why your app uses speech recognition. This is called _Privacy - Speech Recognition Usage Description_ in the visual editor. +- `NSMicrophoneUsageDescription` - describe why your app needs access to the microphone. This is called _Privacy - Microphone Usage Description_ in the visual editor. + +### Additional Warning for macOS + +When running the macOS app through VS Code, the app will crash when requesting permissions. This is a known Flutter issue: https://github.com/flutter/flutter/issues/70374. + +You can only request permissions if you run the app directly from Xcode. ### Android Add the record audio permission to your _AndroidManifest.xml_ file, located in `/android/app/src/main/AndroidManifest.xml`. -* `android.permission.RECORD_AUDIO` - this permission is required for microphone access. -* `android.permission.INTERNET` - this permission is required because speech recognition may use remote services. -* `android.permission.BLUETOOTH` - this permission is required because speech recognition can use bluetooth headsets when connected. -* `android.permission.BLUETOOTH_ADMIN` - this permission is required because speech recognition can use bluetooth headsets when connected. -* `android.permission.BLUETOOTH_CONNECT` - this permission is required because speech recognition can use bluetooth headsets when connected. +- `android.permission.RECORD_AUDIO` - this permission is required for microphone access. +- `android.permission.INTERNET` - this permission is required because speech recognition may use remote services. +- `android.permission.BLUETOOTH` - this permission is required because speech recognition can use bluetooth headsets when connected. +- `android.permission.BLUETOOTH_ADMIN` - this permission is required because speech recognition can use bluetooth headsets when connected. +- `android.permission.BLUETOOTH_CONNECT` - this permission is required because speech recognition can use bluetooth headsets when connected.
```xml @@ -230,7 +242,7 @@ Add the record audio permission to your _AndroidManifest.xml_ file, located in ` #### Android SDK 30 or later -If you are targeting Android SDK, i.e. you set your `targetSDKVersion` to 30 or later, then you will need to add the following to your `AndroidManifest.xml` right after the permissions section. See the example app for the complete usage. +If you are targeting Android SDK 30 or later, i.e. you set your `targetSdkVersion` to 30 or later, then you will need to add the following to your `AndroidManifest.xml` right after the permissions section. See the example app for the complete usage. ```xml @@ -242,42 +254,45 @@ If you are targeting Android SDK, i.e. you set your `targetSDKVersion` to 30 or ## Adding Sounds for iOS (optional) -Android automatically plays system sounds when speech listening starts or stops but iOS does not. This plugin supports playing sounds to indicate listening status on iOS if sound files are available as assets in the application. To enable sounds in an application using this plugin add the sound files to the project and reference them in the assets section of the application `pubspec.yaml`. The location and filenames of the sound files must exactly match what -is shown below or they will not be found. The example application for the plugin shows the usage. *Note* These files should be very short as they delay -the start / end of the speech recognizer until the sound playback is complete. +Android automatically plays system sounds when speech listening starts or stops but iOS does not. This plugin supports playing sounds to indicate listening status on iOS if sound files are available as assets in the application. To enable sounds in an application using this plugin add the sound files to the project and reference them in the assets section of the application `pubspec.yaml`. The location and filenames of the sound files must exactly match what +is shown below or they will not be found. The example application for the plugin shows the usage. _Note_: These files should be very short as they delay +the start / end of the speech recognizer until the sound playback is complete. + ```yaml - assets: +assets: - assets/sounds/speech_to_text_listening.m4r - assets/sounds/speech_to_text_cancel.m4r - assets/sounds/speech_to_text_stop.m4r ``` -* `speech_to_text_listening.m4r` - played when the listen method is called. -* `speech_to_text_cancel.m4r` - played when the cancel method is called. -* `speech_to_text_stop.m4r` - played when the stop method is called. +- `speech_to_text_listening.m4r` - played when the listen method is called. +- `speech_to_text_cancel.m4r` - played when the cancel method is called. +- `speech_to_text_stop.m4r` - played when the stop method is called. ## Tips ### Switching Recognition Language + -The speech_to_text plugin uses the default locale for the device for speech recognition by default. However it also -supports using any language installed on the device. To find the available languages and select a particular language +The speech_to_text plugin uses the default locale for the device for speech recognition by default. However, it also +supports using any language installed on the device. To find the available languages and select a particular language use these properties.
-There's a `locales` property on the `SpeechToText` instance that provides the list of locales installed on the device +There's a `locales` property on the `SpeechToText` instance that provides the list of locales installed on the device as `LocaleName` instances. Then the `listen` method takes an optional `localeId` named param which would be the `localeId` - property of any of the values returned in `locales`. A call looks like this: - ```dart - var locales = await speech.locales(); +property of any of the values returned in `locales`. A call looks like this: + +```dart + var locales = await speech.locales(); - // Some UI or other code to select a locale from the list - // resulting in an index, selectedLocale + // Some UI or other code to select a locale from the list + // resulting in an index, selectedLocaleIndex - var selectedLocale = locales[selectedLocale]; - speech.listen( - onResult: resultListener, - localeId: selectedLocale.localeId, - ); - ``` + var selectedLocale = locales[selectedLocaleIndex]; + speech.listen( + onResult: resultListener, + localeId: selectedLocale.localeId, + ); +``` ## Troubleshooting @@ -286,18 +301,18 @@ as `LocaleName` instances. Then the `listen` method takes an optional `localeId` If speech recognition is not working on your simulator try going to the Settings app in the simulator: Accessibility -> Spoken content -> Voices -From there select any language and any speaker and it should download to the device. After that speech -recognition should work on the simulator. +From there select any language and any speaker and it should download to the device. After that speech +recognition should work on the simulator. ### Speech recognition stops after a brief pause on Android -Android speech recognition has a very short timeout when the speaker pauses. The duration seems to vary by device +Android speech recognition has a very short timeout when the speaker pauses. The duration seems to vary by device and version of the Android OS. In the devices I've used none have had a pause longer than 5 seconds. Unfortunately -there appears to be no way to change that behaviour. +there appears to be no way to change that behaviour. ### Android beeps on start/stop of speech recognition -This is a feature of the Android OS and there is no supported way to disable it. +This is a feature of the Android OS and there is no supported way to disable it. ### Duplicate results in browser recognition @@ -305,49 +320,57 @@ On Android in Chrome and possibly other browsers, the speech API has been implem it requires a flag to be set on initialization. You can see the details in this [issue](https://github.com/csdcorp/speech_to_text/issues/521) ### Android build -Version 5.2.0 of the plugin and later require at least `compileSdkVersion 31` for the Android build. This property can be set in the `build.gradle` file. + +Version 5.2.0 of the plugin and later require at least `compileSdkVersion 31` for the Android build. This property can be set in the `build.gradle` file. ### Continuous speech recognition -There have been a number of questions about how to achieve continuous speech recognition using this plugin. Currently +There have been a number of questions about how to achieve continuous speech recognition using this plugin. Currently the plugin is designed for short intermittent use, like when expecting a response to a question, or issuing a single -voice command. Issue #63 is the current home for that discussion.
There is not yet a way to achieve this goal using the -Android or iOS speech recognition capabilities. +voice command. Issue #63 is the current home for that discussion. There is not yet a way to achieve this goal using the +Android or iOS speech recognition capabilities. There are at least two separate use cases for continuous speech recognition: + 1. voice assistant style, where recognition of a particular phrase triggers an interaction; -2. dictation of text for input. +2. dictation of text for input. -Voice assistant style interaction is possibly better handled by integrating with the existing assistant capability on +Voice assistant style interaction is possibly better handled by integrating with the existing assistant capability on the device rather than building out a separate capability. Text dictation is available through the keyboard for standard -text input controls though there are other uses of dictation that are not currently well supported. +text input controls though there are other uses of dictation that are not currently well supported. + +### Browser support for speech recognition -### Browser support for speech recognition -Web browsers vary in their level of support for speech recognition. This -[issue](https://github.com/csdcorp/speech_to_text/issues/239) has some details. -The best lists I've seen are https://caniuse.com/speech-recognition and -https://developer.mozilla.org/en-US/docs/Web/API/SpeechRecognition. In particular -in issue #239 it was reported that Brave Browser and Firefox for Linux do not -support speech recognition. +Web browsers vary in their level of support for speech recognition. This +[issue](https://github.com/csdcorp/speech_to_text/issues/239) has some details. +The best lists I've seen are https://caniuse.com/speech-recognition and +https://developer.mozilla.org/en-US/docs/Web/API/SpeechRecognition. In particular +in issue #239 it was reported that Brave Browser and Firefox for Linux do not +support speech recognition. -### Speech recognition from recorded audio -There have been a number of questions about whether speech can be recognized from recorded audio. The short answer is -that this may be possible on iOS but doesn't appear to be on Android. There is an open issue on this here #205. +### Speech recognition from recorded audio + +There have been a number of questions about whether speech can be recognized from recorded audio. The short answer is +that this may be possible on iOS but doesn't appear to be possible on Android. There is an open issue on this: #205. ### iOS interactions with other sound plugins, crash when listening or initializing, pauses -On iOS the speech recognition plugin can interact with other sound plugins, things like WebRTC, or sound playback -or recording plugins. While this plugin tries hard to be a good citizen and properly share the various iOS sound -resources there is always room for interactions. One thing that might help is to add a brief delay between the end of -another sound plugin and starting to listen using SpeechToText. See this [issue](https://github.com/csdcorp/speech_to_text/issues/372) + +On iOS the speech recognition plugin can interact with other sound plugins, things like WebRTC, or sound playback +or recording plugins. While this plugin tries hard to be a good citizen and properly share the various iOS sound -resources there is always room for interactions. One thing that might help is to add a brief delay between the end of +another sound plugin and starting to listen using SpeechToText.
See this [issue](https://github.com/csdcorp/speech_to_text/issues/372) for example. ### SDK version error trying to compile for Android + ``` -Manifest merger failed : uses-sdk:minSdkVersion 16 cannot be smaller than version 21 declared in library [:speech_to_text] +Manifest merger failed : uses-sdk:minSdkVersion 16 cannot be smaller than version 21 declared in library [:speech_to_text] ``` -The speech_to_text plugin requires at least Android SDK 21 because some of the speech functions in Android +The speech_to_text plugin requires at least Android SDK 21 because some of the speech functions in Android were only introduced in that version. To fix this error you need to change the `build.gradle` entry to reflect this version. Here's what the relevant part of that file looked like as of this writing: + ``` defaultConfig { applicationId "com.example.app" @@ -361,10 +384,11 @@ this version. Here's what the relevant part of that file looked like as of this ### Recording audio on Android -It is not currently possible to record audio on Android while doing speech recognition. The only solution right now is to -stop recording while the speech recognizer is active and then start again after. +It is not currently possible to record audio on Android while doing speech recognition. The only solution right now is to +stop recording while the speech recognizer is active and then start again after. ### Incorrect Swift version trying to compile for iOS + ``` /Users/markvandergon/flutter/.pub-cache/hosted/pub.dartlang.org/speech_to_text-1.1.0/ios/Classes/SwiftSpeechToTextPlugin.swift:224:44: error: value of type 'SwiftSpeechToTextPlugin' has no member 'AVAudioSession' rememberedAudioCategory = self.AVAudioSession.Category @@ -372,29 +396,36 @@ stop recording while the speech recognizer is active and then start again after. /Users/markvandergon/flutter/.pub-cache/hosted/pub.dartlang.org/speech_to_text-1.1.0/ios/Classes/SwiftSpeechToTextPlugin.swift:227:63: error: type 'Int' has no member 'notifyOthersOnDeactivation' try self.audioSession.setActive(true, withFlags: .notifyOthersOnDeactivation) ``` + This happens when the Swift language version is not set correctly. See this thread for help: https://github.com/csdcorp/speech_to_text/issues/45. ### Swift not supported trying to compile for iOS + ``` `speech_to_text` does not specify a Swift version and none of the targets (`Runner`) integrating it have the `SWIFT_VERSION` attribute set. ``` -This usually happens for older projects that only support Objective-C. See this thread for help https://github.com/csdcorp/speech_to_text/issues/88. + +This usually happens for older projects that only support Objective-C. See this thread for help: https://github.com/csdcorp/speech_to_text/issues/88. ### Last word lost on Android + -There's a discussion here https://github.com/csdcorp/speech_to_text/issues/434 about this known issue -with some Android speech recognition. This issue is up to Google and other Android implementers to -address, the plugin can't improve on their recognition quality. + +There's a discussion here https://github.com/csdcorp/speech_to_text/issues/434 about this known issue +with some Android speech recognition. This issue is up to Google and other Android implementers to +address; the plugin can't improve on their recognition quality. ### Not working on a particular Android device -The symptom for this issue is that the `initialize` method will always fail.
If you turn on debug logging + +The symptom for this issue is that the `initialize` method will always fail. If you turn on debug logging using the `debugLogging: true` flag on the `initialize` method you'll see `'Speech recognition unavailable'` -in the Android log. There's a lengthy issue discussion here https://github.com/csdcorp/speech_to_text/issues/36 -about this. The issue seems to be that the recognizer is not always automatically enabled on the device. Two -key things helped resolve the issue in this case at least. +in the Android log. There's a lengthy issue discussion here https://github.com/csdcorp/speech_to_text/issues/36 +about this. The issue seems to be that the recognizer is not always automatically enabled on the device. Two +key things helped resolve the issue in this case at least. ### Not working on an Android emulator + +The above tip about getting it working on an Android device is also useful for emulators. Some users have reported seeing another error on Android simulators - sdk gphone x86 (Pixel 3a API 30). AUDIO_RECORD perms were in Manifest, also manually set Mic perms in Android Settings. When running the sample app, Initialize works, but Start failed; the log looks as follows. + ``` D/SpeechToTextPlugin(12555): put partial D/SpeechToTextPlugin(12555): put languageTag @@ -403,24 +434,29 @@ D/SpeechToTextPlugin(12555): Cancel listening ``` #### Resolved by + Resolved it by opening the Google app, clicking the mic icon and granting it permission; then everything on the app works... -#### First +#### First + 1. Go to Google Play 2. Search for 'Google' 3. You should find this app: https://play.google.com/store/apps/details?id=com.google.android.googlequicksearchbox -If 'Disabled' enable it + If it is 'Disabled', enable it This is the SO post that helped: https://stackoverflow.com/questions/28769320/how-to-check-wether-speech-recognition-is-available-or-not #### Second -Ensure the app has the required permissions. The symptom for this that you get a permanent error notification - 'error_audio_error` when starting a listen session. Here's a Stack Overflow post that addresses that - https://stackoverflow.com/questions/46376193/android-speechrecognizer-audio-recording-error - Here's the important excerpt: - >You should go to system setting, Apps, Google app, then enable its permission of microphone. + +Ensure the app has the required permissions. The symptom for this is that you get a permanent error notification, +`error_audio_error`, when starting a listen session. Here's a Stack Overflow post that addresses that: +https://stackoverflow.com/questions/46376193/android-speechrecognizer-audio-recording-error +Here's the important excerpt: + +> You should go to system setting, Apps, Google app, then enable its permission of microphone. #### User reported steps + From issue [#298](https://github.com/csdcorp/speech_to_text/issues/298) this is the detailed set of steps that resolved their issue: @@ -431,15 +467,17 @@ resolved their issue: 5.
Delete the build folder from the root path of the project and run again ### iOS recognition guidelines -Apple has quite a good guide on the user experience for using speech, the original is here -https://developer.apple.com/documentation/speech/sfspeechrecognizer This is the section that I think is particularly relevant: ->#### Create a Great User Experience for Speech Recognition ->Here are some tips to consider when adding speech recognition support to your app. +Apple has quite a good guide on the user experience for using speech; the original is here: +https://developer.apple.com/documentation/speech/sfspeechrecognizer. This is the section that I think is particularly relevant: + +> #### Create a Great User Experience for Speech Recognition +> +> Here are some tips to consider when adding speech recognition support to your app. ->**Be prepared to handle failures caused by speech recognition limits.** Because speech recognition is a network-based service, limits are enforced so that the service can remain freely available to all apps. Individual devices may be limited in the number of recognitions that can be performed per day, and each app may be throttled globally based on the number of requests it makes per day. If a recognition request fails quickly (within a second or two of starting), check to see if the recognition service became unavailable. If it is, you may want to ask users to try again later. +> **Be prepared to handle failures caused by speech recognition limits.** Because speech recognition is a network-based service, limits are enforced so that the service can remain freely available to all apps. Individual devices may be limited in the number of recognitions that can be performed per day, and each app may be throttled globally based on the number of requests it makes per day. If a recognition request fails quickly (within a second or two of starting), check to see if the recognition service became unavailable. If it is, you may want to ask users to try again later. ->**Plan for a one-minute limit on audio duration.** Speech recognition places a relatively high burden on battery life and network usage. To minimize this burden, the framework stops speech recognition tasks that last longer than one minute. This limit is similar to the one for keyboard-related dictation. -Remind the user when your app is recording. For example, display a visual indicator and play sounds at the beginning and end of speech recognition to help users understand that they're being actively recorded. You can also display speech as it is being recognized so that users understand what your app is doing and see any mistakes made during the recognition process. +> **Plan for a one-minute limit on audio duration.** Speech recognition places a relatively high burden on battery life and network usage. To minimize this burden, the framework stops speech recognition tasks that last longer than one minute. This limit is similar to the one for keyboard-related dictation. +> Remind the user when your app is recording. For example, display a visual indicator and play sounds at the beginning and end of speech recognition to help users understand that they're being actively recorded. You can also display speech as it is being recognized so that users understand what your app is doing and see any mistakes made during the recognition process. ->**Do not perform speech recognition on private or sensitive information.** Some speech is not appropriate for recognition.
Don't send passwords, health or financial data, and other sensitive speech for recognition. +> **Do not perform speech recognition on private or sensitive information.** Some speech is not appropriate for recognition. Don't send passwords, health or financial data, and other sensitive speech for recognition. diff --git a/speech_to_text/darwin/Classes/SpeechToTextPlugin.swift b/speech_to_text/darwin/Classes/SpeechToTextPlugin.swift new file mode 100644 index 00000000..15c37dc2 --- /dev/null +++ b/speech_to_text/darwin/Classes/SpeechToTextPlugin.swift @@ -0,0 +1,819 @@ +import Speech +import Try +import os.log + +#if os(OSX) + import FlutterMacOS + import Cocoa + import AVFoundation +#else + import Flutter + import UIKit +#endif + +public enum SwiftSpeechToTextMethods: String { + case has_permission + case initialize + case listen + case stop + case cancel + case locales + case unknown // just for testing +} + +public enum SwiftSpeechToTextCallbackMethods: String { + case textRecognition + case notifyStatus + case notifyError + case soundLevelChange +} + +public enum SpeechToTextStatus: String { + case listening + case notListening + case unavailable + case available + case done + case doneNoResult +} + +public enum SpeechToTextErrors: String { + case onDeviceError + case noRecognizerError + case listenFailedError + case missingOrInvalidArg +} + +public enum ListenMode: Int { + case deviceDefault = 0 + case dictation = 1 + case search = 2 + case confirmation = 3 +} + +struct SpeechRecognitionWords: Codable { + let recognizedWords: String + let confidence: Decimal +} + +struct SpeechRecognitionResult: Codable { + let alternates: [SpeechRecognitionWords] + let finalResult: Bool +} + +struct SpeechRecognitionError: Codable { + let errorMsg: String + let permanent: Bool +} + +enum SpeechToTextError: Error { + case runtimeError(String) +} + +@available(iOS 10.0, macOS 10.15, *) +public class SpeechToTextPlugin: NSObject, FlutterPlugin { + private var channel: FlutterMethodChannel + private var registrar: FlutterPluginRegistrar + private var recognizer: SFSpeechRecognizer? + private var currentRequest: SFSpeechAudioBufferRecognitionRequest? + private var currentTask: SFSpeechRecognitionTask? + private var listeningSound: AVAudioPlayer? + private var successSound: AVAudioPlayer? + private var cancelSound: AVAudioPlayer? + + #if os(iOS) + private var rememberedAudioCategory: AVAudioSession.Category? + private var rememberedAudioCategoryOptions: AVAudioSession.CategoryOptions? + private let audioSession = AVAudioSession.sharedInstance() + #endif + + private var previousLocale: Locale? + private var onPlayEnd: (() -> Void)? + private var returnPartialResults: Bool = true + private var failedListen: Bool = false + private var onDeviceStatus: Bool = false + private var listening = false + private var stopping = false + private var audioEngine: AVAudioEngine? + private var inputNode: AVAudioInputNode? + private let jsonEncoder = JSONEncoder() + private let busForNodeTap = 0 + private let speechBufferSize: AVAudioFrameCount = 1024 + private static var subsystem = Bundle.main.bundleIdentifier! 
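+  // Unified logging: events from this plugin show up in Console.app under
+  // the subsystem and category declared on the logger below.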
+ private let pluginLog = OSLog(subsystem: "com.csdcorp.speechToText", category: "plugin") + + public static func register(with registrar: FlutterPluginRegistrar) { + + var channel: FlutterMethodChannel + #if os(OSX) + channel = FlutterMethodChannel( + name: "plugin.csdcorp.com/speech_to_text", binaryMessenger: registrar.messenger) + #else + channel = FlutterMethodChannel( + name: "plugin.csdcorp.com/speech_to_text", binaryMessenger: registrar.messenger()) + + #endif + + let instance = SpeechToTextPlugin(channel, registrar: registrar) + registrar.addMethodCallDelegate(instance, channel: channel) + } + + init(_ channel: FlutterMethodChannel, registrar: FlutterPluginRegistrar) { + self.channel = channel + self.registrar = registrar + } + + public func handle(_ call: FlutterMethodCall, result: @escaping FlutterResult) { + switch call.method { + case SwiftSpeechToTextMethods.has_permission.rawValue: + hasPermission(result) + case SwiftSpeechToTextMethods.initialize.rawValue: + initialize(result) + case SwiftSpeechToTextMethods.listen.rawValue: + guard let argsArr = call.arguments as? [String: AnyObject], + let partialResults = argsArr["partialResults"] as? Bool, + let onDevice = argsArr["onDevice"] as? Bool, + let listenModeIndex = argsArr["listenMode"] as? Int, + let sampleRate = argsArr["sampleRate"] as? Int, + let autoPunctuation = argsArr["autoPunctuation"] as? Bool, + let enableHaptics = argsArr["enableHaptics"] as? Bool + else { + DispatchQueue.main.async { + result( + FlutterError( + code: SpeechToTextErrors.missingOrInvalidArg.rawValue, + message: + "Missing args: partialResults, onDevice, listenMode, autoPunctuation, enableHaptics and sampleRate are required", + details: nil)) + } + return + } + var localeStr: String? = nil + if let localeParam = argsArr["localeId"] as?
String { + localeStr = localeParam + } + guard let listenMode = ListenMode(rawValue: listenModeIndex) else { + DispatchQueue.main.async { + result( + FlutterError( + code: SpeechToTextErrors.missingOrInvalidArg.rawValue, + message: "invalid value for listenMode, must be 0-3, was \(listenModeIndex)", + details: nil)) + } + return + } + + listenForSpeech( + result, localeStr: localeStr, partialResults: partialResults, onDevice: onDevice, + listenMode: listenMode, sampleRate: sampleRate, autoPunctuation: autoPunctuation, + enableHaptics: enableHaptics) + case SwiftSpeechToTextMethods.stop.rawValue: + stopSpeech(result) + case SwiftSpeechToTextMethods.cancel.rawValue: + cancelSpeech(result) + case SwiftSpeechToTextMethods.locales.rawValue: + locales(result) + default: + os_log("Unrecognized method: %{PUBLIC}@", log: pluginLog, type: .error, call.method) + DispatchQueue.main.async { + result(FlutterMethodNotImplemented) + } + } + } + + private func hasPermission(_ result: @escaping FlutterResult) { + var has = + SFSpeechRecognizer.authorizationStatus() == SFSpeechRecognizerAuthorizationStatus.authorized + #if os(iOS) + has = has && self.audioSession.recordPermission == AVAudioSession.RecordPermission.granted + #else + has = has && AVCaptureDevice.authorizationStatus(for: .audio) == .authorized + #endif + + DispatchQueue.main.async { + result(has) + } + } + + private func initialize(_ result: @escaping FlutterResult) { + var success = false + let status = SFSpeechRecognizer.authorizationStatus() + switch status { + case SFSpeechRecognizerAuthorizationStatus.notDetermined: + SFSpeechRecognizer.requestAuthorization({ (status) -> Void in + success = status == SFSpeechRecognizerAuthorizationStatus.authorized + os_log("Authorization result %{PUBLIC}@", log: self.pluginLog, type: .debug, success.description) + if success { + + #if os(iOS) + + self.audioSession.requestRecordPermission({ (granted: Bool) -> Void in + if granted { + self.setupSpeechRecognition(result) + } else { + self.sendBoolResult(false, result) + os_log("User denied permission", log: self.pluginLog, type: .info) + } + }) + + #else + self.requestMacOSMicrophonePermission { success in + if success { + self.setupSpeechRecognition(result) + } else { + self.sendBoolResult(false, result) + os_log("User denied permission", log: self.pluginLog, type: .info) + } + } + #endif + } else { + self.sendBoolResult(false, result) + } + }) + case SFSpeechRecognizerAuthorizationStatus.denied: + os_log("Permission permanently denied", log: self.pluginLog, type: .info) + sendBoolResult(false, result) + case SFSpeechRecognizerAuthorizationStatus.restricted: + os_log("Device restriction prevented initialize", log: self.pluginLog, type: .info) + sendBoolResult(false, result) + default: + os_log("Has permissions continuing with setup", log: self.pluginLog, type: .debug) + setupSpeechRecognition(result) + } + } + + fileprivate func sendBoolResult(_ value: Bool, _ result: @escaping FlutterResult) { + DispatchQueue.main.async { + result(value) + } + } + + fileprivate func setupListeningSound() { + listeningSound = loadSound("assets/sounds/speech_to_text_listening.m4r") + successSound = loadSound("assets/sounds/speech_to_text_stop.m4r") + cancelSound = loadSound("assets/sounds/speech_to_text_cancel.m4r") + } + + fileprivate func loadSound(_ assetPath: String) -> AVAudioPlayer? { + var player: AVAudioPlayer?
= nil + let soundKey = registrar.lookupKey(forAsset: assetPath) + guard !soundKey.isEmpty else { + return player + } + if let soundPath = Bundle.main.path(forResource: soundKey, ofType: nil) { + let soundUrl = URL(fileURLWithPath: soundPath) + do { + player = try AVAudioPlayer(contentsOf: soundUrl) + player?.delegate = self + } catch { + // could not load the sound asset; listening proceeds without sounds + } + } + return player + } + + private func requestMacOSMicrophonePermission(completion: @escaping (Bool) -> Void) { + switch AVCaptureDevice.authorizationStatus(for: .audio) { + case .authorized: + completion(true) + + case .notDetermined: + AVCaptureDevice.requestAccess(for: .audio) { granted in + completion(granted) + } + + case .denied, .restricted: + completion(false) + + @unknown default: + completion(false) + } + } + + private func setupSpeechRecognition(_ result: @escaping FlutterResult) { + setupRecognizerForLocale(locale: Locale.current) + guard recognizer != nil else { + sendBoolResult(false, result) + return + } + if #available(iOS 13.0, *), let localRecognizer = recognizer { + onDeviceStatus = localRecognizer.supportsOnDeviceRecognition + } + recognizer?.delegate = self + setupListeningSound() + + sendBoolResult(true, result) + } + + private func initAudioEngine(_ result: @escaping FlutterResult) -> Bool { + audioEngine = AVAudioEngine() + inputNode = audioEngine?.inputNode + if inputNode == nil { + os_log("Error no input node", log: pluginLog, type: .error) + sendBoolResult(false, result) + } + return inputNode != nil + } + + private func setupRecognizerForLocale(locale: Locale) { + if previousLocale == locale { + return + } + previousLocale = locale + recognizer = SFSpeechRecognizer(locale: locale) + } + + private func getLocale(_ localeStr: String?)
-> Locale { + guard let aLocaleStr = localeStr else { + return Locale.current + } + let locale = Locale(identifier: aLocaleStr) + return locale + } + + private func stopSpeech(_ result: @escaping FlutterResult) { + if !listening { + sendBoolResult(false, result) + return + } + stopping = true + stopAllPlayers() + self.currentTask?.finish() + if let sound = successSound { + onPlayEnd = { () -> Void in + self.stopCurrentListen() + self.sendBoolResult(true, result) + return + } + sound.play() + } else { + stopCurrentListen() + sendBoolResult(true, result) + } + } + + private func cancelSpeech(_ result: @escaping FlutterResult) { + if !listening { + sendBoolResult(false, result) + return + } + stopping = true + stopAllPlayers() + self.currentTask?.cancel() + if let sound = cancelSound { + onPlayEnd = { () -> Void in + self.stopCurrentListen() + self.sendBoolResult(true, result) + return + } + sound.play() + } else { + stopCurrentListen() + sendBoolResult(true, result) + } + } + + private func stopAllPlayers() { + cancelSound?.stop() + successSound?.stop() + listeningSound?.stop() + } + + private func stopCurrentListen() { + self.currentRequest?.endAudio() + stopAllPlayers() + do { + try trap { + self.audioEngine?.stop() + } + } catch { + os_log( + "Error stopping engine: %{PUBLIC}@", log: pluginLog, type: .error, + error.localizedDescription) + } + do { + try trap { + self.inputNode?.removeTap(onBus: self.busForNodeTap) + } + } catch { + os_log( + "Error removing trap: %{PUBLIC}@", log: pluginLog, type: .error, error.localizedDescription) + } + #if os(iOS) + do { + if let rememberedAudioCategory = rememberedAudioCategory, + let rememberedAudioCategoryOptions = rememberedAudioCategoryOptions + { + try self.audioSession.setCategory( + rememberedAudioCategory, options: rememberedAudioCategoryOptions) + } + } catch { + os_log( + "Error stopping listen: %{PUBLIC}@", log: pluginLog, type: .error, + error.localizedDescription) + } + do { + try self.audioSession.setActive(false, options: .notifyOthersOnDeactivation) + } catch { + os_log( + "Error deactivation: %{PUBLIC}@", log: pluginLog, type: .info, error.localizedDescription) + } + + #endif + self.invokeFlutter( + SwiftSpeechToTextCallbackMethods.notifyStatus, arguments: SpeechToTextStatus.done.rawValue) + + currentRequest = nil + currentTask = nil + onPlayEnd = nil + listening = false + stopping = false + } + + private func listenForSpeech( + _ result: @escaping FlutterResult, localeStr: String?, partialResults: Bool, + onDevice: Bool, listenMode: ListenMode, sampleRate: Int, autoPunctuation: Bool, + enableHaptics: Bool + ) { + if nil != currentTask || listening { + sendBoolResult(false, result) + return + } + do { + // let inErrorTest = true + failedListen = false + stopping = false + returnPartialResults = partialResults + setupRecognizerForLocale(locale: getLocale(localeStr)) + guard let localRecognizer = recognizer else { + result( + FlutterError( + code: SpeechToTextErrors.noRecognizerError.rawValue, + message: "Failed to create speech recognizer", + details: nil)) + return + } + if onDevice { + if #available(iOS 13.0, *), !localRecognizer.supportsOnDeviceRecognition { + result( + FlutterError( + code: SpeechToTextErrors.onDeviceError.rawValue, + message: "on device recognition is not supported on this device", + details: nil)) + } + } + + #if os(iOS) + rememberedAudioCategory = self.audioSession.category + rememberedAudioCategoryOptions = self.audioSession.categoryOptions + try self.audioSession.setCategory( + 
AVAudioSession.Category.playAndRecord, + options: [.defaultToSpeaker, .allowBluetooth, .allowBluetoothA2DP, .mixWithOthers]) + // try self.audioSession.setMode(AVAudioSession.Mode.measurement) + if sampleRate > 0 { + try self.audioSession.setPreferredSampleRate(Double(sampleRate)) + } + try self.audioSession.setMode(AVAudioSession.Mode.default) + try self.audioSession.setActive(true, options: .notifyOthersOnDeactivation) + if #available(iOS 13.0, *) { + try self.audioSession.setAllowHapticsAndSystemSoundsDuringRecording(enableHaptics) + } + #endif + if let sound = listeningSound { + self.onPlayEnd = { () -> Void in + if !self.failedListen { + self.listening = true + self.invokeFlutter( + SwiftSpeechToTextCallbackMethods.notifyStatus, + arguments: SpeechToTextStatus.listening.rawValue) + + } + } + sound.play() + } + if !initAudioEngine(result) { + return + } + if inputNode?.inputFormat(forBus: 0).channelCount == 0 { + throw SpeechToTextError.runtimeError("Not enough available inputs.") + } + self.currentRequest = SFSpeechAudioBufferRecognitionRequest() + guard let currentRequest = self.currentRequest else { + sendBoolResult(false, result) + return + } + currentRequest.shouldReportPartialResults = true + if #available(iOS 13.0, *), onDevice { + currentRequest.requiresOnDeviceRecognition = true + } + switch listenMode { + case ListenMode.dictation: + currentRequest.taskHint = SFSpeechRecognitionTaskHint.dictation + break + case ListenMode.search: + currentRequest.taskHint = SFSpeechRecognitionTaskHint.search + break + case ListenMode.confirmation: + currentRequest.taskHint = SFSpeechRecognitionTaskHint.confirmation + break + default: + break + } + if #available(iOS 16.0, macOS 13, *) { + currentRequest.addsPunctuation = autoPunctuation + } + self.currentTask = self.recognizer?.recognitionTask(with: currentRequest, delegate: self) + let recordingFormat = inputNode?.outputFormat(forBus: self.busForNodeTap) + var fmt: AVAudioFormat! 
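+      // Pick the tap format per platform: on iOS the session's hardware
+      // sample rate is paired with the input node's output format (a
+      // mismatched rate can make installTap raise, which is why the call
+      // below is wrapped in trap); on macOS the input node's own input
+      // format is used directly.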
+ #if os(iOS) + + let theSampleRate = audioSession.sampleRate + + fmt = AVAudioFormat( + commonFormat: recordingFormat!.commonFormat, sampleRate: theSampleRate, + channels: recordingFormat!.channelCount, interleaved: recordingFormat!.isInterleaved) + + #else + let bus = 0 + fmt = self.inputNode?.inputFormat(forBus: bus) + + #endif + try trap { + self.inputNode?.installTap( + onBus: self.busForNodeTap, bufferSize: self.speechBufferSize, format: fmt + ) { (buffer: AVAudioPCMBuffer, when: AVAudioTime) in + currentRequest.append(buffer) + self.updateSoundLevel(buffer: buffer) + } + } + // if ( inErrorTest ){ + // throw SpeechToTextError.runtimeError("for testing only") + // } + self.audioEngine?.prepare() + try self.audioEngine?.start() + if nil == listeningSound { + listening = true + self.invokeFlutter( + SwiftSpeechToTextCallbackMethods.notifyStatus, + arguments: SpeechToTextStatus.listening.rawValue) + } + sendBoolResult(true, result) + } catch { + failedListen = true + os_log( + "Error starting listen: %{PUBLIC}@", log: pluginLog, type: .error, + error.localizedDescription) + self.invokeFlutter( + SwiftSpeechToTextCallbackMethods.notifyStatus, + arguments: SpeechToTextStatus.notListening.rawValue) + stopCurrentListen() + sendBoolResult(false, result) + // ensure the not listening signal is sent in the error case + let speechError = SpeechRecognitionError(errorMsg: "error_listen_failed", permanent: true) + do { + let errorResult = try jsonEncoder.encode(speechError) + invokeFlutter( + SwiftSpeechToTextCallbackMethods.notifyError, + arguments: String(data: errorResult, encoding: .utf8)) + invokeFlutter( + SwiftSpeechToTextCallbackMethods.notifyStatus, + arguments: SpeechToTextStatus.doneNoResult.rawValue) + } catch { + os_log("Could not encode JSON", log: pluginLog, type: .error) + } + } + } + + private func updateSoundLevel(buffer: AVAudioPCMBuffer) { + guard + let channelData = buffer.floatChannelData + else { + return + } + + let channelDataValue = channelData.pointee + let channelDataValueArray = stride( + from: 0, + to: Int(buffer.frameLength), + by: buffer.stride + ).map { channelDataValue[$0] } + let frameLength = Float(buffer.frameLength) + let rms = sqrt(channelDataValueArray.map { $0 * $0 }.reduce(0, +) / frameLength) + let avgPower = 20 * log10(rms) + self.invokeFlutter(SwiftSpeechToTextCallbackMethods.soundLevelChange, arguments: avgPower) + } + + /// Build a list of localId:name with the current locale first + private func locales(_ result: @escaping FlutterResult) { + var localeNames = [String]() + let locales = SFSpeechRecognizer.supportedLocales() + var currentLocaleId = Locale.current.identifier + if Locale.preferredLanguages.count > 0 { + currentLocaleId = Locale.preferredLanguages[0] + } + if let idName = buildIdNameForLocale(forIdentifier: currentLocaleId) { + localeNames.append(idName) + } + for locale in locales { + if locale.identifier == currentLocaleId { + continue + } + if let idName = buildIdNameForLocale(forIdentifier: locale.identifier) { + localeNames.append(idName) + } + } + DispatchQueue.main.async { + result(localeNames) + } + } + + private func buildIdNameForLocale(forIdentifier: String) -> String? { + var idName: String? 
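+    // Locales cross the channel as "localeId:displayName"; any ":" in the
+    // display name is replaced with a space so the separator stays
+    // unambiguous on the Dart side.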
+ if let name = Locale.current.localizedString(forIdentifier: forIdentifier) { + let sanitizedName = name.replacingOccurrences(of: ":", with: " ") + idName = "\(forIdentifier):\(sanitizedName)" + } + return idName + } + + private func handleResult(_ transcriptions: [SFTranscription], isFinal: Bool) { + if !isFinal && !returnPartialResults { + return + } + var speechWords: [SpeechRecognitionWords] = [] + for transcription in transcriptions { + let words: SpeechRecognitionWords = SpeechRecognitionWords( + recognizedWords: transcription.formattedString, confidence: confidenceIn(transcription)) + speechWords.append(words) + } + let speechInfo = SpeechRecognitionResult(alternates: speechWords, finalResult: isFinal) + do { + let speechMsg = try jsonEncoder.encode(speechInfo) + if let speechStr = String(data: speechMsg, encoding: .utf8) { + os_log("Encoded JSON result: %{PUBLIC}@", log: pluginLog, type: .debug, speechStr) + invokeFlutter(SwiftSpeechToTextCallbackMethods.textRecognition, arguments: speechStr) + } + } catch { + os_log("Could not encode JSON", log: pluginLog, type: .error) + } + } + + private func confidenceIn(_ transcription: SFTranscription) -> Decimal { + guard transcription.segments.count > 0 else { + return 0 + } + var totalConfidence: Float = 0.0 + for segment in transcription.segments { + totalConfidence += segment.confidence + } + let avgConfidence: Float = totalConfidence / Float(transcription.segments.count) + let confidence: Float = (avgConfidence * 1000).rounded() / 1000 + return Decimal(string: String(describing: confidence))! + } + + private func invokeFlutter(_ method: SwiftSpeechToTextCallbackMethods, arguments: Any?) { + if method != SwiftSpeechToTextCallbackMethods.soundLevelChange { + os_log("invokeFlutter %{PUBLIC}@", log: pluginLog, type: .debug, method.rawValue) + } + DispatchQueue.main.async { + self.channel.invokeMethod(method.rawValue, arguments: arguments) + } + } + +} + +@available(iOS 10.0, macOS 10.15, *) +extension SpeechToTextPlugin: SFSpeechRecognizerDelegate { + public func speechRecognizer( + _ speechRecognizer: SFSpeechRecognizer, availabilityDidChange available: Bool + ) { + let availability = + available ? 
SpeechToTextStatus.available.rawValue : SpeechToTextStatus.unavailable.rawValue + os_log("Availability changed: %{PUBLIC}@", log: pluginLog, type: .debug, availability) + invokeFlutter(SwiftSpeechToTextCallbackMethods.notifyStatus, arguments: availability) + } +} + +@available(iOS 10.0, macOS 10.15, *) +extension SpeechToTextPlugin: SFSpeechRecognitionTaskDelegate { + public func speechRecognitionDidDetectSpeech(_ task: SFSpeechRecognitionTask) { + // Do nothing for now + } + + public func speechRecognitionTaskFinishedReadingAudio(_ task: SFSpeechRecognitionTask) { + reportError(source: "FinishedReadingAudio", error: task.error) + os_log("Finished reading audio", log: pluginLog, type: .debug) + invokeFlutter( + SwiftSpeechToTextCallbackMethods.notifyStatus, + arguments: SpeechToTextStatus.notListening.rawValue) + } + + public func speechRecognitionTaskWasCancelled(_ task: SFSpeechRecognitionTask) { + reportError(source: "TaskWasCancelled", error: task.error) + os_log("Canceled reading audio", log: pluginLog, type: .debug) + invokeFlutter( + SwiftSpeechToTextCallbackMethods.notifyStatus, + arguments: SpeechToTextStatus.notListening.rawValue) + } + + public func speechRecognitionTask( + _ task: SFSpeechRecognitionTask, didFinishSuccessfully successfully: Bool + ) { + reportError(source: "FinishSuccessfully", error: task.error) + os_log("FinishSuccessfully", log: pluginLog, type: .debug) + if !successfully { + invokeFlutter( + SwiftSpeechToTextCallbackMethods.notifyStatus, + arguments: SpeechToTextStatus.doneNoResult.rawValue) + if let err = task.error as NSError? { + var errorMsg: String + switch err.code { + case 102: + errorMsg = "error_assets_not_installed" + case 201: + errorMsg = "error_speech_recognizer_disabled" + case 203: + errorMsg = "error_retry" + case 301: + errorMsg = "error_request_cancelled" + case 1100: + errorMsg = "error_speech_recognizer_already_active" + case 1101: + errorMsg = "error_speech_recognizer_connection_invalidated" + case 1107: + errorMsg = "error_speech_recognizer_connection_interrupted" + case 1110: + errorMsg = "error_no_match" + case 1700: + errorMsg = "error_speech_recognizer_request_not_authorized" + default: + errorMsg = "error_unknown (\(err.code))" + } + let speechError = SpeechRecognitionError(errorMsg: errorMsg, permanent: true) + do { + let errorResult = try jsonEncoder.encode(speechError) + invokeFlutter( + SwiftSpeechToTextCallbackMethods.notifyError, + arguments: String(data: errorResult, encoding: .utf8)) + } catch { + os_log("Could not encode JSON", log: pluginLog, type: .error) + } + } + } + if !stopping { + if let sound = successfully ? 
successSound : cancelSound { + onPlayEnd = { () -> Void in + self.stopCurrentListen() + } + sound.play() + } else { + stopCurrentListen() + } + } + } + + public func speechRecognitionTask( + _ task: SFSpeechRecognitionTask, didHypothesizeTranscription transcription: SFTranscription + ) { + os_log("HypothesizeTranscription", log: pluginLog, type: .debug) + reportError(source: "HypothesizeTranscription", error: task.error) + handleResult([transcription], isFinal: false) + } + + public func speechRecognitionTask( + _ task: SFSpeechRecognitionTask, + didFinishRecognition recognitionResult: SFSpeechRecognitionResult + ) { + reportError(source: "FinishRecognition", error: task.error) + os_log( + "FinishRecognition %{PUBLIC}@", log: pluginLog, type: .debug, + recognitionResult.isFinal.description) + let isFinal = recognitionResult.isFinal + handleResult(recognitionResult.transcriptions, isFinal: isFinal) + } + + private func reportError(source: String, error: Error?) { + if nil != error { + os_log( + "%{PUBLIC}@ with error: %{PUBLIC}@", log: pluginLog, type: .debug, source, + error.debugDescription) + } + } +} + +@available(iOS 10.0, macOS 10.15, *) +extension SpeechToTextPlugin: AVAudioPlayerDelegate { + + public func audioPlayerDidFinishPlaying( + _ player: AVAudioPlayer, + successfully flag: Bool + ) { + if let playEnd = self.onPlayEnd { + playEnd() + } + } +} diff --git a/speech_to_text/macos/speech_to_text.podspec b/speech_to_text/darwin/speech_to_text.podspec similarity index 79% rename from speech_to_text/macos/speech_to_text.podspec rename to speech_to_text/darwin/speech_to_text.podspec index c0dc5ce6..7ea7b3d8 100644 --- a/speech_to_text/macos/speech_to_text.podspec +++ b/speech_to_text/darwin/speech_to_text.podspec @@ -15,9 +15,13 @@ A new Flutter plugin project. s.source = { :path => '.' 
} s.source_files = 'Classes/**/*' - s.dependency 'FlutterMacOS' - - s.platform = :osx, '10.11' + s.ios.dependency 'Flutter' + s.osx.dependency 'FlutterMacOS' + s.ios.deployment_target = '8.0' + s.osx.deployment_target = '10.13' s.pod_target_xcconfig = { 'DEFINES_MODULE' => 'YES' } s.swift_version = '5.0' + s.ios.dependency 'Try' + s.osx.dependency 'Try' + end diff --git a/speech_to_text/example/ios/Podfile.lock b/speech_to_text/example/ios/Podfile.lock index 58eeb45e..12acdaf8 100644 --- a/speech_to_text/example/ios/Podfile.lock +++ b/speech_to_text/example/ios/Podfile.lock @@ -2,12 +2,13 @@ PODS: - Flutter (1.0.0) - speech_to_text (0.0.1): - Flutter + - FlutterMacOS - Try - Try (2.1.1) DEPENDENCIES: - Flutter (from `Flutter`) - - speech_to_text (from `.symlinks/plugins/speech_to_text/ios`) + - speech_to_text (from `.symlinks/plugins/speech_to_text/darwin`) SPEC REPOS: trunk: @@ -17,13 +18,13 @@ EXTERNAL SOURCES: Flutter: :path: Flutter speech_to_text: - :path: ".symlinks/plugins/speech_to_text/ios" + :path: ".symlinks/plugins/speech_to_text/darwin" SPEC CHECKSUMS: Flutter: e0871f40cf51350855a761d2e70bf5af5b9b5de7 - speech_to_text: b43a7d99aef037bd758ed8e45d79bbac035d2dfe + speech_to_text: 627d3fd2194770b51abb324ba45c2d39398f24a8 Try: 5ef669ae832617b3cee58cb2c6f99fb767a4ff96 PODFILE CHECKSUM: c4c93c5f6502fe2754f48404d3594bf779584011 -COCOAPODS: 1.12.1 +COCOAPODS: 1.15.2 diff --git a/speech_to_text/example/macos/Flutter/GeneratedPluginRegistrant.swift b/speech_to_text/example/macos/Flutter/GeneratedPluginRegistrant.swift index b0e412e3..e78fa15b 100644 --- a/speech_to_text/example/macos/Flutter/GeneratedPluginRegistrant.swift +++ b/speech_to_text/example/macos/Flutter/GeneratedPluginRegistrant.swift @@ -5,8 +5,8 @@ import FlutterMacOS import Foundation -import speech_to_text_macos +import speech_to_text func RegisterGeneratedPlugins(registry: FlutterPluginRegistry) { - SpeechToTextMacosPlugin.register(with: registry.registrar(forPlugin: "SpeechToTextMacosPlugin")) + SpeechToTextPlugin.register(with: registry.registrar(forPlugin: "SpeechToTextPlugin")) } diff --git a/speech_to_text/example/macos/Podfile b/speech_to_text/example/macos/Podfile index dade8dfa..049abe29 100644 --- a/speech_to_text/example/macos/Podfile +++ b/speech_to_text/example/macos/Podfile @@ -1,4 +1,4 @@ -platform :osx, '10.11' +platform :osx, '10.14' # CocoaPods analytics sends network stats synchronously affecting flutter build latency. 
ENV['COCOAPODS_DISABLE_STATS'] = 'true' diff --git a/speech_to_text/example/macos/Podfile.lock b/speech_to_text/example/macos/Podfile.lock index 31351ec6..bef82cc4 100644 --- a/speech_to_text/example/macos/Podfile.lock +++ b/speech_to_text/example/macos/Podfile.lock @@ -1,22 +1,30 @@ PODS: - FlutterMacOS (1.0.0) - - speech_to_text_macos (0.0.1): + - speech_to_text (0.0.1): + - Flutter - FlutterMacOS + - Try + - Try (2.1.1) DEPENDENCIES: - FlutterMacOS (from `Flutter/ephemeral`) - - speech_to_text_macos (from `Flutter/ephemeral/.symlinks/plugins/speech_to_text_macos/macos`) + - speech_to_text (from `Flutter/ephemeral/.symlinks/plugins/speech_to_text/darwin`) + +SPEC REPOS: + trunk: + - Try EXTERNAL SOURCES: FlutterMacOS: :path: Flutter/ephemeral - speech_to_text_macos: - :path: Flutter/ephemeral/.symlinks/plugins/speech_to_text_macos/macos + speech_to_text: + :path: Flutter/ephemeral/.symlinks/plugins/speech_to_text/darwin SPEC CHECKSUMS: - FlutterMacOS: 57701585bf7de1b3fc2bb61f6378d73bbdea8424 - speech_to_text_macos: ae04291713998dede24b85d3b50bd8fedcbfb565 + FlutterMacOS: 8f6f14fa908a6fb3fba0cd85dbd81ec4b251fb24 + speech_to_text: 627d3fd2194770b51abb324ba45c2d39398f24a8 + Try: 5ef669ae832617b3cee58cb2c6f99fb767a4ff96 -PODFILE CHECKSUM: 6eac6b3292e5142cfc23bdeb71848a40ec51c14c +PODFILE CHECKSUM: 353c8bcc5d5b0994e508d035b5431cfe18c1dea7 -COCOAPODS: 1.11.2 +COCOAPODS: 1.15.2 diff --git a/speech_to_text/example/macos/Runner.xcodeproj/project.pbxproj b/speech_to_text/example/macos/Runner.xcodeproj/project.pbxproj index 16ce7c51..3991d573 100644 --- a/speech_to_text/example/macos/Runner.xcodeproj/project.pbxproj +++ b/speech_to_text/example/macos/Runner.xcodeproj/project.pbxproj @@ -3,7 +3,7 @@ archiveVersion = 1; classes = { }; - objectVersion = 51; + objectVersion = 54; objects = { /* Begin PBXAggregateTarget section */ @@ -167,7 +167,6 @@ 842B8341E6F08C9B2023800C /* Pods-Runner.release.xcconfig */, 702ECF94224A5A7B974260C3 /* Pods-Runner.profile.xcconfig */, ); - name = Pods; path = Pods; sourceTree = "<group>"; }; @@ -203,7 +202,7 @@ isa = PBXProject; attributes = { LastSwiftUpdateCheck = 0920; - LastUpgradeCheck = 1300; + LastUpgradeCheck = 1510; ORGANIZATIONNAME = ""; TargetAttributes = { 33CC10EC2044A3C60003C045 = { @@ -256,6 +255,7 @@ /* Begin PBXShellScriptBuildPhase section */ 3399D490228B24CF009A79C7 /* ShellScript */ = { isa = PBXShellScriptBuildPhase; + alwaysOutOfDate = 1; buildActionMask = 2147483647; files = ( ); @@ -404,7 +404,7 @@ GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; GCC_WARN_UNUSED_FUNCTION = YES; GCC_WARN_UNUSED_VARIABLE = YES; - MACOSX_DEPLOYMENT_TARGET = 10.11; + MACOSX_DEPLOYMENT_TARGET = 10.14; MTL_ENABLE_DEBUG_INFO = NO; SDKROOT = macosx; SWIFT_COMPILATION_MODE = wholemodule; @@ -426,6 +426,7 @@ "$(inherited)", "@executable_path/../Frameworks", ); + MACOSX_DEPLOYMENT_TARGET = 10.15; PROVISIONING_PROFILE_SPECIFIER = ""; SWIFT_VERSION = 5.0; }; @@ -483,7 +484,7 @@ GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; GCC_WARN_UNUSED_FUNCTION = YES; GCC_WARN_UNUSED_VARIABLE = YES; - MACOSX_DEPLOYMENT_TARGET = 10.11; + MACOSX_DEPLOYMENT_TARGET = 10.14; MTL_ENABLE_DEBUG_INFO = YES; ONLY_ACTIVE_ARCH = YES; SDKROOT = macosx; @@ -530,7 +531,7 @@ GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; GCC_WARN_UNUSED_FUNCTION = YES; GCC_WARN_UNUSED_VARIABLE = YES; - MACOSX_DEPLOYMENT_TARGET = 10.11; + MACOSX_DEPLOYMENT_TARGET = 10.14; MTL_ENABLE_DEBUG_INFO = NO; SDKROOT = macosx; SWIFT_COMPILATION_MODE = wholemodule; @@ -552,6 +553,7 @@ "$(inherited)", "@executable_path/../Frameworks", ); +
MACOSX_DEPLOYMENT_TARGET = 10.15; PROVISIONING_PROFILE_SPECIFIER = ""; SWIFT_OPTIMIZATION_LEVEL = "-Onone"; SWIFT_VERSION = 5.0; @@ -572,6 +574,7 @@ "$(inherited)", "@executable_path/../Frameworks", ); + MACOSX_DEPLOYMENT_TARGET = 10.15; PROVISIONING_PROFILE_SPECIFIER = ""; SWIFT_VERSION = 5.0; }; diff --git a/speech_to_text/example/macos/Runner.xcodeproj/xcshareddata/xcschemes/Runner.xcscheme b/speech_to_text/example/macos/Runner.xcodeproj/xcshareddata/xcschemes/Runner.xcscheme index f1efa7de..397012fe 100644 --- a/speech_to_text/example/macos/Runner.xcodeproj/xcshareddata/xcschemes/Runner.xcscheme +++ b/speech_to_text/example/macos/Runner.xcodeproj/xcshareddata/xcschemes/Runner.xcscheme @@ -1,6 +1,6 @@ <?xml version="1.0" encoding="UTF-8"?> <Scheme - LastUpgradeVersion = "1300" + LastUpgradeVersion = "1510" version = "1.3"> diff --git a/speech_to_text/example/macos/Runner/DebugProfile.entitlements b/speech_to_text/example/macos/Runner/DebugProfile.entitlements --- a/speech_to_text/example/macos/Runner/DebugProfile.entitlements +++ b/speech_to_text/example/macos/Runner/DebugProfile.entitlements <key>com.apple.security.cs.allow-jit</key> <true/> + <key>com.apple.security.device.audio-input</key> + <true/> <key>com.apple.security.network.server</key> <true/> diff --git a/speech_to_text/example/macos/Runner/Info.plist b/speech_to_text/example/macos/Runner/Info.plist index 4789daa6..74509898 100644 --- a/speech_to_text/example/macos/Runner/Info.plist +++ b/speech_to_text/example/macos/Runner/Info.plist @@ -28,5 +28,10 @@ <string>MainMenu</string> <key>NSPrincipalClass</key> <string>NSApplication</string> + <key>NSSpeechRecognitionUsageDescription</key> + <string>This example recognizes words as you speak them and displays them.</string> + <key>NSMicrophoneUsageDescription</key> + <string>This example listens for speech on the device microphone on your request.</string> + </dict> </plist> diff --git a/speech_to_text/example/macos/Runner/Release.entitlements b/speech_to_text/example/macos/Runner/Release.entitlements index 852fa1a4..f29279b3 100644 --- a/speech_to_text/example/macos/Runner/Release.entitlements +++ b/speech_to_text/example/macos/Runner/Release.entitlements @@ -4,5 +4,7 @@ <key>com.apple.security.app-sandbox</key> <true/> + <key>com.apple.security.device.audio-input</key> + <true/> </dict> </plist> diff --git a/speech_to_text/example/pubspec.lock b/speech_to_text/example/pubspec.lock index fcf610c8..775a2058 100644 --- a/speech_to_text/example/pubspec.lock +++ b/speech_to_text/example/pubspec.lock @@ -92,26 +92,26 @@ packages: dependency: transitive description: name: leak_tracker - sha256: "7f0df31977cb2c0b88585095d168e689669a2cc9b97c309665e3386f3e9d341a" + sha256: "78eb209deea09858f5269f5a5b02be4049535f568c07b275096836f01ea323fa" url: "https://pub.dev" source: hosted - version: "10.0.4" + version: "10.0.0" leak_tracker_flutter_testing: dependency: transitive description: name: leak_tracker_flutter_testing - sha256: "06e98f569d004c1315b991ded39924b21af84cf14cc94791b8aea337d25b57f8" + sha256: b46c5e37c19120a8a01918cfaf293547f47269f7cb4b0058f21531c2465d6ef0 url: "https://pub.dev" source: hosted - version: "3.0.3" + version: "2.0.1" leak_tracker_testing: dependency: transitive description: name: leak_tracker_testing - sha256: "6ba465d5d76e67ddf503e1161d1f4a6bc42306f9d66ca1e8f079a47290fb06d3" + sha256: a597f72a664dbd293f3bfc51f9ba69816f84dcd403cdac7066cb3f6003f3ab47 url: "https://pub.dev" source: hosted - version: "3.0.1" + version: "2.0.1" lints: dependency: transitive description: @@ -140,10 +140,10 @@ packages: dependency: transitive description: name: meta - sha256: "7687075e408b093f36e6bbf6c91878cc0d4cd10f409506f7bc996f68220b9136" + sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 url: "https://pub.dev" source: hosted - version: "1.12.0" + version: "1.11.0" nested: dependency: transitive description: @@ -204,14 +204,6 @@ packages: relative: true source: path version: "6.6.2" - speech_to_text_macos: - dependency: transitive - description: - name: speech_to_text_macos - sha256: e685750f7542fcaa087a5396ee471e727ec648bf681f4da83c84d086322173f6 - url: "https://pub.dev" -
source: hosted - version: "1.1.0" speech_to_text_platform_interface: dependency: transitive description: @@ -256,10 +248,10 @@ packages: dependency: transitive description: name: test_api - sha256: "9955ae474176f7ac8ee4e989dadfb411a58c30415bcfb648fa04b2b8a03afa7f" + sha256: "5c2f730018264d276c20e4f1503fd1308dfbbae39ec8ee63c5236311ac06954b" url: "https://pub.dev" source: hosted - version: "0.7.0" + version: "0.6.1" vector_math: dependency: transitive description: @@ -272,10 +264,10 @@ packages: dependency: transitive description: name: vm_service - sha256: "3923c89304b715fb1eb6423f017651664a03bf5f4b29983627c4da791f74a4ec" + sha256: b3d56ff4341b8f182b96aceb2fa20e3dcb336b9f867bc0eafc0de10f1048e957 url: "https://pub.dev" source: hosted - version: "14.2.1" + version: "13.0.0" sdks: - dart: ">=3.3.0 <4.0.0" - flutter: ">=3.18.0-18.0.pre.54" + dart: ">=3.2.0-0 <4.0.0" + flutter: ">=3.10.0" diff --git a/speech_to_text/ios/.gitignore b/speech_to_text/ios/.gitignore deleted file mode 100644 index aa479fd3..00000000 --- a/speech_to_text/ios/.gitignore +++ /dev/null @@ -1,37 +0,0 @@ -.idea/ -.vagrant/ -.sconsign.dblite -.svn/ - -.DS_Store -*.swp -profile - -DerivedData/ -build/ -GeneratedPluginRegistrant.h -GeneratedPluginRegistrant.m - -.generated/ - -*.pbxuser -*.mode1v3 -*.mode2v3 -*.perspectivev3 - -!default.pbxuser -!default.mode1v3 -!default.mode2v3 -!default.perspectivev3 - -xcuserdata - -*.moved-aside - -*.pyc -*sync/ -Icon? -.tags* - -/Flutter/Generated.xcconfig -/Flutter/flutter_export_environment.sh \ No newline at end of file diff --git a/speech_to_text/ios/Assets/.gitkeep b/speech_to_text/ios/Assets/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/speech_to_text/ios/Classes/SpeechToTextPlugin.h b/speech_to_text/ios/Classes/SpeechToTextPlugin.h deleted file mode 100644 index 1785eb8f..00000000 --- a/speech_to_text/ios/Classes/SpeechToTextPlugin.h +++ /dev/null @@ -1,4 +0,0 @@ -#import <Flutter/Flutter.h> - -@interface SpeechToTextPlugin : NSObject<FlutterPlugin> -@end diff --git a/speech_to_text/ios/Classes/SpeechToTextPlugin.m b/speech_to_text/ios/Classes/SpeechToTextPlugin.m deleted file mode 100644 index 20d0327d..00000000 --- a/speech_to_text/ios/Classes/SpeechToTextPlugin.m +++ /dev/null @@ -1,8 +0,0 @@ -#import "SpeechToTextPlugin.h" -#import <speech_to_text/speech_to_text-Swift.h> - -@implementation SpeechToTextPlugin -+ (void)registerWithRegistrar:(NSObject<FlutterPluginRegistrar>*)registrar { - [SwiftSpeechToTextPlugin registerWithRegistrar:registrar]; -} -@end diff --git a/speech_to_text/ios/Classes/SwiftSpeechToTextPlugin.swift b/speech_to_text/ios/Classes/SwiftSpeechToTextPlugin.swift deleted file mode 100644 index 1d68426d..00000000 --- a/speech_to_text/ios/Classes/SwiftSpeechToTextPlugin.swift +++ /dev/null @@ -1,679 +0,0 @@ -import Flutter -import UIKit -import Speech -import os.log -import Try - -public enum SwiftSpeechToTextMethods: String { - case has_permission - case initialize - case listen - case stop - case cancel - case locales - case unknown // just for testing -} - -public enum SwiftSpeechToTextCallbackMethods: String { - case textRecognition - case notifyStatus - case notifyError - case soundLevelChange -} - -public enum SpeechToTextStatus: String { - case listening - case notListening - case unavailable - case available - case done - case doneNoResult -} - -public enum SpeechToTextErrors: String { - case onDeviceError - case noRecognizerError - case listenFailedError - case missingOrInvalidArg -} - -public enum ListenMode: Int { - case deviceDefault = 0 - case dictation = 1 - case search = 2 - case confirmation = 3 -} - -struct
SpeechRecognitionWords : Codable { - let recognizedWords: String - let confidence: Decimal -} - -struct SpeechRecognitionResult : Codable { - let alternates: [SpeechRecognitionWords] - let finalResult: Bool -} - -struct SpeechRecognitionError : Codable { - let errorMsg: String - let permanent: Bool -} - -enum SpeechToTextError: Error { - case runtimeError(String) -} - - -@available(iOS 10.0, *) -public class SwiftSpeechToTextPlugin: NSObject, FlutterPlugin { - private var channel: FlutterMethodChannel - private var registrar: FlutterPluginRegistrar - private var recognizer: SFSpeechRecognizer? - private var currentRequest: SFSpeechAudioBufferRecognitionRequest? - private var currentTask: SFSpeechRecognitionTask? - private var listeningSound: AVAudioPlayer? - private var successSound: AVAudioPlayer? - private var cancelSound: AVAudioPlayer? - private var rememberedAudioCategory: AVAudioSession.Category? - private var rememberedAudioCategoryOptions: AVAudioSession.CategoryOptions? - private var previousLocale: Locale? - private var onPlayEnd: (() -> Void)? - private var returnPartialResults: Bool = true - private var failedListen: Bool = false - private var onDeviceStatus: Bool = false - private var listening = false - private var stopping = false - private let audioSession = AVAudioSession.sharedInstance() - private var audioEngine: AVAudioEngine? - private var inputNode: AVAudioInputNode? - private let jsonEncoder = JSONEncoder() - private let busForNodeTap = 0 - private let speechBufferSize: AVAudioFrameCount = 1024 - private static var subsystem = Bundle.main.bundleIdentifier! - private let pluginLog = OSLog(subsystem: "com.csdcorp.speechToText", category: "plugin") - - public static func register(with registrar: FlutterPluginRegistrar) { - let channel = FlutterMethodChannel(name: "plugin.csdcorp.com/speech_to_text", binaryMessenger: registrar.messenger()) - let instance = SwiftSpeechToTextPlugin( channel, registrar: registrar ) - registrar.addMethodCallDelegate(instance, channel: channel ) - } - - init( _ channel: FlutterMethodChannel, registrar: FlutterPluginRegistrar ) { - self.channel = channel - self.registrar = registrar - } - - public func handle(_ call: FlutterMethodCall, result: @escaping FlutterResult) { - switch call.method { - case SwiftSpeechToTextMethods.has_permission.rawValue: - hasPermission( result ) - case SwiftSpeechToTextMethods.initialize.rawValue: - initialize( result ) - case SwiftSpeechToTextMethods.listen.rawValue: - guard let argsArr = call.arguments as? Dictionary<String, AnyObject>, - let partialResults = argsArr["partialResults"] as? Bool, - let onDevice = argsArr["onDevice"] as? Bool, - let listenModeIndex = argsArr["listenMode"] as? Int, - let sampleRate = argsArr["sampleRate"] as? Int, - let autoPunctuation = argsArr["autoPunctuation"] as? Bool, - let enableHaptics = argsArr["enableHaptics"] as? Bool - else { - DispatchQueue.main.async { - result(FlutterError( code: SpeechToTextErrors.missingOrInvalidArg.rawValue, - message:"Missing arg partialResults, onDevice, listenMode, autoPunctuatio, enableHaptics and sampleRate are required", - details: nil )) - } - return - } - var localeStr: String? = nil - if let localeParam = argsArr["localeId"] as?
String { - localeStr = localeParam - } - guard let listenMode = ListenMode(rawValue: listenModeIndex) else { - DispatchQueue.main.async { - result(FlutterError( code: SpeechToTextErrors.missingOrInvalidArg.rawValue, - message:"invalid value for listenMode, must be 0-2, was \(listenModeIndex)", - details: nil )) - } - return - } - - listenForSpeech( result, localeStr: localeStr, partialResults: partialResults, onDevice: onDevice, listenMode: listenMode, sampleRate: sampleRate, autoPunctuation: autoPunctuation, enableHaptics: enableHaptics ) - case SwiftSpeechToTextMethods.stop.rawValue: - stopSpeech( result ) - case SwiftSpeechToTextMethods.cancel.rawValue: - cancelSpeech( result ) - case SwiftSpeechToTextMethods.locales.rawValue: - locales( result ) - default: - os_log("Unrecognized method: %{PUBLIC}@", log: pluginLog, type: .error, call.method) - DispatchQueue.main.async { - result( FlutterMethodNotImplemented) - } - } - } - - private func hasPermission( _ result: @escaping FlutterResult) { - let has = SFSpeechRecognizer.authorizationStatus() == SFSpeechRecognizerAuthorizationStatus.authorized && - self.audioSession.recordPermission == AVAudioSession.RecordPermission.granted - DispatchQueue.main.async { - result( has ) - } - } - - private func initialize( _ result: @escaping FlutterResult) { - var success = false - let status = SFSpeechRecognizer.authorizationStatus() - switch status { - case SFSpeechRecognizerAuthorizationStatus.notDetermined: - SFSpeechRecognizer.requestAuthorization({(status)->Void in - success = status == SFSpeechRecognizerAuthorizationStatus.authorized - if ( success ) { - self.audioSession.requestRecordPermission({(granted: Bool)-> Void in - if granted { - self.setupSpeechRecognition(result) - } else{ - self.sendBoolResult( false, result ); - os_log("User denied permission", log: self.pluginLog, type: .info) - } - }) - } - else { - self.sendBoolResult( false, result ); - } - }); - case SFSpeechRecognizerAuthorizationStatus.denied: - os_log("Permission permanently denied", log: self.pluginLog, type: .info) - sendBoolResult( false, result ); - case SFSpeechRecognizerAuthorizationStatus.restricted: - os_log("Device restriction prevented initialize", log: self.pluginLog, type: .info) - sendBoolResult( false, result ); - default: - os_log("Has permissions continuing with setup", log: self.pluginLog, type: .debug) - setupSpeechRecognition(result) - } - } - - fileprivate func sendBoolResult( _ value: Bool, _ result: @escaping FlutterResult) { - DispatchQueue.main.async { - result( value ) - } - } - - fileprivate func setupListeningSound() { - listeningSound = loadSound("assets/sounds/speech_to_text_listening.m4r") - successSound = loadSound("assets/sounds/speech_to_text_stop.m4r") - cancelSound = loadSound("assets/sounds/speech_to_text_cancel.m4r") - } - - fileprivate func loadSound( _ assetPath: String ) -> AVAudioPlayer? { - var player: AVAudioPlayer? 
= nil - let soundKey = registrar.lookupKey(forAsset: assetPath ) - guard !soundKey.isEmpty else { - return player - } - if let soundPath = Bundle.main.path(forResource: soundKey, ofType:nil) { - let soundUrl = URL(fileURLWithPath: soundPath ) - do { - player = try AVAudioPlayer(contentsOf: soundUrl ) - player?.delegate = self - } catch { - // no audio - } - } - return player - } - - private func setupSpeechRecognition( _ result: @escaping FlutterResult) { - setupRecognizerForLocale( locale: Locale.current ) - guard recognizer != nil else { - sendBoolResult( false, result ); - return - } - if #available(iOS 13.0, *), let localRecognizer = recognizer { - onDeviceStatus = localRecognizer.supportsOnDeviceRecognition - } - recognizer?.delegate = self - setupListeningSound() - - sendBoolResult( true, result ); - } - - private func initAudioEngine( _ result: @escaping FlutterResult) -> Bool { - audioEngine = AVAudioEngine() - inputNode = audioEngine?.inputNode - if inputNode == nil { - os_log("Error no input node", log: pluginLog, type: .error) - sendBoolResult( false, result ); - } - return inputNode != nil - } - - private func setupRecognizerForLocale( locale: Locale ) { - if ( previousLocale == locale ) { - return - } - previousLocale = locale - recognizer = SFSpeechRecognizer( locale: locale ) - } - - private func getLocale( _ localeStr: String? ) -> Locale { - guard let aLocaleStr = localeStr else { - return Locale.current - } - let locale = Locale(identifier: aLocaleStr) - return locale - } - - private func stopSpeech( _ result: @escaping FlutterResult) { - if ( !listening ) { - sendBoolResult( false, result ); - return - } - stopping = true - stopAllPlayers() - self.currentTask?.finish() - if let sound = successSound { - onPlayEnd = {() -> Void in - self.stopCurrentListen( ) - self.sendBoolResult( true, result ) - return - } - sound.play() - } - else { - stopCurrentListen( ) - sendBoolResult( true, result ); - } - } - - private func cancelSpeech( _ result: @escaping FlutterResult) { - if ( !listening ) { - sendBoolResult( false, result ); - return - } - stopping = true - stopAllPlayers() - self.currentTask?.cancel() - if let sound = cancelSound { - onPlayEnd = {() -> Void in - self.stopCurrentListen( ) - self.sendBoolResult( true, result ) - return - } - sound.play() - } - else { - stopCurrentListen( ) - sendBoolResult( true, result ); - } - } - - private func stopAllPlayers() { - cancelSound?.stop() - successSound?.stop() - listeningSound?.stop() - } - - private func stopCurrentListen( ) { - self.currentRequest?.endAudio() - stopAllPlayers() - do { - try trap { - self.audioEngine?.stop() - } - } - catch { - os_log("Error stopping engine: %{PUBLIC}@", log: pluginLog, type: .error, error.localizedDescription) - } - do { - try trap { - self.inputNode?.removeTap(onBus: self.busForNodeTap); - } - } - catch { - os_log("Error removing trap: %{PUBLIC}@", log: pluginLog, type: .error, error.localizedDescription) - } - do { - if let rememberedAudioCategory = rememberedAudioCategory, let rememberedAudioCategoryOptions = rememberedAudioCategoryOptions { - try self.audioSession.setCategory(rememberedAudioCategory,options: rememberedAudioCategoryOptions) - } - } - catch { - os_log("Error stopping listen: %{PUBLIC}@", log: pluginLog, type: .error, error.localizedDescription) - } - do { - try self.audioSession.setActive(false, options: .notifyOthersOnDeactivation) - } - catch { - os_log("Error deactivation: %{PUBLIC}@", log: pluginLog, type: .info, error.localizedDescription) - } - self.invokeFlutter( 
SwiftSpeechToTextCallbackMethods.notifyStatus, arguments: SpeechToTextStatus.done.rawValue ) - - currentRequest = nil - currentTask = nil - onPlayEnd = nil - listening = false - stopping = false - } - - private func listenForSpeech( _ result: @escaping FlutterResult, localeStr: String?, partialResults: Bool, - onDevice: Bool, listenMode: ListenMode, sampleRate: Int, autoPunctuation: Bool, enableHaptics: Bool ) { - if ( nil != currentTask || listening ) { - sendBoolResult( false, result ); - return - } - do { - // let inErrorTest = true - failedListen = false - stopping = false - returnPartialResults = partialResults - setupRecognizerForLocale(locale: getLocale(localeStr)) - guard let localRecognizer = recognizer else { - result(FlutterError( code: SpeechToTextErrors.noRecognizerError.rawValue, - message:"Failed to create speech recognizer", - details: nil )) - return - } - if ( onDevice ) { - if #available(iOS 13.0, *), !localRecognizer.supportsOnDeviceRecognition { - result(FlutterError( code: SpeechToTextErrors.onDeviceError.rawValue, - message:"on device recognition is not supported on this device", - details: nil )) - } - } - rememberedAudioCategory = self.audioSession.category - rememberedAudioCategoryOptions = self.audioSession.categoryOptions - try self.audioSession.setCategory(AVAudioSession.Category.playAndRecord, options: [.defaultToSpeaker,.allowBluetooth,.allowBluetoothA2DP,.mixWithOthers]) - // try self.audioSession.setMode(AVAudioSession.Mode.measurement) - if ( sampleRate > 0 ) { - try self.audioSession.setPreferredSampleRate(Double(sampleRate)) - } - try self.audioSession.setMode(AVAudioSession.Mode.default) - try self.audioSession.setActive(true, options: .notifyOthersOnDeactivation) - if #available(iOS 13.0, *) { - try self.audioSession.setAllowHapticsAndSystemSoundsDuringRecording(enableHaptics) - } - if let sound = listeningSound { - self.onPlayEnd = {()->Void in - if ( !self.failedListen ) { - self.listening = true - self.invokeFlutter( SwiftSpeechToTextCallbackMethods.notifyStatus, arguments: SpeechToTextStatus.listening.rawValue ) - - } - } - sound.play() - } - if !initAudioEngine(result) { - return - } - if(inputNode?.inputFormat(forBus: 0).channelCount == 0){ - throw SpeechToTextError.runtimeError("Not enough available inputs.") - } - self.currentRequest = SFSpeechAudioBufferRecognitionRequest() - guard let currentRequest = self.currentRequest else { - sendBoolResult( false, result ); - return - } - currentRequest.shouldReportPartialResults = true - if #available(iOS 13.0, *), onDevice { - currentRequest.requiresOnDeviceRecognition = true - } - switch listenMode { - case ListenMode.dictation: - currentRequest.taskHint = SFSpeechRecognitionTaskHint.dictation - break - case ListenMode.search: - currentRequest.taskHint = SFSpeechRecognitionTaskHint.search - break - case ListenMode.confirmation: - currentRequest.taskHint = SFSpeechRecognitionTaskHint.confirmation - break - default: - break - } - if #available(iOS 16.0, *) { - currentRequest.addsPunctuation = autoPunctuation - } - self.currentTask = self.recognizer?.recognitionTask(with: currentRequest, delegate: self ) - let recordingFormat = inputNode?.outputFormat(forBus: self.busForNodeTap) - let theSampleRate = audioSession.sampleRate - let fmt = AVAudioFormat(commonFormat: recordingFormat!.commonFormat, sampleRate: theSampleRate, channels: recordingFormat!.channelCount, interleaved: recordingFormat!.isInterleaved) - try trap { - self.inputNode?.installTap(onBus: self.busForNodeTap, bufferSize: 
self.speechBufferSize, format: fmt) { (buffer: AVAudioPCMBuffer, when: AVAudioTime) in - currentRequest.append(buffer) - self.updateSoundLevel( buffer: buffer ) - } - } - // if ( inErrorTest ){ - // throw SpeechToTextError.runtimeError("for testing only") - // } - self.audioEngine?.prepare() - try self.audioEngine?.start() - if nil == listeningSound { - listening = true - self.invokeFlutter( SwiftSpeechToTextCallbackMethods.notifyStatus, arguments: SpeechToTextStatus.listening.rawValue ) - } - sendBoolResult( true, result ); - } - catch { - failedListen = true - os_log("Error starting listen: %{PUBLIC}@", log: pluginLog, type: .error, error.localizedDescription) - self.invokeFlutter( SwiftSpeechToTextCallbackMethods.notifyStatus, arguments: SpeechToTextStatus.notListening.rawValue ) - stopCurrentListen() - sendBoolResult( false, result ); - // ensure the not listening signal is sent in the error case - let speechError = SpeechRecognitionError(errorMsg: "error_listen_failed", permanent: true ) - do { - let errorResult = try jsonEncoder.encode(speechError) - invokeFlutter( SwiftSpeechToTextCallbackMethods.notifyError, arguments: String( data:errorResult, encoding: .utf8) ) - invokeFlutter( SwiftSpeechToTextCallbackMethods.notifyStatus, arguments: SpeechToTextStatus.doneNoResult.rawValue ) - } catch { - os_log("Could not encode JSON", log: pluginLog, type: .error) - } - } - } - - private func updateSoundLevel( buffer: AVAudioPCMBuffer) { - guard - let channelData = buffer.floatChannelData - else { - return - } - - let channelDataValue = channelData.pointee - let channelDataValueArray = stride(from: 0, - to: Int(buffer.frameLength), - by: buffer.stride).map{ channelDataValue[$0] } - let frameLength = Float(buffer.frameLength) - let rms = sqrt(channelDataValueArray.map{ $0 * $0 }.reduce(0, +) / frameLength ) - let avgPower = 20 * log10(rms) - self.invokeFlutter( SwiftSpeechToTextCallbackMethods.soundLevelChange, arguments: avgPower ) - } - - /// Build a list of localId:name with the current locale first - private func locales( _ result: @escaping FlutterResult ) { - var localeNames = [String](); - let locales = SFSpeechRecognizer.supportedLocales(); - var currentLocaleId = Locale.current.identifier - if Locale.preferredLanguages.count > 0 { - currentLocaleId = Locale.preferredLanguages[0] - } - if let idName = buildIdNameForLocale(forIdentifier: currentLocaleId ) { - localeNames.append(idName) - } - for locale in locales { - if ( locale.identifier == currentLocaleId) { - continue - } - if let idName = buildIdNameForLocale(forIdentifier: locale.identifier ) { - localeNames.append(idName) - } - } - DispatchQueue.main.async { - result(localeNames) - } - } - - private func buildIdNameForLocale( forIdentifier: String ) -> String? { - var idName: String? 
- if let name = Locale.current.localizedString(forIdentifier: forIdentifier ) { - let sanitizedName = name.replacingOccurrences(of: ":", with: " ") - idName = "\(forIdentifier):\(sanitizedName)" - } - return idName - } - - private func handleResult( _ transcriptions: [SFTranscription], isFinal: Bool ) { - if ( !isFinal && !returnPartialResults ) { - return - } - var speechWords: [SpeechRecognitionWords] = [] - for transcription in transcriptions { - let words: SpeechRecognitionWords = SpeechRecognitionWords(recognizedWords: transcription.formattedString, confidence: confidenceIn( transcription)) - speechWords.append( words ) - } - let speechInfo = SpeechRecognitionResult(alternates: speechWords, finalResult: isFinal ) - do { - let speechMsg = try jsonEncoder.encode(speechInfo) - if let speechStr = String( data:speechMsg, encoding: .utf8) { - os_log("Encoded JSON result: %{PUBLIC}@", log: pluginLog, type: .debug, speechStr ) - invokeFlutter( SwiftSpeechToTextCallbackMethods.textRecognition, arguments: speechStr ) - } - } catch { - os_log("Could not encode JSON", log: pluginLog, type: .error) - } - } - - private func confidenceIn( _ transcription: SFTranscription ) -> Decimal { - guard ( transcription.segments.count > 0 ) else { - return 0; - } - var totalConfidence: Float = 0.0; - for segment in transcription.segments { - totalConfidence += segment.confidence - } - let avgConfidence: Float = totalConfidence / Float(transcription.segments.count ) - let confidence: Float = (avgConfidence * 1000).rounded() / 1000 - return Decimal( string: String( describing: confidence ) )! - } - - private func invokeFlutter( _ method: SwiftSpeechToTextCallbackMethods, arguments: Any? ) { - if(method != SwiftSpeechToTextCallbackMethods.soundLevelChange){ - os_log("invokeFlutter %{PUBLIC}@", log: pluginLog, type: .debug, method.rawValue ) - } - DispatchQueue.main.async { - self.channel.invokeMethod( method.rawValue, arguments: arguments ) - } - } - -} - -@available(iOS 10.0, *) -extension SwiftSpeechToTextPlugin : SFSpeechRecognizerDelegate { - public func speechRecognizer(_ speechRecognizer: SFSpeechRecognizer, availabilityDidChange available: Bool) { - let availability = available ? 
SpeechToTextStatus.available.rawValue : SpeechToTextStatus.unavailable.rawValue - os_log("Availability changed: %{PUBLIC}@", log: pluginLog, type: .debug, availability) - invokeFlutter( SwiftSpeechToTextCallbackMethods.notifyStatus, arguments: availability ) - } -} - -@available(iOS 10.0, *) -extension SwiftSpeechToTextPlugin : SFSpeechRecognitionTaskDelegate { - public func speechRecognitionDidDetectSpeech(_ task: SFSpeechRecognitionTask) { - // Do nothing for now - } - - public func speechRecognitionTaskFinishedReadingAudio(_ task: SFSpeechRecognitionTask) { - reportError(source: "FinishedReadingAudio", error: task.error) - os_log("Finished reading audio", log: pluginLog, type: .debug ) - invokeFlutter( SwiftSpeechToTextCallbackMethods.notifyStatus, arguments: SpeechToTextStatus.notListening.rawValue ) - } - - public func speechRecognitionTaskWasCancelled(_ task: SFSpeechRecognitionTask) { - reportError(source: "TaskWasCancelled", error: task.error) - os_log("Canceled reading audio", log: pluginLog, type: .debug ) - invokeFlutter( SwiftSpeechToTextCallbackMethods.notifyStatus, arguments: SpeechToTextStatus.notListening.rawValue ) - } - - public func speechRecognitionTask(_ task: SFSpeechRecognitionTask, didFinishSuccessfully successfully: Bool) { - reportError(source: "FinishSuccessfully", error: task.error) - os_log("FinishSuccessfully", log: pluginLog, type: .debug ) - if ( !successfully ) { - invokeFlutter( SwiftSpeechToTextCallbackMethods.notifyStatus, arguments: SpeechToTextStatus.doneNoResult.rawValue ) - if let err = task.error as NSError? { - var errorMsg: String - switch err.code { - case 102: - errorMsg = "error_assets_not_installed" - case 201: - errorMsg = "error_speech_recognizer_disabled" - case 203: - errorMsg = "error_retry" - case 301: - errorMsg = "error_request_cancelled" - case 1100: - errorMsg = "error_speech_recognizer_already_active" - case 1101: - errorMsg = "error_speech_recognizer_connection_invalidated" - case 1107: - errorMsg = "error_speech_recognizer_connection_interrupted" - case 1110: - errorMsg = "error_no_match" - case 1700: - errorMsg = "error_speech_recognizer_request_not_authorized" - default: - errorMsg = "error_unknown (\(err.code))" - } - let speechError = SpeechRecognitionError(errorMsg: errorMsg, permanent: true ) - do { - let errorResult = try jsonEncoder.encode(speechError) - invokeFlutter( SwiftSpeechToTextCallbackMethods.notifyError, arguments: String(data:errorResult, encoding: .utf8) ) - } catch { - os_log("Could not encode JSON", log: pluginLog, type: .error) - } - } - } - if !stopping { - if let sound = successfully ? 
successSound : cancelSound { - onPlayEnd = {() -> Void in - self.stopCurrentListen( ) - } - sound.play() - } - else { - stopCurrentListen( ) - } - } - } - - public func speechRecognitionTask(_ task: SFSpeechRecognitionTask, didHypothesizeTranscription transcription: SFTranscription) { - os_log("HypothesizeTranscription", log: pluginLog, type: .debug ) - reportError(source: "HypothesizeTranscription", error: task.error) - handleResult( [transcription], isFinal: false ) - } - - public func speechRecognitionTask(_ task: SFSpeechRecognitionTask, didFinishRecognition recognitionResult: SFSpeechRecognitionResult) { - reportError(source: "FinishRecognition", error: task.error) - os_log("FinishRecognition %{PUBLIC}@", log: pluginLog, type: .debug, recognitionResult.isFinal.description ) - let isFinal = recognitionResult.isFinal - handleResult( recognitionResult.transcriptions, isFinal: isFinal ) - } - - private func reportError( source: String, error: Error?) { - if ( nil != error) { - os_log("%{PUBLIC}@ with error: %{PUBLIC}@", log: pluginLog, type: .debug, source, error.debugDescription) - } - } -} - -@available(iOS 10.0, *) -extension SwiftSpeechToTextPlugin : AVAudioPlayerDelegate { - - public func audioPlayerDidFinishPlaying(_ player: AVAudioPlayer, - successfully flag: Bool) { - if let playEnd = self.onPlayEnd { - playEnd() - } - } -} diff --git a/speech_to_text/ios/speech_to_text.podspec b/speech_to_text/ios/speech_to_text.podspec deleted file mode 100644 index 1db79aa0..00000000 --- a/speech_to_text/ios/speech_to_text.podspec +++ /dev/null @@ -1,22 +0,0 @@ -# -# To learn more about a Podspec see http://guides.cocoapods.org/syntax/podspec.html -# -Pod::Spec.new do |s| - s.name = 'speech_to_text' - s.version = '0.0.1' - s.summary = 'A new flutter plugin project.' - s.description = <<-DESC -A new flutter plugin project. - DESC - s.homepage = 'http://example.com' - s.license = { :file => '../LICENSE' } - s.author = { 'Your Company' => 'email@example.com' } - s.source = { :path => '.' } - s.source_files = 'Classes/**/*' - s.public_header_files = 'Classes/**/*.h' - s.dependency 'Flutter' - s.dependency 'Try' - - s.ios.deployment_target = '8.0' -end - diff --git a/speech_to_text/lib/speech_to_text_provider.dart b/speech_to_text/lib/speech_to_text_provider.dart index 19b4653b..ec3118cd 100644 --- a/speech_to_text/lib/speech_to_text_provider.dart +++ b/speech_to_text/lib/speech_to_text_provider.dart @@ -5,7 +5,6 @@ import 'package:speech_to_text/speech_recognition_error.dart'; import 'package:speech_to_text/speech_recognition_event.dart'; import 'package:speech_to_text/speech_recognition_result.dart'; import 'package:speech_to_text/speech_to_text.dart'; -import 'package:speech_to_text_platform_interface/speech_to_text_platform_interface.dart'; /// Simplifies interaction with [SpeechToText] by handling all the callbacks and notifying /// listeners as events happen. 
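The per-platform sources deleted below are superseded by the shared darwin implementation added earlier in this patch. As a rough sketch only (the class name here is hypothetical; the channel name is taken from the deleted iOS implementation), a shared iOS/macOS Swift source registers itself through conditional imports like this:

#if os(OSX)
  import Cocoa
  import FlutterMacOS
#else
  import Flutter
  import UIKit
#endif

public class DarwinPluginSketch: NSObject, FlutterPlugin {
  public static func register(with registrar: FlutterPluginRegistrar) {
    // FlutterMacOS exposes the binary messenger as a property, while the iOS
    // Flutter framework exposes it as a method, so shared sources branch here.
    #if os(OSX)
      let messenger = registrar.messenger
    #else
      let messenger = registrar.messenger()
    #endif
    let channel = FlutterMethodChannel(
      name: "plugin.csdcorp.com/speech_to_text", binaryMessenger: messenger)
    registrar.addMethodCallDelegate(DarwinPluginSketch(), channel: channel)
  }

  public func handle(_ call: FlutterMethodCall, result: @escaping FlutterResult) {
    result(FlutterMethodNotImplemented)
  }
}

This single-source-tree pattern is also why the podspec rename above declares both s.ios.dependency 'Flutter' and s.osx.dependency 'FlutterMacOS'.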
diff --git a/speech_to_text/macos/Classes/SpeechToTextPlugin.swift b/speech_to_text/macos/Classes/SpeechToTextPlugin.swift deleted file mode 100644 index c8123486..00000000 --- a/speech_to_text/macos/Classes/SpeechToTextPlugin.swift +++ /dev/null @@ -1,19 +0,0 @@ -import Cocoa -import FlutterMacOS - -public class SpeechToTextPlugin: NSObject, FlutterPlugin { - public static func register(with registrar: FlutterPluginRegistrar) { - let channel = FlutterMethodChannel(name: "speech_to_text", binaryMessenger: registrar.messenger) - let instance = SpeechToTextPlugin() - registrar.addMethodCallDelegate(instance, channel: channel) - } - - public func handle(_ call: FlutterMethodCall, result: @escaping FlutterResult) { - switch call.method { - case "getPlatformVersion": - result("macOS " + ProcessInfo.processInfo.operatingSystemVersionString) - default: - result(FlutterMethodNotImplemented) - } - } -} diff --git a/speech_to_text/pubspec.lock b/speech_to_text/pubspec.lock index 8197c756..77bcc290 100644 --- a/speech_to_text/pubspec.lock +++ b/speech_to_text/pubspec.lock @@ -292,26 +292,26 @@ packages: dependency: transitive description: name: leak_tracker - sha256: "7f0df31977cb2c0b88585095d168e689669a2cc9b97c309665e3386f3e9d341a" + sha256: "78eb209deea09858f5269f5a5b02be4049535f568c07b275096836f01ea323fa" url: "https://pub.dev" source: hosted - version: "10.0.4" + version: "10.0.0" leak_tracker_flutter_testing: dependency: transitive description: name: leak_tracker_flutter_testing - sha256: "06e98f569d004c1315b991ded39924b21af84cf14cc94791b8aea337d25b57f8" + sha256: b46c5e37c19120a8a01918cfaf293547f47269f7cb4b0058f21531c2465d6ef0 url: "https://pub.dev" source: hosted - version: "3.0.3" + version: "2.0.1" leak_tracker_testing: dependency: transitive description: name: leak_tracker_testing - sha256: "6ba465d5d76e67ddf503e1161d1f4a6bc42306f9d66ca1e8f079a47290fb06d3" + sha256: a597f72a664dbd293f3bfc51f9ba69816f84dcd403cdac7066cb3f6003f3ab47 url: "https://pub.dev" source: hosted - version: "3.0.1" + version: "2.0.1" lints: dependency: transitive description: @@ -348,10 +348,10 @@ packages: dependency: "direct main" description: name: meta - sha256: "7687075e408b093f36e6bbf6c91878cc0d4cd10f409506f7bc996f68220b9136" + sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 url: "https://pub.dev" source: hosted - version: "1.12.0" + version: "1.11.0" mime: dependency: transitive description: @@ -469,14 +469,6 @@ packages: url: "https://pub.dev" source: hosted version: "1.10.0" - speech_to_text_macos: - dependency: "direct main" - description: - name: speech_to_text_macos - sha256: e685750f7542fcaa087a5396ee471e727ec648bf681f4da83c84d086322173f6 - url: "https://pub.dev" - source: hosted - version: "1.1.0" speech_to_text_platform_interface: dependency: "direct main" description: @@ -529,10 +521,10 @@ packages: dependency: transitive description: name: test_api - sha256: "9955ae474176f7ac8ee4e989dadfb411a58c30415bcfb648fa04b2b8a03afa7f" + sha256: "5c2f730018264d276c20e4f1503fd1308dfbbae39ec8ee63c5236311ac06954b" url: "https://pub.dev" source: hosted - version: "0.7.0" + version: "0.6.1" timing: dependency: transitive description: @@ -561,10 +553,10 @@ packages: dependency: transitive description: name: vm_service - sha256: "3923c89304b715fb1eb6423f017651664a03bf5f4b29983627c4da791f74a4ec" + sha256: b3d56ff4341b8f182b96aceb2fa20e3dcb336b9f867bc0eafc0de10f1048e957 url: "https://pub.dev" source: hosted - version: "14.2.1" + version: "13.0.0" watcher: dependency: transitive description: @@ 
-590,5 +582,5 @@ packages: source: hosted version: "3.1.2" sdks: - dart: ">=3.3.0 <4.0.0" - flutter: ">=3.18.0-18.0.pre.54" + dart: ">=3.2.0 <4.0.0" + flutter: ">=3.10.0" diff --git a/speech_to_text/pubspec.yaml b/speech_to_text/pubspec.yaml index caafbe9d..c32fe804 100644 --- a/speech_to_text/pubspec.yaml +++ b/speech_to_text/pubspec.yaml @@ -11,7 +11,6 @@ dependencies: flutter: sdk: flutter speech_to_text_platform_interface: ^2.2.0 - speech_to_text_macos: ^1.1.0 json_annotation: ^4.0.0 clock: ^1.0.1 pedantic: ^1.9.2 @@ -38,8 +37,10 @@ flutter: pluginClass: SpeechToTextPlugin ios: pluginClass: SpeechToTextPlugin + sharedDarwinSource: true web: pluginClass: SpeechToTextPlugin fileName: speech_to_text_web.dart macos: - default_package: speech_to_text_macos + pluginClass: SpeechToTextPlugin + sharedDarwinSource: true diff --git a/speech_to_text_macos/.gitignore b/speech_to_text_macos/.gitignore deleted file mode 100644 index 96486fd9..00000000 --- a/speech_to_text_macos/.gitignore +++ /dev/null @@ -1,30 +0,0 @@ -# Miscellaneous -*.class -*.log -*.pyc -*.swp -.DS_Store -.atom/ -.buildlog/ -.history -.svn/ -migrate_working_dir/ - -# IntelliJ related -*.iml -*.ipr -*.iws -.idea/ - -# The .vscode folder contains launch configuration and tasks you configure in -# VS Code which you may wish to be included in version control, so this line -# is commented out by default. -#.vscode/ - -# Flutter/Dart/Pub related -# Libraries should not include pubspec.lock, per https://dart.dev/guides/libraries/private-files#pubspeclock. -/pubspec.lock -**/doc/api/ -.dart_tool/ -.packages -build/ diff --git a/speech_to_text_macos/.metadata b/speech_to_text_macos/.metadata deleted file mode 100644 index e3a6d67b..00000000 --- a/speech_to_text_macos/.metadata +++ /dev/null @@ -1,30 +0,0 @@ -# This file tracks properties of this Flutter project. -# Used by Flutter tool to assess capabilities and perform upgrades etc. -# -# This file should be version controlled. - -version: - revision: 85684f9300908116a78138ea4c6036c35c9a1236 - channel: stable - -project_type: plugin - -# Tracks metadata for the flutter migrate command -migration: - platforms: - - platform: root - create_revision: 85684f9300908116a78138ea4c6036c35c9a1236 - base_revision: 85684f9300908116a78138ea4c6036c35c9a1236 - - platform: macos - create_revision: 85684f9300908116a78138ea4c6036c35c9a1236 - base_revision: 85684f9300908116a78138ea4c6036c35c9a1236 - - # User provided section - - # List of Local paths (relative to this file) that should be - # ignored by the migrate tool. - # - # Files that are not part of the templates will be ignored by default. - unmanaged_files: - - 'lib/main.dart' - - 'ios/Runner.xcodeproj/project.pbxproj' diff --git a/speech_to_text_macos/CHANGELOG.md b/speech_to_text_macos/CHANGELOG.md deleted file mode 100644 index 63802d66..00000000 --- a/speech_to_text_macos/CHANGELOG.md +++ /dev/null @@ -1,20 +0,0 @@ -## 1.1.0 - -### New -* Listen changed to use new SpeechListenOptions - -## 1.0.2 - -### Fix -* Typo in dart class name - -## 1.0.1 - -### Fix -* Changed implement to match speech_to_text package name - -## 1.0.0 - -### New -* This is a no-op version that does nothing but report that -speech is not supported diff --git a/speech_to_text_macos/LICENSE b/speech_to_text_macos/LICENSE deleted file mode 100644 index 1a47e8bf..00000000 --- a/speech_to_text_macos/LICENSE +++ /dev/null @@ -1,29 +0,0 @@ -BSD 3-Clause License - -Copyright (c) 2022, Corner Software Development Corp. -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/speech_to_text_macos/README.md b/speech_to_text_macos/README.md deleted file mode 100644 index 2dfc679f..00000000 --- a/speech_to_text_macos/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# speech\_to\_text\\_macos - -The macos implementation of [`speech_to_text`][1]. - -## Usage - -This package is [endorsed][2], which means you can simply use `speech_to_text` -normally. This package will be automatically included in your app when you do. - -[1]: https://pub.dev/packages/speech_to_text -[2]: https://flutter.dev/docs/development/packages-and-plugins/developing-packages#endorsed-federated-plugin \ No newline at end of file diff --git a/speech_to_text_macos/analysis_options.yaml b/speech_to_text_macos/analysis_options.yaml deleted file mode 100644 index a5744c1c..00000000 --- a/speech_to_text_macos/analysis_options.yaml +++ /dev/null @@ -1,4 +0,0 @@ -include: package:flutter_lints/flutter.yaml - -# Additional information about this file can be found at -# https://dart.dev/guides/language/analysis-options diff --git a/speech_to_text_macos/lib/speech_to_text_macos.dart b/speech_to_text_macos/lib/speech_to_text_macos.dart deleted file mode 100644 index c93032f4..00000000 --- a/speech_to_text_macos/lib/speech_to_text_macos.dart +++ /dev/null @@ -1,109 +0,0 @@ -import 'package:speech_to_text_platform_interface/speech_to_text_platform_interface.dart'; - -/// Implementation of the platform interface for platforms where -/// speech recognition is not yet supported. This always reports speech -/// as unavailable. This allows applications which do not require speech -/// to use the plugin in their builds and at runtime disable the speech -/// specific functionality. -class SpeechToTextMacOS extends SpeechToTextPlatform { - static void registerWith() { - SpeechToTextPlatform.instance = SpeechToTextMacOS(); - } - - /// Returns true if the user has already granted permission to access the - /// microphone, does not prompt the user. 
- /// - /// This method can be called before [initialize] to check if permission - /// has already been granted. If this returns false then the [initialize] - /// call will prompt the user for permission if it is allowed to do so. - /// Note that applications cannot ask for permission again if the user has - /// denied them permission in the past. - @override - Future<bool> hasPermission() async { - return false; - } - - /// Initialize speech recognition services, returns true if - /// successful, false if failed. - /// - /// This method must be called before any other speech functions. - /// If this method returns false no further [SpeechToText] methods - /// should be used. False usually means that the user has denied - /// permission to use speech. - /// - /// [debugLogging] controls whether there is detailed logging from the underlying - /// plugins. It is off by default, usually only useful for troubleshooting issues - /// with a particular OS version or device, fairly verbose - @override - Future<bool> initialize( - {debugLogging = false, List<SpeechConfigOption>? options}) async { - return false; - } - - /// Stops the current listen for speech if active, does nothing if not. - /// - /// Stopping a listen session will cause a final result to be sent. Each - /// listen session should be ended with either [stop] or [cancel], for - /// example in the dispose method of a Widget. [cancel] is automatically - /// invoked by a permanent error if [cancelOnError] is set to true in the - /// [listen] call. - /// - /// *Note:* Cannot be used until a successful [initialize] call. Should - /// only be used after a successful [listen] call. - @override - Future<void> stop() async {} - - /// Cancels the current listen for speech if active, does nothing if not. - /// - /// Canceling means that there will be no final result returned from the - /// recognizer. Each listen session should be ended with either [stop] or - /// [cancel], for example in the dispose method of a Widget. [cancel] is - /// automatically invoked by a permanent error if [cancelOnError] is set - /// to true in the [listen] call. - /// - /// *Note* Cannot be used until a successful [initialize] call. Should only - /// be used after a successful [listen] call. - @override - Future<void> cancel() async {} - - /// Starts a listening session for speech and converts it to text. - /// - /// Cannot be used until a successful [initialize] call. There is a - /// time limit on listening imposed by both Android and iOS. The time - /// depends on the device, network, etc. Android is usually quite short, - /// especially if there is no active speech event detected, on the order - /// of ten seconds or so. - /// - /// [localeId] is an optional locale that can be used to listen in a language - /// other than the current system default. See [locales] to find the list of - /// supported languages for listening. - /// - /// [partialResults] if true the listen reports results as they are recognized, - /// when false only final results are reported. Defaults to true. - /// - /// [onDevice] if true the listen attempts to recognize locally with speech never - /// leaving the device. If it cannot do this the listen attempt will fail. This is - /// usually only needed for sensitive content where privacy or security is a concern. - /// - /// [sampleRate] optional for compatibility with certain iOS devices, some devices - /// crash with `sampleRate != device's supported sampleRate`, try 44100 if seeing - /// crashes - /// - @override - Future<bool> listen( - {String?
localeId, - @Deprecated('Use options instead') partialResults = true, - @Deprecated('Use options instead') onDevice = false, - @Deprecated('Use options instead') int listenMode = 0, - @Deprecated('Use options instead') sampleRate = 0, - SpeechListenOptions? options}) async { - return false; - } - - /// returns the list of speech locales available on the device. - /// - @override - Future<List<dynamic>> locales() async { - return []; - } -} diff --git a/speech_to_text_macos/macos/Classes/SpeechToTextMacosPlugin.swift b/speech_to_text_macos/macos/Classes/SpeechToTextMacosPlugin.swift deleted file mode 100644 index f206f782..00000000 --- a/speech_to_text_macos/macos/Classes/SpeechToTextMacosPlugin.swift +++ /dev/null @@ -1,19 +0,0 @@ -import Cocoa -import FlutterMacOS - -public class SpeechToTextMacosPlugin: NSObject, FlutterPlugin { - public static func register(with registrar: FlutterPluginRegistrar) { - let channel = FlutterMethodChannel(name: "speech_to_text_macos", binaryMessenger: registrar.messenger) - let instance = SpeechToTextMacosPlugin() - registrar.addMethodCallDelegate(instance, channel: channel) - } - - public func handle(_ call: FlutterMethodCall, result: @escaping FlutterResult) { - switch call.method { - case "getPlatformVersion": - result("macOS " + ProcessInfo.processInfo.operatingSystemVersionString) - default: - result(FlutterMethodNotImplemented) - } - } -} diff --git a/speech_to_text_macos/macos/Flutter/GeneratedPluginRegistrant.swift b/speech_to_text_macos/macos/Flutter/GeneratedPluginRegistrant.swift deleted file mode 100644 index cccf817a..00000000 --- a/speech_to_text_macos/macos/Flutter/GeneratedPluginRegistrant.swift +++ /dev/null @@ -1,10 +0,0 @@ -// -// Generated file. Do not edit. -// - -import FlutterMacOS -import Foundation - - -func RegisterGeneratedPlugins(registry: FlutterPluginRegistry) { -} diff --git a/speech_to_text_macos/macos/speech_to_text_macos.podspec b/speech_to_text_macos/macos/speech_to_text_macos.podspec deleted file mode 100644 index 0965d495..00000000 --- a/speech_to_text_macos/macos/speech_to_text_macos.podspec +++ /dev/null @@ -1,23 +0,0 @@ -# -# To learn more about a Podspec see http://guides.cocoapods.org/syntax/podspec.html. -# Run `pod lib lint speech_to_text_macos.podspec` to validate before publishing. -# -Pod::Spec.new do |s| - s.name = 'speech_to_text_macos' - s.version = '0.0.1' - s.summary = 'A new Flutter plugin project.' - s.description = <<-DESC -A new Flutter plugin project. - DESC - s.homepage = 'http://example.com' - s.license = { :file => '../LICENSE' } - s.author = { 'Your Company' => 'email@example.com' } - - s.source = { :path => '.'
} - s.source_files = 'Classes/**/*' - s.dependency 'FlutterMacOS' - - s.platform = :osx, '10.11' - s.pod_target_xcconfig = { 'DEFINES_MODULE' => 'YES' } - s.swift_version = '5.0' -end diff --git a/speech_to_text_macos/pubspec.yaml b/speech_to_text_macos/pubspec.yaml deleted file mode 100644 index f66a8770..00000000 --- a/speech_to_text_macos/pubspec.yaml +++ /dev/null @@ -1,32 +0,0 @@ -name: speech_to_text_macos -description: macOS implementation of the speech_to_text plugin -repository: https://github.com/csdcorp/speech_to_text/speech_to_text_macos -issue_tracker: https://github.com/csdcorp/speech_to_text/issues -version: 1.1.0 - -environment: - sdk: ">=3.0.0 <4.0.0" - flutter: ">=3.10.0" - -dependencies: - flutter: - sdk: flutter - plugin_platform_interface: ^2.1.4 - speech_to_text_platform_interface: ^2.2.0 - -dev_dependencies: - flutter_test: - sdk: flutter - flutter_lints: ^2.0.1 - -# For information on the generic Dart part of this file, see the -# following page: https://dart.dev/tools/pub/pubspec - -# The following section is specific to Flutter packages. -flutter: - plugin: - implements: speech_to_text - platforms: - macos: - pluginClass: SpeechToTextMacosPlugin - dartPluginClass: SpeechToTextMacOS diff --git a/speech_to_text_macos/test/speech_to_text_macos_test.dart b/speech_to_text_macos/test/speech_to_text_macos_test.dart deleted file mode 100644 index 424658cc..00000000 --- a/speech_to_text_macos/test/speech_to_text_macos_test.dart +++ /dev/null @@ -1,28 +0,0 @@ -import 'package:flutter_test/flutter_test.dart'; -import 'package:speech_to_text_macos/speech_to_text_macos.dart'; - -void main() { - late SpeechToTextMacOS speechToText; - - setUp(() { - speechToText = SpeechToTextMacOS(); - }); - - test('hasPermission is false before initialize', () async { - expect(await speechToText.hasPermission(), isFalse); - }); - test('initialize is false', () async { - expect(await speechToText.initialize(), isFalse); - }); - test('hasPermission is false after initialize', () async { - expect(await speechToText.initialize(), isFalse); - expect(await speechToText.hasPermission(), isFalse); - }); - test('locales is empty', () async { - expect(await speechToText.locales(), isEmpty); - }); - test('listen is false', () async { - expect(await speechToText.initialize(), isFalse); - expect(await speechToText.listen(), isFalse); - }); -} diff --git a/speech_to_text_platform_interface/pubspec.lock b/speech_to_text_platform_interface/pubspec.lock index 4f5c1bc9..739ca74c 100644 --- a/speech_to_text_platform_interface/pubspec.lock +++ b/speech_to_text_platform_interface/pubspec.lock @@ -171,6 +171,30 @@ packages: url: "https://pub.dev" source: hosted version: "2.0.0" + leak_tracker: + dependency: transitive + description: + name: leak_tracker + sha256: "78eb209deea09858f5269f5a5b02be4049535f568c07b275096836f01ea323fa" + url: "https://pub.dev" + source: hosted + version: "10.0.0" + leak_tracker_flutter_testing: + dependency: transitive + description: + name: leak_tracker_flutter_testing + sha256: b46c5e37c19120a8a01918cfaf293547f47269f7cb4b0058f21531c2465d6ef0 + url: "https://pub.dev" + source: hosted + version: "2.0.1" + leak_tracker_testing: + dependency: transitive + description: + name: leak_tracker_testing + sha256: a597f72a664dbd293f3bfc51f9ba69816f84dcd403cdac7066cb3f6003f3ab47 + url: "https://pub.dev" + source: hosted + version: "2.0.1" lints: dependency: transitive description: @@ -191,26 +215,26 @@ packages: dependency: transitive description: name: matcher - sha256: 
"1803e76e6653768d64ed8ff2e1e67bea3ad4b923eb5c56a295c3e634bad5960e" + sha256: d2323aa2060500f906aa31a895b4030b6da3ebdcc5619d14ce1aada65cd161cb url: "https://pub.dev" source: hosted - version: "0.12.16" + version: "0.12.16+1" material_color_utilities: dependency: transitive description: name: material_color_utilities - sha256: "9528f2f296073ff54cb9fee677df673ace1218163c3bc7628093e7eed5203d41" + sha256: "0e0a020085b65b6083975e499759762399b4475f766c21668c4ecca34ea74e5a" url: "https://pub.dev" source: hosted - version: "0.5.0" + version: "0.8.0" meta: dependency: "direct main" description: name: meta - sha256: a6e590c838b18133bb482a2745ad77c5bb7715fb0451209e1a7567d416678b8e + sha256: d584fa6707a52763a52446f02cc621b077888fb63b93bbcb1143a7be5a0c0c04 url: "https://pub.dev" source: hosted - version: "1.10.0" + version: "1.11.0" mockito: dependency: "direct dev" description: @@ -231,10 +255,10 @@ packages: dependency: transitive description: name: path - sha256: "8829d8a55c13fc0e37127c29fedf290c102f4e40ae94ada574091fe0ff96c917" + sha256: "087ce49c3f0dc39180befefc60fdb4acd8f8620e5682fe2476afd0b3688bb4af" url: "https://pub.dev" source: hosted - version: "1.8.3" + version: "1.9.0" pedantic: dependency: "direct dev" description: @@ -336,22 +360,22 @@ packages: url: "https://pub.dev" source: hosted version: "2.1.4" - watcher: + vm_service: dependency: transitive description: - name: watcher - sha256: "68173f2fa67d241323a4123be7ed4e43424c54befa5505d71c8ad4b7baf8f71d" + name: vm_service + sha256: b3d56ff4341b8f182b96aceb2fa20e3dcb336b9f867bc0eafc0de10f1048e957 url: "https://pub.dev" source: hosted - version: "1.0.0" - web: + version: "13.0.0" + watcher: dependency: transitive description: - name: web - sha256: afe077240a270dcfd2aafe77602b4113645af95d0ad31128cc02bce5ac5d5152 + name: watcher + sha256: "68173f2fa67d241323a4123be7ed4e43424c54befa5505d71c8ad4b7baf8f71d" url: "https://pub.dev" source: hosted - version: "0.3.0" + version: "1.0.0" yaml: dependency: transitive description: @@ -361,4 +385,4 @@ packages: source: hosted version: "3.1.0" sdks: - dart: ">=3.2.0-194.0.dev <4.0.0" + dart: ">=3.2.0-0 <4.0.0"