// NOTE(review): removed web-page scrape residue that was accidentally captured
// with this file (GitHub UI text: "Notifications", fork banner, file stats,
// and the line-number gutter 1-159). None of it was part of the source.
// ============================================================================
// NOTE(review): this ENTIRE file is commented-out sample code — it compiles to
// nothing. It is an Azure Cognitive Services Speech SDK example with two parts:
//   1. ContinuousRecognitionWithFileAsync — continuous speech-to-text from a
//      local WAV file ("whatstheweatherlike.wav"), driven by recognizer events.
//   2. RecognizeIntentAsync — single-utterance intent recognition via a LUIS
//      (Language Understanding) model, mapping "HomeAutomation.TurnOn/TurnOff"
//      intents to the ids "on"/"off". (This method is never called from Main.)
// Dead commented-out code should not live in the repository: either restore it
// or delete it and rely on version control for history.
//
// SECURITY(review): the subscription keys ("f6bc37e2...", "38f56c8e...") and
// the LUIS app id below are hard-coded in source. If these were ever live
// credentials they must be considered leaked — rotate them and load them from
// configuration / secret storage before this code is revived.
// ============================================================================
// using System;
// using System.Threading.Tasks;
// using Microsoft.CognitiveServices.Speech;
// using Microsoft.CognitiveServices.Speech.Audio;
// using Microsoft.CognitiveServices.Speech.Intent;
// namespace HomeAutomation
// {
// class Program
// {
// static void Main(string[] args)
// {
// NOTE(review): .Wait() on async code blocks the main thread; if revived,
// prefer `static async Task Main` (C# 7.1+) with `await`.
// ContinuousRecognitionWithFileAsync().Wait();
// Console.WriteLine("Please press Enter to continue.");
// Console.ReadLine();
// }
// Runs continuous speech recognition against a WAV file until the session
// stops or recognition is canceled (signaled via `stopRecognition`).
// public static async Task ContinuousRecognitionWithFileAsync()
// {
// // <recognitionContinuousWithFile>
// // Creates an instance of a speech config with specified subscription key and service region.
// // Replace with your own subscription key and service region (e.g., "westus").
// SECURITY(review): hard-coded subscription key — rotate and externalize.
// var config = SpeechConfig.FromSubscription("f6bc37e263114ef7a232d0c7a3ecd4eb", "westus");
// NOTE(review): TaskCompletionSource should be created with
// TaskCreationOptions.RunContinuationsAsynchronously to avoid continuations
// running inline on the SDK's event-callback thread.
// var stopRecognition = new TaskCompletionSource<int>();
// // Creates a speech recognizer using file as audio input.
// // Replace with your own audio file name.
// using (var audioInput = AudioConfig.FromWavFileInput(@"whatstheweatherlike.wav"))
// {
// using (var recognizer = new SpeechRecognizer(config, audioInput))
// {
// Console.WriteLine("recognizer "+recognizer);
// // Subscribes to events.
// Recognizing fires with partial hypotheses while audio is streaming.
// recognizer.Recognizing += (s, e) =>
// {
// Console.WriteLine($"RECOGNIZING Voice: Text={e.Result.Text}");
// };
// Recognized fires with the final result for each utterance.
// recognizer.Recognized += (s, e) =>
// {
// if (e.Result.Reason == ResultReason.RecognizedSpeech)
// {
// Console.WriteLine($"RECOGNIZED voice: Text={e.Result.Text}");
// }
// else if (e.Result.Reason == ResultReason.NoMatch)
// {
// Console.WriteLine($"NOMATCH: Speech could not be recognized.");
// }
// };
// Canceled fires on error or end-of-stream; either way, unblock the waiter.
// recognizer.Canceled += (s, e) =>
// {
// Console.WriteLine($"CANCELED: Reason={e.Reason}");
// if (e.Reason == CancellationReason.Error)
// {
// Console.WriteLine($"CANCELED: ErrorCode={e.ErrorCode}");
// Console.WriteLine($"CANCELED: ErrorDetails={e.ErrorDetails}");
// Console.WriteLine($"CANCELED: Did you update the subscription info?");
// }
// stopRecognition.TrySetResult(0);
// };
// recognizer.SessionStarted += (s, e) =>
// {
// Console.WriteLine("\n Session started event.");
// };
// SessionStopped also signals completion (e.g., end of the input file).
// recognizer.SessionStopped += (s, e) =>
// {
// Console.WriteLine("\n Session stopped event.");
// Console.WriteLine("\nStop recognition.");
// stopRecognition.TrySetResult(0);
// };
// // Starts continuous recognition. Uses StopContinuousRecognitionAsync() to stop recognition.
// await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);
// // Waits for completion.
// // Use Task.WaitAny to keep the task rooted.
// NOTE(review): Task.WaitAny blocks a thread-pool thread inside an async
// method; if revived, prefer `await stopRecognition.Task`.
// Task.WaitAny(new[] { stopRecognition.Task });
// // Stops recognition.
// await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
// }
// }
// // </recognitionContinuousWithFile>
// }
// Single-shot intent recognition from the default microphone using a LUIS
// model. NOTE(review): never invoked from Main — dead even within this sample.
// static async Task RecognizeIntentAsync()
// {
// // Creates an instance of a speech config with specified subscription key
// // and service region. Note that in contrast to other services supported by
// // the Cognitive Services Speech SDK, the Language Understanding service
// // requires a specific subscription key from https://www.luis.ai/.
// // The Language Understanding service calls the required key 'endpoint key'.
// // Once you've obtained it, replace with below with your own Language Understanding subscription key
// // and service region (e.g., "westus").
// // The default language is "en-us".
// SECURITY(review): hard-coded LUIS endpoint key — rotate and externalize.
// var config = SpeechConfig.FromSubscription("38f56c8ed26f4cd18ff492217ce8347a", "westus");
// // Creates an intent recognizer using microphone as audio input.
// using (var recognizer = new IntentRecognizer(config))
// {
// // Creates a Language Understanding model using the app id, and adds specific intents from your model
// var model = LanguageUnderstandingModel.FromAppId("23ffd55a-0ed4-4377-9494-176c11682ed8");
// /* recognizer.AddIntent(model, "YourLanguageUnderstandingIntentName1", "id1");
// recognizer.AddIntent(model, "YourLanguageUnderstandingIntentName2", "id2");
// recognizer.AddIntent(model, "YourLanguageUnderstandingIntentName3", "any-IntentId-here");*/
// Maps the two HomeAutomation intents to short local ids "off"/"on".
// recognizer.AddIntent(model, "HomeAutomation.TurnOff", "off");
// recognizer.AddIntent(model, "HomeAutomation.TurnOn", "on");
// // Starts recognizing.
// Console.WriteLine("Say something...");
// // Starts intent recognition, and returns after a single utterance is recognized. The end of a
// // single utterance is determined by listening for silence at the end or until a maximum of 15
// // seconds of audio is processed. The task returns the recognition text as result.
// // Note: Since RecognizeOnceAsync() returns only a single utterance, it is suitable only for single
// // shot recognition like command or query.
// // For long-running multi-utterance recognition, use StartContinuousRecognitionAsync() instead.
// var result = await recognizer.RecognizeOnceAsync().ConfigureAwait(false);
// // Checks result.
// RecognizedIntent: speech recognized AND matched a registered intent.
// if (result.Reason == ResultReason.RecognizedIntent)
// {
// Console.WriteLine($"RECOGNIZED: Text={result.Text}");
// Console.WriteLine($" Intent Id: {result.IntentId}.");
// Console.WriteLine($" Language Understanding JSON: {result.Properties.GetProperty(PropertyId.LanguageUnderstandingServiceResponse_JsonResult)}.");
// }
// RecognizedSpeech: transcription succeeded but no intent matched.
// else if (result.Reason == ResultReason.RecognizedSpeech)
// {
// Console.WriteLine($"RECOGNIZED: Text={result.Text}");
// Console.WriteLine($" Intent not recognized.");
// }
// else if (result.Reason == ResultReason.NoMatch)
// {
// Console.WriteLine($"NOMATCH: Speech could not be recognized.");
// }
// else if (result.Reason == ResultReason.Canceled)
// {
// var cancellation = CancellationDetails.FromResult(result);
// Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");
// if (cancellation.Reason == CancellationReason.Error)
// {
// Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
// Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
// Console.WriteLine($"CANCELED: Did you update the subscription info?");
// }
// }
// }
// }
// }
// }