FileDemo.cs
/*
    Copyright 2020-2023 Picovoice Inc.

    You may not use this file except in compliance with the license. A copy of the license is located in the "LICENSE"
    file accompanying this source.

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
    an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
    specific language governing permissions and limitations under the License.
*/

using System;
using System.Collections.Generic;
using System.IO;
using System.Text;
using Pv;

namespace RhinoDemo
{
    /// <summary>
    /// File Demo for the Rhino Speech-to-Intent engine. The demo takes an input audio file and a context file
    /// and prints the inference result.
    /// </summary>
    public class FileDemo
    {
        /// <summary>
        /// Reads through the input file and prints the inference result returned by Rhino.
        /// </summary>
        /// <param name="inputAudioPath">Required argument. Absolute path to input audio file.</param>
        /// <param name="accessKey">AccessKey obtained from Picovoice Console (https://console.picovoice.ai/).</param>
        /// <param name="contextPath">
        /// Absolute path to the file containing the context model (a file with `.rhn` extension). A context represents the set of
        /// expressions (spoken commands), intents, and intent arguments (slots) within a domain of interest.
        /// </param>
        /// <param name="modelPath">
        /// Absolute path to the file containing model parameters. If not set, it defaults to the
        /// default location.
        /// </param>
        /// <param name="sensitivity">
        /// Inference sensitivity expressed as a floating-point value within [0, 1]. A higher sensitivity value results in fewer misses
        /// at the cost of (potentially) increasing the erroneous inference rate.
        /// </param>
        /// <param name="endpointDurationSec">
        /// Endpoint duration in seconds. An endpoint is a chunk of silence at the end of an
        /// utterance that marks the end of a spoken command. It should be a positive number within [0.5, 5]. A lower endpoint
        /// duration reduces delay and improves responsiveness. A higher endpoint duration ensures Rhino doesn't return an inference
        /// preemptively in case the user pauses before finishing the request.
        /// </param>
        /// <param name="requireEndpoint">
        /// If set to `true`, Rhino requires an endpoint (a chunk of silence) after the spoken command.
        /// If set to `false`, Rhino tries to detect silence, but if it cannot, it still provides an inference regardless. Set
        /// to `false` only if operating in an environment with overlapping speech (e.g. people talking in the background).
        /// </param>
        public static void RunDemo(
            string accessKey,
            string inputAudioPath,
            string contextPath,
            string modelPath,
            float sensitivity,
            float endpointDurationSec,
            bool requireEndpoint)
        {
            // init rhino speech-to-intent engine
            using (Rhino rhino = Rhino.Create(
                accessKey,
                contextPath,
                modelPath,
                sensitivity,
                endpointDurationSec,
                requireEndpoint))
            {
                // open and validate wav file
                using (BinaryReader reader = new BinaryReader(File.Open(inputAudioPath, FileMode.Open)))
                {
                    ValidateWavFile(reader, rhino.SampleRate, 16, out short numChannels);
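
                    // Rhino operates on single-channel, 16-bit linear PCM audio at rhino.SampleRate
                    // (16 kHz for the standard models), which is why the WAV header is validated first.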
                    // read audio and send frames to rhino
                    short[] rhinoFrame = new short[rhino.FrameLength];
                    int frameIndex = 0;
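
                    // Feed samples to Rhino one frame (rhino.FrameLength samples) at a time;
                    // Process() returns true once Rhino has finalized an inference.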
                    while (reader.BaseStream.Position != reader.BaseStream.Length)
                    {
                        rhinoFrame[frameIndex++] = reader.ReadInt16();
                        if (frameIndex == rhinoFrame.Length)
                        {
                            bool isFinalized = rhino.Process(rhinoFrame);
                            if (isFinalized)
                            {
                                Inference inference = rhino.GetInference();
                                if (inference.IsUnderstood)
                                {
                                    Console.WriteLine("{");
                                    Console.WriteLine($" intent : '{inference.Intent}'");
                                    Console.WriteLine(" slots : {");
                                    foreach (KeyValuePair<string, string> slot in inference.Slots)
                                    {
                                        Console.WriteLine($" {slot.Key} : '{slot.Value}'");
                                    }
                                    Console.WriteLine(" }");
                                    Console.WriteLine("}");
                                }
                                else
                                {
                                    Console.WriteLine("Didn't understand the command.");
                                }
                                return;
                            }
                            frameIndex = 0;
                        }

                        // skip right channel
                        if (numChannels == 2)
                        {
                            reader.ReadInt16();
                        }
                    }
                }

                Console.WriteLine("Reached end of audio file before Rhino returned an inference.");
            }
        }

        /// <summary>
        /// Reads the RIFF header of a WAV file and validates its properties against Picovoice audio processing requirements.
        /// </summary>
        /// <param name="reader">WAV file stream reader.</param>
        /// <param name="requiredSampleRate">Required sample rate in Hz.</param>
        /// <param name="requiredBitDepth">Required number of bits per sample.</param>
        /// <param name="numChannels">Number of channels, as read from the WAV header.</param>
        public static void ValidateWavFile(BinaryReader reader, int requiredSampleRate, short requiredBitDepth, out short numChannels)
        {
            byte[] riffHeader = reader?.ReadBytes(44);
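
            // Canonical 44-byte PCM WAV header layout used below:
            //   bytes 0-3  : "RIFF" chunk ID
            //   bytes 8-11 : "WAVE" format ID
            //   bytes 22-23: number of channels
            //   bytes 24-27: sample rate (Hz)
            //   bytes 34-35: bits per sample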
            int riff = BitConverter.ToInt32(riffHeader, 0);
            int wave = BitConverter.ToInt32(riffHeader, 8);
            if (riff != BitConverter.ToInt32(Encoding.UTF8.GetBytes("RIFF"), 0) ||
                wave != BitConverter.ToInt32(Encoding.UTF8.GetBytes("WAVE"), 0))
            {
                throw new ArgumentException($"Invalid input audio file format. Input file must be a {requiredSampleRate}Hz, 16-bit WAV file.", "input_audio_path");
            }
            numChannels = BitConverter.ToInt16(riffHeader, 22);
            int sampleRate = BitConverter.ToInt32(riffHeader, 24);
            short bitDepth = BitConverter.ToInt16(riffHeader, 34);
            if (sampleRate != requiredSampleRate || bitDepth != requiredBitDepth)
            {
                throw new ArgumentException($"Invalid input audio file format. Input file must be a {requiredSampleRate}Hz, 16-bit WAV file.", "input_audio_path");
            }

            if (numChannels == 2)
            {
                Console.WriteLine("Picovoice processes single-channel audio, but a stereo file was provided. Processing left channel only.");
            }
        }

        public static void Main(string[] args)
        {
            AppDomain.CurrentDomain.UnhandledException += OnUnhandledException;
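
            // Any unhandled exception is printed and the process exits with a non-zero code (see OnUnhandledException below).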
            if (args.Length == 0)
            {
                Console.WriteLine(HELP_STR);
                Console.Read();
                return;
            }
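
            // Optional arguments fall back to these defaults when the corresponding flags are not supplied.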
            string inputAudioPath = null;
            string accessKey = null;
            string contextPath = null;
            string modelPath = null;
            float sensitivity = 0.5f;
            float endpointDurationSec = 1.0f;
            bool requireEndpoint = true;
            bool showHelp = false;

            // parse command line arguments
            int argIndex = 0;
            while (argIndex < args.Length)
            {
                if (args[argIndex] == "--input_audio_path")
                {
                    if (++argIndex < args.Length)
                    {
                        inputAudioPath = args[argIndex++];
                    }
                }
                else if (args[argIndex] == "--access_key")
                {
                    if (++argIndex < args.Length)
                    {
                        accessKey = args[argIndex++];
                    }
                }
                else if (args[argIndex] == "--context_path")
                {
                    if (++argIndex < args.Length)
                    {
                        contextPath = args[argIndex++];
                    }
                }
                else if (args[argIndex] == "--model_path")
                {
                    if (++argIndex < args.Length)
                    {
                        modelPath = args[argIndex++];
                    }
                }
                else if (args[argIndex] == "--sensitivity")
                {
                    argIndex++;
                    if (argIndex < args.Length && float.TryParse(args[argIndex], out sensitivity))
                    {
                        argIndex++;
                    }
                }
                else if (args[argIndex] == "--endpoint_duration")
                {
                    argIndex++;
                    if (argIndex < args.Length && float.TryParse(args[argIndex], out endpointDurationSec))
                    {
                        argIndex++;
                    }
                }
                else if (args[argIndex] == "--require_endpoint")
                {
                    if (++argIndex < args.Length)
                    {
                        if (args[argIndex++].ToLower() == "false")
                        {
                            requireEndpoint = false;
                        }
                    }
                }
                else if (args[argIndex] == "-h" || args[argIndex] == "--help")
                {
                    showHelp = true;
                    argIndex++;
                }
                else
                {
                    argIndex++;
                }
            }

            // print help text and exit
            if (showHelp)
            {
                Console.WriteLine(HELP_STR);
                Console.Read();
                return;
            }

            // argument validation
            if (string.IsNullOrEmpty(inputAudioPath))
            {
                throw new ArgumentNullException("input_audio_path");
            }
            if (!File.Exists(inputAudioPath))
            {
                throw new ArgumentException($"Audio file at path {inputAudioPath} does not exist", "--input_audio_path");
            }

            RunDemo(accessKey, inputAudioPath, contextPath, modelPath, sensitivity, endpointDurationSec, requireEndpoint);
        }

        private static void OnUnhandledException(object sender, UnhandledExceptionEventArgs e)
        {
            Console.WriteLine(e.ExceptionObject.ToString());
            Console.Read();
            Environment.Exit(1);
        }

        private static readonly string HELP_STR = "Available options: \n" +
            "\t--input_audio_path (required): Absolute path to input audio file.\n" +
            "\t--access_key (required): AccessKey obtained from Picovoice Console (https://console.picovoice.ai/)\n" +
            "\t--context_path (required): Absolute path to context file.\n" +
            "\t--model_path: Absolute path to the file containing model parameters.\n" +
            "\t--sensitivity: Inference sensitivity. It should be a number within [0, 1]. A higher sensitivity value results in " +
            "fewer misses at the cost of (potentially) increasing the erroneous inference rate.\n" +
            "\t--endpoint_duration: Endpoint duration in seconds. It should be a positive number within [0.5, 5].\n" +
            "\t--require_endpoint: ['true'|'false'] If set to 'false', Rhino does not require an endpoint (chunk of silence) before finishing inference.\n";
    }
}
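
// Example invocation (a sketch; the file paths and ${ACCESS_KEY} below are placeholders, not part of this repo):
//   dotnet run -- \
//     --input_audio_path /path/to/test_audio.wav \
//     --access_key ${ACCESS_KEY} \
//     --context_path /path/to/context.rhn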