## streamTranscription method

Transcribes an audio file and streams text deltas as server-sent events (SSE).

Streaming transcription is only supported by the `gpt-4o-transcribe` and `gpt-4o-mini-transcribe` models.

### Implementation
/// Transcribes the audio in [fileBytes] and streams text deltas as
/// server-sent events.
///
/// Builds a `multipart/form-data` request body by hand (via
/// [_buildMultipartBody]) so the raw bytes can be handed to [SseClient],
/// which POSTs to `audio/transcriptions` and exposes the SSE response.
/// The returned [TranscriptionStream] wraps that SSE client.
///
/// `stream` is always sent as `'true'`; [responseFormat] defaults to
/// [AudioResponseFormat.json] because streaming models require a JSON
/// response format.
///
/// Optional [chunkingStrategy], [language], [prompt], [temperature],
/// [include], and [timestampGranularities] values are forwarded as form
/// fields only when non-null.
///
/// NOTE(review): `fields` is a map literal, so the collection-`for`
/// entries below reuse the keys `'include[]'` and
/// `'timestamp_granularities[]'` on every iteration — a `Map` keeps one
/// value per key, so only the LAST element of [include] and
/// [timestampGranularities] actually reaches the request. Confirm whether
/// `_buildMultipartBody` is meant to emit repeated fields; if so, its
/// `fields` parameter needs a multi-value shape (e.g. a list of entries).
///
/// NOTE(review): the body is `async` but contains no `await` —
/// presumably kept so synchronous throws surface as a failed `Future`;
/// confirm this is intentional.
Future<TranscriptionStream> streamTranscription({
required Uint8List fileBytes,
required String filename,
required AudioModel model,
String? chunkingStrategy,
List<String>? include,
String? language,
String? prompt,
// Response format must be json for streaming models.
AudioResponseFormat responseFormat = AudioResponseFormat.json,
num? temperature,
List<String>? timestampGranularities,
}) async {
// Unique-enough boundary: microsecond timestamp rendered in hex.
final boundary = '----dart-openai-${DateTime.now().microsecondsSinceEpoch.toRadixString(16)}';
// Build the multipart/form-data body manually so we can feed it to SseClient.
final body = _buildMultipartBody(
boundary: boundary,
fileField: 'file',
filename: filename,
fileBytes: fileBytes,
fields: {
'model': model.toJson(),
'stream': 'true',
'response_format': responseFormat.toJson(),
if (chunkingStrategy != null) 'chunking_strategy': chunkingStrategy,
if (language != null) 'language': language,
if (prompt != null) 'prompt': prompt,
if (temperature != null) 'temperature': temperature.toString(),
// NOTE(review): duplicate map keys — see dartdoc above; only the last
// element of each list survives.
if (include != null)
for (final i in include) 'include[]': i,
if (timestampGranularities != null)
for (final t in timestampGranularities) 'timestamp_granularities[]': t,
},
);
// The SSE client owns the request: same headers as regular calls, plus the
// multipart content type carrying the boundary chosen above.
final sse = SseClient(
baseUrl.resolve('audio/transcriptions'),
headers: getHeaders({
'Content-Type': 'multipart/form-data; boundary=$boundary',
}),
httpClient: httpClient,
body: body,
);
return TranscriptionStream(sse);
}