createTranscription method
Transcribe an audio file in a single request, awaiting the complete result (non-streaming).
final result = await client.createTranscription(
  fileBytes: await File('speech.mp3').readAsBytes(),
  filename: 'speech.mp3',
  // The model parameter takes an AudioModel value, not a raw string; the exact
  // member name for 'gpt-4o-mini-transcribe' depends on how AudioModel is defined.
  model: AudioModel.gpt4oMiniTranscribe,
  language: 'en',
);
print(result.text); // full transcript
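For word- or segment-level timestamps, the OpenAI API requires the verbose_json response format (and, at the time of writing, the whisper-1 model). A sketch under the same assumptions as above; AudioModel.whisper1 and AudioResponseFormat.verboseJson are placeholder member names, not confirmed API:

final detailed = await client.createTranscription(
  fileBytes: await File('speech.mp3').readAsBytes(),
  filename: 'speech.mp3',
  model: AudioModel.whisper1, // placeholder member name
  responseFormat: AudioResponseFormat.verboseJson, // placeholder member name
  timestampGranularities: ['word'],
);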
Implementation
Future<TranscriptionResult> createTranscription({
  required Uint8List fileBytes,
  required String filename,
  required AudioModel model, // whisper-1, gpt-4o-transcribe…
  String? chunkingStrategy, // 'auto' | JSON string
  List<String>? include, // e.g. ['logprobs']
  String? language, // ISO-639-1
  String? prompt,
  AudioResponseFormat responseFormat = AudioResponseFormat.json, // json, verbose_json, text, srt, vtt
  num? temperature,
  List<String>? timestampGranularities, // ['word', 'segment']; the API requires verbose_json for these
}) async {
  final url = baseUrl.resolve('audio/transcriptions');
  final req = http.MultipartRequest('POST', url)
    ..headers.addAll(getHeaders({}) ?? {})
    // – core fields –
    ..fields['model'] = model.toJson()
    ..fields['response_format'] = responseFormat.toJson()
    // – optional fields, skipped when null –
    .._maybeField('chunking_strategy', chunkingStrategy)
    .._maybeField('language', language)
    .._maybeField('prompt', prompt)
    .._maybeField('temperature', temperature?.toString())
    .._maybeJsonField('timestamp_granularities[]', timestampGranularities)
    .._maybeJsonField('include[]', include)
    // – audio file –
    ..files.add(http.MultipartFile.fromBytes(
      'file',
      fileBytes,
      filename: filename,
    ));
  final streamed = await req.send();
  final resp = await http.Response.fromStream(streamed);
  if (resp.statusCode == 200) {
    // Parsing depends on the requested format (JSON vs. plain text/SRT/VTT),
    // so the format string is passed through to the parser.
    return TranscriptionResult.fromResponseBody(resp.body, responseFormat.toJson());
  }
  throw OpenAIRequestException.fromHttpResponse(resp);
}
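The _maybeField and _maybeJsonField helpers are not shown in this section. A minimal sketch, assuming they are private extension methods on MultipartRequest so they compose with the cascade above; the names come from the call sites, and the bodies are inferred, not confirmed:

import 'dart:convert';

import 'package:http/http.dart' as http;

extension _MaybeFields on http.MultipartRequest {
  // Set a plain text field only when a value was provided.
  void _maybeField(String name, String? value) {
    if (value != null) fields[name] = value;
  }

  // JSON-encode a list into a single field when provided. MultipartRequest.fields
  // is a Map<String, String>, so repeated form fields can't be expressed directly;
  // encoding the list as one JSON value is one way around that limitation.
  void _maybeJsonField(String name, List<String>? values) {
    if (values != null) fields[name] = jsonEncode(values);
  }
}

Similarly, TranscriptionResult.fromResponseBody presumably branches on the format string it receives: decoding the body as JSON for json and verbose_json, and treating the raw body as the transcript for text, srt, and vtt.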