streamSpeechEvents method

Future<SpeechStream> streamSpeechEvents({
  required String input,
  required SpeechModel model,
  required SpeechVoice voice,
  String? instructions,
  SpeechResponseFormat? responseFormat,
  num? speed,
  String streamFormat = 'sse',
  List<String>? include,
})

Creates text-to-speech audio and streams it back chunk-by-chunk as server-sent events (SSE).

final stream = await client.streamSpeechEvents(
  input: 'Hello there!',
  model: SpeechModel.gpt4oMiniTts,
  voice: SpeechVoice.nova,
  responseFormat: SpeechResponseFormat.mp3,
);

await for (final ev in stream.events) {
  switch (ev) {
    case SpeechAudioDelta():
      audioSink.add(ev.audioBytes);                // play or save
    case SpeechAudioDone():
      print('done: ${ev.usage}');
  }
}

Implementation

/// Creates a text-to-speech request and streams the generated audio back
/// chunk-by-chunk as server-sent events (SSE).
///
/// Returns a [SpeechStream] whose events carry the audio deltas and the
/// terminal done/usage event.
Future<SpeechStream> streamSpeechEvents({
  required String input,
  required SpeechModel model,
  required SpeechVoice voice,
  String? instructions,
  SpeechResponseFormat? responseFormat, // mp3 (default), opus, aac, flac, wav, pcm
  num? speed, // 0.25 – 4.0 (default 1.0)
  /// Leave as `"sse"` (the default here) unless you want raw audio frames.
  String streamFormat = 'sse',

  /// To receive `transcript.*` events include `"logprobs"` here.
  List<String>? include,
}) async {
  // Assemble the request payload; optional fields are omitted when unset so
  // the endpoint falls back to its own defaults.
  final payload = <String, dynamic>{
    'stream': true, // ask the endpoint for an SSE response
    'input': input,
    'model': model.toJson(),
    'voice': voice.toJson(),
    if (instructions != null) 'instructions': instructions,
    if (responseFormat != null) 'response_format': responseFormat.toJson(),
    if (speed != null) 'speed': speed,
    'stream_format': streamFormat, // "sse" by default (see parameter above)
    if (include != null) 'include': include,
  };

  return SpeechStream(streamJson('/audio/speech', payload));
}