startRecording method

Future<void> startRecording({
  Duration timeout = const Duration(seconds: 20),
})

Starts capturing audio into an in-memory buffer.

timeout controls how long to wait for an agent to become active. When the agent becomes active, buffered audio is sent automatically and agentReadyFuture completes. If the timeout is reached first, agentReadyFuture completes with an error and callers should reset the buffer.

Ensure microphone permissions are granted before calling this. Audio capture may fail without permissions.

Implementation

/// Starts capturing local audio into the in-memory pre-connect buffer.
///
/// [timeout] bounds how long to wait for an agent participant to become
/// active; the agent-ready manager completes with an error if it elapses
/// first.
///
/// Microphone permissions must already be granted before calling this —
/// capture may fail without them.
///
/// Throws a [StateError] if the audio renderer fails to start; any other
/// exception raised during setup is rethrown after the recording state has
/// been torn down again.
Future<void> startRecording({
  Duration timeout = const Duration(seconds: 20),
}) async {
  // Idempotence guard: a second call while already recording is a no-op.
  if (_isRecording) {
    logger.warning('Already recording');
    return;
  }
  _isRecording = true;

  // Set up timeout for agent readiness.
  _agentReadyManager.setTimer(timeout, timeoutReason: 'Agent did not become ready within timeout');

  try {
    _localTrack = await LocalAudioTrack.create();
    logger.fine('[Preconnect audio] created local track ${_localTrack!.mediaStreamTrack.id}');

    final rendererId = Uuid().v4();
    logger.info('Starting audio renderer with rendererId: $rendererId');

    _audioCapture = createAudioFrameCapture();
    final result = await _audioCapture!.start(
      track: _localTrack!.mediaStreamTrack,
      rendererId: rendererId,
      sampleRate: _requestSampleRate,
      channels: 1,
      commonFormat: 'int16',
    );

    if (!result) {
      // Cleanup happens in the catch block below, same as for any other
      // setup failure.
      throw StateError('Failed to start audio renderer ($result)');
    }

    if (!kIsWeb) {
      await webrtc.NativeAudioManagement.startLocalRecording();
      _nativeRecordingStarted = true;
    }

    logger.info('startAudioRenderer result: $result');

    // Buffer every rendered frame until the agent is ready. The capture
    // backend may deliver a different sample rate / channel count than
    // requested, so record what was actually rendered.
    _streamSubscription = _audioCapture!.frameStream.listen((frame) {
      if (!_isRecording) return;

      _renderedSampleRate = frame.sampleRate;
      _renderedChannels = frame.channels;

      final didOverflow = _buffer.write(frame.data);
      // Log the overflow once; the ring buffer keeps dropping oldest audio.
      if (didOverflow && !_hasLoggedOverflow) {
        _hasLoggedOverflow = true;
        logger.warning(
          '[Preconnect audio] buffer exceeded ${defaultMaxSize ~/ 1024}KB, dropping oldest audio until agent is ready',
        );
      }
    });

    // Listen for agent readiness and send the buffer when active.
    _participantStateListener = _room.events.on<ParticipantStateUpdatedEvent>(
        filter: (event) => event.participant.kind == ParticipantKind.AGENT && event.state == ParticipantState.active,
        (event) async {
      logger.info('[Preconnect audio] Agent is active: ${event.participant.identity}');
      try {
        await sendAudioData(agents: [event.participant.identity]);
        _agentReadyManager.complete();
      } catch (error) {
        _agentReadyManager.completeError(error);
        _onError?.call(error);
      }
    });

    _localTrackPublishedEvent = _room.events.waitFor<LocalTrackPublishedEvent>(
      duration: const Duration(seconds: 10),
      filter: (event) => event.participant == _room.localParticipant,
    );

    // Emit the started event.
    _room.events.emit(PreConnectAudioBufferStartedEvent(
      sampleRate: _requestSampleRate,
      timeout: timeout,
    ));
  } catch (error) {
    // Unified failure path. Previously only a `false` start() result was
    // cleaned up; an exception thrown by track creation, capture start, or
    // native recording left _isRecording stuck at true (wedging all future
    // calls on the guard above) with the readiness timer still armed.
    logger.severe('[Preconnect audio] $error');
    _onError?.call(error);
    await stopRecording(withError: error);
    await _localTrack?.stop();
    _localTrack = null;
    rethrow;
  }
}