Module livekit.plugins.xai.realtime
Sub-modules
- livekit.plugins.xai.realtime.realtime_model
- livekit.plugins.xai.realtime.types
Classes
class FileSearch (vector_store_ids: list[str] = <factory>,
max_num_results: int | None = None)-
Expand source code
@dataclass(slots=True)
class FileSearch(XAITool):
    """Enable file search tool for searching uploaded document collections."""

    vector_store_ids: list[str] = field(default_factory=list)
    max_num_results: int | None = None

    def to_dict(self) -> dict[str, Any]:
        result: dict[str, Any] = {
            "type": "file_search",
            "vector_store_ids": self.vector_store_ids,
        }
        if self.max_num_results is not None:
            result["max_num_results"] = self.max_num_results
        return result

Enable file search tool for searching uploaded document collections.
Ancestors
- XAITool
- livekit.agents.llm.tool_context.ProviderTool
- livekit.agents.llm.tool_context.Tool
- abc.ABC
Instance variables
var max_num_results : int | None-
Expand source code
@dataclass(slots=True)
class FileSearch(XAITool):
    """Enable file search tool for searching uploaded document collections."""

    vector_store_ids: list[str] = field(default_factory=list)
    max_num_results: int | None = None

    def to_dict(self) -> dict[str, Any]:
        result: dict[str, Any] = {
            "type": "file_search",
            "vector_store_ids": self.vector_store_ids,
        }
        if self.max_num_results is not None:
            result["max_num_results"] = self.max_num_results
        return result
var vector_store_ids : list[str]-
Expand source code
@dataclass(slots=True)
class FileSearch(XAITool):
    """Enable file search tool for searching uploaded document collections."""

    vector_store_ids: list[str] = field(default_factory=list)
    max_num_results: int | None = None

    def to_dict(self) -> dict[str, Any]:
        result: dict[str, Any] = {
            "type": "file_search",
            "vector_store_ids": self.vector_store_ids,
        }
        if self.max_num_results is not None:
            result["max_num_results"] = self.max_num_results
        return result
Methods
def to_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def to_dict(self) -> dict[str, Any]:
    result: dict[str, Any] = {
        "type": "file_search",
        "vector_store_ids": self.vector_store_ids,
    }
    if self.max_num_results is not None:
        result["max_num_results"] = self.max_num_results
    return result
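A minimal sketch, for illustration only: the vector store ID below is a placeholder, and it shows that max_num_results is included in the serialized payload only when it is set.

from livekit.plugins.xai.realtime import FileSearch

# "vs_placeholder" is a made-up vector store ID used purely for illustration
print(FileSearch(vector_store_ids=["vs_placeholder"]).to_dict())
# {'type': 'file_search', 'vector_store_ids': ['vs_placeholder']}

print(FileSearch(vector_store_ids=["vs_placeholder"], max_num_results=5).to_dict())
# {'type': 'file_search', 'vector_store_ids': ['vs_placeholder'], 'max_num_results': 5}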
class RealtimeModel (*,
voice: Literal['Ara', 'Eve', 'Leo', 'Rex', 'Sal'] | str | livekit.agents.types.NotGiven | None = 'Ara',
api_key: str | None = None,
base_url: str | livekit.agents.types.NotGiven = NOT_GIVEN,
turn_detection: openai.types.beta.realtime.session.TurnDetection | livekit.agents.types.NotGiven | None = NOT_GIVEN,
http_session: aiohttp.client.ClientSession | None = None,
max_session_duration: float | livekit.agents.types.NotGiven | None = NOT_GIVEN,
conn_options: livekit.agents.types.APIConnectOptions = APIConnectOptions(max_retry=3, retry_interval=2.0, timeout=10.0))-
Expand source code
class RealtimeModel(openai.realtime.RealtimeModel):
    def __init__(
        self,
        *,
        voice: NotGivenOr[GrokVoices | str | None] = "Ara",
        api_key: str | None = None,
        base_url: NotGivenOr[str] = NOT_GIVEN,
        turn_detection: NotGivenOr[TurnDetection | None] = NOT_GIVEN,
        http_session: aiohttp.ClientSession | None = None,
        max_session_duration: NotGivenOr[float | None] = NOT_GIVEN,
        conn_options: APIConnectOptions = DEFAULT_API_CONNECT_OPTIONS,
    ) -> None:
        api_key = api_key or os.environ.get("XAI_API_KEY")
        if api_key is None:
            raise ValueError(
                "The api_key client option must be set either by passing api_key "
                "to the client or by setting the XAI_API_KEY environment variable"
            )
        resolved_voice = voice if is_given(voice) else "Ara"
        super().__init__(
            base_url=base_url if is_given(base_url) else XAI_BASE_URL,
            model="grok-4-1-fast-non-reasoning",
            voice=resolved_voice,  # type: ignore[arg-type]
            api_key=api_key,
            modalities=["audio"],
            turn_detection=turn_detection if is_given(turn_detection) else XAI_DEFAULT_TURN_DETECTION,
            http_session=http_session if is_given(http_session) else None,
            max_session_duration=max_session_duration if is_given(max_session_duration) else None,
            conn_options=conn_options,
        )

    def session(self) -> "RealtimeSession":
        sess = RealtimeSession(self)
        self._sessions.add(sess)
        return sess

Initialize a Realtime model client for OpenAI or Azure OpenAI.
Args
model:str- Realtime model name, e.g., "gpt-realtime".
voice:str- Voice used for audio responses. Defaults to "marin".
modalities:list[Literal["text", "audio"]] | NotGiven- Modalities to enable. Defaults to ["text", "audio"] if not provided.
tool_choice:llm.ToolChoice | None | NotGiven- Tool selection policy for responses.
base_url:str | NotGiven- HTTP base URL of the OpenAI/Azure API. If not provided, uses OPENAI_BASE_URL for OpenAI; for Azure, constructed from AZURE_OPENAI_ENDPOINT.
input_audio_transcription:AudioTranscription | None | NotGiven- Options for transcribing input audio.
input_audio_noise_reduction:NoiseReductionType | NoiseReduction | InputAudioNoiseReduction | None | NotGiven- Input audio noise reduction settings.
turn_detection:RealtimeAudioInputTurnDetection | None | NotGiven- Server-side turn-detection options.
speed:float | NotGiven- Audio playback speed multiplier.
tracing:Tracing | None | NotGiven- Tracing configuration for OpenAI Realtime.
api_key:str | None- OpenAI API key. If None and not using Azure, read from OPENAI_API_KEY.
http_session:aiohttp.ClientSession | None- Optional shared HTTP session.
azure_deployment:str | None- Azure deployment name. Presence of any Azure-specific option enables Azure mode.
entra_token:str | None- Azure Entra token auth (alternative to api_key).
api_version:str | None- Azure OpenAI API version appended as query parameter.
max_session_duration:float | None | NotGiven- Seconds before recycling the connection.
conn_options:APIConnectOptions- Retry/backoff and connection settings.
temperature:float | NotGiven- Deprecated; ignored by Realtime v1.
Raises
ValueError- If OPENAI_API_KEY is missing in non-Azure mode, or if Azure endpoint cannot be determined when in Azure mode.
Examples
Basic OpenAI usage:
from livekit.plugins.openai.realtime import RealtimeModel
from openai.types import realtime

model = RealtimeModel(
    voice="marin",
    modalities=["audio"],
    input_audio_transcription=realtime.AudioTranscription(
        model="gpt-4o-transcribe",
    ),
    input_audio_noise_reduction="near_field",
    turn_detection=realtime.realtime_audio_input_turn_detection.SemanticVad(
        type="semantic_vad",
        create_response=True,
        eagerness="auto",
        interrupt_response=True,
    ),
)
session = AgentSession(llm=model)

Ancestors
- livekit.plugins.openai.realtime.realtime_model.RealtimeModel
- livekit.agents.llm.realtime.RealtimeModel
Methods
def session(self) ‑> RealtimeSession-
Expand source code
def session(self) -> "RealtimeSession":
    sess = RealtimeSession(self)
    self._sessions.add(sess)
    return sess
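A hedged usage sketch for the xAI model, mirroring the OpenAI example above; it assumes AgentSession is imported from livekit.agents and that XAI_API_KEY is set in the environment:

from livekit.agents import AgentSession
from livekit.plugins.xai.realtime import RealtimeModel

# the API key is read from XAI_API_KEY when api_key is not passed explicitly
model = RealtimeModel(
    voice="Eve",                # any of the documented Grok voices; defaults to "Ara"
    max_session_duration=3600,  # recycle the websocket connection after an hour
)
session = AgentSession(llm=model)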
class RealtimeSession (realtime_model: RealtimeModel)-
Expand source code
class RealtimeSession( llm.RealtimeSession[Literal["openai_server_event_received", "openai_client_event_queued"]] ): """ A session for the OpenAI Realtime API. This class is used to interact with the OpenAI Realtime API. It is responsible for sending events to the OpenAI Realtime API and receiving events from it. It exposes two more events: - openai_server_event_received: expose the raw server events from the OpenAI Realtime API - openai_client_event_queued: expose the raw client events sent to the OpenAI Realtime API """ def __init__(self, realtime_model: RealtimeModel) -> None: super().__init__(realtime_model) self._realtime_model: RealtimeModel = realtime_model self._tools = llm.ToolContext.empty() self._msg_ch = utils.aio.Chan[Union[RealtimeClientEvent, dict[str, Any]]]() self._input_resampler: rtc.AudioResampler | None = None self._instructions: str | None = None self._main_atask = asyncio.create_task(self._main_task(), name="RealtimeSession._main_task") self.send_event(self._create_session_update_event()) self._response_created_futures: dict[str, asyncio.Future[llm.GenerationCreatedEvent]] = {} self._item_delete_future: dict[str, asyncio.Future] = {} self._item_create_future: dict[str, asyncio.Future] = {} self._current_generation: _ResponseGeneration | None = None self._remote_chat_ctx = llm.remote_chat_context.RemoteChatContext() self._update_chat_ctx_lock = asyncio.Lock() self._update_fnc_ctx_lock = asyncio.Lock() # 100ms chunks self._bstream = utils.audio.AudioByteStream( SAMPLE_RATE, NUM_CHANNELS, samples_per_channel=SAMPLE_RATE // 10 ) self._pushed_duration_s: float = 0 # duration of audio pushed to the OpenAI Realtime API def send_event(self, event: RealtimeClientEvent | dict[str, Any]) -> None: with contextlib.suppress(utils.aio.channel.ChanClosed): self._msg_ch.send_nowait(event) @utils.log_exceptions(logger=logger) async def _main_task(self) -> None: num_retries: int = 0 max_retries = self._realtime_model._opts.conn_options.max_retry async def _reconnect() -> None: logger.debug( "reconnecting to OpenAI Realtime API", extra={"max_session_duration": self._realtime_model._opts.max_session_duration}, ) events: list[RealtimeClientEvent | dict[str, Any]] = [] # options and instructions events.append(self._create_session_update_event()) # tools tools = self._tools.flatten() if tools: events.append(self._create_tools_update_event(tools)) # chat context chat_ctx = self.chat_ctx.copy( exclude_function_call=True, exclude_instructions=True, exclude_empty_message=True, exclude_handoff=True, ) old_chat_ctx = self._remote_chat_ctx self._remote_chat_ctx = llm.remote_chat_context.RemoteChatContext() events.extend(self._create_update_chat_ctx_events(chat_ctx)) try: for ev in events: # certain events could already be in dict format if isinstance(ev, BaseModel): ev = ev.model_dump( by_alias=True, exclude_unset=True, exclude_defaults=False ) self.emit("openai_client_event_queued", ev) await ws_conn.send_str(json.dumps(ev)) except Exception as e: self._remote_chat_ctx = old_chat_ctx # restore the old chat context raise APIConnectionError( message=( "Failed to send message to OpenAI Realtime API during session re-connection" ), ) from e logger.debug("reconnected to OpenAI Realtime API") self.emit("session_reconnected", llm.RealtimeSessionReconnectedEvent()) reconnecting = False while not self._msg_ch.closed: try: ws_conn = await self._create_ws_conn() if reconnecting: await _reconnect() num_retries = 0 # reset the retry counter await self._run_ws(ws_conn) except APIError as e: if max_retries == 0 
or not e.retryable: self._emit_error(e, recoverable=False) raise elif num_retries == max_retries: self._emit_error(e, recoverable=False) raise APIConnectionError( f"OpenAI Realtime API connection failed after {num_retries} attempts", ) from e else: self._emit_error(e, recoverable=True) retry_interval = self._realtime_model._opts.conn_options._interval_for_retry( num_retries ) logger.warning( f"OpenAI Realtime API connection failed, retrying in {retry_interval}s", exc_info=e, extra={"attempt": num_retries, "max_retries": max_retries}, ) await asyncio.sleep(retry_interval) num_retries += 1 except Exception as e: self._emit_error(e, recoverable=False) raise reconnecting = True async def _create_ws_conn(self) -> aiohttp.ClientWebSocketResponse: headers = {"User-Agent": "LiveKit Agents"} if self._realtime_model._opts.is_azure: if self._realtime_model._opts.entra_token: headers["Authorization"] = f"Bearer {self._realtime_model._opts.entra_token}" if self._realtime_model._opts.api_key: headers["api-key"] = self._realtime_model._opts.api_key else: headers["Authorization"] = f"Bearer {self._realtime_model._opts.api_key}" url = process_base_url( self._realtime_model._opts.base_url, self._realtime_model._opts.model, is_azure=self._realtime_model._opts.is_azure, api_version=self._realtime_model._opts.api_version, azure_deployment=self._realtime_model._opts.azure_deployment, ) if lk_oai_debug: logger.debug(f"connecting to Realtime API: {url}") try: return await asyncio.wait_for( self._realtime_model._ensure_http_session().ws_connect(url=url, headers=headers), self._realtime_model._opts.conn_options.timeout, ) except aiohttp.ClientError as e: raise APIConnectionError("OpenAI Realtime API client connection error") from e except asyncio.TimeoutError as e: raise APIConnectionError( message="OpenAI Realtime API connection timed out", ) from e async def _run_ws(self, ws_conn: aiohttp.ClientWebSocketResponse) -> None: closing = False @utils.log_exceptions(logger=logger) async def _send_task() -> None: nonlocal closing async for msg in self._msg_ch: try: if isinstance(msg, BaseModel): msg = msg.model_dump( by_alias=True, exclude_unset=True, exclude_defaults=False ) self.emit("openai_client_event_queued", msg) await ws_conn.send_str(json.dumps(msg)) if lk_oai_debug: msg_copy = msg.copy() if msg_copy["type"] == "input_audio_buffer.append": msg_copy = {**msg_copy, "audio": "..."} logger.debug(f">>> {msg_copy}") except Exception: logger.exception("failed to send event") closing = True await ws_conn.close() @utils.log_exceptions(logger=logger) async def _recv_task() -> None: while True: msg = await ws_conn.receive() if msg.type in ( aiohttp.WSMsgType.CLOSED, aiohttp.WSMsgType.CLOSE, aiohttp.WSMsgType.CLOSING, ): if closing: # closing is expected, see _send_task return # this will trigger a reconnection raise APIConnectionError(message="OpenAI S2S connection closed unexpectedly") if msg.type != aiohttp.WSMsgType.TEXT: continue event = json.loads(msg.data) # emit the raw json dictionary instead of the BaseModel because different # providers can have different event types that are not part of the OpenAI Realtime API # noqa: E501 self.emit("openai_server_event_received", event) try: if lk_oai_debug: event_copy = event.copy() if event_copy["type"] == "response.output_audio.delta": event_copy = {**event_copy, "delta": "..."} logger.debug(f"<<< {event_copy}") if event["type"] == "input_audio_buffer.speech_started": self._handle_input_audio_buffer_speech_started( InputAudioBufferSpeechStartedEvent.construct(**event) ) elif 
event["type"] == "input_audio_buffer.speech_stopped": self._handle_input_audio_buffer_speech_stopped( InputAudioBufferSpeechStoppedEvent.construct(**event) ) elif event["type"] == "response.created": self._handle_response_created(ResponseCreatedEvent.construct(**event)) elif event["type"] == "response.output_item.added": self._handle_response_output_item_added( ResponseOutputItemAddedEvent.construct(**event) ) elif event["type"] == "response.content_part.added": self._handle_response_content_part_added( ResponseContentPartAddedEvent.construct(**event) ) elif event["type"] == "conversation.item.added": self._handle_conversion_item_added(ConversationItemAdded.construct(**event)) elif event["type"] == "conversation.item.deleted": self._handle_conversion_item_deleted( ConversationItemDeletedEvent.construct(**event) ) elif event["type"] == "conversation.item.input_audio_transcription.delta": # currently incoming transcripts are transcribed only after the user stops speaking # it's not very useful to emit these as the transcribe process takes place within ~100ms # when they handle streaming transcriptions, we'll handle it then. pass elif event["type"] == "conversation.item.input_audio_transcription.completed": self._handle_conversion_item_input_audio_transcription_completed( ConversationItemInputAudioTranscriptionCompletedEvent.construct(**event) ) elif event["type"] == "conversation.item.input_audio_transcription.failed": self._handle_conversion_item_input_audio_transcription_failed( ConversationItemInputAudioTranscriptionFailedEvent.construct(**event) ) elif event["type"] == "response.output_text.delta": self._handle_response_text_delta(ResponseTextDeltaEvent.construct(**event)) elif event["type"] == "response.output_text.done": self._handle_response_text_done(ResponseTextDoneEvent.construct(**event)) elif event["type"] == "response.output_audio_transcript.delta": self._handle_response_audio_transcript_delta(event) elif event["type"] == "response.output_audio.delta": self._handle_response_audio_delta( ResponseAudioDeltaEvent.construct(**event) ) elif event["type"] == "response.output_audio_transcript.done": self._handle_response_audio_transcript_done( ResponseAudioTranscriptDoneEvent.construct(**event) ) elif event["type"] == "response.output_audio.done": self._handle_response_audio_done(ResponseAudioDoneEvent.construct(**event)) elif event["type"] == "response.output_item.done": self._handle_response_output_item_done( ResponseOutputItemDoneEvent.construct(**event) ) elif event["type"] == "response.done": self._handle_response_done(ResponseDoneEvent.construct(**event)) elif event["type"] == "error": self._handle_error(RealtimeErrorEvent.construct(**event)) elif lk_oai_debug: logger.debug(f"unhandled event: {event['type']}", extra={"event": event}) except Exception: if event["type"] == "response.output_audio.delta": event["delta"] = event["delta"][:10] + "..." 
logger.exception("failed to handle event", extra={"event": event}) tasks = [ asyncio.create_task(_recv_task(), name="_recv_task"), asyncio.create_task(_send_task(), name="_send_task"), ] wait_reconnect_task: asyncio.Task | None = None if self._realtime_model._opts.max_session_duration is not None: wait_reconnect_task = asyncio.create_task( asyncio.sleep(self._realtime_model._opts.max_session_duration), name="_timeout_task", ) tasks.append(wait_reconnect_task) try: done, _ = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED) # propagate exceptions from completed tasks for task in done: if task != wait_reconnect_task: task.result() if wait_reconnect_task and wait_reconnect_task in done and self._current_generation: # wait for the current generation to complete before reconnecting await self._current_generation._done_fut closing = True finally: await utils.aio.cancel_and_wait(*tasks) await ws_conn.close() def _create_session_update_event(self) -> SessionUpdateEvent: audio_format = realtime.realtime_audio_formats.AudioPCM(rate=SAMPLE_RATE, type="audio/pcm") # they do not support both text and audio modalities, it'll respond in audio + transcript modality = "audio" if "audio" in self._realtime_model._opts.modalities else "text" session = RealtimeSessionCreateRequest( type="realtime", model=self._realtime_model._opts.model, output_modalities=[modality], audio=RealtimeAudioConfig( input=RealtimeAudioConfigInput( format=audio_format, noise_reduction=self._realtime_model._opts.input_audio_noise_reduction, transcription=self._realtime_model._opts.input_audio_transcription, turn_detection=self._realtime_model._opts.turn_detection, ), output=RealtimeAudioConfigOutput( format=audio_format, speed=self._realtime_model._opts.speed, voice=self._realtime_model._opts.voice, ), ), max_output_tokens=self._realtime_model._opts.max_response_output_tokens, tool_choice=to_oai_tool_choice(self._realtime_model._opts.tool_choice), tracing=self._realtime_model._opts.tracing, ) if self._instructions is not None: session.instructions = self._instructions # initial session update return SessionUpdateEvent( type="session.update", # Using model_construct since OpenAI restricts voices to those defined in the BaseModel. # noqa: E501 # Other providers support different voices, so we need to accommodate that. 
session=session, event_id=utils.shortuuid("session_update_"), ) @property def chat_ctx(self) -> llm.ChatContext: return self._remote_chat_ctx.to_chat_ctx() @property def tools(self) -> llm.ToolContext: return self._tools.copy() def update_options( self, *, tool_choice: NotGivenOr[llm.ToolChoice | None] = NOT_GIVEN, voice: NotGivenOr[str] = NOT_GIVEN, turn_detection: NotGivenOr[RealtimeAudioInputTurnDetection | None] = NOT_GIVEN, max_response_output_tokens: NotGivenOr[int | Literal["inf"] | None] = NOT_GIVEN, input_audio_transcription: NotGivenOr[AudioTranscription | None] = NOT_GIVEN, input_audio_noise_reduction: NotGivenOr[ NoiseReductionType | NoiseReduction | InputAudioNoiseReduction | None ] = NOT_GIVEN, speed: NotGivenOr[float] = NOT_GIVEN, tracing: NotGivenOr[Tracing | None] = NOT_GIVEN, ) -> None: session = RealtimeSessionCreateRequest( type="realtime", ) has_changes = False if is_given(tool_choice): tool_choice = cast(Optional[llm.ToolChoice], tool_choice) self._realtime_model._opts.tool_choice = tool_choice session.tool_choice = to_oai_tool_choice(tool_choice) has_changes = True if is_given(max_response_output_tokens): self._realtime_model._opts.max_response_output_tokens = max_response_output_tokens # type: ignore session.max_output_tokens = max_response_output_tokens # type: ignore has_changes = True if is_given(tracing): self._realtime_model._opts.tracing = cast(Union[Tracing, None], tracing) session.tracing = cast(Union[Tracing, None], tracing) # type: ignore has_changes = True has_audio_config = False audio_output = RealtimeAudioConfigOutput() audio_input = RealtimeAudioConfigInput() audio_config = RealtimeAudioConfig( output=audio_output, input=audio_input, ) if is_given(voice): self._realtime_model._opts.voice = voice audio_output.voice = voice has_audio_config = True if is_given(turn_detection): self._realtime_model._opts.turn_detection = turn_detection # type: ignore audio_input.turn_detection = turn_detection # type: ignore has_audio_config = True if is_given(input_audio_transcription): self._realtime_model._opts.input_audio_transcription = input_audio_transcription audio_input.transcription = input_audio_transcription has_audio_config = True if is_given(input_audio_noise_reduction): input_audio_noise_reduction = to_noise_reduction(input_audio_noise_reduction) # type: ignore self._realtime_model._opts.input_audio_noise_reduction = input_audio_noise_reduction audio_input.noise_reduction = input_audio_noise_reduction has_audio_config = True if is_given(speed): self._realtime_model._opts.speed = speed audio_output.speed = speed has_audio_config = True if has_audio_config: session.audio = audio_config has_changes = True if has_changes: self.send_event( SessionUpdateEvent( type="session.update", session=session, event_id=utils.shortuuid("options_update_"), ) ) async def update_chat_ctx(self, chat_ctx: llm.ChatContext) -> None: async with self._update_chat_ctx_lock: chat_ctx = chat_ctx.copy(exclude_handoff=True, exclude_instructions=True) events = self._create_update_chat_ctx_events(chat_ctx) futs: list[asyncio.Future[None]] = [] for ev in events: futs.append(f := asyncio.Future[None]()) if isinstance(ev, ConversationItemDeleteEvent): self._item_delete_future[ev.item_id] = f elif isinstance(ev, ConversationItemCreateEvent): assert ev.item.id is not None self._item_create_future[ev.item.id] = f self.send_event(ev) if not futs: return try: await asyncio.wait_for(asyncio.gather(*futs, return_exceptions=True), timeout=5.0) except asyncio.TimeoutError: raise 
llm.RealtimeError("update_chat_ctx timed out.") from None def _create_update_chat_ctx_events( self, chat_ctx: llm.ChatContext ) -> list[ConversationItemCreateEvent | ConversationItemDeleteEvent]: events: list[ConversationItemCreateEvent | ConversationItemDeleteEvent] = [] remote_ctx = self._remote_chat_ctx.to_chat_ctx() diff_ops = llm.utils.compute_chat_ctx_diff(remote_ctx, chat_ctx) def _delete_item(msg_id: str) -> None: events.append( ConversationItemDeleteEvent( type="conversation.item.delete", item_id=msg_id, event_id=utils.shortuuid("chat_ctx_delete_"), ) ) def _create_item(previous_msg_id: str | None, msg_id: str) -> None: chat_item = chat_ctx.get_by_id(msg_id) assert chat_item is not None events.append( ConversationItemCreateEvent( type="conversation.item.create", item=livekit_item_to_openai_item(chat_item), previous_item_id=("root" if previous_msg_id is None else previous_msg_id), event_id=utils.shortuuid("chat_ctx_create_"), ) ) def _is_content_empty(msg_id: str) -> bool: remote_item = remote_ctx.get_by_id(msg_id) if remote_item and remote_item.type == "message" and not remote_item.content: return True return False for msg_id in diff_ops.to_remove: # we don't have content synced down for some types of content (audio/images) # these won't be present in the Agent's view of the context # so in those cases, we do not want to remove them from the server context if _is_content_empty(msg_id): continue _delete_item(msg_id) for previous_msg_id, msg_id in diff_ops.to_create: _create_item(previous_msg_id, msg_id) # update the items with the same id but different content for previous_msg_id, msg_id in diff_ops.to_update: # likewise, empty content almost always means the content is not synced down # we don't want to recreate these items there if _is_content_empty(msg_id): continue _delete_item(msg_id) _create_item(previous_msg_id, msg_id) return events async def update_tools(self, tools: list[llm.Tool]) -> None: async with self._update_fnc_ctx_lock: ev = self._create_tools_update_event(tools) self.send_event(ev) retained_tool_names: set[str] = set() for t in ev["session"]["tools"]: if name := t.get("name"): retained_tool_names.add(name) # TODO(dz): handle MCP tools retained_tools = [ tool for tool in tools if ( isinstance(tool, (llm.FunctionTool, llm.RawFunctionTool)) and tool.info.name in retained_tool_names ) or isinstance(tool, llm.ProviderTool) ] self._tools = llm.ToolContext(retained_tools) # this function can be overrided def _create_tools_update_event(self, tools: list[llm.Tool]) -> dict[str, Any]: oai_tools: list[RealtimeFunctionTool] = [] for tool in tools: if isinstance(tool, llm.FunctionTool): tool_desc = llm.utils.build_legacy_openai_schema(tool, internally_tagged=True) elif isinstance(tool, llm.RawFunctionTool): tool_desc = tool.info.raw_schema tool_desc.pop("meta", None) # meta is not supported by OpenAI Realtime API tool_desc["type"] = "function" # internally tagged elif isinstance(tool, llm.ProviderTool): continue # currently only xAI supports ProviderTools else: logger.error( "OpenAI Realtime API doesn't support this tool type", extra={"tool": tool} ) continue try: session_tool = RealtimeFunctionTool.model_validate(tool_desc) oai_tools.append(session_tool) except ValidationError: logger.error( "OpenAI Realtime API doesn't support this tool", extra={"tool": tool_desc}, ) continue event = SessionUpdateEvent( type="session.update", session=RealtimeSessionCreateRequest.model_construct( type="realtime", model=self._realtime_model._opts.model, tools=oai_tools, # type: ignore ), 
event_id=utils.shortuuid("tools_update_"), ) event_dict = event.model_dump(by_alias=True, exclude_unset=True, exclude_defaults=False) return event_dict async def update_instructions(self, instructions: str) -> None: event_id = utils.shortuuid("instructions_update_") self.send_event( SessionUpdateEvent( type="session.update", session=RealtimeSessionCreateRequest.model_construct( type="realtime", instructions=instructions, ), event_id=event_id, ) ) self._instructions = instructions def push_audio(self, frame: rtc.AudioFrame) -> None: for f in self._resample_audio(frame): data = f.data.tobytes() for nf in self._bstream.write(data): self.send_event( InputAudioBufferAppendEvent( type="input_audio_buffer.append", audio=base64.b64encode(nf.data).decode("utf-8"), ) ) self._pushed_duration_s += nf.duration def push_video(self, frame: rtc.VideoFrame) -> None: message = llm.ChatMessage( role="user", content=[llm.ImageContent(image=frame)], ) oai_item = livekit_item_to_openai_item(message) self.send_event( ConversationItemCreateEvent( type="conversation.item.create", item=oai_item, event_id=utils.shortuuid("video_"), ) ) def commit_audio(self) -> None: if self._pushed_duration_s > 0.1: # OpenAI requires at least 100ms of audio self.send_event(InputAudioBufferCommitEvent(type="input_audio_buffer.commit")) self._pushed_duration_s = 0 def clear_audio(self) -> None: self.send_event(InputAudioBufferClearEvent(type="input_audio_buffer.clear")) self._pushed_duration_s = 0 def generate_reply( self, *, instructions: NotGivenOr[str] = NOT_GIVEN ) -> asyncio.Future[llm.GenerationCreatedEvent]: event_id = utils.shortuuid("response_create_") fut = asyncio.Future[llm.GenerationCreatedEvent]() self._response_created_futures[event_id] = fut self.send_event( ResponseCreateEvent( type="response.create", event_id=event_id, response=RealtimeResponseCreateParams( instructions=instructions or None, metadata={"client_event_id": event_id}, ), ) ) def _on_timeout() -> None: if fut and not fut.done(): fut.set_exception(llm.RealtimeError("generate_reply timed out.")) handle = asyncio.get_event_loop().call_later(5.0, _on_timeout) fut.add_done_callback(lambda _: handle.cancel()) return fut def interrupt(self) -> None: self.send_event(ResponseCancelEvent(type="response.cancel")) def truncate( self, *, message_id: str, modalities: list[Literal["text", "audio"]], audio_end_ms: int, audio_transcript: NotGivenOr[str] = NOT_GIVEN, ) -> None: if "audio" in modalities: self.send_event( ConversationItemTruncateEvent( type="conversation.item.truncate", content_index=0, item_id=message_id, audio_end_ms=audio_end_ms, ) ) elif utils.is_given(audio_transcript): # sync the forwarded text to the remote chat ctx chat_ctx = self.chat_ctx.copy( exclude_handoff=True, ) if (idx := chat_ctx.index_by_id(message_id)) is not None: new_item = copy.copy(chat_ctx.items[idx]) assert new_item.type == "message" new_item.content = [audio_transcript] chat_ctx.items[idx] = new_item events = self._create_update_chat_ctx_events(chat_ctx) for ev in events: self.send_event(ev) async def aclose(self) -> None: self._msg_ch.close() await self._main_atask def _resample_audio(self, frame: rtc.AudioFrame) -> Iterator[rtc.AudioFrame]: if self._input_resampler: if frame.sample_rate != self._input_resampler._input_rate: # input audio changed to a different sample rate self._input_resampler = None if self._input_resampler is None and ( frame.sample_rate != SAMPLE_RATE or frame.num_channels != NUM_CHANNELS ): self._input_resampler = rtc.AudioResampler( 
input_rate=frame.sample_rate, output_rate=SAMPLE_RATE, num_channels=NUM_CHANNELS, ) if self._input_resampler: # TODO(long): flush the resampler when the input source is changed yield from self._input_resampler.push(frame) else: yield frame def _handle_input_audio_buffer_speech_started( self, _: InputAudioBufferSpeechStartedEvent ) -> None: self.emit("input_speech_started", llm.InputSpeechStartedEvent()) def _handle_input_audio_buffer_speech_stopped( self, _: InputAudioBufferSpeechStoppedEvent ) -> None: user_transcription_enabled = ( self._realtime_model._opts.input_audio_transcription is not None ) self.emit( "input_speech_stopped", llm.InputSpeechStoppedEvent(user_transcription_enabled=user_transcription_enabled), ) def _handle_response_created(self, event: ResponseCreatedEvent) -> None: assert event.response.id is not None, "response.id is None" self._current_generation = _ResponseGeneration( message_ch=utils.aio.Chan(), function_ch=utils.aio.Chan(), messages={}, _created_timestamp=time.time(), _done_fut=asyncio.Future(), ) generation_ev = llm.GenerationCreatedEvent( message_stream=self._current_generation.message_ch, function_stream=self._current_generation.function_ch, user_initiated=False, response_id=event.response.id, ) if ( isinstance(event.response.metadata, dict) and (client_event_id := event.response.metadata.get("client_event_id")) and (fut := self._response_created_futures.pop(client_event_id, None)) ): if not fut.done(): generation_ev.user_initiated = True fut.set_result(generation_ev) else: logger.warning("response of generate_reply received after it's timed out.") self.emit("generation_created", generation_ev) def _handle_response_output_item_added(self, event: ResponseOutputItemAddedEvent) -> None: assert self._current_generation is not None, "current_generation is None" assert (item_id := event.item.id) is not None, "item.id is None" assert (item_type := event.item.type) is not None, "item.type is None" if item_type == "message": item_generation = _MessageGeneration( message_id=item_id, text_ch=utils.aio.Chan(), audio_ch=utils.aio.Chan(), modalities=asyncio.Future(), ) if not self._realtime_model.capabilities.audio_output: item_generation.audio_ch.close() item_generation.modalities.set_result(["text"]) self._current_generation.message_ch.send_nowait( llm.MessageGeneration( message_id=item_id, text_stream=item_generation.text_ch, audio_stream=item_generation.audio_ch, modalities=item_generation.modalities, ) ) self._current_generation.messages[item_id] = item_generation def _handle_response_content_part_added(self, event: ResponseContentPartAddedEvent) -> None: assert self._current_generation is not None, "current_generation is None" assert (item_id := event.item_id) is not None, "item_id is None" assert (item_type := event.part.type) is not None, "part.type is None" if item_type == "text" and self._realtime_model.capabilities.audio_output: logger.warning("Text response received from OpenAI Realtime API in audio modality.") with contextlib.suppress(asyncio.InvalidStateError): self._current_generation.messages[item_id].modalities.set_result( ["text"] if item_type == "text" else ["audio", "text"] ) def _handle_conversion_item_added(self, event: ConversationItemAdded) -> None: assert event.item.id is not None, "item.id is None" try: self._remote_chat_ctx.insert( event.previous_item_id, openai_item_to_livekit_item(event.item) ) except ValueError as e: logger.warning( f"failed to insert item `{event.item.id}`: {str(e)}", ) if fut := self._item_create_future.pop(event.item.id, 
None): fut.set_result(None) def _handle_conversion_item_deleted(self, event: ConversationItemDeletedEvent) -> None: assert event.item_id is not None, "item_id is None" try: self._remote_chat_ctx.delete(event.item_id) except ValueError as e: logger.warning( f"failed to delete item `{event.item_id}`: {str(e)}", ) if fut := self._item_delete_future.pop(event.item_id, None): fut.set_result(None) def _handle_conversion_item_input_audio_transcription_completed( self, event: ConversationItemInputAudioTranscriptionCompletedEvent ) -> None: if remote_item := self._remote_chat_ctx.get(event.item_id): assert isinstance(remote_item.item, llm.ChatMessage) remote_item.item.content.append(event.transcript) self.emit( "input_audio_transcription_completed", llm.InputTranscriptionCompleted( item_id=event.item_id, transcript=event.transcript, is_final=True, ), ) def _handle_conversion_item_input_audio_transcription_failed( self, event: ConversationItemInputAudioTranscriptionFailedEvent ) -> None: logger.error( "OpenAI Realtime API failed to transcribe input audio", extra={"error": event.error}, ) def _handle_response_text_delta(self, event: ResponseTextDeltaEvent) -> None: assert self._current_generation is not None, "current_generation is None" item_generation = self._current_generation.messages[event.item_id] if ( item_generation.audio_ch.closed and self._current_generation._first_token_timestamp is None ): # only if audio is not available self._current_generation._first_token_timestamp = time.time() item_generation.text_ch.send_nowait(event.delta) item_generation.audio_transcript += event.delta def _handle_response_text_done(self, event: ResponseTextDoneEvent) -> None: assert self._current_generation is not None, "current_generation is None" def _handle_response_audio_transcript_delta(self, event: dict[str, Any]) -> None: assert self._current_generation is not None, "current_generation is None" item_id = event["item_id"] delta = event["delta"] if (start_time := event.get("start_time")) is not None: delta = io.TimedString(delta, start_time=start_time) item_generation = self._current_generation.messages[item_id] item_generation.text_ch.send_nowait(delta) item_generation.audio_transcript += delta def _handle_response_audio_delta(self, event: ResponseAudioDeltaEvent) -> None: assert self._current_generation is not None, "current_generation is None" item_generation = self._current_generation.messages[event.item_id] if self._current_generation._first_token_timestamp is None: self._current_generation._first_token_timestamp = time.time() if not item_generation.modalities.done(): item_generation.modalities.set_result(["audio", "text"]) data = base64.b64decode(event.delta) item_generation.audio_ch.send_nowait( rtc.AudioFrame( data=data, sample_rate=SAMPLE_RATE, num_channels=NUM_CHANNELS, samples_per_channel=len(data) // 2, ) ) def _handle_response_audio_transcript_done( self, event: ResponseAudioTranscriptDoneEvent ) -> None: assert self._current_generation is not None, "current_generation is None" # also need to sync existing item's context remote_item = self._remote_chat_ctx.get(event.item_id) if remote_item and event.transcript and isinstance(remote_item.item, llm.ChatMessage): remote_item.item.content.append(event.transcript) def _handle_response_audio_done(self, _: ResponseAudioDoneEvent) -> None: assert self._current_generation is not None, "current_generation is None" def _handle_response_output_item_done(self, event: ResponseOutputItemDoneEvent) -> None: assert self._current_generation is not None, 
"current_generation is None" assert (item_id := event.item.id) is not None, "item.id is None" assert (item_type := event.item.type) is not None, "item.type is None" if item_type == "function_call" and isinstance( event.item, RealtimeConversationItemFunctionCall ): item = event.item assert item.call_id is not None, "call_id is None" assert item.name is not None, "name is None" assert item.arguments is not None, "arguments is None" self._current_generation.function_ch.send_nowait( llm.FunctionCall( call_id=item.call_id, name=item.name, arguments=item.arguments, ) ) elif item_type == "message": item_generation = self._current_generation.messages[item_id] item_generation.text_ch.close() item_generation.audio_ch.close() if not item_generation.modalities.done(): # in case message modalities is not set, this shouldn't happen item_generation.modalities.set_result(self._realtime_model._opts.modalities) def _handle_response_done(self, event: ResponseDoneEvent) -> None: if self._current_generation is None: return # OpenAI has a race condition where we could receive response.done without any previous response.created (This happens generally during interruption) # noqa: E501 assert self._current_generation is not None, "current_generation is None" created_timestamp = self._current_generation._created_timestamp first_token_timestamp = self._current_generation._first_token_timestamp for generation in self._current_generation.messages.values(): # close all messages that haven't been closed yet if not generation.text_ch.closed: generation.text_ch.close() if not generation.audio_ch.closed: generation.audio_ch.close() if not generation.modalities.done(): generation.modalities.set_result(self._realtime_model._opts.modalities) self._current_generation.function_ch.close() self._current_generation.message_ch.close() for item_id, item_generation in self._current_generation.messages.items(): if (remote_item := self._remote_chat_ctx.get(item_id)) and isinstance( remote_item.item, llm.ChatMessage ): remote_item.item.content.append(item_generation.audio_transcript) with contextlib.suppress(asyncio.InvalidStateError): self._current_generation._done_fut.set_result(None) self._current_generation = None # calculate metrics usage = ( event.response.usage.model_dump(exclude_defaults=True) if event.response.usage else {} ) ttft = first_token_timestamp - created_timestamp if first_token_timestamp else -1 duration = time.time() - created_timestamp metrics = RealtimeModelMetrics( timestamp=created_timestamp, request_id=event.response.id or "", ttft=ttft, duration=duration, cancelled=event.response.status == "cancelled", label=self._realtime_model.label, input_tokens=usage.get("input_tokens", 0), output_tokens=usage.get("output_tokens", 0), total_tokens=usage.get("total_tokens", 0), tokens_per_second=usage.get("output_tokens", 0) / duration if duration > 0 else 0, input_token_details=RealtimeModelMetrics.InputTokenDetails( audio_tokens=usage.get("input_token_details", {}).get("audio_tokens", 0), cached_tokens=usage.get("input_token_details", {}).get("cached_tokens", 0), text_tokens=usage.get("input_token_details", {}).get("text_tokens", 0), cached_tokens_details=RealtimeModelMetrics.CachedTokenDetails( text_tokens=usage.get("input_token_details", {}) .get("cached_tokens_details", {}) .get("text_tokens", 0), audio_tokens=usage.get("input_token_details", {}) .get("cached_tokens_details", {}) .get("audio_tokens", 0), image_tokens=usage.get("input_token_details", {}) .get("cached_tokens_details", {}) .get("image_tokens", 0), ), 
image_tokens=usage.get("input_token_details", {}).get("image_tokens", 0), ), output_token_details=RealtimeModelMetrics.OutputTokenDetails( text_tokens=usage.get("output_token_details", {}).get("text_tokens", 0), audio_tokens=usage.get("output_token_details", {}).get("audio_tokens", 0), image_tokens=usage.get("output_token_details", {}).get("image_tokens", 0), ), metadata=Metadata( model_name=self._realtime_model.model, model_provider=self._realtime_model.provider ), ) self.emit("metrics_collected", metrics) self._handle_response_done_but_not_complete(event) def _handle_response_done_but_not_complete(self, event: ResponseDoneEvent) -> None: """Handle response done but not complete, i.e. cancelled, incomplete or failed. For example this method will emit an error if we receive a "failed" status, e.g. with type "invalid_request_error" due to code "inference_rate_limit_exceeded". In other failures it will emit a debug level log. """ if event.response.status == "completed": return if event.response.status == "failed": if event.response.status_details and hasattr(event.response.status_details, "error"): error_type = getattr(event.response.status_details.error, "type", "unknown") error_body = event.response.status_details.error message = f"OpenAI Realtime API response failed with error type: {error_type}" else: error_body = None message = "OpenAI Realtime API response failed with unknown error" self._emit_error( APIError( message=message, body=error_body, retryable=True, ), # all possible faulures undocumented by openai, # so we assume optimistically all retryable/recoverable recoverable=True, ) elif event.response.status in {"cancelled", "incomplete"}: logger.debug( "OpenAI Realtime API response done but not complete with status: %s", event.response.status, extra={ "event_id": event.response.id, "event_response_status": event.response.status, }, ) else: logger.debug("Unknown response status: %s", event.response.status) def _handle_error(self, event: RealtimeErrorEvent) -> None: if event.error.message.startswith("Cancellation failed"): return logger.error( "OpenAI Realtime API returned an error", extra={"error": event.error}, ) self._emit_error( APIError( message="OpenAI Realtime API returned an error", body=event.error, retryable=True, ), recoverable=True, ) # TODO: set exception for the response future if it exists def _emit_error(self, error: Exception, recoverable: bool) -> None: self.emit( "error", llm.RealtimeModelError( timestamp=time.time(), label=self._realtime_model._label, error=error, recoverable=recoverable, ), )A session for the OpenAI Realtime API.
This class is used to interact with the OpenAI Realtime API. It is responsible for sending events to the OpenAI Realtime API and receiving events from it.
It exposes two more events:

- openai_server_event_received: expose the raw server events from the OpenAI Realtime API
- openai_client_event_queued: expose the raw client events sent to the OpenAI Realtime API
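As a rough sketch of observing these raw events (assuming the EventEmitter .on() registration inherited from the ancestors listed below), both directions of traffic can be logged on a session obtained from RealtimeModel.session():

model = RealtimeModel()  # reads XAI_API_KEY from the environment
session = model.session()

# raw server events arriving from the realtime API, as plain dicts
session.on("openai_server_event_received", lambda ev: print("server:", ev.get("type")))

# raw client events queued for sending to the realtime API
session.on("openai_client_event_queued", lambda ev: print("client:", ev.get("type")))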
Ancestors
- livekit.agents.llm.realtime.RealtimeSession
- abc.ABC
- EventEmitter
- typing.Generic
Subclasses
Instance variables
prop chat_ctx : llm.ChatContext-
Expand source code
@property
def chat_ctx(self) -> llm.ChatContext:
    return self._remote_chat_ctx.to_chat_ctx()

prop tools : llm.ToolContext-
Expand source code
@property
def tools(self) -> llm.ToolContext:
    return self._tools.copy()
Methods
async def aclose(self) ‑> None-
Expand source code
async def aclose(self) -> None:
    self._msg_ch.close()
    await self._main_atask

def clear_audio(self) ‑> None-
Expand source code
def clear_audio(self) -> None:
    self.send_event(InputAudioBufferClearEvent(type="input_audio_buffer.clear"))
    self._pushed_duration_s = 0

def commit_audio(self) ‑> None-
Expand source code
def commit_audio(self) -> None:
    if self._pushed_duration_s > 0.1:  # OpenAI requires at least 100ms of audio
        self.send_event(InputAudioBufferCommitEvent(type="input_audio_buffer.commit"))
        self._pushed_duration_s = 0

def generate_reply(self, *, instructions: NotGivenOr[str] = NOT_GIVEN) ‑> _asyncio.Future[livekit.agents.llm.realtime.GenerationCreatedEvent]-
Expand source code
def generate_reply(
    self, *, instructions: NotGivenOr[str] = NOT_GIVEN
) -> asyncio.Future[llm.GenerationCreatedEvent]:
    event_id = utils.shortuuid("response_create_")
    fut = asyncio.Future[llm.GenerationCreatedEvent]()
    self._response_created_futures[event_id] = fut
    self.send_event(
        ResponseCreateEvent(
            type="response.create",
            event_id=event_id,
            response=RealtimeResponseCreateParams(
                instructions=instructions or None,
                metadata={"client_event_id": event_id},
            ),
        )
    )

    def _on_timeout() -> None:
        if fut and not fut.done():
            fut.set_exception(llm.RealtimeError("generate_reply timed out."))

    handle = asyncio.get_event_loop().call_later(5.0, _on_timeout)
    fut.add_done_callback(lambda _: handle.cancel())
    return fut

def interrupt(self) ‑> None-
Expand source code
def interrupt(self) -> None:
    self.send_event(ResponseCancelEvent(type="response.cancel"))

def push_audio(self, frame: rtc.AudioFrame) ‑> None-
Expand source code
def push_audio(self, frame: rtc.AudioFrame) -> None:
    for f in self._resample_audio(frame):
        data = f.data.tobytes()
        for nf in self._bstream.write(data):
            self.send_event(
                InputAudioBufferAppendEvent(
                    type="input_audio_buffer.append",
                    audio=base64.b64encode(nf.data).decode("utf-8"),
                )
            )
            self._pushed_duration_s += nf.duration

def push_video(self, frame: rtc.VideoFrame) ‑> None-
Expand source code
def push_video(self, frame: rtc.VideoFrame) -> None:
    message = llm.ChatMessage(
        role="user",
        content=[llm.ImageContent(image=frame)],
    )
    oai_item = livekit_item_to_openai_item(message)
    self.send_event(
        ConversationItemCreateEvent(
            type="conversation.item.create",
            item=oai_item,
            event_id=utils.shortuuid("video_"),
        )
    )

def send_event(self, event: RealtimeClientEvent | dict[str, Any]) ‑> None-
Expand source code
def send_event(self, event: RealtimeClientEvent | dict[str, Any]) -> None:
    with contextlib.suppress(utils.aio.channel.ChanClosed):
        self._msg_ch.send_nowait(event)

def truncate(self,
*,
message_id: str,
modalities: "list[Literal['text', 'audio']]",
audio_end_ms: int,
audio_transcript: NotGivenOr[str] = NOT_GIVEN) ‑> None-
Expand source code
def truncate( self, *, message_id: str, modalities: list[Literal["text", "audio"]], audio_end_ms: int, audio_transcript: NotGivenOr[str] = NOT_GIVEN, ) -> None: if "audio" in modalities: self.send_event( ConversationItemTruncateEvent( type="conversation.item.truncate", content_index=0, item_id=message_id, audio_end_ms=audio_end_ms, ) ) elif utils.is_given(audio_transcript): # sync the forwarded text to the remote chat ctx chat_ctx = self.chat_ctx.copy( exclude_handoff=True, ) if (idx := chat_ctx.index_by_id(message_id)) is not None: new_item = copy.copy(chat_ctx.items[idx]) assert new_item.type == "message" new_item.content = [audio_transcript] chat_ctx.items[idx] = new_item events = self._create_update_chat_ctx_events(chat_ctx) for ev in events: self.send_event(ev) async def update_chat_ctx(self, chat_ctx: llm.ChatContext) ‑> None-
Expand source code
async def update_chat_ctx(self, chat_ctx: llm.ChatContext) -> None: async with self._update_chat_ctx_lock: chat_ctx = chat_ctx.copy(exclude_handoff=True, exclude_instructions=True) events = self._create_update_chat_ctx_events(chat_ctx) futs: list[asyncio.Future[None]] = [] for ev in events: futs.append(f := asyncio.Future[None]()) if isinstance(ev, ConversationItemDeleteEvent): self._item_delete_future[ev.item_id] = f elif isinstance(ev, ConversationItemCreateEvent): assert ev.item.id is not None self._item_create_future[ev.item.id] = f self.send_event(ev) if not futs: return try: await asyncio.wait_for(asyncio.gather(*futs, return_exceptions=True), timeout=5.0) except asyncio.TimeoutError: raise llm.RealtimeError("update_chat_ctx timed out.") from None async def update_instructions(self, instructions: str) ‑> None-
Expand source code
async def update_instructions(self, instructions: str) -> None:
    event_id = utils.shortuuid("instructions_update_")
    self.send_event(
        SessionUpdateEvent(
            type="session.update",
            session=RealtimeSessionCreateRequest.model_construct(
                type="realtime",
                instructions=instructions,
            ),
            event_id=event_id,
        )
    )
    self._instructions = instructions

def update_options(self,
*,
tool_choice: NotGivenOr[llm.ToolChoice | None] = NOT_GIVEN,
voice: NotGivenOr[str] = NOT_GIVEN,
turn_detection: NotGivenOr[RealtimeAudioInputTurnDetection | None] = NOT_GIVEN,
max_response_output_tokens: "NotGivenOr[int | Literal['inf'] | None]" = NOT_GIVEN,
input_audio_transcription: NotGivenOr[AudioTranscription | None] = NOT_GIVEN,
input_audio_noise_reduction: NotGivenOr[NoiseReductionType | NoiseReduction | InputAudioNoiseReduction | None] = NOT_GIVEN,
speed: NotGivenOr[float] = NOT_GIVEN,
tracing: NotGivenOr[Tracing | None] = NOT_GIVEN) ‑> None-
Expand source code
def update_options( self, *, tool_choice: NotGivenOr[llm.ToolChoice | None] = NOT_GIVEN, voice: NotGivenOr[str] = NOT_GIVEN, turn_detection: NotGivenOr[RealtimeAudioInputTurnDetection | None] = NOT_GIVEN, max_response_output_tokens: NotGivenOr[int | Literal["inf"] | None] = NOT_GIVEN, input_audio_transcription: NotGivenOr[AudioTranscription | None] = NOT_GIVEN, input_audio_noise_reduction: NotGivenOr[ NoiseReductionType | NoiseReduction | InputAudioNoiseReduction | None ] = NOT_GIVEN, speed: NotGivenOr[float] = NOT_GIVEN, tracing: NotGivenOr[Tracing | None] = NOT_GIVEN, ) -> None: session = RealtimeSessionCreateRequest( type="realtime", ) has_changes = False if is_given(tool_choice): tool_choice = cast(Optional[llm.ToolChoice], tool_choice) self._realtime_model._opts.tool_choice = tool_choice session.tool_choice = to_oai_tool_choice(tool_choice) has_changes = True if is_given(max_response_output_tokens): self._realtime_model._opts.max_response_output_tokens = max_response_output_tokens # type: ignore session.max_output_tokens = max_response_output_tokens # type: ignore has_changes = True if is_given(tracing): self._realtime_model._opts.tracing = cast(Union[Tracing, None], tracing) session.tracing = cast(Union[Tracing, None], tracing) # type: ignore has_changes = True has_audio_config = False audio_output = RealtimeAudioConfigOutput() audio_input = RealtimeAudioConfigInput() audio_config = RealtimeAudioConfig( output=audio_output, input=audio_input, ) if is_given(voice): self._realtime_model._opts.voice = voice audio_output.voice = voice has_audio_config = True if is_given(turn_detection): self._realtime_model._opts.turn_detection = turn_detection # type: ignore audio_input.turn_detection = turn_detection # type: ignore has_audio_config = True if is_given(input_audio_transcription): self._realtime_model._opts.input_audio_transcription = input_audio_transcription audio_input.transcription = input_audio_transcription has_audio_config = True if is_given(input_audio_noise_reduction): input_audio_noise_reduction = to_noise_reduction(input_audio_noise_reduction) # type: ignore self._realtime_model._opts.input_audio_noise_reduction = input_audio_noise_reduction audio_input.noise_reduction = input_audio_noise_reduction has_audio_config = True if is_given(speed): self._realtime_model._opts.speed = speed audio_output.speed = speed has_audio_config = True if has_audio_config: session.audio = audio_config has_changes = True if has_changes: self.send_event( SessionUpdateEvent( type="session.update", session=session, event_id=utils.shortuuid("options_update_"), ) ) async def update_tools(self, tools: list[llm.Tool]) ‑> None-
Expand source code
async def update_tools(self, tools: list[llm.Tool]) -> None:
    async with self._update_fnc_ctx_lock:
        ev = self._create_tools_update_event(tools)
        self.send_event(ev)

        retained_tool_names: set[str] = set()
        for t in ev["session"]["tools"]:
            if name := t.get("name"):
                retained_tool_names.add(name)

        # TODO(dz): handle MCP tools
        retained_tools = [
            tool
            for tool in tools
            if (
                isinstance(tool, (llm.FunctionTool, llm.RawFunctionTool))
                and tool.info.name in retained_tool_names
            )
            or isinstance(tool, llm.ProviderTool)
        ]
        self._tools = llm.ToolContext(retained_tools)
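A hedged end-to-end sketch combining the methods above: registering xAI provider tools on a session created via RealtimeModel.session(), then requesting a reply. The X handle and instruction text are placeholders, and in a typical agent these calls are driven by AgentSession rather than written by hand:

import asyncio

from livekit.plugins.xai.realtime import RealtimeModel, WebSearch, XSearch


async def main() -> None:
    model = RealtimeModel()  # XAI_API_KEY read from the environment
    session = model.session()

    # provider tools are forwarded to the realtime API unchanged
    await session.update_tools([WebSearch(), XSearch(allowed_x_handles=["example_handle"])])

    # the returned future resolves once the server acknowledges response.create,
    # or fails with a RealtimeError after the 5 s timeout shown in generate_reply above
    generation = await session.generate_reply(instructions="Greet the user briefly.")
    print(generation.response_id)

    await session.aclose()


asyncio.run(main())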
Inherited members
class TurnDetection (**data: Any)-
Expand source code
class TurnDetection(BaseModel): create_response: Optional[bool] = None """ Whether or not to automatically generate a response when a VAD stop event occurs. """ eagerness: Optional[Literal["low", "medium", "high", "auto"]] = None """Used only for `semantic_vad` mode. The eagerness of the model to respond. `low` will wait longer for the user to continue speaking, `high` will respond more quickly. `auto` is the default and is equivalent to `medium`. """ interrupt_response: Optional[bool] = None """ Whether or not to automatically interrupt any ongoing response with output to the default conversation (i.e. `conversation` of `auto`) when a VAD start event occurs. """ prefix_padding_ms: Optional[int] = None """Used only for `server_vad` mode. Amount of audio to include before the VAD detected speech (in milliseconds). Defaults to 300ms. """ silence_duration_ms: Optional[int] = None """Used only for `server_vad` mode. Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms. With shorter values the model will respond more quickly, but may jump in on short pauses from the user. """ threshold: Optional[float] = None """Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher threshold will require louder audio to activate the model, and thus might perform better in noisy environments. """ type: Optional[Literal["server_vad", "semantic_vad"]] = None """Type of turn detection."""Usage docs: https://docs.pydantic.dev/2.10/concepts/models/
A base class for creating Pydantic models.
Attributes
__class_vars__- The names of the class variables defined on the model.
__private_attributes__- Metadata about the private attributes of the model.
__signature__- The synthesized __init__ [Signature][inspect.Signature] of the model.
__pydantic_complete__- Whether model building is completed, or if there are still undefined fields.
__pydantic_core_schema__- The core schema of the model.
__pydantic_custom_init__- Whether the model has a custom __init__ function.
__pydantic_decorators__- Metadata containing the decorators defined on the model. This replaces Model.__validators__ and Model.__root_validators__ from Pydantic V1.
__pydantic_generic_metadata__- Metadata for generic models; contains data used for a similar purpose to args, origin, parameters in typing-module generics. May eventually be replaced by these.
__pydantic_parent_namespace__- Parent namespace of the model, used for automatic rebuilding of models.
__pydantic_post_init__- The name of the post-init method for the model, if defined.
__pydantic_root_model__- Whether the model is a [RootModel][pydantic.root_model.RootModel].
__pydantic_serializer__- The pydantic-core SchemaSerializer used to dump instances of the model.
__pydantic_validator__- The pydantic-core SchemaValidator used to validate instances of the model.
__pydantic_fields__- A dictionary of field names and their corresponding [FieldInfo][pydantic.fields.FieldInfo] objects.
__pydantic_computed_fields__- A dictionary of computed field names and their corresponding [ComputedFieldInfo][pydantic.fields.ComputedFieldInfo] objects.
__pydantic_extra__- A dictionary containing extra values, if [extra][pydantic.config.ConfigDict.extra] is set to 'allow'.
__pydantic_fields_set__- The names of fields explicitly set during instantiation.
__pydantic_private__- Values of private attributes set on the model instance.
Create a new model by parsing and validating input data from keyword arguments.
Raises [ValidationError][pydantic_core.ValidationError] if the input data cannot be validated to form a valid model. self is explicitly positional-only to allow self as a field name.
Ancestors
- openai.BaseModel
- pydantic.main.BaseModel
Class variables
var create_response : bool | None-
Whether or not to automatically generate a response when a VAD stop event occurs.
var eagerness : Literal['low', 'medium', 'high', 'auto'] | None-
Used only for semantic_vad mode. The eagerness of the model to respond. low will wait longer for the user to continue speaking, high will respond more quickly. auto is the default and is equivalent to medium.
var interrupt_response : bool | None-
Whether or not to automatically interrupt any ongoing response with output to the default conversation (i.e. conversation of auto) when a VAD start event occurs.
var model_config
var prefix_padding_ms : int | None-
Used only for server_vad mode. Amount of audio to include before the VAD detected speech (in milliseconds). Defaults to 300ms.
var silence_duration_ms : int | None-
Used only for server_vad mode. Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms. With shorter values the model will respond more quickly, but may jump in on short pauses from the user.
var threshold : float | None-
Used only for server_vad mode. Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher threshold will require louder audio to activate the model, and thus might perform better in noisy environments.
var type : Literal['server_vad', 'semantic_vad'] | None-
Type of turn detection.
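An illustrative sketch with arbitrarily chosen values: a server-VAD configuration built from this model and handed to the xAI RealtimeModel through its turn_detection parameter:

from openai.types.beta.realtime.session import TurnDetection

from livekit.plugins.xai.realtime import RealtimeModel

turn_detection = TurnDetection(
    type="server_vad",
    threshold=0.5,            # higher values require louder audio to trigger VAD
    prefix_padding_ms=300,    # audio kept from before speech was detected
    silence_duration_ms=500,  # silence required before the turn is considered over
    create_response=True,
    interrupt_response=True,
)

model = RealtimeModel(turn_detection=turn_detection)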
class WebSearch-
Expand source code
@dataclass(slots=True)
class WebSearch(XAITool):
    """Enable web search tool for real-time internet searches."""

    def to_dict(self) -> dict[str, Any]:
        return {"type": "web_search"}

Enable web search tool for real-time internet searches.
Ancestors
- XAITool
- livekit.agents.llm.tool_context.ProviderTool
- livekit.agents.llm.tool_context.Tool
- abc.ABC
Methods
def to_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def to_dict(self) -> dict[str, Any]:
    return {"type": "web_search"}
class XSearch (allowed_x_handles: list[str] | None = None)-
Expand source code
@dataclass(slots=True)
class XSearch(XAITool):
    """Enable X (Twitter) search tool for searching posts."""

    allowed_x_handles: list[str] | None = None

    def to_dict(self) -> dict[str, Any]:
        result: dict[str, Any] = {"type": "x_search"}
        if self.allowed_x_handles:
            result["allowed_x_handles"] = self.allowed_x_handles
        return result

Enable X (Twitter) search tool for searching posts.
Ancestors
- XAITool
- livekit.agents.llm.tool_context.ProviderTool
- livekit.agents.llm.tool_context.Tool
- abc.ABC
Instance variables
var allowed_x_handles : list[str] | None-
Expand source code
@dataclass(slots=True)
class XSearch(XAITool):
    """Enable X (Twitter) search tool for searching posts."""

    allowed_x_handles: list[str] | None = None

    def to_dict(self) -> dict[str, Any]:
        result: dict[str, Any] = {"type": "x_search"}
        if self.allowed_x_handles:
            result["allowed_x_handles"] = self.allowed_x_handles
        return result
Methods
def to_dict(self) ‑> dict[str, typing.Any]-
Expand source code
def to_dict(self) -> dict[str, Any]:
    result: dict[str, Any] = {"type": "x_search"}
    if self.allowed_x_handles:
        result["allowed_x_handles"] = self.allowed_x_handles
    return result
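For illustration (the handle and vector store ID are placeholders), the three xAI provider tools serialize to the payloads the realtime session forwards to the API:

from livekit.plugins.xai.realtime import FileSearch, WebSearch, XSearch

tools = [
    WebSearch(),
    XSearch(allowed_x_handles=["example_handle"]),                    # placeholder handle
    FileSearch(vector_store_ids=["vs_example"], max_num_results=3),   # placeholder store ID
]

for tool in tools:
    print(tool.to_dict())
# {'type': 'web_search'}
# {'type': 'x_search', 'allowed_x_handles': ['example_handle']}
# {'type': 'file_search', 'vector_store_ids': ['vs_example'], 'max_num_results': 3}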