Module livekit.agents.llm
Sub-modules
livekit.agents.llm.mcp
livekit.agents.llm.remote_chat_context
livekit.agents.llm.utils
Functions
def find_function_tools(cls_or_obj: Any) ‑> list[livekit.agents.llm.tool_context.FunctionTool | livekit.agents.llm.tool_context.RawFunctionTool]
-
Expand source code
def find_function_tools(cls_or_obj: Any) -> list[FunctionTool | RawFunctionTool]:
    """Collect every function tool (plain or raw) exposed by a class or instance."""
    return [
        member
        for _, member in inspect.getmembers(cls_or_obj)
        if is_function_tool(member) or is_raw_function_tool(member)
    ]
def function_tool(f: F | Raw_F | None = None,
*,
name: str | None = None,
description: str | None = None,
raw_schema: RawFunctionDescription | dict[str, Any] | None = None) ‑> livekit.agents.llm.tool_context.FunctionTool | livekit.agents.llm.tool_context.RawFunctionTool | Callable[[~F], livekit.agents.llm.tool_context.FunctionTool] | Callable[[~Raw_F], livekit.agents.llm.tool_context.RawFunctionTool]-
Expand source code
def function_tool(
    f: F | Raw_F | None = None,
    *,
    name: str | None = None,
    description: str | None = None,
    raw_schema: RawFunctionDescription | dict[str, Any] | None = None,
) -> (
    FunctionTool
    | RawFunctionTool
    | Callable[[F], FunctionTool]
    | Callable[[Raw_F], RawFunctionTool]
):
    """Mark a callable as an LLM tool.

    Usable bare (``@function_tool``) or with keyword options. Passing
    ``raw_schema`` selects the raw-tool path; otherwise the name/description
    are taken from the arguments or the callable's docstring.
    """

    def deco_raw(func: Raw_F) -> RawFunctionTool:
        # guaranteed by the dispatch at the bottom of function_tool
        assert raw_schema is not None

        if not raw_schema.get("name"):
            raise ValueError("raw function name cannot be empty")
        if "parameters" not in raw_schema:
            # an empty parameters dict is acceptable, but the key must exist
            raise ValueError("raw function description must contain a parameters key")

        info = _RawFunctionToolInfo(raw_schema={**raw_schema}, name=raw_schema["name"])
        setattr(func, "__livekit_raw_tool_info", info)
        return cast(RawFunctionTool, func)

    def deco_func(func: F) -> FunctionTool:
        from docstring_parser import parse_from_object

        docstring = parse_from_object(func)
        info = _FunctionToolInfo(
            name=name or func.__name__,
            description=description or docstring.description,
        )
        setattr(func, "__livekit_tool_info", info)
        return cast(FunctionTool, func)

    # used as a decorator factory: hand back the appropriate decorator
    if f is None:
        return deco_raw if raw_schema is not None else deco_func

    # used directly on a callable
    if raw_schema is not None:
        return deco_raw(cast(Raw_F, f))
    return deco_func(cast(F, f))
def is_function_tool(f: Callable[..., Any]) ‑> TypeGuard[livekit.agents.llm.tool_context.FunctionTool]
-
Expand source code
def is_function_tool(f: Callable[..., Any]) -> TypeGuard[FunctionTool]: return hasattr(f, "__livekit_tool_info")
def is_raw_function_tool(f: Callable[..., Any]) ‑> TypeGuard[livekit.agents.llm.tool_context.RawFunctionTool]
-
Expand source code
def is_raw_function_tool(f: Callable[..., Any]) -> TypeGuard[RawFunctionTool]: return hasattr(f, "__livekit_raw_tool_info")
Classes
class AudioContent (**data: Any)
-
Expand source code
class AudioContent(BaseModel):
    """Chat content carrying audio frames plus an optional transcript."""

    # discriminator used when validating mixed content lists
    type: Literal["audio_content"] = Field(default="audio_content")
    frame: list[rtc.AudioFrame]
    transcript: str | None = None
Usage docs: https://docs.pydantic.dev/2.10/concepts/models/
A base class for creating Pydantic models.
Attributes
__class_vars__
- The names of the class variables defined on the model.
__private_attributes__
- Metadata about the private attributes of the model.
__signature__
- The synthesized
__init__
[Signature
][inspect.Signature] of the model. __pydantic_complete__
- Whether model building is completed, or if there are still undefined fields.
__pydantic_core_schema__
- The core schema of the model.
__pydantic_custom_init__
- Whether the model has a custom
__init__
function. __pydantic_decorators__
- Metadata containing the decorators defined on the model.
This replaces
Model.__validators__
andModel.__root_validators__
from Pydantic V1. __pydantic_generic_metadata__
- Metadata for generic models; contains data used for a similar purpose to args, origin, parameters in typing-module generics. May eventually be replaced by these.
__pydantic_parent_namespace__
- Parent namespace of the model, used for automatic rebuilding of models.
__pydantic_post_init__
- The name of the post-init method for the model, if defined.
__pydantic_root_model__
- Whether the model is a [
RootModel
][pydantic.root_model.RootModel]. __pydantic_serializer__
- The
pydantic-core
SchemaSerializer
used to dump instances of the model. __pydantic_validator__
- The
pydantic-core
SchemaValidator
used to validate instances of the model. __pydantic_fields__
- A dictionary of field names and their corresponding [
FieldInfo
][pydantic.fields.FieldInfo] objects. __pydantic_computed_fields__
- A dictionary of computed field names and their corresponding [
ComputedFieldInfo
][pydantic.fields.ComputedFieldInfo] objects. __pydantic_extra__
- A dictionary containing extra values, if [
extra
][pydantic.config.ConfigDict.extra] is set to'allow'
. __pydantic_fields_set__
- The names of fields explicitly set during instantiation.
__pydantic_private__
- Values of private attributes set on the model instance.
Create a new model by parsing and validating input data from keyword arguments.
Raises [
ValidationError
][pydantic_core.ValidationError] if the input data cannot be validated to form a valid model.

`self` is explicitly positional-only to allow `self` as a field name.

Ancestors
- pydantic.main.BaseModel
Class variables
var frame : list[AudioFrame]
var model_config
var transcript : str | None
var type : Literal['audio_content']
class AvailabilityChangedEvent (llm: LLM,
available: bool)-
Expand source code
@dataclass
class AvailabilityChangedEvent:
    """Event pairing an LLM instance with its current availability flag."""

    llm: LLM
    available: bool
AvailabilityChangedEvent(llm: 'LLM', available: 'bool')
Instance variables
var available : bool
var llm : livekit.agents.llm.llm.LLM
class ChatChunk (**data: Any)
-
Expand source code
class ChatChunk(BaseModel):
    """One piece of an LLM response: an id plus optional delta and usage."""

    id: str
    delta: ChoiceDelta | None = None
    usage: CompletionUsage | None = None
Usage docs: https://docs.pydantic.dev/2.10/concepts/models/
A base class for creating Pydantic models.
Attributes
__class_vars__
- The names of the class variables defined on the model.
__private_attributes__
- Metadata about the private attributes of the model.
__signature__
- The synthesized
__init__
[Signature
][inspect.Signature] of the model. __pydantic_complete__
- Whether model building is completed, or if there are still undefined fields.
__pydantic_core_schema__
- The core schema of the model.
__pydantic_custom_init__
- Whether the model has a custom
__init__
function. __pydantic_decorators__
- Metadata containing the decorators defined on the model.
This replaces
Model.__validators__
andModel.__root_validators__
from Pydantic V1. __pydantic_generic_metadata__
- Metadata for generic models; contains data used for a similar purpose to args, origin, parameters in typing-module generics. May eventually be replaced by these.
__pydantic_parent_namespace__
- Parent namespace of the model, used for automatic rebuilding of models.
__pydantic_post_init__
- The name of the post-init method for the model, if defined.
__pydantic_root_model__
- Whether the model is a [
RootModel
][pydantic.root_model.RootModel]. __pydantic_serializer__
- The
pydantic-core
SchemaSerializer
used to dump instances of the model. __pydantic_validator__
- The
pydantic-core
SchemaValidator
used to validate instances of the model. __pydantic_fields__
- A dictionary of field names and their corresponding [
FieldInfo
][pydantic.fields.FieldInfo] objects. __pydantic_computed_fields__
- A dictionary of computed field names and their corresponding [
ComputedFieldInfo
][pydantic.fields.ComputedFieldInfo] objects. __pydantic_extra__
- A dictionary containing extra values, if [
extra
][pydantic.config.ConfigDict.extra] is set to'allow'
. __pydantic_fields_set__
- The names of fields explicitly set during instantiation.
__pydantic_private__
- Values of private attributes set on the model instance.
Create a new model by parsing and validating input data from keyword arguments.
Raises [
ValidationError
][pydantic_core.ValidationError] if the input data cannot be validated to form a valid model.

`self` is explicitly positional-only to allow `self` as a field name.

Ancestors
- pydantic.main.BaseModel
Class variables
var delta : livekit.agents.llm.llm.ChoiceDelta | None
var id : str
var model_config
var usage : livekit.agents.llm.llm.CompletionUsage | None
class ChatContext (items: NotGivenOr[list[ChatItem]] = NOT_GIVEN)
-
Expand source code
class ChatContext:
    """An ordered collection of chat items (messages, function calls and
    function call outputs), kept sorted by ``created_at``, with helpers for
    filtering, merging, (de)serialization and provider-format conversion."""

    def __init__(self, items: NotGivenOr[list[ChatItem]] = NOT_GIVEN):
        self._items: list[ChatItem] = items if is_given(items) else []

    @classmethod
    def empty(cls) -> ChatContext:
        """Create a ChatContext with no items."""
        return cls([])

    @property
    def items(self) -> list[ChatItem]:
        return self._items

    @items.setter
    def items(self, items: list[ChatItem]) -> None:
        self._items = items

    def add_message(
        self,
        *,
        role: ChatRole,
        content: list[ChatContent] | str,
        id: NotGivenOr[str] = NOT_GIVEN,
        interrupted: NotGivenOr[bool] = NOT_GIVEN,
        created_at: NotGivenOr[float] = NOT_GIVEN,
    ) -> ChatMessage:
        """Create a ChatMessage and add it to the context.

        A plain-string ``content`` is wrapped in a single-element list. When
        ``created_at`` is given the message is inserted in timestamp order,
        otherwise it is appended at the end. Returns the created message.
        """
        kwargs: dict[str, Any] = {}
        if is_given(id):
            kwargs["id"] = id
        if is_given(interrupted):
            kwargs["interrupted"] = interrupted
        if is_given(created_at):
            kwargs["created_at"] = created_at

        if isinstance(content, str):
            message = ChatMessage(role=role, content=[content], **kwargs)
        else:
            message = ChatMessage(role=role, content=content, **kwargs)

        if is_given(created_at):
            idx = self.find_insertion_index(created_at=created_at)
            self._items.insert(idx, message)
        else:
            self._items.append(message)

        return message

    def insert(self, item: ChatItem | Sequence[ChatItem]) -> None:
        """Insert an item or list of items into the chat context by creation time."""
        items = list(item) if isinstance(item, Sequence) else [item]
        for _item in items:
            idx = self.find_insertion_index(created_at=_item.created_at)
            self._items.insert(idx, _item)

    def get_by_id(self, item_id: str) -> ChatItem | None:
        """Return the first item with the given id, or None."""
        return next((item for item in self.items if item.id == item_id), None)

    def index_by_id(self, item_id: str) -> int | None:
        """Return the index of the item with the given id, or None."""
        return next((i for i, item in enumerate(self.items) if item.id == item_id), None)

    def copy(
        self,
        *,
        exclude_function_call: bool = False,
        exclude_instructions: bool = False,
        exclude_empty_message: bool = False,
        tools: NotGivenOr[Sequence[FunctionTool | RawFunctionTool | str | Any]] = NOT_GIVEN,
    ) -> ChatContext:
        """Return a new ChatContext containing a filtered copy of the items.

        When ``tools`` is given, function calls/outputs whose name is not in
        the provided tool set are dropped.
        """
        items = []

        from .tool_context import (
            get_function_info,
            get_raw_function_info,
            is_function_tool,
            is_raw_function_tool,
        )

        valid_tools = set[str]()
        if is_given(tools):
            for tool in tools:
                if isinstance(tool, str):
                    valid_tools.add(tool)
                elif is_function_tool(tool):
                    valid_tools.add(get_function_info(tool).name)
                elif is_raw_function_tool(tool):
                    valid_tools.add(get_raw_function_info(tool).name)
                # TODO(theomonnom): other tools

        for item in self.items:
            if exclude_function_call and item.type in [
                "function_call",
                "function_call_output",
            ]:
                continue

            if (
                exclude_instructions
                and item.type == "message"
                and item.role in ["system", "developer"]
            ):
                continue

            if exclude_empty_message and item.type == "message" and not item.content:
                continue

            if (
                is_given(tools)
                and (item.type == "function_call" or item.type == "function_call_output")
                and item.name not in valid_tools
            ):
                continue

            items.append(item)

        return ChatContext(items)

    def truncate(self, *, max_items: int) -> ChatContext:
        """Truncate the chat context to the last N items in place.

        Removes leading function calls to avoid partial function outputs.
        Preserves the first system message by adding it back to the beginning.
        """
        instructions = next(
            (item for item in self._items if item.type == "message" and item.role == "system"),
            None,
        )

        new_items = self._items[-max_items:]
        # chat ctx shouldn't start with function_call or function_call_output
        while new_items and new_items[0].type in [
            "function_call",
            "function_call_output",
        ]:
            new_items.pop(0)

        # only re-insert the system message if truncation dropped it; inserting
        # unconditionally would duplicate it when it survived in the tail
        if instructions is not None and not any(item is instructions for item in new_items):
            new_items.insert(0, instructions)

        self._items[:] = new_items
        return self

    def merge(
        self,
        other_chat_ctx: ChatContext,
        *,
        exclude_function_call: bool = False,
        exclude_instructions: bool = False,
    ) -> ChatContext:
        """Add messages from `other_chat_ctx` into this one, avoiding duplicates,
        and keep items sorted by created_at."""
        existing_ids = {item.id for item in self._items}

        for item in other_chat_ctx.items:
            if exclude_function_call and item.type in [
                "function_call",
                "function_call_output",
            ]:
                continue

            if (
                exclude_instructions
                and item.type == "message"
                and item.role in ["system", "developer"]
            ):
                continue

            if item.id not in existing_ids:
                idx = self.find_insertion_index(created_at=item.created_at)
                self._items.insert(idx, item)
                existing_ids.add(item.id)

        return self

    def to_dict(
        self,
        *,
        exclude_image: bool = True,
        exclude_audio: bool = True,
        exclude_timestamp: bool = True,
        exclude_function_call: bool = False,
    ) -> dict[str, Any]:
        """Serialize the context to a plain dict (pydantic JSON mode)."""
        items: list[ChatItem] = []
        for item in self.items:
            if exclude_function_call and item.type in [
                "function_call",
                "function_call_output",
            ]:
                continue

            if item.type == "message":
                # copy before stripping content so the live item is untouched
                item = item.model_copy()
                if exclude_image:
                    item.content = [c for c in item.content if not isinstance(c, ImageContent)]
                if exclude_audio:
                    item.content = [c for c in item.content if not isinstance(c, AudioContent)]

            items.append(item)

        exclude_fields = set()
        if exclude_timestamp:
            exclude_fields.add("created_at")

        return {
            "items": [
                item.model_dump(
                    mode="json",
                    exclude_none=True,
                    exclude_defaults=False,
                    exclude=exclude_fields,
                )
                for item in items
            ],
        }

    @overload
    def to_provider_format(
        self, format: Literal["openai"], *, inject_dummy_user_message: bool = True
    ) -> tuple[list[dict], Literal[None]]: ...

    @overload
    def to_provider_format(
        self, format: Literal["google"], *, inject_dummy_user_message: bool = True
    ) -> tuple[list[dict], _provider_format.google.GoogleFormatData]: ...

    @overload
    def to_provider_format(
        self, format: Literal["aws"], *, inject_dummy_user_message: bool = True
    ) -> tuple[list[dict], _provider_format.aws.BedrockFormatData]: ...

    @overload
    def to_provider_format(
        self, format: Literal["anthropic"], *, inject_dummy_user_message: bool = True
    ) -> tuple[list[dict], _provider_format.anthropic.AnthropicFormatData]: ...

    @overload
    def to_provider_format(
        self, format: Literal["mistralai"], *, inject_dummy_user_message: bool = True
    ) -> tuple[list[dict], Literal[None]]: ...

    @overload
    def to_provider_format(self, format: str, **kwargs: Any) -> tuple[list[dict], Any]: ...

    def to_provider_format(
        self,
        format: Literal["openai", "google", "aws", "anthropic", "mistralai"] | str,
        *,
        inject_dummy_user_message: bool = True,
        **kwargs: Any,
    ) -> tuple[list[dict], Any]:
        """Convert the chat context to a provider-specific format.

        If ``inject_dummy_user_message`` is ``True``, a dummy user message will be
        added to the beginning or end of the chat context depending on the provider.

        This is necessary because some providers expect a user message to be
        present for generating a response.
        """
        kwargs["inject_dummy_user_message"] = inject_dummy_user_message

        if format == "openai":
            return _provider_format.openai.to_chat_ctx(self, **kwargs)
        elif format == "google":
            return _provider_format.google.to_chat_ctx(self, **kwargs)
        elif format == "aws":
            return _provider_format.aws.to_chat_ctx(self, **kwargs)
        elif format == "anthropic":
            return _provider_format.anthropic.to_chat_ctx(self, **kwargs)
        elif format == "mistralai":
            return _provider_format.mistralai.to_chat_ctx(self, **kwargs)
        else:
            raise ValueError(f"Unsupported provider format: {format}")

    def find_insertion_index(self, *, created_at: float) -> int:
        """
        Returns the index to insert an item by creation time.

        Iterates in reverse, assuming items are sorted by `created_at`.
        Finds the position after the last item with `created_at <=` the given timestamp.
        """
        for i in reversed(range(len(self._items))):
            if self._items[i].created_at <= created_at:
                return i + 1
        return 0

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> ChatContext:
        """Rebuild a ChatContext from the structure produced by ``to_dict``."""
        item_adapter = TypeAdapter(list[ChatItem])
        items = item_adapter.validate_python(data["items"])
        return cls(items)

    @property
    def readonly(self) -> bool:
        # always False here; a read-only subclass overrides this
        return False

    def is_equivalent(self, other: ChatContext) -> bool:
        """
        Return True if `other` has the same sequence of items with matching
        essential fields (IDs, types, and payload) as this context.

        Comparison rules:
        - Messages: compares the full `content` list, `role` and `interrupted`.
        - Function calls: compares `name`, `call_id`, and `arguments`.
        - Function call outputs: compares `name`, `call_id`, `output`, and `is_error`.

        Does not consider timestamps or other metadata.
        """
        if self is other:
            return True

        if len(self.items) != len(other.items):
            return False

        for a, b in zip(self.items, other.items):
            if a.id != b.id or a.type != b.type:
                return False

            if a.type == "message" and b.type == "message":
                if a.role != b.role or a.interrupted != b.interrupted or a.content != b.content:
                    return False
            elif a.type == "function_call" and b.type == "function_call":
                if a.name != b.name or a.call_id != b.call_id or a.arguments != b.arguments:
                    return False
            elif a.type == "function_call_output" and b.type == "function_call_output":
                if (
                    a.name != b.name
                    or a.call_id != b.call_id
                    or a.output != b.output
                    or a.is_error != b.is_error
                ):
                    return False

        return True
Subclasses
- livekit.agents.llm.chat_context._ReadOnlyChatContext
Static methods
def empty() ‑> livekit.agents.llm.chat_context.ChatContext
def from_dict(data: dict[str, Any]) ‑> livekit.agents.llm.chat_context.ChatContext
Instance variables
prop items : list[ChatItem]
-
Expand source code
@property
def items(self) -> list[ChatItem]:
    """The list of chat items backing this context."""
    return self._items
prop readonly : bool
-
Expand source code
@property
def readonly(self) -> bool:
    """Always False for a plain ChatContext."""
    return False
Methods
def add_message(self,
*,
role: ChatRole,
content: list[ChatContent] | str,
id: NotGivenOr[str] = NOT_GIVEN,
interrupted: NotGivenOr[bool] = NOT_GIVEN,
created_at: NotGivenOr[float] = NOT_GIVEN) ‑> livekit.agents.llm.chat_context.ChatMessage-
Expand source code
def add_message(
    self,
    *,
    role: ChatRole,
    content: list[ChatContent] | str,
    id: NotGivenOr[str] = NOT_GIVEN,
    interrupted: NotGivenOr[bool] = NOT_GIVEN,
    created_at: NotGivenOr[float] = NOT_GIVEN,
) -> ChatMessage:
    """Create a ChatMessage and add it to the context.

    A plain-string ``content`` is wrapped in a single-element list. When
    ``created_at`` is given the message is inserted in timestamp order,
    otherwise it is appended at the end. Returns the created message.
    """
    extra: dict[str, Any] = {}
    for key, value in (("id", id), ("interrupted", interrupted), ("created_at", created_at)):
        if is_given(value):
            extra[key] = value

    normalized = [content] if isinstance(content, str) else content
    message = ChatMessage(role=role, content=normalized, **extra)

    if is_given(created_at):
        self._items.insert(self.find_insertion_index(created_at=created_at), message)
    else:
        self._items.append(message)

    return message
def copy(self,
*,
exclude_function_call: bool = False,
exclude_instructions: bool = False,
exclude_empty_message: bool = False,
tools: NotGivenOr[Sequence[FunctionTool | RawFunctionTool | str | Any]] = NOT_GIVEN) ‑> ChatContext-
Expand source code
def copy(
    self,
    *,
    exclude_function_call: bool = False,
    exclude_instructions: bool = False,
    exclude_empty_message: bool = False,
    tools: NotGivenOr[Sequence[FunctionTool | RawFunctionTool | str | Any]] = NOT_GIVEN,
) -> ChatContext:
    """Return a new ChatContext containing a filtered copy of the items.

    When ``tools`` is given, function calls/outputs whose name is not in the
    provided tool set are dropped.
    """
    from .tool_context import (
        get_function_info,
        get_raw_function_info,
        is_function_tool,
        is_raw_function_tool,
    )

    valid_tools = set[str]()
    if is_given(tools):
        for tool in tools:
            if isinstance(tool, str):
                valid_tools.add(tool)
            elif is_function_tool(tool):
                valid_tools.add(get_function_info(tool).name)
            elif is_raw_function_tool(tool):
                valid_tools.add(get_raw_function_info(tool).name)
            # TODO(theomonnom): other tools

    def _keep(item: ChatItem) -> bool:
        # filters mirror the keyword flags; each rejects one category
        if exclude_function_call and item.type in ("function_call", "function_call_output"):
            return False
        if (
            exclude_instructions
            and item.type == "message"
            and item.role in ("system", "developer")
        ):
            return False
        if exclude_empty_message and item.type == "message" and not item.content:
            return False
        if (
            is_given(tools)
            and item.type in ("function_call", "function_call_output")
            and item.name not in valid_tools
        ):
            return False
        return True

    return ChatContext([item for item in self.items if _keep(item)])
def find_insertion_index(self, *, created_at: float) ‑> int
-
Expand source code
def find_insertion_index(self, *, created_at: float) -> int:
    """Index at which an item with ``created_at`` should be inserted: just
    after the last existing item whose timestamp is <= the given one.

    Assumes ``self._items`` is already sorted by ``created_at``.
    """
    pos = len(self._items)
    while pos > 0:
        if self._items[pos - 1].created_at <= created_at:
            return pos
        pos -= 1
    return 0
Returns the index to insert an item by creation time.
Iterates in reverse, assuming items are sorted by
created_at
. Finds the position after the last item withcreated_at <=
the given timestamp. def get_by_id(self, item_id: str) ‑> livekit.agents.llm.chat_context.ChatMessage | livekit.agents.llm.chat_context.FunctionCall | livekit.agents.llm.chat_context.FunctionCallOutput | None
-
Expand source code
def get_by_id(self, item_id: str) -> ChatItem | None:
    """Return the first item with the given id, or None if absent."""
    for candidate in self.items:
        if candidate.id == item_id:
            return candidate
    return None
def index_by_id(self, item_id: str) ‑> int | None
-
Expand source code
def index_by_id(self, item_id: str) -> int | None:
    """Return the position of the item with the given id, or None if absent."""
    for idx, candidate in enumerate(self.items):
        if candidate.id == item_id:
            return idx
    return None
def insert(self, item: ChatItem | Sequence[ChatItem]) ‑> None
-
Expand source code
def insert(self, item: ChatItem | Sequence[ChatItem]) -> None:
    """Insert an item or list of items into the chat context by creation time."""
    batch = list(item) if isinstance(item, Sequence) else [item]
    for entry in batch:
        position = self.find_insertion_index(created_at=entry.created_at)
        self._items.insert(position, entry)
Insert an item or list of items into the chat context by creation time.
def is_equivalent(self,
other: ChatContext) ‑> bool-
Expand source code
def is_equivalent(self, other: ChatContext) -> bool:
    """
    Return True if `other` has the same sequence of items with matching
    essential fields (IDs, types, and payload) as this context.

    Comparison rules:
    - Messages: compares the full `content` list, `role` and `interrupted`.
    - Function calls: compares `name`, `call_id`, and `arguments`.
    - Function call outputs: compares `name`, `call_id`, `output`, and `is_error`.

    Does not consider timestamps or other metadata.
    """
    if self is other:
        return True
    if len(self.items) != len(other.items):
        return False

    for a, b in zip(self.items, other.items):
        if a.id != b.id or a.type != b.type:
            return False

        if a.type == "message" and b.type == "message":
            same = (a.role, a.interrupted, a.content) == (b.role, b.interrupted, b.content)
        elif a.type == "function_call" and b.type == "function_call":
            same = (a.name, a.call_id, a.arguments) == (b.name, b.call_id, b.arguments)
        elif a.type == "function_call_output" and b.type == "function_call_output":
            same = (a.name, a.call_id, a.output, a.is_error) == (
                b.name,
                b.call_id,
                b.output,
                b.is_error,
            )
        else:
            same = True

        if not same:
            return False

    return True
Return True if
other
has the same sequence of items with matching essential fields (IDs, types, and payload) as this context.Comparison rules: - Messages: compares the full
content
list,role
andinterrupted
. - Function calls: comparesname
,call_id
, andarguments
. - Function call outputs: comparesname
,call_id
,output
, andis_error
.Does not consider timestamps or other metadata.
def merge(self,
other_chat_ctx: ChatContext,
*,
exclude_function_call: bool = False,
exclude_instructions: bool = False) ‑> livekit.agents.llm.chat_context.ChatContext-
Expand source code
def merge(
    self,
    other_chat_ctx: ChatContext,
    *,
    exclude_function_call: bool = False,
    exclude_instructions: bool = False,
) -> ChatContext:
    """Add messages from `other_chat_ctx` into this one, avoiding duplicates,
    and keep items sorted by created_at."""
    seen = {item.id for item in self._items}

    for candidate in other_chat_ctx.items:
        if exclude_function_call and candidate.type in (
            "function_call",
            "function_call_output",
        ):
            continue
        if (
            exclude_instructions
            and candidate.type == "message"
            and candidate.role in ("system", "developer")
        ):
            continue
        if candidate.id in seen:
            continue

        position = self.find_insertion_index(created_at=candidate.created_at)
        self._items.insert(position, candidate)
        seen.add(candidate.id)

    return self
Add messages from
other_chat_ctx
into this one, avoiding duplicates, and keep items sorted by created_at. def to_dict(self,
*,
exclude_image: bool = True,
exclude_audio: bool = True,
exclude_timestamp: bool = True,
exclude_function_call: bool = False) ‑> dict[str, typing.Any]-
Expand source code
def to_dict(
    self,
    *,
    exclude_image: bool = True,
    exclude_audio: bool = True,
    exclude_timestamp: bool = True,
    exclude_function_call: bool = False,
) -> dict[str, Any]:
    """Serialize the context to a plain dict (pydantic JSON mode)."""
    kept: list[ChatItem] = []
    for item in self.items:
        if exclude_function_call and item.type in ("function_call", "function_call_output"):
            continue

        if item.type == "message":
            # copy before stripping content so the live item is untouched
            item = item.model_copy()
            if exclude_image:
                item.content = [c for c in item.content if not isinstance(c, ImageContent)]
            if exclude_audio:
                item.content = [c for c in item.content if not isinstance(c, AudioContent)]

        kept.append(item)

    excluded_fields: set = {"created_at"} if exclude_timestamp else set()
    return {
        "items": [
            item.model_dump(
                mode="json",
                exclude_none=True,
                exclude_defaults=False,
                exclude=excluded_fields,
            )
            for item in kept
        ],
    }
def to_provider_format(self,
format: "Literal['openai', 'google', 'aws', 'anthropic', 'mistralai'] | str",
*,
inject_dummy_user_message: bool = True,
**kwargs: Any) ‑> tuple[list[dict], typing.Any]-
Expand source code
def to_provider_format(
    self,
    format: Literal["openai", "google", "aws", "anthropic", "mistralai"] | str,
    *,
    inject_dummy_user_message: bool = True,
    **kwargs: Any,
) -> tuple[list[dict], Any]:
    """Convert the chat context to a provider-specific format.

    If ``inject_dummy_user_message`` is ``True``, a dummy user message will be
    added to the beginning or end of the chat context depending on the provider.

    This is necessary because some providers expect a user message to be
    present for generating a response.
    """
    kwargs["inject_dummy_user_message"] = inject_dummy_user_message

    # dispatch to the matching provider converter, one guarded return each
    if format == "openai":
        return _provider_format.openai.to_chat_ctx(self, **kwargs)
    if format == "google":
        return _provider_format.google.to_chat_ctx(self, **kwargs)
    if format == "aws":
        return _provider_format.aws.to_chat_ctx(self, **kwargs)
    if format == "anthropic":
        return _provider_format.anthropic.to_chat_ctx(self, **kwargs)
    if format == "mistralai":
        return _provider_format.mistralai.to_chat_ctx(self, **kwargs)

    raise ValueError(f"Unsupported provider format: {format}")
Convert the chat context to a provider-specific format.
If
inject_dummy_user_message
isTrue
, a dummy user message will be added to the beginning or end of the chat context depending on the provider.This is necessary because some providers expect a user message to be present for generating a response.
def truncate(self, *, max_items: int) ‑> livekit.agents.llm.chat_context.ChatContext
-
Expand source code
def truncate(self, *, max_items: int) -> ChatContext:
    """Truncate the chat context to the last N items in place.

    Removes leading function calls to avoid partial function outputs.
    Preserves the first system message by adding it back to the beginning.
    """
    instructions = next(
        (item for item in self._items if item.type == "message" and item.role == "system"),
        None,
    )

    new_items = self._items[-max_items:]
    # chat ctx shouldn't start with function_call or function_call_output
    while new_items and new_items[0].type in [
        "function_call",
        "function_call_output",
    ]:
        new_items.pop(0)

    # only re-insert the system message if truncation dropped it; inserting
    # unconditionally would duplicate it when it survived in the tail
    if instructions is not None and not any(item is instructions for item in new_items):
        new_items.insert(0, instructions)

    self._items[:] = new_items
    return self
Truncate the chat context to the last N items in place.
Removes leading function calls to avoid partial function outputs. Preserves the first system message by adding it back to the beginning.
class ChatMessage (**data: Any)
-
Expand source code
class ChatMessage(BaseModel):
    """A single chat item of type "message": a role plus typed content."""

    id: str = Field(default_factory=lambda: utils.shortuuid("item_"))
    type: Literal["message"] = "message"
    role: ChatRole
    content: list[ChatContent]
    interrupted: bool = False
    transcript_confidence: float | None = None
    hash: bytes | None = None
    created_at: float = Field(default_factory=time.time)

    @property
    def text_content(self) -> str | None:
        """
        Returns a string of all text content in the message.

        Multiple text content items will be joined by a newline.
        """
        strings = [c for c in self.content if isinstance(c, str)]
        return "\n".join(strings) if strings else None
Usage docs: https://docs.pydantic.dev/2.10/concepts/models/
A base class for creating Pydantic models.
Attributes
__class_vars__
- The names of the class variables defined on the model.
__private_attributes__
- Metadata about the private attributes of the model.
__signature__
- The synthesized
__init__
[Signature
][inspect.Signature] of the model. __pydantic_complete__
- Whether model building is completed, or if there are still undefined fields.
__pydantic_core_schema__
- The core schema of the model.
__pydantic_custom_init__
- Whether the model has a custom
__init__
function. __pydantic_decorators__
- Metadata containing the decorators defined on the model.
This replaces
Model.__validators__
andModel.__root_validators__
from Pydantic V1. __pydantic_generic_metadata__
- Metadata for generic models; contains data used for a similar purpose to args, origin, parameters in typing-module generics. May eventually be replaced by these.
__pydantic_parent_namespace__
- Parent namespace of the model, used for automatic rebuilding of models.
__pydantic_post_init__
- The name of the post-init method for the model, if defined.
__pydantic_root_model__
- Whether the model is a [
RootModel
][pydantic.root_model.RootModel]. __pydantic_serializer__
- The
pydantic-core
SchemaSerializer
used to dump instances of the model. __pydantic_validator__
- The
pydantic-core
SchemaValidator
used to validate instances of the model. __pydantic_fields__
- A dictionary of field names and their corresponding [
FieldInfo
][pydantic.fields.FieldInfo] objects. __pydantic_computed_fields__
- A dictionary of computed field names and their corresponding [
ComputedFieldInfo
][pydantic.fields.ComputedFieldInfo] objects. __pydantic_extra__
- A dictionary containing extra values, if [
extra
][pydantic.config.ConfigDict.extra] is set to'allow'
. __pydantic_fields_set__
- The names of fields explicitly set during instantiation.
__pydantic_private__
- Values of private attributes set on the model instance.
Create a new model by parsing and validating input data from keyword arguments.
Raises [
ValidationError
][pydantic_core.ValidationError] if the input data cannot be validated to form a valid model.

`self` is explicitly positional-only to allow `self` as a field name.

Ancestors
- pydantic.main.BaseModel
Class variables
var content : list[livekit.agents.llm.chat_context.ImageContent | livekit.agents.llm.chat_context.AudioContent | str]
var created_at : float
var hash : bytes | None
var id : str
var interrupted : bool
var model_config
var role : Literal['developer', 'system', 'user', 'assistant']
var transcript_confidence : float | None
var type : Literal['message']
Instance variables
prop text_content : str | None
-
Expand source code
@property
def text_content(self) -> str | None:
    """
    Returns a string of all text content in the message.

    Multiple text content items will be joined by a newline.
    """
    strings = [c for c in self.content if isinstance(c, str)]
    if not strings:
        return None
    return "\n".join(strings)
Returns a string of all text content in the message.
Multiple text content items will be joined by a newline.
class ChoiceDelta (**data: Any)
-
Expand source code
class ChoiceDelta(BaseModel):
    """Incremental payload of a streamed chunk: role, text and tool calls."""

    role: ChatRole | None = None
    content: str | None = None
    tool_calls: list[FunctionToolCall] = Field(default_factory=list)
Usage docs: https://docs.pydantic.dev/2.10/concepts/models/
A base class for creating Pydantic models.
Attributes
__class_vars__
- The names of the class variables defined on the model.
__private_attributes__
- Metadata about the private attributes of the model.
__signature__
- The synthesized
__init__
[Signature
][inspect.Signature] of the model. __pydantic_complete__
- Whether model building is completed, or if there are still undefined fields.
__pydantic_core_schema__
- The core schema of the model.
__pydantic_custom_init__
- Whether the model has a custom
__init__
function. __pydantic_decorators__
- Metadata containing the decorators defined on the model.
This replaces
Model.__validators__
andModel.__root_validators__
from Pydantic V1. __pydantic_generic_metadata__
- Metadata for generic models; contains data used for a similar purpose to args, origin, parameters in typing-module generics. May eventually be replaced by these.
__pydantic_parent_namespace__
- Parent namespace of the model, used for automatic rebuilding of models.
__pydantic_post_init__
- The name of the post-init method for the model, if defined.
__pydantic_root_model__
- Whether the model is a [
RootModel
][pydantic.root_model.RootModel]. __pydantic_serializer__
- The
pydantic-core
SchemaSerializer
used to dump instances of the model. __pydantic_validator__
- The
pydantic-core
SchemaValidator
used to validate instances of the model. __pydantic_fields__
- A dictionary of field names and their corresponding [
FieldInfo
][pydantic.fields.FieldInfo] objects. __pydantic_computed_fields__
- A dictionary of computed field names and their corresponding [
ComputedFieldInfo
][pydantic.fields.ComputedFieldInfo] objects. __pydantic_extra__
- A dictionary containing extra values, if [
extra
][pydantic.config.ConfigDict.extra] is set to 'allow'
. __pydantic_fields_set__
- The names of fields explicitly set during instantiation.
__pydantic_private__
- Values of private attributes set on the model instance.
Create a new model by parsing and validating input data from keyword arguments.
Raises [`ValidationError`][pydantic_core.ValidationError] if the input data cannot be validated to form a valid model.
`self` is explicitly positional-only to allow `self` as a field name.
Ancestors
- pydantic.main.BaseModel
Class variables
var content : str | None
var model_config
var role : Literal['developer', 'system', 'user', 'assistant'] | None
var tool_calls : list[livekit.agents.llm.llm.FunctionToolCall]
class CompletionUsage (**data: Any)
-
Expand source code
class CompletionUsage(BaseModel): completion_tokens: int """The number of tokens in the completion.""" prompt_tokens: int """The number of input tokens used (includes cached tokens).""" prompt_cached_tokens: int = 0 """The number of cached input tokens used.""" cache_creation_tokens: int = 0 """The number of tokens used to create the cache.""" cache_read_tokens: int = 0 """The number of tokens read from the cache.""" total_tokens: int """The total number of tokens used (completion + prompt tokens)."""
Usage docs: https://docs.pydantic.dev/2.10/concepts/models/
A base class for creating Pydantic models.
Attributes
__class_vars__
- The names of the class variables defined on the model.
__private_attributes__
- Metadata about the private attributes of the model.
__signature__
- The synthesized
__init__
[Signature
][inspect.Signature] of the model. __pydantic_complete__
- Whether model building is completed, or if there are still undefined fields.
__pydantic_core_schema__
- The core schema of the model.
__pydantic_custom_init__
- Whether the model has a custom
__init__
function. __pydantic_decorators__
- Metadata containing the decorators defined on the model.
This replaces
Model.__validators__
and Model.__root_validators__
from Pydantic V1. __pydantic_generic_metadata__
- Metadata for generic models; contains data used for a similar purpose to args, origin, parameters in typing-module generics. May eventually be replaced by these.
__pydantic_parent_namespace__
- Parent namespace of the model, used for automatic rebuilding of models.
__pydantic_post_init__
- The name of the post-init method for the model, if defined.
__pydantic_root_model__
- Whether the model is a [
RootModel
][pydantic.root_model.RootModel]. __pydantic_serializer__
- The
pydantic-core
SchemaSerializer
used to dump instances of the model. __pydantic_validator__
- The
pydantic-core
SchemaValidator
used to validate instances of the model. __pydantic_fields__
- A dictionary of field names and their corresponding [
FieldInfo
][pydantic.fields.FieldInfo] objects. __pydantic_computed_fields__
- A dictionary of computed field names and their corresponding [
ComputedFieldInfo
][pydantic.fields.ComputedFieldInfo] objects. __pydantic_extra__
- A dictionary containing extra values, if [
extra
][pydantic.config.ConfigDict.extra] is set to 'allow'
. __pydantic_fields_set__
- The names of fields explicitly set during instantiation.
__pydantic_private__
- Values of private attributes set on the model instance.
Create a new model by parsing and validating input data from keyword arguments.
Raises [`ValidationError`][pydantic_core.ValidationError] if the input data cannot be validated to form a valid model.
`self` is explicitly positional-only to allow `self` as a field name.
Ancestors
- pydantic.main.BaseModel
Class variables
var cache_creation_tokens : int
-
The number of tokens used to create the cache.
var cache_read_tokens : int
-
The number of tokens read from the cache.
var completion_tokens : int
-
The number of tokens in the completion.
var model_config
var prompt_cached_tokens : int
-
The number of cached input tokens used.
var prompt_tokens : int
-
The number of input tokens used (includes cached tokens).
var total_tokens : int
-
The total number of tokens used (completion + prompt tokens).
class FallbackAdapter (llm: list[LLM],
*,
attempt_timeout: float = 5.0,
max_retry_per_llm: int = 0,
retry_interval: float = 0.5,
retry_on_chunk_sent: bool = False)-
Expand source code
class FallbackAdapter( LLM[Literal["llm_availability_changed"]], ): def __init__( self, llm: list[LLM], *, attempt_timeout: float = 5.0, # use fallback instead of retrying max_retry_per_llm: int = 0, retry_interval: float = 0.5, retry_on_chunk_sent: bool = False, ) -> None: """FallbackAdapter is an LLM that can fallback to a different LLM if the current LLM fails. Args: llm (list[LLM]): List of LLM instances to fallback to. attempt_timeout (float, optional): Timeout for each LLM attempt. Defaults to 5.0. max_retry_per_llm (int, optional): Internal retries per LLM. Defaults to 0, which means no internal retries, the failed LLM will be skipped and the next LLM will be used. retry_interval (float, optional): Interval between retries. Defaults to 0.5. retry_on_chunk_sent (bool, optional): Whether to retry when a LLM failed after chunks are sent. Defaults to False. Raises: ValueError: If no LLM instances are provided. """ if len(llm) < 1: raise ValueError("at least one LLM instance must be provided.") super().__init__() self._llm_instances = llm self._attempt_timeout = attempt_timeout self._max_retry_per_llm = max_retry_per_llm self._retry_interval = retry_interval self._retry_on_chunk_sent = retry_on_chunk_sent self._status = [ _LLMStatus(available=True, recovering_task=None) for _ in self._llm_instances ] def chat( self, *, chat_ctx: ChatContext, tools: list[FunctionTool | RawFunctionTool] | None = None, conn_options: APIConnectOptions = DEFAULT_FALLBACK_API_CONNECT_OPTIONS, parallel_tool_calls: NotGivenOr[bool] = NOT_GIVEN, tool_choice: NotGivenOr[ToolChoice] = NOT_GIVEN, extra_kwargs: NotGivenOr[dict[str, Any]] = NOT_GIVEN, ) -> LLMStream: return FallbackLLMStream( llm=self, conn_options=conn_options, chat_ctx=chat_ctx, tools=tools or [], parallel_tool_calls=parallel_tool_calls, tool_choice=tool_choice, extra_kwargs=extra_kwargs, )
Helper class that provides a standard way to create an ABC using inheritance.
FallbackAdapter is an LLM that can fallback to a different LLM if the current LLM fails.
Args
llm
:list[LLM]
- List of LLM instances to fallback to.
attempt_timeout
:float
, optional- Timeout for each LLM attempt. Defaults to 5.0.
max_retry_per_llm
:int
, optional- Internal retries per LLM. Defaults to 0, which means no internal retries, the failed LLM will be skipped and the next LLM will be used.
retry_interval
:float
, optional- Interval between retries. Defaults to 0.5.
retry_on_chunk_sent
:bool
, optional- Whether to retry when a LLM failed after chunks are sent. Defaults to False.
Raises
ValueError
- If no LLM instances are provided.
Ancestors
- livekit.agents.llm.llm.LLM
- abc.ABC
- EventEmitter
- typing.Generic
Methods
def chat(self,
*,
chat_ctx: ChatContext,
tools: list[FunctionTool | RawFunctionTool] | None = None,
conn_options: APIConnectOptions = APIConnectOptions(max_retry=0, retry_interval=2.0, timeout=10.0),
parallel_tool_calls: NotGivenOr[bool] = NOT_GIVEN,
tool_choice: NotGivenOr[ToolChoice] = NOT_GIVEN,
extra_kwargs: NotGivenOr[dict[str, Any]] = NOT_GIVEN) ‑> livekit.agents.llm.llm.LLMStream-
Expand source code
def chat( self, *, chat_ctx: ChatContext, tools: list[FunctionTool | RawFunctionTool] | None = None, conn_options: APIConnectOptions = DEFAULT_FALLBACK_API_CONNECT_OPTIONS, parallel_tool_calls: NotGivenOr[bool] = NOT_GIVEN, tool_choice: NotGivenOr[ToolChoice] = NOT_GIVEN, extra_kwargs: NotGivenOr[dict[str, Any]] = NOT_GIVEN, ) -> LLMStream: return FallbackLLMStream( llm=self, conn_options=conn_options, chat_ctx=chat_ctx, tools=tools or [], parallel_tool_calls=parallel_tool_calls, tool_choice=tool_choice, extra_kwargs=extra_kwargs, )
Inherited members
class FunctionCall (**data: Any)
-
Expand source code
class FunctionCall(BaseModel): id: str = Field(default_factory=lambda: utils.shortuuid("item_")) type: Literal["function_call"] = "function_call" call_id: str arguments: str name: str created_at: float = Field(default_factory=time.time)
Usage docs: https://docs.pydantic.dev/2.10/concepts/models/
A base class for creating Pydantic models.
Attributes
__class_vars__
- The names of the class variables defined on the model.
__private_attributes__
- Metadata about the private attributes of the model.
__signature__
- The synthesized
__init__
[Signature
][inspect.Signature] of the model. __pydantic_complete__
- Whether model building is completed, or if there are still undefined fields.
__pydantic_core_schema__
- The core schema of the model.
__pydantic_custom_init__
- Whether the model has a custom
__init__
function. __pydantic_decorators__
- Metadata containing the decorators defined on the model.
This replaces
Model.__validators__
and Model.__root_validators__
from Pydantic V1. __pydantic_generic_metadata__
- Metadata for generic models; contains data used for a similar purpose to args, origin, parameters in typing-module generics. May eventually be replaced by these.
__pydantic_parent_namespace__
- Parent namespace of the model, used for automatic rebuilding of models.
__pydantic_post_init__
- The name of the post-init method for the model, if defined.
__pydantic_root_model__
- Whether the model is a [
RootModel
][pydantic.root_model.RootModel]. __pydantic_serializer__
- The
pydantic-core
SchemaSerializer
used to dump instances of the model. __pydantic_validator__
- The
pydantic-core
SchemaValidator
used to validate instances of the model. __pydantic_fields__
- A dictionary of field names and their corresponding [
FieldInfo
][pydantic.fields.FieldInfo] objects. __pydantic_computed_fields__
- A dictionary of computed field names and their corresponding [
ComputedFieldInfo
][pydantic.fields.ComputedFieldInfo] objects. __pydantic_extra__
- A dictionary containing extra values, if [
extra
][pydantic.config.ConfigDict.extra] is set to 'allow'
. __pydantic_fields_set__
- The names of fields explicitly set during instantiation.
__pydantic_private__
- Values of private attributes set on the model instance.
Create a new model by parsing and validating input data from keyword arguments.
Raises [`ValidationError`][pydantic_core.ValidationError] if the input data cannot be validated to form a valid model.
`self` is explicitly positional-only to allow `self` as a field name.
Ancestors
- pydantic.main.BaseModel
Class variables
var arguments : str
var call_id : str
var created_at : float
var id : str
var model_config
var name : str
var type : Literal['function_call']
class FunctionCallOutput (**data: Any)
-
Expand source code
class FunctionCallOutput(BaseModel): id: str = Field(default_factory=lambda: utils.shortuuid("item_")) type: Literal["function_call_output"] = Field(default="function_call_output") name: str = Field(default="") call_id: str output: str is_error: bool created_at: float = Field(default_factory=time.time)
Usage docs: https://docs.pydantic.dev/2.10/concepts/models/
A base class for creating Pydantic models.
Attributes
__class_vars__
- The names of the class variables defined on the model.
__private_attributes__
- Metadata about the private attributes of the model.
__signature__
- The synthesized
__init__
[Signature
][inspect.Signature] of the model. __pydantic_complete__
- Whether model building is completed, or if there are still undefined fields.
__pydantic_core_schema__
- The core schema of the model.
__pydantic_custom_init__
- Whether the model has a custom
__init__
function. __pydantic_decorators__
- Metadata containing the decorators defined on the model.
This replaces
Model.__validators__
andModel.__root_validators__
from Pydantic V1. __pydantic_generic_metadata__
- Metadata for generic models; contains data used for a similar purpose to args, origin, parameters in typing-module generics. May eventually be replaced by these.
__pydantic_parent_namespace__
- Parent namespace of the model, used for automatic rebuilding of models.
__pydantic_post_init__
- The name of the post-init method for the model, if defined.
__pydantic_root_model__
- Whether the model is a [
RootModel
][pydantic.root_model.RootModel]. __pydantic_serializer__
- The
pydantic-core
SchemaSerializer
used to dump instances of the model. __pydantic_validator__
- The
pydantic-core
SchemaValidator
used to validate instances of the model. __pydantic_fields__
- A dictionary of field names and their corresponding [
FieldInfo
][pydantic.fields.FieldInfo] objects. __pydantic_computed_fields__
- A dictionary of computed field names and their corresponding [
ComputedFieldInfo
][pydantic.fields.ComputedFieldInfo] objects. __pydantic_extra__
- A dictionary containing extra values, if [
extra
][pydantic.config.ConfigDict.extra] is set to 'allow'
. __pydantic_fields_set__
- The names of fields explicitly set during instantiation.
__pydantic_private__
- Values of private attributes set on the model instance.
Create a new model by parsing and validating input data from keyword arguments.
Raises [`ValidationError`][pydantic_core.ValidationError] if the input data cannot be validated to form a valid model.
`self` is explicitly positional-only to allow `self` as a field name.
Ancestors
- pydantic.main.BaseModel
Class variables
var call_id : str
var created_at : float
var id : str
var is_error : bool
var model_config
var name : str
var output : str
var type : Literal['function_call_output']
class FunctionTool (*args, **kwargs)
-
Expand source code
@runtime_checkable class FunctionTool(Protocol): __livekit_tool_info: _FunctionToolInfo def __call__(self, *args: Any, **kwargs: Any) -> Any: ...
Base class for protocol classes.
Protocol classes are defined as::
class Proto(Protocol): def meth(self) -> int: ...
Such classes are primarily used with static type checkers that recognize structural subtyping (static duck-typing).
For example::
class C: def meth(self) -> int: return 0 def func(x: Proto) -> int: return x.meth() func(C()) # Passes static type check
See PEP 544 for details. Protocol classes decorated with @typing.runtime_checkable act as simple-minded runtime protocols that check only the presence of given attributes, ignoring their type signatures. Protocol classes can be generic, they are defined as::
class GenProto[T](Protocol): def meth(self) -> T: ...
Ancestors
- typing.Protocol
- typing.Generic
class FunctionToolCall (**data: Any)
-
Expand source code
class FunctionToolCall(BaseModel): type: Literal["function"] = "function" name: str arguments: str call_id: str
Usage docs: https://docs.pydantic.dev/2.10/concepts/models/
A base class for creating Pydantic models.
Attributes
__class_vars__
- The names of the class variables defined on the model.
__private_attributes__
- Metadata about the private attributes of the model.
__signature__
- The synthesized
__init__
[Signature
][inspect.Signature] of the model. __pydantic_complete__
- Whether model building is completed, or if there are still undefined fields.
__pydantic_core_schema__
- The core schema of the model.
__pydantic_custom_init__
- Whether the model has a custom
__init__
function. __pydantic_decorators__
- Metadata containing the decorators defined on the model.
This replaces
Model.__validators__
andModel.__root_validators__
from Pydantic V1. __pydantic_generic_metadata__
- Metadata for generic models; contains data used for a similar purpose to args, origin, parameters in typing-module generics. May eventually be replaced by these.
__pydantic_parent_namespace__
- Parent namespace of the model, used for automatic rebuilding of models.
__pydantic_post_init__
- The name of the post-init method for the model, if defined.
__pydantic_root_model__
- Whether the model is a [
RootModel
][pydantic.root_model.RootModel]. __pydantic_serializer__
- The
pydantic-core
SchemaSerializer
used to dump instances of the model. __pydantic_validator__
- The
pydantic-core
SchemaValidator
used to validate instances of the model. __pydantic_fields__
- A dictionary of field names and their corresponding [
FieldInfo
][pydantic.fields.FieldInfo] objects. __pydantic_computed_fields__
- A dictionary of computed field names and their corresponding [
ComputedFieldInfo
][pydantic.fields.ComputedFieldInfo] objects. __pydantic_extra__
- A dictionary containing extra values, if [
extra
][pydantic.config.ConfigDict.extra] is set to 'allow'
. __pydantic_fields_set__
- The names of fields explicitly set during instantiation.
__pydantic_private__
- Values of private attributes set on the model instance.
Create a new model by parsing and validating input data from keyword arguments.
Raises [`ValidationError`][pydantic_core.ValidationError] if the input data cannot be validated to form a valid model.
`self` is explicitly positional-only to allow `self` as a field name.
Ancestors
- pydantic.main.BaseModel
Class variables
var arguments : str
var call_id : str
var model_config
var name : str
var type : Literal['function']
class GenerationCreatedEvent (message_stream: AsyncIterable[MessageGeneration],
function_stream: AsyncIterable[FunctionCall],
user_initiated: bool)-
Expand source code
@dataclass class GenerationCreatedEvent: message_stream: AsyncIterable[MessageGeneration] function_stream: AsyncIterable[FunctionCall] user_initiated: bool """True if the message was generated by the user using generate_reply()"""
GenerationCreatedEvent(message_stream: 'AsyncIterable[MessageGeneration]', function_stream: 'AsyncIterable[FunctionCall]', user_initiated: 'bool')
Instance variables
var function_stream : AsyncIterable[livekit.agents.llm.chat_context.FunctionCall]
var message_stream : AsyncIterable[livekit.agents.llm.realtime.MessageGeneration]
var user_initiated : bool
-
True if the message was generated by the user using generate_reply()
class ImageContent (**data: Any)
-
Expand source code
class ImageContent(BaseModel): """ ImageContent is used to input images into the ChatContext on supported LLM providers / plugins. You may need to consult your LLM provider's documentation on supported URL types. ```python # Pass a VideoFrame directly, which will be automatically converted to a JPEG data URL internally async for event in rtc.VideoStream(video_track): chat_image = ImageContent(image=event.frame) # this instance is now available for your ChatContext # Encode your VideoFrame yourself for more control, and pass the result as a data URL (see EncodeOptions for more details) from livekit.agents.utils.images import encode, EncodeOptions, ResizeOptions image_bytes = encode( event.frame, EncodeOptions( format="PNG", resize_options=ResizeOptions(width=512, height=512, strategy="scale_aspect_fit"), ), ) chat_image = ImageContent( image=f"data:image/png;base64,{base64.b64encode(image_bytes).decode('utf-8')}" ) # With an external URL chat_image = ImageContent(image="https://example.com/image.jpg") ``` """ # noqa: E501 id: str = Field(default_factory=lambda: utils.shortuuid("img_")) """ Unique identifier for the image """ type: Literal["image_content"] = Field(default="image_content") image: str | rtc.VideoFrame """ Either a string URL or a VideoFrame object """ inference_width: int | None = None """ Resizing parameter for rtc.VideoFrame inputs (ignored for URL images) """ inference_height: int | None = None """ Resizing parameter for rtc.VideoFrame inputs (ignored for URL images) """ inference_detail: Literal["auto", "high", "low"] = "auto" """ Detail parameter for LLM provider, if supported. Currently only supported by OpenAI (see https://platform.openai.com/docs/guides/vision?lang=node#low-or-high-fidelity-image-understanding) """ mime_type: str | None = None """ MIME type of the image """ _cache: dict[Any, Any] = PrivateAttr(default_factory=dict)
ImageContent is used to input images into the ChatContext on supported LLM providers / plugins.
You may need to consult your LLM provider's documentation on supported URL types.
# Pass a VideoFrame directly, which will be automatically converted to a JPEG data URL internally async for event in rtc.VideoStream(video_track): chat_image = ImageContent(image=event.frame) # this instance is now available for your ChatContext # Encode your VideoFrame yourself for more control, and pass the result as a data URL (see EncodeOptions for more details) from livekit.agents.utils.images import encode, EncodeOptions, ResizeOptions image_bytes = encode( event.frame, EncodeOptions( format="PNG", resize_options=ResizeOptions(width=512, height=512, strategy="scale_aspect_fit"), ), ) chat_image = ImageContent( image=f"data:image/png;base64,{base64.b64encode(image_bytes).decode('utf-8')}" ) # With an external URL chat_image = ImageContent(image="https://example.com/image.jpg")
Create a new model by parsing and validating input data from keyword arguments.
Raises [`ValidationError`][pydantic_core.ValidationError] if the input data cannot be validated to form a valid model.
`self` is explicitly positional-only to allow `self` as a field name.
Ancestors
- pydantic.main.BaseModel
Class variables
var id : str
-
Unique identifier for the image
var image : str | VideoFrame
-
Either a string URL or a VideoFrame object
var inference_detail : Literal['auto', 'high', 'low']
-
Detail parameter for LLM provider, if supported.
Currently only supported by OpenAI (see https://platform.openai.com/docs/guides/vision?lang=node#low-or-high-fidelity-image-understanding)
var inference_height : int | None
-
Resizing parameter for rtc.VideoFrame inputs (ignored for URL images)
var inference_width : int | None
-
Resizing parameter for rtc.VideoFrame inputs (ignored for URL images)
var mime_type : str | None
-
MIME type of the image
var model_config
var type : Literal['image_content']
Methods
def model_post_init(self: BaseModel, context: Any, /) ‑> None
-
Expand source code
def init_private_attributes(self: BaseModel, context: Any, /) -> None: """This function is meant to behave like a BaseModel method to initialise private attributes. It takes context as an argument since that's what pydantic-core passes when calling it. Args: self: The BaseModel instance. context: The context. """ if getattr(self, '__pydantic_private__', None) is None: pydantic_private = {} for name, private_attr in self.__private_attributes__.items(): default = private_attr.get_default() if default is not PydanticUndefined: pydantic_private[name] = default object_setattr(self, '__pydantic_private__', pydantic_private)
This function is meant to behave like a BaseModel method to initialise private attributes.
It takes context as an argument since that's what pydantic-core passes when calling it.
Args
self
- The BaseModel instance.
context
- The context.
class InputSpeechStartedEvent
-
Expand source code
@dataclass class InputSpeechStartedEvent: pass
InputSpeechStartedEvent()
class InputSpeechStoppedEvent (user_transcription_enabled: bool)
-
Expand source code
@dataclass class InputSpeechStoppedEvent: user_transcription_enabled: bool
InputSpeechStoppedEvent(user_transcription_enabled: 'bool')
Instance variables
var user_transcription_enabled : bool
class InputTranscriptionCompleted (item_id: str, transcript: str, is_final: bool)
-
Expand source code
@dataclass class InputTranscriptionCompleted: item_id: str """id of the item""" transcript: str """transcript of the input audio""" is_final: bool
InputTranscriptionCompleted(item_id: 'str', transcript: 'str', is_final: 'bool')
Instance variables
var is_final : bool
var item_id : str
-
id of the item
var transcript : str
-
transcript of the input audio
class LLM
-
Expand source code
class LLM( ABC, rtc.EventEmitter[Union[Literal["metrics_collected", "error"], TEvent]], Generic[TEvent], ): def __init__(self) -> None: super().__init__() self._label = f"{type(self).__module__}.{type(self).__name__}" @property def label(self) -> str: return self._label @property def model(self) -> str: """Get the model name/identifier for this LLM instance. Returns: The model name if available, "unknown" otherwise. Note: Plugins should override this property to provide their model information. """ return "unknown" @abstractmethod def chat( self, *, chat_ctx: ChatContext, tools: list[FunctionTool | RawFunctionTool] | None = None, conn_options: APIConnectOptions = DEFAULT_API_CONNECT_OPTIONS, parallel_tool_calls: NotGivenOr[bool] = NOT_GIVEN, tool_choice: NotGivenOr[ToolChoice] = NOT_GIVEN, extra_kwargs: NotGivenOr[dict[str, Any]] = NOT_GIVEN, ) -> LLMStream: ... def prewarm(self) -> None: """Pre-warm connection to the LLM service""" pass async def aclose(self) -> None: ... async def __aenter__(self) -> LLM: return self async def __aexit__( self, exc_type: type[BaseException] | None, exc: BaseException | None, exc_tb: TracebackType | None, ) -> None: await self.aclose()
Helper class that provides a standard way to create an ABC using inheritance.
Ancestors
- abc.ABC
- EventEmitter
- typing.Generic
Subclasses
- livekit.agents.llm.fallback_adapter.FallbackAdapter
- livekit.plugins.anthropic.llm.LLM
- livekit.plugins.aws.llm.LLM
- livekit.plugins.google.llm.LLM
- livekit.plugins.langchain.langgraph.LLMAdapter
- livekit.plugins.mistralai.llm.LLM
- livekit.plugins.openai.llm.LLM
Instance variables
prop label : str
-
Expand source code
@property def label(self) -> str: return self._label
prop model : str
-
Expand source code
@property def model(self) -> str: """Get the model name/identifier for this LLM instance. Returns: The model name if available, "unknown" otherwise. Note: Plugins should override this property to provide their model information. """ return "unknown"
Get the model name/identifier for this LLM instance.
Returns
The model name if available, "unknown" otherwise.
Note
Plugins should override this property to provide their model information.
Methods
async def aclose(self) ‑> None
-
Expand source code
async def aclose(self) -> None: ...
def chat(self,
*,
chat_ctx: ChatContext,
tools: list[FunctionTool | RawFunctionTool] | None = None,
conn_options: APIConnectOptions = APIConnectOptions(max_retry=3, retry_interval=2.0, timeout=10.0),
parallel_tool_calls: NotGivenOr[bool] = NOT_GIVEN,
tool_choice: NotGivenOr[ToolChoice] = NOT_GIVEN,
extra_kwargs: NotGivenOr[dict[str, Any]] = NOT_GIVEN) ‑> livekit.agents.llm.llm.LLMStream-
Expand source code
@abstractmethod def chat( self, *, chat_ctx: ChatContext, tools: list[FunctionTool | RawFunctionTool] | None = None, conn_options: APIConnectOptions = DEFAULT_API_CONNECT_OPTIONS, parallel_tool_calls: NotGivenOr[bool] = NOT_GIVEN, tool_choice: NotGivenOr[ToolChoice] = NOT_GIVEN, extra_kwargs: NotGivenOr[dict[str, Any]] = NOT_GIVEN, ) -> LLMStream: ...
def prewarm(self) ‑> None
-
Expand source code
def prewarm(self) -> None: """Pre-warm connection to the LLM service""" pass
Pre-warm connection to the LLM service
Inherited members
class LLMError (**data: Any)
-
Expand source code
class LLMError(BaseModel): model_config = ConfigDict(arbitrary_types_allowed=True) type: Literal["llm_error"] = "llm_error" timestamp: float label: str error: Exception = Field(..., exclude=True) recoverable: bool
Usage docs: https://docs.pydantic.dev/2.10/concepts/models/
A base class for creating Pydantic models.
Attributes
__class_vars__
- The names of the class variables defined on the model.
__private_attributes__
- Metadata about the private attributes of the model.
__signature__
- The synthesized
__init__
[Signature
][inspect.Signature] of the model. __pydantic_complete__
- Whether model building is completed, or if there are still undefined fields.
__pydantic_core_schema__
- The core schema of the model.
__pydantic_custom_init__
- Whether the model has a custom
__init__
function. __pydantic_decorators__
- Metadata containing the decorators defined on the model.
This replaces
Model.__validators__
andModel.__root_validators__
from Pydantic V1. __pydantic_generic_metadata__
- Metadata for generic models; contains data used for a similar purpose to args, origin, parameters in typing-module generics. May eventually be replaced by these.
__pydantic_parent_namespace__
- Parent namespace of the model, used for automatic rebuilding of models.
__pydantic_post_init__
- The name of the post-init method for the model, if defined.
__pydantic_root_model__
- Whether the model is a [
RootModel
][pydantic.root_model.RootModel]. __pydantic_serializer__
- The
pydantic-core
SchemaSerializer
used to dump instances of the model. __pydantic_validator__
- The
pydantic-core
SchemaValidator
used to validate instances of the model. __pydantic_fields__
- A dictionary of field names and their corresponding [
FieldInfo
][pydantic.fields.FieldInfo] objects. __pydantic_computed_fields__
- A dictionary of computed field names and their corresponding [
ComputedFieldInfo
][pydantic.fields.ComputedFieldInfo] objects. __pydantic_extra__
- A dictionary containing extra values, if [
extra
][pydantic.config.ConfigDict.extra] is set to 'allow'
. __pydantic_fields_set__
- The names of fields explicitly set during instantiation.
__pydantic_private__
- Values of private attributes set on the model instance.
Create a new model by parsing and validating input data from keyword arguments.
Raises [`ValidationError`][pydantic_core.ValidationError] if the input data cannot be validated to form a valid model.
`self` is explicitly positional-only to allow `self` as a field name.
Ancestors
- pydantic.main.BaseModel
Class variables
var error : Exception
var label : str
var model_config
var recoverable : bool
var timestamp : float
var type : Literal['llm_error']
class LLMStream (llm: LLM,
*,
chat_ctx: ChatContext,
tools: list[FunctionTool | RawFunctionTool],
conn_options: APIConnectOptions)-
Expand source code
class LLMStream(ABC):
    """Base class for one streaming LLM completion request.

    Subclasses implement ``_run()`` to perform a single provider request and
    push ``ChatChunk`` events into ``self._event_ch``. This base class drives
    the retry loop, error events, OpenTelemetry tracing, metrics collection,
    and exposes the chunks through the async-iterator protocol.
    """

    def __init__(
        self,
        llm: LLM,
        *,
        chat_ctx: ChatContext,
        tools: list[FunctionTool | RawFunctionTool],
        conn_options: APIConnectOptions,
    ) -> None:
        self._llm = llm
        self._chat_ctx = chat_ctx
        self._tools = tools
        self._conn_options = conn_options
        self._event_ch = aio.Chan[ChatChunk]()
        # tee the channel: one side feeds __anext__, the other feeds the metrics task
        self._event_aiter, monitor_aiter = aio.itertools.tee(self._event_ch, 2)
        self._current_attempt_has_error = False
        self._metrics_task = asyncio.create_task(
            self._metrics_monitor_task(monitor_aiter), name="LLM._metrics_task"
        )
        self._task = asyncio.create_task(self._main_task())
        # closing the channel terminates both teed consumer iterators
        self._task.add_done_callback(lambda _: self._event_ch.close())
        self._llm_request_span: trace.Span | None = None

    @abstractmethod
    async def _run(self) -> None:
        """Perform one provider request attempt (implemented by subclasses)."""
        ...

    @tracer.start_as_current_span("llm_request", end_on_exit=False)
    async def _main_task(self) -> None:
        # span is ended in aclose(), hence end_on_exit=False on the decorator
        self._llm_request_span = trace.get_current_span()
        self._llm_request_span.set_attribute(trace_types.ATTR_GEN_AI_REQUEST_MODEL, self._llm.model)
        for name, attributes in _chat_ctx_to_otel_events(self._chat_ctx):
            self._llm_request_span.add_event(name, attributes)
        # one initial attempt plus up to max_retry retries
        for i in range(self._conn_options.max_retry + 1):
            try:
                with tracer.start_as_current_span("llm_request_run") as attempt_span:
                    attempt_span.set_attribute(trace_types.ATTR_RETRY_COUNT, i)
                    try:
                        return await self._run()
                    except Exception as e:
                        telemetry_utils.record_exception(attempt_span, e)
                        raise
            except APIError as e:
                retry_interval = self._conn_options._interval_for_retry(i)
                if self._conn_options.max_retry == 0 or not e.retryable:
                    # non-retryable: surface the original APIError
                    self._emit_error(e, recoverable=False)
                    raise
                elif i == self._conn_options.max_retry:
                    # retries exhausted: wrap the last error
                    self._emit_error(e, recoverable=False)
                    raise APIConnectionError(
                        f"failed to generate LLM completion after {self._conn_options.max_retry + 1} attempts",  # noqa: E501
                    ) from e
                else:
                    self._emit_error(e, recoverable=True)
                    logger.warning(
                        f"failed to generate LLM completion, retrying in {retry_interval}s",  # noqa: E501
                        exc_info=e,
                        extra={
                            "llm": self._llm._label,
                            "attempt": i + 1,
                        },
                    )
                if retry_interval > 0:
                    await asyncio.sleep(retry_interval)
                # reset the flag when retrying
                self._current_attempt_has_error = False
            except Exception as e:
                # anything that is not an APIError is never retried
                self._emit_error(e, recoverable=False)
                raise

    def _emit_error(self, api_error: Exception, recoverable: bool) -> None:
        """Mark the current attempt as failed and emit an "error" event on the LLM."""
        self._current_attempt_has_error = True
        self._llm.emit(
            "error",
            LLMError(
                timestamp=time.time(),
                label=self._llm._label,
                error=api_error,
                recoverable=recoverable,
            ),
        )

    @utils.log_exceptions(logger=logger)
    async def _metrics_monitor_task(self, event_aiter: AsyncIterable[ChatChunk]) -> None:
        """Consume the teed chunk stream; publish LLMMetrics and trace attributes."""
        start_time = time.perf_counter()
        # time-to-first-token; stays -1.0 until the first chunk arrives
        ttft = -1.0
        request_id = ""
        usage: CompletionUsage | None = None
        response_content = ""
        tool_calls: list[FunctionToolCall] = []
        completion_start_time: str | None = None
        async for ev in event_aiter:
            request_id = ev.id
            if ttft == -1.0:
                ttft = time.perf_counter() - start_time
                completion_start_time = datetime.now(timezone.utc).isoformat()
            if ev.delta:
                if ev.delta.content:
                    response_content += ev.delta.content
                if ev.delta.tool_calls:
                    tool_calls.extend(ev.delta.tool_calls)
            if ev.usage is not None:
                usage = ev.usage
        duration = time.perf_counter() - start_time
        if self._current_attempt_has_error:
            # don't publish metrics for a failed attempt
            return
        metrics = LLMMetrics(
            timestamp=time.time(),
            request_id=request_id,
            ttft=ttft,
            duration=duration,
            cancelled=self._task.cancelled(),
            label=self._llm._label,
            completion_tokens=usage.completion_tokens if usage else 0,
            prompt_tokens=usage.prompt_tokens if usage else 0,
            prompt_cached_tokens=usage.prompt_cached_tokens if usage else 0,
            total_tokens=usage.total_tokens if usage else 0,
            tokens_per_second=usage.completion_tokens / duration if usage else 0.0,
        )
        if self._llm_request_span:
            # livekit metrics attribute
            self._llm_request_span.set_attribute(
                trace_types.ATTR_LLM_METRICS, metrics.model_dump_json()
            )
            # set gen_ai attributes
            self._llm_request_span.set_attributes(
                {
                    trace_types.ATTR_GEN_AI_USAGE_INPUT_TOKENS: metrics.prompt_tokens,
                    trace_types.ATTR_GEN_AI_USAGE_OUTPUT_TOKENS: metrics.completion_tokens,
                },
            )
            if completion_start_time:
                # NOTE(review): value is wrapped in quotes — presumably Langfuse
                # expects a JSON-encoded string; confirm against the attr consumer
                self._llm_request_span.set_attribute(
                    trace_types.ATTR_LANGFUSE_COMPLETION_START_TIME, f'"{completion_start_time}"'
                )
            completion_event_body: dict[str, AttributeValue] = {"role": "assistant"}
            if response_content:
                completion_event_body["content"] = response_content
            if tool_calls:
                completion_event_body["tool_calls"] = [
                    json.dumps(
                        {
                            "function": {"name": tool_call.name, "arguments": tool_call.arguments},
                            "id": tool_call.call_id,
                            "type": "function",
                        }
                    )
                    for tool_call in tool_calls
                ]
            self._llm_request_span.add_event(trace_types.EVENT_GEN_AI_CHOICE, completion_event_body)
        self._llm.emit("metrics_collected", metrics)

    @property
    def chat_ctx(self) -> ChatContext:
        """Chat context this completion was requested with."""
        return self._chat_ctx

    @property
    def tools(self) -> list[FunctionTool | RawFunctionTool]:
        """Tools made available to the model for this completion."""
        return self._tools

    async def aclose(self) -> None:
        """Cancel the request task, await the metrics task, and end the trace span."""
        await aio.cancel_and_wait(self._task)
        await self._metrics_task
        if self._llm_request_span:
            self._llm_request_span.end()
            self._llm_request_span = None

    async def __anext__(self) -> ChatChunk:
        try:
            val = await self._event_aiter.__anext__()
        except StopAsyncIteration:
            # surface the task's failure instead of a bare StopAsyncIteration
            if not self._task.cancelled() and (exc := self._task.exception()):
                raise exc  # noqa: B904
            raise StopAsyncIteration from None
        return val

    def __aiter__(self) -> AsyncIterator[ChatChunk]:
        return self

    async def __aenter__(self) -> LLMStream:
        return self

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        await self.aclose()

    def to_str_iterable(self) -> AsyncIterable[str]:
        """
        Convert the LLMStream to an async iterable of strings.
        This assumes the stream will not call any tools.
        """

        async def _iterable() -> AsyncIterable[str]:
            # async with ensures aclose() runs once the stream is drained
            async with self:
                async for chunk in self:
                    if chunk.delta and chunk.delta.content:
                        yield chunk.delta.content

        return _iterable()
Helper class that provides a standard way to create an ABC using inheritance.
Ancestors
- abc.ABC
Subclasses
- livekit.agents.llm.fallback_adapter.FallbackLLMStream
- livekit.plugins.anthropic.llm.LLMStream
- livekit.plugins.aws.llm.LLMStream
- livekit.plugins.google.llm.LLMStream
- livekit.plugins.langchain.langgraph.LangGraphStream
- livekit.plugins.mistralai.llm.LLMStream
- livekit.plugins.openai.llm.LLMStream
Instance variables
prop chat_ctx : ChatContext
-
Expand source code
@property def chat_ctx(self) -> ChatContext: return self._chat_ctx
prop tools : list[FunctionTool | RawFunctionTool]
-
Expand source code
@property def tools(self) -> list[FunctionTool | RawFunctionTool]: return self._tools
Methods
async def aclose(self) ‑> None
-
Expand source code
async def aclose(self) -> None: await aio.cancel_and_wait(self._task) await self._metrics_task if self._llm_request_span: self._llm_request_span.end() self._llm_request_span = None
def to_str_iterable(self) ‑> AsyncIterable[str]
-
Expand source code
def to_str_iterable(self) -> AsyncIterable[str]: """ Convert the LLMStream to an async iterable of strings. This assumes the stream will not call any tools. """ async def _iterable() -> AsyncIterable[str]: async with self: async for chunk in self: if chunk.delta and chunk.delta.content: yield chunk.delta.content return _iterable()
Convert the LLMStream to an async iterable of strings. This assumes the stream will not call any tools.
class MessageGeneration (message_id: str,
text_stream: AsyncIterable[str],
audio_stream: AsyncIterable[rtc.AudioFrame],
modalities: "Awaitable[list[Literal['text', 'audio']]]")-
Expand source code
@dataclass
class MessageGeneration:
    """A single generated message exposed as lazily-consumed streams."""

    # ID of the message being generated
    message_id: str
    text_stream: AsyncIterable[str]  # could be io.TimedString
    # audio frames streamed alongside the text
    audio_stream: AsyncIterable[rtc.AudioFrame]
    # presumably resolves to the modalities actually produced — confirm with producer
    modalities: Awaitable[list[Literal["text", "audio"]]]
MessageGeneration(message_id: 'str', text_stream: 'AsyncIterable[str]', audio_stream: 'AsyncIterable[rtc.AudioFrame]', modalities: "Awaitable[list[Literal['text', 'audio']]]")
Instance variables
var audio_stream : AsyncIterable[AudioFrame]
var message_id : str
var modalities : Awaitable[list[typing.Literal['text', 'audio']]]
var text_stream : AsyncIterable[str]
class RawFunctionTool (*args, **kwargs)
-
Expand source code
@runtime_checkable
class RawFunctionTool(Protocol):
    """Structural type for a callable tagged with raw-schema tool metadata.

    ``function_tool(raw_schema=...)`` attaches ``__livekit_raw_tool_info`` to
    the wrapped callable; runtime ``isinstance`` checks only verify that the
    attribute is present.
    """

    # metadata set by the function_tool(raw_schema=...) decorator
    __livekit_raw_tool_info: _RawFunctionToolInfo

    def __call__(self, *args: Any, **kwargs: Any) -> Any: ...
Base class for protocol classes.
Protocol classes are defined as::
class Proto(Protocol): def meth(self) -> int: ...
Such classes are primarily used with static type checkers that recognize structural subtyping (static duck-typing).
For example::
class C: def meth(self) -> int: return 0 def func(x: Proto) -> int: return x.meth() func(C()) # Passes static type check
See PEP 544 for details. Protocol classes decorated with @typing.runtime_checkable act as simple-minded runtime protocols that check only the presence of given attributes, ignoring their type signatures. Protocol classes can be generic, they are defined as::
class GenProto[T](Protocol): def meth(self) -> T: ...
Ancestors
- typing.Protocol
- typing.Generic
class RealtimeCapabilities (message_truncation: bool,
turn_detection: bool,
user_transcription: bool,
auto_tool_reply_generation: bool,
audio_output: bool)-
Expand source code
@dataclass
class RealtimeCapabilities:
    """Feature flags declared by a RealtimeModel implementation.

    Field meanings follow their names; exact semantics are defined by the
    concrete plugin implementations.
    """

    message_truncation: bool
    turn_detection: bool
    user_transcription: bool
    auto_tool_reply_generation: bool
    audio_output: bool
RealtimeCapabilities(message_truncation: 'bool', turn_detection: 'bool', user_transcription: 'bool', auto_tool_reply_generation: 'bool', audio_output: 'bool')
Instance variables
var audio_output : bool
var auto_tool_reply_generation : bool
var message_truncation : bool
var turn_detection : bool
var user_transcription : bool
class RealtimeError (message: str)
-
Expand source code
class RealtimeError(Exception):
    """Error raised by realtime model operations (e.g. on timeout)."""

    def __init__(self, message: str) -> None:
        # forward the message so str(exc) and exc.args behave as usual
        super().__init__(message)
Common base class for all non-exit exceptions.
Ancestors
- builtins.Exception
- builtins.BaseException
class RealtimeModel (*,
capabilities: RealtimeCapabilities)-
Expand source code
class RealtimeModel:
    """Base class for realtime (speech-to-speech) model implementations.

    Concrete subclasses provide ``session()`` to open a RealtimeSession and
    ``aclose()`` to release resources. Instances can be used as async context
    managers; exiting the context closes the model.

    NOTE(review): methods are decorated with @abstractmethod but the class
    does not inherit abc.ABC, so instantiation is not blocked at runtime.
    """

    def __init__(self, *, capabilities: RealtimeCapabilities) -> None:
        self._capabilities = capabilities
        # fully-qualified class name, used as a label in events/metrics
        self._label = f"{type(self).__module__}.{type(self).__name__}"

    @property
    def capabilities(self) -> RealtimeCapabilities:
        """Feature flags declared by this implementation."""
        return self._capabilities

    @abstractmethod
    def session(self) -> RealtimeSession:
        """Open a new realtime session against this model."""
        ...

    @abstractmethod
    async def aclose(self) -> None:
        """Release any resources held by the model."""
        ...

    async def __aenter__(self) -> RealtimeModel:
        return self

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        await self.aclose()
Subclasses
- livekit.plugins.aws.experimental.realtime.realtime_model.RealtimeModel
- RealtimeModel
- livekit.plugins.openai.realtime.realtime_model.RealtimeModel
Instance variables
prop capabilities : RealtimeCapabilities
-
Expand source code
@property def capabilities(self) -> RealtimeCapabilities: return self._capabilities
Methods
async def aclose(self) ‑> None
-
Expand source code
@abstractmethod async def aclose(self) -> None: ...
def session(self) ‑> livekit.agents.llm.realtime.RealtimeSession
-
Expand source code
@abstractmethod def session(self) -> RealtimeSession: ...
class RealtimeModelError (**data: Any)
-
Expand source code
class RealtimeModelError(BaseModel):
    """Error event payload emitted by realtime models."""

    # required so the plain `Exception` field below is accepted by pydantic
    model_config = ConfigDict(arbitrary_types_allowed=True)

    # discriminator for consumers multiplexing several event payload types
    type: Literal["realtime_model_error"] = "realtime_model_error"
    # presumably a unix timestamp in seconds — confirm with the emitter
    timestamp: float
    label: str
    # the original exception; excluded from serialized output
    error: Exception = Field(..., exclude=True)
    recoverable: bool
Usage docs: https://docs.pydantic.dev/2.10/concepts/models/
A base class for creating Pydantic models.
Attributes
__class_vars__
- The names of the class variables defined on the model.
__private_attributes__
- Metadata about the private attributes of the model.
__signature__
- The synthesized
__init__
[Signature
][inspect.Signature] of the model. __pydantic_complete__
- Whether model building is completed, or if there are still undefined fields.
__pydantic_core_schema__
- The core schema of the model.
__pydantic_custom_init__
- Whether the model has a custom
__init__
function. __pydantic_decorators__
- Metadata containing the decorators defined on the model.
This replaces
Model.__validators__
and Model.__root_validators__
from Pydantic V1. __pydantic_generic_metadata__
- Metadata for generic models; contains data used for a similar purpose to args, origin, parameters in typing-module generics. May eventually be replaced by these.
__pydantic_parent_namespace__
- Parent namespace of the model, used for automatic rebuilding of models.
__pydantic_post_init__
- The name of the post-init method for the model, if defined.
__pydantic_root_model__
- Whether the model is a [
RootModel
][pydantic.root_model.RootModel]. __pydantic_serializer__
- The
pydantic-core
SchemaSerializer
used to dump instances of the model. __pydantic_validator__
- The
pydantic-core
SchemaValidator
used to validate instances of the model. __pydantic_fields__
- A dictionary of field names and their corresponding [
FieldInfo
][pydantic.fields.FieldInfo] objects. __pydantic_computed_fields__
- A dictionary of computed field names and their corresponding [
ComputedFieldInfo
][pydantic.fields.ComputedFieldInfo] objects. __pydantic_extra__
- A dictionary containing extra values, if [
extra
][pydantic.config.ConfigDict.extra] is set to 'allow'
. __pydantic_fields_set__
- The names of fields explicitly set during instantiation.
__pydantic_private__
- Values of private attributes set on the model instance.
Create a new model by parsing and validating input data from keyword arguments.
Raises [
ValidationError
][pydantic_core.ValidationError] if the input data cannot be validated to form a valid model. self
is explicitly positional-only to allow self
as a field name.
Ancestors
- pydantic.main.BaseModel
Class variables
var error : Exception
var label : str
var model_config
var recoverable : bool
var timestamp : float
var type : Literal['realtime_model_error']
class RealtimeSession (realtime_model: RealtimeModel)
-
Expand source code
class RealtimeSession(ABC, rtc.EventEmitter[Union[EventTypes, TEvent]], Generic[TEvent]):
    """Abstract interface for one live session against a RealtimeModel.

    Concrete plugin sessions implement audio/video input, reply generation,
    and context/tool updates; events are delivered via the EventEmitter base.
    """

    def __init__(self, realtime_model: RealtimeModel) -> None:
        super().__init__()
        self._realtime_model = realtime_model

    @property
    def realtime_model(self) -> RealtimeModel:
        """The model this session was created from."""
        return self._realtime_model

    @property
    @abstractmethod
    def chat_ctx(self) -> ChatContext: ...

    @property
    @abstractmethod
    def tools(self) -> ToolContext: ...

    @abstractmethod
    async def update_instructions(self, instructions: str) -> None: ...

    @abstractmethod
    async def update_chat_ctx(
        self, chat_ctx: ChatContext
    ) -> None: ...  # can raise RealtimeError on Timeout

    @abstractmethod
    async def update_tools(self, tools: list[FunctionTool | RawFunctionTool | Any]) -> None: ...

    @abstractmethod
    def update_options(self, *, tool_choice: NotGivenOr[ToolChoice | None] = NOT_GIVEN) -> None: ...

    @abstractmethod
    def push_audio(self, frame: rtc.AudioFrame) -> None: ...

    @abstractmethod
    def push_video(self, frame: rtc.VideoFrame) -> None: ...

    @abstractmethod
    def generate_reply(
        self,
        *,
        instructions: NotGivenOr[str] = NOT_GIVEN,
    ) -> asyncio.Future[GenerationCreatedEvent]: ...  # can raise RealtimeError on Timeout

    # commit the input audio buffer to the server
    @abstractmethod
    def commit_audio(self) -> None: ...

    # clear the input audio buffer to the server
    @abstractmethod
    def clear_audio(self) -> None: ...

    # cancel the current generation (do nothing if no generation is in progress)
    @abstractmethod
    def interrupt(self) -> None: ...

    # message_id is the ID of the message to truncate (inside the ChatCtx)
    @abstractmethod
    def truncate(
        self,
        *,
        message_id: str,
        modalities: list[Literal["text", "audio"]],
        audio_end_ms: int,
        audio_transcript: NotGivenOr[str] = NOT_GIVEN,
    ) -> None: ...

    @abstractmethod
    async def aclose(self) -> None: ...

    def start_user_activity(self) -> None:
        """notifies the model that user activity has started"""
        # optional hook: default implementation is a no-op
        pass
Helper class that provides a standard way to create an ABC using inheritance.
Ancestors
- abc.ABC
- EventEmitter
- typing.Generic
Subclasses
- livekit.plugins.aws.experimental.realtime.realtime_model.RealtimeSession
- RealtimeSession
- livekit.plugins.openai.realtime.realtime_model.RealtimeSession
Instance variables
prop chat_ctx : ChatContext
-
Expand source code
@property @abstractmethod def chat_ctx(self) -> ChatContext: ...
prop realtime_model : RealtimeModel
-
Expand source code
@property def realtime_model(self) -> RealtimeModel: return self._realtime_model
prop tools : ToolContext
-
Expand source code
@property @abstractmethod def tools(self) -> ToolContext: ...
Methods
async def aclose(self) ‑> None
-
Expand source code
@abstractmethod async def aclose(self) -> None: ...
def clear_audio(self) ‑> None
-
Expand source code
@abstractmethod def clear_audio(self) -> None: ...
def commit_audio(self) ‑> None
-
Expand source code
@abstractmethod def commit_audio(self) -> None: ...
def generate_reply(self, *, instructions: NotGivenOr[str] = NOT_GIVEN) ‑> _asyncio.Future[livekit.agents.llm.realtime.GenerationCreatedEvent]
-
Expand source code
@abstractmethod def generate_reply( self, *, instructions: NotGivenOr[str] = NOT_GIVEN, ) -> asyncio.Future[GenerationCreatedEvent]: ... # can raise RealtimeError on Timeout
def interrupt(self) ‑> None
-
Expand source code
@abstractmethod def interrupt(self) -> None: ...
def push_audio(self, frame: rtc.AudioFrame) ‑> None
-
Expand source code
@abstractmethod def push_audio(self, frame: rtc.AudioFrame) -> None: ...
def push_video(self, frame: rtc.VideoFrame) ‑> None
-
Expand source code
@abstractmethod def push_video(self, frame: rtc.VideoFrame) -> None: ...
def start_user_activity(self) ‑> None
-
Expand source code
def start_user_activity(self) -> None: """notifies the model that user activity has started""" pass
notifies the model that user activity has started
def truncate(self,
*,
message_id: str,
modalities: "list[Literal['text', 'audio']]",
audio_end_ms: int,
audio_transcript: NotGivenOr[str] = NOT_GIVEN) ‑> None-
Expand source code
@abstractmethod def truncate( self, *, message_id: str, modalities: list[Literal["text", "audio"]], audio_end_ms: int, audio_transcript: NotGivenOr[str] = NOT_GIVEN, ) -> None: ...
async def update_chat_ctx(self,
chat_ctx: ChatContext) ‑> None-
Expand source code
@abstractmethod async def update_chat_ctx( self, chat_ctx: ChatContext ) -> None: ... # can raise RealtimeError on Timeout
async def update_instructions(self, instructions: str) ‑> None
-
Expand source code
@abstractmethod async def update_instructions(self, instructions: str) -> None: ...
def update_options(self, *, tool_choice: NotGivenOr[ToolChoice | None] = NOT_GIVEN) ‑> None
-
Expand source code
@abstractmethod def update_options(self, *, tool_choice: NotGivenOr[ToolChoice | None] = NOT_GIVEN) -> None: ...
async def update_tools(self,
tools: list[FunctionTool | RawFunctionTool | Any]) ‑> None-
Expand source code
@abstractmethod async def update_tools(self, tools: list[FunctionTool | RawFunctionTool | Any]) -> None: ...
Inherited members
class RealtimeSessionReconnectedEvent
-
Expand source code
@dataclass
class RealtimeSessionReconnectedEvent:
    """Marker event with no payload — presumably emitted after a realtime
    session re-establishes its connection; confirm with the emitting session."""

    pass
RealtimeSessionReconnectedEvent()
class StopResponse
-
Expand source code
class StopResponse(Exception):
    """Exception raised within AI functions.

    Raised by the user to indicate that the agent should not generate a
    response for the current function call.
    """

    def __init__(self) -> None:
        # carries no message: the exception itself is the signal
        super().__init__()
Common base class for all non-exit exceptions.
Exception raised within AI functions.
This exception can be raised by the user to indicate that the agent should not generate a response for the current function call.
Ancestors
- builtins.Exception
- builtins.BaseException
class ToolContext (tools: list[FunctionTool | RawFunctionTool])
-
Expand source code
class ToolContext:
    """Stateless container for a set of AI functions.

    Tools can be passed in explicitly and/or declared as decorated methods on
    a subclass; both are merged into a single name -> tool mapping.
    """

    def __init__(self, tools: list[FunctionTool | RawFunctionTool]) -> None:
        self.update_tools(tools)

    @classmethod
    def empty(cls) -> ToolContext:
        """Create a ToolContext containing no tools."""
        return cls([])

    @property
    def function_tools(self) -> dict[str, FunctionTool | RawFunctionTool]:
        """Snapshot of all registered tools, keyed by function name."""
        return self._tools_map.copy()

    def update_tools(self, tools: list[FunctionTool | RawFunctionTool]) -> None:
        """Replace the registered tools with ``tools`` plus any tool methods
        found on ``self``.

        Raises:
            ValueError: if a tool has an unknown type or two tools share a name.
        """
        self._tools = tools.copy()

        # fix: merge method-defined tools into a new list; the previous code
        # appended them to the caller's `tools` argument, mutating it in place
        all_tools = [*tools, *find_function_tools(self)]

        self._tools_map: dict[str, FunctionTool | RawFunctionTool] = {}
        info: _FunctionToolInfo | _RawFunctionToolInfo
        for tool in all_tools:
            if is_raw_function_tool(tool):
                info = get_raw_function_info(tool)
            elif is_function_tool(tool):
                info = get_function_info(tool)
            else:
                # TODO(theomonnom): MCP servers & other tools
                raise ValueError(f"unknown tool type: {type(tool)}")

            if info.name in self._tools_map:
                raise ValueError(f"duplicate function name: {info.name}")

            self._tools_map[info.name] = tool

    def copy(self) -> ToolContext:
        """Return a new ToolContext built from a copy of the explicit tool list."""
        return ToolContext(self._tools.copy())
Stateless container for a set of AI functions
Static methods
def empty() ‑> livekit.agents.llm.tool_context.ToolContext
Instance variables
prop function_tools : dict[str, FunctionTool | RawFunctionTool]
-
Expand source code
@property def function_tools(self) -> dict[str, FunctionTool | RawFunctionTool]: return self._tools_map.copy()
Methods
def copy(self) ‑> livekit.agents.llm.tool_context.ToolContext
-
Expand source code
def copy(self) -> ToolContext: return ToolContext(self._tools.copy())
def update_tools(self,
tools: list[FunctionTool | RawFunctionTool]) ‑> None-
Expand source code
def update_tools(self, tools: list[FunctionTool | RawFunctionTool]) -> None: self._tools = tools.copy() for method in find_function_tools(self): tools.append(method) self._tools_map: dict[str, FunctionTool | RawFunctionTool] = {} info: _FunctionToolInfo | _RawFunctionToolInfo for tool in tools: if is_raw_function_tool(tool): info = get_raw_function_info(tool) elif is_function_tool(tool): info = get_function_info(tool) else: # TODO(theomonnom): MCP servers & other tools raise ValueError(f"unknown tool type: {type(tool)}") if info.name in self._tools_map: raise ValueError(f"duplicate function name: {info.name}") self._tools_map[info.name] = tool
class ToolError (message: str)
-
Expand source code
class ToolError(Exception):
    """Exception raised within AI functions.

    Raised by users when an error occurs during AI operations; the message is
    visible to the LLM so it can understand the error context while producing
    the FunctionOutput.
    """

    def __init__(self, message: str) -> None:
        super().__init__(message)
        self._message = message

    @property
    def message(self) -> str:
        # the description supplied at construction time
        return self._message
Common base class for all non-exit exceptions.
Exception raised within AI functions.
This exception should be raised by users when an error occurs in the context of AI operations. The provided message will be visible to the LLM, allowing it to understand the context of the error during FunctionOutput generation.
Ancestors
- builtins.Exception
- builtins.BaseException
Instance variables
prop message : str
-
Expand source code
@property def message(self) -> str: return self._message