Module livekit.plugins.mistralai
LiveKit plugin for Mistral AI models. Supports chat (LLM) and speech-to-text (STT) models.
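For example, wiring both plugins into an agent — a minimal sketch, assuming a livekit-agents 1.x AgentSession and that the MISTRAL_API_KEY environment variable is set; the TTS/VAD slots are left to other plugins:

import asyncio

from livekit.agents import AgentSession
from livekit.plugins import mistralai

# Sketch: an AgentSession using Mistral AI for both chat and transcription.
session = AgentSession(
    llm=mistralai.LLM(model="ministral-8b-2410"),
    stt=mistralai.STT(language="en", model="voxtral-mini-latest"),
    # tts=..., vad=...  # provided by other plugins, omitted here
)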
Classes
class LLM (model: str | ChatModels = 'ministral-8b-2410',
api_key: str | None = None,
client: Mistral | None = None,
temperature: NotGivenOr[float] = NOT_GIVEN,
max_completion_tokens: NotGivenOr[int] = NOT_GIVEN,
timeout: httpx.Timeout | None = None)
class LLM(llm.LLM):
    def __init__(
        self,
        model: str | ChatModels = "ministral-8b-2410",
        api_key: str | None = None,
        client: Mistral | None = None,
        temperature: NotGivenOr[float] = NOT_GIVEN,
        max_completion_tokens: NotGivenOr[int] = NOT_GIVEN,
        timeout: httpx.Timeout | None = None,
    ) -> None:
        super().__init__()
        self._opts = _LLMOptions(
            model=model,
            temperature=temperature,
            max_completion_tokens=max_completion_tokens,
        )
        # NOTE: the `client` and `timeout` arguments are accepted but unused here;
        # a new Mistral client is always constructed from the API key.
        self._client = Mistral(api_key=api_key or os.environ.get("MISTRAL_API_KEY"))

    @property
    def model(self) -> str:
        return self._opts.model

    def chat(
        self,
        *,
        chat_ctx: ChatContext,
        tools: list[FunctionTool | RawFunctionTool] | None = None,
        conn_options: APIConnectOptions = DEFAULT_API_CONNECT_OPTIONS,
        parallel_tool_calls: NotGivenOr[bool] = NOT_GIVEN,
        tool_choice: NotGivenOr[ToolChoice] = NOT_GIVEN,
        response_format: NotGivenOr[type[llm_utils.ResponseFormatT]] = NOT_GIVEN,
        extra_kwargs: NotGivenOr[dict[str, Any]] = NOT_GIVEN,
    ) -> LLMStream:
        extra: dict[str, Any] = {}
        if is_given(self._opts.max_completion_tokens):
            extra["max_completion_tokens"] = self._opts.max_completion_tokens
        if is_given(self._opts.temperature):
            extra["temperature"] = self._opts.temperature
        if is_given(parallel_tool_calls):
            extra["parallel_tool_calls"] = parallel_tool_calls
        if is_given(tool_choice):
            extra["tool_choice"] = tool_choice
        if is_given(response_format):
            extra["response_format"] = response_format

        return LLMStream(
            self,
            model=self._opts.model,
            client=self._client,
            chat_ctx=chat_ctx,
            tools=tools or [],
            conn_options=conn_options,
            extra_kwargs=extra,
        )
LiveKit Agents LLM implementation backed by Mistral AI chat models.
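For example, constructing the plugin directly — a sketch; api_key may be omitted when the MISTRAL_API_KEY environment variable is set:

from livekit.plugins import mistralai

# Sampling options set here are forwarded to the Mistral chat API on each chat() call.
llm = mistralai.LLM(
    model="ministral-8b-2410",
    temperature=0.4,
    max_completion_tokens=512,
)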
Ancestors
- livekit.agents.llm.llm.LLM
- abc.ABC
- EventEmitter
- typing.Generic
Instance variables
prop model : str
@property
def model(self) -> str:
    return self._opts.model
Get the model name/identifier for this LLM instance.
Returns
The model name if available, "unknown" otherwise.
Note
Plugins should override this property to provide their model information.
Methods
def chat(self,
*,
chat_ctx: ChatContext,
tools: list[FunctionTool | RawFunctionTool] | None = None,
conn_options: APIConnectOptions = APIConnectOptions(max_retry=3, retry_interval=2.0, timeout=10.0),
parallel_tool_calls: NotGivenOr[bool] = NOT_GIVEN,
tool_choice: NotGivenOr[ToolChoice] = NOT_GIVEN,
response_format: NotGivenOr[type[llm_utils.ResponseFormatT]] = NOT_GIVEN,
extra_kwargs: NotGivenOr[dict[str, Any]] = NOT_GIVEN) -> livekit.plugins.mistralai.llm.LLMStream
def chat(
    self,
    *,
    chat_ctx: ChatContext,
    tools: list[FunctionTool | RawFunctionTool] | None = None,
    conn_options: APIConnectOptions = DEFAULT_API_CONNECT_OPTIONS,
    parallel_tool_calls: NotGivenOr[bool] = NOT_GIVEN,
    tool_choice: NotGivenOr[ToolChoice] = NOT_GIVEN,
    response_format: NotGivenOr[type[llm_utils.ResponseFormatT]] = NOT_GIVEN,
    extra_kwargs: NotGivenOr[dict[str, Any]] = NOT_GIVEN,
) -> LLMStream:
    extra: dict[str, Any] = {}
    if is_given(self._opts.max_completion_tokens):
        extra["max_completion_tokens"] = self._opts.max_completion_tokens
    if is_given(self._opts.temperature):
        extra["temperature"] = self._opts.temperature
    if is_given(parallel_tool_calls):
        extra["parallel_tool_calls"] = parallel_tool_calls
    if is_given(tool_choice):
        extra["tool_choice"] = tool_choice
    if is_given(response_format):
        extra["response_format"] = response_format

    return LLMStream(
        self,
        model=self._opts.model,
        client=self._client,
        chat_ctx=chat_ctx,
        tools=tools or [],
        conn_options=conn_options,
        extra_kwargs=extra,
    )
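A sketch of streaming a completion outside of an agent session; the ChatContext.add_message call and the chunk.delta.content field reflect the livekit-agents 1.x API and should be treated as assumptions:

import asyncio

from livekit.agents.llm import ChatContext
from livekit.plugins import mistralai

async def main() -> None:
    llm = mistralai.LLM()
    chat_ctx = ChatContext()
    chat_ctx.add_message(role="user", content="Say hello in one sentence.")

    # chat() returns an LLMStream, an async iterator of incremental chat chunks.
    stream = llm.chat(chat_ctx=chat_ctx)
    async for chunk in stream:
        if chunk.delta and chunk.delta.content:
            print(chunk.delta.content, end="", flush=True)
    await stream.aclose()

asyncio.run(main())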
Inherited members
class STT (*,
language: str = 'en',
model: STTModels | str = 'voxtral-mini-latest',
api_key: NotGivenOr[str] = NOT_GIVEN,
client: Mistral | None = None)
class STT(stt.STT):
    def __init__(
        self,
        *,
        language: str = "en",
        model: STTModels | str = "voxtral-mini-latest",
        api_key: NotGivenOr[str] = NOT_GIVEN,
        client: Mistral | None = None,
    ):
        """
        Create a new instance of MistralAI STT.

        Args:
            language: The language code to use for transcription (e.g., "en" for English).
            model: The MistralAI model to use for transcription, default is voxtral-mini-latest.
            api_key: Your MistralAI API key. If not provided, will use the MISTRAL_API_KEY environment variable.
            client: Optional pre-configured MistralAI client instance.
        """
        super().__init__(capabilities=stt.STTCapabilities(streaming=False, interim_results=False))
        self._opts = _STTOptions(
            language=language,
            model=model,
        )
        self._client = client or Mistral(
            api_key=api_key if is_given(api_key) else os.environ.get("MISTRAL_API_KEY"),
        )

    def update_options(
        self,
        *,
        model: NotGivenOr[STTModels | str] = NOT_GIVEN,
        language: NotGivenOr[str] = NOT_GIVEN,
    ) -> None:
        """
        Update the options for the STT.

        Args:
            model: The model to use for transcription.
            language: The language to transcribe in.
        """
        if is_given(model):
            self._opts.model = model
        if is_given(language):
            self._opts.language = language

    async def _recognize_impl(
        self,
        buffer: AudioBuffer,
        *,
        language: NotGivenOr[str] = NOT_GIVEN,
        conn_options: APIConnectOptions,
    ) -> stt.SpeechEvent:
        try:
            if is_given(language):
                self._opts.language = language

            data = rtc.combine_audio_frames(buffer).to_wav_bytes()

            # MistralAI transcription API call
            resp = await self._client.audio.transcriptions.complete_async(
                model=self._opts.model,
                file={"content": data, "file_name": "audio.wav"},
                language=self._opts.language if self._opts.language else None,
            )

            return stt.SpeechEvent(
                type=stt.SpeechEventType.FINAL_TRANSCRIPT,
                alternatives=[
                    stt.SpeechData(text=resp.text, language=self._opts.language),
                ],
            )
        except SDKError as e:
            if e.status_code in (408, 504):  # Request Timeout, Gateway Timeout
                raise APITimeoutError() from e
            else:
                raise APIStatusError(e.message, status_code=e.status_code, body=e.body) from e
        except Exception as e:
            raise APIConnectionError() from e
LiveKit Agents STT implementation backed by Mistral AI transcription models.
Create a new instance of MistralAI STT.
Args
language
- The language code to use for transcription (e.g., "en" for English).
model
- The MistralAI model to use for transcription, default is voxtral-mini-latest.
api_key
- Your MistralAI API key. If not provided, will use the MISTRAL_API_KEY environment variable.
client
- Optional pre-configured MistralAI client instance.
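A sketch of a one-shot transcription; recognize() is the public entry point inherited from the livekit-agents STT base class, and the silent placeholder frame stands in for real captured audio:

import asyncio

from livekit import rtc
from livekit.plugins import mistralai

async def main() -> None:
    stt = mistralai.STT(language="en")

    # Placeholder: one second of 16 kHz mono silence in place of real audio.
    frame = rtc.AudioFrame(
        data=b"\x00\x00" * 16000,
        sample_rate=16000,
        num_channels=1,
        samples_per_channel=16000,
    )

    event = await stt.recognize(frame)
    print(event.alternatives[0].text)

asyncio.run(main())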
Ancestors
- livekit.agents.stt.stt.STT
- abc.ABC
- EventEmitter
- typing.Generic
Methods
def update_options(self,
*,
model: NotGivenOr[STTModels | str] = NOT_GIVEN,
language: NotGivenOr[str] = NOT_GIVEN) -> None
def update_options(
    self,
    *,
    model: NotGivenOr[STTModels | str] = NOT_GIVEN,
    language: NotGivenOr[str] = NOT_GIVEN,
) -> None:
    """
    Update the options for the STT.

    Args:
        model: The model to use for transcription.
        language: The language to transcribe in.
    """
    if is_given(model):
        self._opts.model = model
    if is_given(language):
        self._opts.language = language
Update the options for the STT.
Args
language
- The language to transcribe in.
model
- The model to use for transcription.
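For example, switching the transcription language mid-session — a sketch; later recognize() calls pick up the new options:

# Assuming `stt` is an existing mistralai.STT instance:
stt.update_options(language="fr", model="voxtral-mini-latest")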
Inherited members