Module livekit.plugins.aws
AWS plugin for LiveKit Agents
Support for AWS AI including Bedrock, Polly, and Transcribe.
See https://docs.livekit.io/agents/integrations/aws/ for more information.
Classes
class ChunkedStream (*,
tts: TTS,
text: str,
session: aioboto3.Session,
conn_options: APIConnectOptions = APIConnectOptions(max_retry=3, retry_interval=2.0, timeout=10.0),
opts: _TTSOptions)
Expand source code
class ChunkedStream(tts.ChunkedStream):
    """Synthesizes speech through AWS Polly's non-streaming SynthesizeSpeech API.

    Polly returns a single MP3 payload per request; it is decoded incrementally
    and re-emitted as synthesized audio frames.
    """

    def __init__(
        self,
        *,
        tts: TTS,
        text: str,
        session: aioboto3.Session,
        conn_options: APIConnectOptions = DEFAULT_API_CONNECT_OPTIONS,
        opts: _TTSOptions,
    ) -> None:
        super().__init__(tts=tts, input_text=text, conn_options=conn_options)
        self._opts = opts
        self._segment_id = utils.shortuuid()
        self._session = session

    async def _run(self):
        request_id = utils.shortuuid()
        try:
            async with self._session.client("polly") as polly:
                # Optional fields that were not given stay None and are removed
                # by _strip_nones() before the request is sent.
                request = {
                    "Text": self._input_text,
                    "OutputFormat": "mp3",
                    "Engine": (
                        self._opts.speech_engine
                        if is_given(self._opts.speech_engine)
                        else DEFAULT_SPEECH_ENGINE
                    ),
                    "VoiceId": self._opts.voice if is_given(self._opts.voice) else DEFAULT_VOICE,
                    "TextType": "text",
                    "SampleRate": str(self._opts.sample_rate),
                    "LanguageCode": (
                        self._opts.language if is_given(self._opts.language) else None
                    ),
                }
                response = await polly.synthesize_speech(**_strip_nones(request))
                if "AudioStream" not in response:
                    return

                decoder = utils.codecs.AudioStreamDecoder(
                    sample_rate=self._opts.sample_rate,
                    num_channels=1,
                )

                async def _feed_decoder():
                    # Push Polly's MP3 payload into the decoder chunk by chunk,
                    # always signalling end-of-input so the frame loop terminates.
                    try:
                        async with response["AudioStream"] as audio_stream:
                            async for chunk, _ in audio_stream.content.iter_chunks():
                                decoder.push(chunk)
                    finally:
                        decoder.end_input()

                feed_task = asyncio.create_task(_feed_decoder())
                try:
                    emitter = tts.SynthesizedAudioEmitter(
                        event_ch=self._event_ch,
                        request_id=request_id,
                        segment_id=self._segment_id,
                    )
                    async for frame in decoder:
                        emitter.push(frame)
                    emitter.flush()
                    await feed_task
                finally:
                    await utils.aio.gracefully_cancel(feed_task)
        except asyncio.TimeoutError:
            raise APITimeoutError() from None
        except aiohttp.ClientResponseError as e:
            raise APIStatusError(
                message=e.message,
                status_code=e.status,
                request_id=request_id,
                body=None,
            ) from None
        except Exception as e:
            raise APIConnectionError() from e
Used by the non-streamed synthesize API, some providers support chunked http responses
Ancestors
- livekit.agents.tts.tts.ChunkedStream
- abc.ABC
class LLM (*,
model: NotGivenOr[str | TEXT_MODEL] = NOT_GIVEN,
api_key: NotGivenOr[str] = NOT_GIVEN,
api_secret: NotGivenOr[str] = NOT_GIVEN,
region: NotGivenOr[str] = NOT_GIVEN,
temperature: NotGivenOr[float] = NOT_GIVEN,
max_output_tokens: NotGivenOr[int] = NOT_GIVEN,
top_p: NotGivenOr[float] = NOT_GIVEN,
tool_choice: NotGivenOr[ToolChoice] = NOT_GIVEN,
additional_request_fields: NotGivenOr[dict[str, Any]] = NOT_GIVEN,
session: aioboto3.Session | None = None)
Expand source code
class LLM(llm.LLM):
    def __init__(
        self,
        *,
        model: NotGivenOr[str | TEXT_MODEL] = NOT_GIVEN,
        api_key: NotGivenOr[str] = NOT_GIVEN,
        api_secret: NotGivenOr[str] = NOT_GIVEN,
        region: NotGivenOr[str] = NOT_GIVEN,
        temperature: NotGivenOr[float] = NOT_GIVEN,
        max_output_tokens: NotGivenOr[int] = NOT_GIVEN,
        top_p: NotGivenOr[float] = NOT_GIVEN,
        tool_choice: NotGivenOr[ToolChoice] = NOT_GIVEN,
        additional_request_fields: NotGivenOr[dict[str, Any]] = NOT_GIVEN,
        session: aioboto3.Session | None = None,
    ) -> None:
        """Create a new instance of AWS Bedrock LLM.

        ``api_key`` and ``api_secret`` must be set to your AWS access key id and
        secret access key, either via the arguments or the ``AWS_ACCESS_KEY_ID``
        and ``AWS_SECRET_ACCESS_KEY`` environment variables.

        See https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/bedrock-runtime/client/converse_stream.html
        for details on the AWS Bedrock Runtime API.

        Args:
            model (TEXT_MODEL, optional): model or inference profile arn to use
                (https://docs.aws.amazon.com/bedrock/latest/userguide/inference-profiles-use.html).
                Falls back to the BEDROCK_INFERENCE_PROFILE_ARN environment variable.
            api_key (str, optional): AWS access key id.
            api_secret (str, optional): AWS secret access key.
            region (str, optional): region to use for AWS API requests. Defaults to "us-east-1".
            temperature (float, optional): sampling temperature for response generation.
            max_output_tokens (int, optional): maximum number of tokens to generate.
            top_p (float, optional): nucleus sampling probability.
            tool_choice (ToolChoice, optional): whether/which tools to use during generation.
            additional_request_fields (dict[str, Any], optional): extra fields forwarded to
                the AWS Bedrock Converse API.
            session (aioboto3.Session, optional): optional aioboto3 session to use.
        """  # noqa: E501
        super().__init__()
        self._session = session or get_aws_async_session(
            api_key=api_key if is_given(api_key) else None,
            api_secret=api_secret if is_given(api_secret) else None,
            region=region if is_given(region) else None,
        )

        # Allow the model/inference profile to come from the environment.
        model = model if is_given(model) else os.environ.get("BEDROCK_INFERENCE_PROFILE_ARN")
        if not model:
            raise ValueError(
                "model or inference profile arn must be set using the argument or by setting the BEDROCK_INFERENCE_PROFILE_ARN environment variable."  # noqa: E501
            )
        self._opts = _LLMOptions(
            model=model,
            temperature=temperature,
            tool_choice=tool_choice,
            max_output_tokens=max_output_tokens,
            top_p=top_p,
            additional_request_fields=additional_request_fields,
        )

    def chat(
        self,
        *,
        chat_ctx: ChatContext,
        tools: list[FunctionTool] | None = None,
        conn_options: APIConnectOptions = DEFAULT_API_CONNECT_OPTIONS,
        temperature: NotGivenOr[float] = NOT_GIVEN,
        tool_choice: NotGivenOr[ToolChoice] = NOT_GIVEN,
    ) -> LLMStream:
        """Build the Bedrock Converse request kwargs and return an LLMStream."""
        extra_kwargs: dict[str, Any] = {}
        if is_given(self._opts.model):
            extra_kwargs["modelId"] = self._opts.model

        if tools:
            tool_config: dict[str, Any] | None = {"tools": to_fnc_ctx(tools)}
            choice = tool_choice if is_given(tool_choice) else self._opts.tool_choice
            if is_given(choice):
                if isinstance(choice, dict) and choice.get("type") == "function":
                    tool_config["toolChoice"] = {"tool": {"name": choice["function"]["name"]}}
                elif choice == "required":
                    tool_config["toolChoice"] = {"any": {}}
                elif choice == "auto":
                    tool_config["toolChoice"] = {"auto": {}}
                else:
                    # An unrecognized tool_choice disables tool use entirely.
                    tool_config = None
            if tool_config:
                extra_kwargs["toolConfig"] = tool_config

        messages, system_message = to_chat_ctx(chat_ctx, id(self))
        extra_kwargs["messages"] = messages
        if system_message:
            extra_kwargs["system"] = [system_message]

        inference_config: dict[str, Any] = {}
        if is_given(self._opts.max_output_tokens):
            inference_config["maxTokens"] = self._opts.max_output_tokens
        # Per-call temperature overrides the instance-level default.
        effective_temperature = temperature if is_given(temperature) else self._opts.temperature
        if is_given(effective_temperature):
            inference_config["temperature"] = effective_temperature
        if is_given(self._opts.top_p):
            inference_config["topP"] = self._opts.top_p
        extra_kwargs["inferenceConfig"] = inference_config

        if is_given(self._opts.additional_request_fields):
            extra_kwargs["additionalModelRequestFields"] = self._opts.additional_request_fields

        return LLMStream(
            self,
            chat_ctx=chat_ctx,
            tools=tools,
            session=self._session,
            conn_options=conn_options,
            extra_kwargs=extra_kwargs,
        )
Helper class that provides a standard way to create an ABC using inheritance.
Create a new instance of AWS Bedrock LLM.
api_key and api_secret must be set to your AWS access key id and secret access key, either using the arguments or by setting the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables.
See https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/bedrock-runtime/client/converse_stream.html for more details on the AWS Bedrock Runtime API.
Args
model
:TEXT_MODEL
, optional- model or inference profile arn to use (https://docs.aws.amazon.com/bedrock/latest/userguide/inference-profiles-use.html). Defaults to 'anthropic.claude-3-5-sonnet-20240620-v1:0'.
- api_key(str, optional): AWS access key id.
- api_secret(str, optional): AWS secret access key
region
:str
, optional- The region to use for AWS API requests. Defaults value is "us-east-1".
temperature
:float
, optional- Sampling temperature for response generation. Defaults to 0.8.
max_output_tokens
:int
, optional- Maximum number of tokens to generate in the output. Defaults to None.
top_p
:float
, optional- The nucleus sampling probability for response generation. Defaults to None.
tool_choice
:ToolChoice
, optional- Specifies whether to use tools during response generation. Defaults to "auto".
additional_request_fields
:dict[str, Any]
, optional- Additional request fields to send to the AWS Bedrock Converse API. Defaults to None.
session
:aioboto3.Session
, optional- Optional aioboto3 session to use.
Ancestors
- livekit.agents.llm.llm.LLM
- abc.ABC
- EventEmitter
- typing.Generic
Methods
def chat(self,
*,
chat_ctx: ChatContext,
tools: list[FunctionTool] | None = None,
conn_options: APIConnectOptions = APIConnectOptions(max_retry=3, retry_interval=2.0, timeout=10.0),
temperature: NotGivenOr[float] = NOT_GIVEN,
tool_choice: NotGivenOr[ToolChoice] = NOT_GIVEN) ‑> livekit.plugins.aws.llm.LLMStream
Expand source code
def chat(
    self,
    *,
    chat_ctx: ChatContext,
    tools: list[FunctionTool] | None = None,
    conn_options: APIConnectOptions = DEFAULT_API_CONNECT_OPTIONS,
    temperature: NotGivenOr[float] = NOT_GIVEN,
    tool_choice: NotGivenOr[ToolChoice] = NOT_GIVEN,
) -> LLMStream:
    """Assemble the Bedrock Converse request and return an LLMStream for it."""
    request: dict[str, Any] = {}
    if is_given(self._opts.model):
        request["modelId"] = self._opts.model

    def _resolve_tool_config() -> dict[str, Any] | None:
        # No tools means no toolConfig at all.
        if not tools:
            return None
        config: dict[str, Any] = {"tools": to_fnc_ctx(tools)}
        choice = tool_choice if is_given(tool_choice) else self._opts.tool_choice
        if not is_given(choice):
            return config
        if isinstance(choice, dict) and choice.get("type") == "function":
            config["toolChoice"] = {"tool": {"name": choice["function"]["name"]}}
        elif choice == "required":
            config["toolChoice"] = {"any": {}}
        elif choice == "auto":
            config["toolChoice"] = {"auto": {}}
        else:
            # Unrecognized tool_choice: omit toolConfig entirely.
            return None
        return config

    resolved_tool_config = _resolve_tool_config()
    if resolved_tool_config:
        request["toolConfig"] = resolved_tool_config

    messages, system_message = to_chat_ctx(chat_ctx, id(self))
    request["messages"] = messages
    if system_message:
        request["system"] = [system_message]

    inference: dict[str, Any] = {}
    if is_given(self._opts.max_output_tokens):
        inference["maxTokens"] = self._opts.max_output_tokens
    # Per-call temperature takes precedence over the instance default.
    effective_temperature = temperature if is_given(temperature) else self._opts.temperature
    if is_given(effective_temperature):
        inference["temperature"] = effective_temperature
    if is_given(self._opts.top_p):
        inference["topP"] = self._opts.top_p
    request["inferenceConfig"] = inference

    if is_given(self._opts.additional_request_fields):
        request["additionalModelRequestFields"] = self._opts.additional_request_fields

    return LLMStream(
        self,
        chat_ctx=chat_ctx,
        tools=tools,
        session=self._session,
        conn_options=conn_options,
        extra_kwargs=request,
    )
Inherited members
class STT (*,
region: NotGivenOr[str] = NOT_GIVEN,
api_key: NotGivenOr[str] = NOT_GIVEN,
api_secret: NotGivenOr[str] = NOT_GIVEN,
sample_rate: int = 48000,
language: str = 'en-US',
encoding: str = 'pcm',
vocabulary_name: NotGivenOr[str] = NOT_GIVEN,
session_id: NotGivenOr[str] = NOT_GIVEN,
vocab_filter_method: NotGivenOr[str] = NOT_GIVEN,
vocab_filter_name: NotGivenOr[str] = NOT_GIVEN,
show_speaker_label: NotGivenOr[bool] = NOT_GIVEN,
enable_channel_identification: NotGivenOr[bool] = NOT_GIVEN,
number_of_channels: NotGivenOr[int] = NOT_GIVEN,
enable_partial_results_stabilization: NotGivenOr[bool] = NOT_GIVEN,
partial_results_stability: NotGivenOr[str] = NOT_GIVEN,
language_model_name: NotGivenOr[str] = NOT_GIVEN,
session: aioboto3.Session | None = None,
refresh_interval: NotGivenOr[int] = NOT_GIVEN)
Expand source code
class STT(stt.STT):
    """Speech-to-text backed by Amazon Transcribe streaming.

    Clients are kept in a connection pool and rebuilt periodically so that the
    resolved AWS credentials never go stale.
    """

    def __init__(
        self,
        *,
        region: NotGivenOr[str] = NOT_GIVEN,
        api_key: NotGivenOr[str] = NOT_GIVEN,
        api_secret: NotGivenOr[str] = NOT_GIVEN,
        sample_rate: int = 48000,
        language: str = "en-US",
        encoding: str = "pcm",
        vocabulary_name: NotGivenOr[str] = NOT_GIVEN,
        session_id: NotGivenOr[str] = NOT_GIVEN,
        vocab_filter_method: NotGivenOr[str] = NOT_GIVEN,
        vocab_filter_name: NotGivenOr[str] = NOT_GIVEN,
        show_speaker_label: NotGivenOr[bool] = NOT_GIVEN,
        enable_channel_identification: NotGivenOr[bool] = NOT_GIVEN,
        number_of_channels: NotGivenOr[int] = NOT_GIVEN,
        enable_partial_results_stabilization: NotGivenOr[bool] = NOT_GIVEN,
        partial_results_stability: NotGivenOr[str] = NOT_GIVEN,
        language_model_name: NotGivenOr[str] = NOT_GIVEN,
        session: aioboto3.Session | None = None,
        refresh_interval: NotGivenOr[int] = NOT_GIVEN,
    ):
        super().__init__(capabilities=stt.STTCapabilities(streaming=True, interim_results=True))
        self._region = region if is_given(region) else DEFAULT_REGION
        self._session = session or get_aws_async_session(
            api_key=api_key if is_given(api_key) else None,
            api_secret=api_secret if is_given(api_secret) else None,
            region=self._region,
        )
        self._config = STTOptions(
            language=language,
            sample_rate=sample_rate,
            encoding=encoding,
            vocabulary_name=vocabulary_name,
            session_id=session_id,
            vocab_filter_method=vocab_filter_method,
            vocab_filter_name=vocab_filter_name,
            show_speaker_label=show_speaker_label,
            enable_channel_identification=enable_channel_identification,
            number_of_channels=number_of_channels,
            enable_partial_results_stabilization=enable_partial_results_stabilization,
            partial_results_stability=partial_results_stability,
            language_model_name=language_model_name,
        )
        # Sessions are recycled on an interval to refresh resolved credentials.
        self._pool = utils.ConnectionPool[TranscribeStreamingClient](
            connect_cb=self._create_client,
            max_session_duration=(
                refresh_interval if is_given(refresh_interval) else REFRESH_INTERVAL
            ),
        )

    async def _create_client(self) -> TranscribeStreamingClient:
        # Resolve (possibly temporary) credentials and pin them into a static
        # resolver for the lifetime of this pooled client.
        creds = await self._session.get_credentials()
        frozen = await creds.get_frozen_credentials()
        return TranscribeStreamingClient(
            region=self._region,
            credential_resolver=StaticCredentialResolver(
                access_key_id=frozen.access_key,
                secret_access_key=frozen.secret_key,
                session_token=frozen.token,
            ),
        )

    async def aclose(self) -> None:
        """Close the STT and every stream/request associated with it."""
        await self._pool.aclose()
        await super().aclose()

    async def _recognize_impl(
        self,
        buffer: utils.AudioBuffer,
        *,
        language: NotGivenOr[str] = NOT_GIVEN,
        conn_options: APIConnectOptions,
    ) -> stt.SpeechEvent:
        raise NotImplementedError("Amazon Transcribe does not support single frame recognition")

    def stream(
        self,
        *,
        language: NotGivenOr[str] = NOT_GIVEN,
        conn_options: APIConnectOptions = DEFAULT_API_CONNECT_OPTIONS,
    ) -> SpeechStream:
        """Open a streaming transcription session using the pooled clients."""
        return SpeechStream(
            stt=self,
            opts=self._config,
            pool=self._pool,
            conn_options=conn_options,
        )
Helper class that provides a standard way to create an ABC using inheritance.
Ancestors
- livekit.agents.stt.stt.STT
- abc.ABC
- EventEmitter
- typing.Generic
Methods
async def aclose(self) ‑> None
-
Expand source code
async def aclose(self) -> None:
    """Close the STT and every stream/request associated with it.

    The connection pool is drained first so pooled Transcribe clients are
    released before the base class tears down its own state.
    """
    await self._pool.aclose()
    await super().aclose()
Close the STT, and every stream/requests associated with it
def stream(self,
*,
language: NotGivenOr[str] = NOT_GIVEN,
conn_options: APIConnectOptions = APIConnectOptions(max_retry=3, retry_interval=2.0, timeout=10.0)) ‑> livekit.plugins.aws.stt.SpeechStream
Expand source code
def stream(
    self,
    *,
    language: NotGivenOr[str] = NOT_GIVEN,
    conn_options: APIConnectOptions = DEFAULT_API_CONNECT_OPTIONS,
) -> SpeechStream:
    """Open a streaming transcription session against Amazon Transcribe.

    The stream reuses this STT's configured options and pooled clients.
    """
    return SpeechStream(
        stt=self,
        opts=self._config,
        pool=self._pool,
        conn_options=conn_options,
    )
Inherited members
class SpeechStream (stt: STT,
opts: STTOptions,
pool: utils.ConnectionPool[TranscribeStreamingClient],
conn_options: APIConnectOptions = APIConnectOptions(max_retry=3, retry_interval=2.0, timeout=10.0))
Expand source code
class SpeechStream(stt.SpeechStream):
    """Bidirectional streaming transcription session against Amazon Transcribe.

    Audio frames from the input channel are forwarded to the Transcribe stream
    while transcript events are converted into SpeechEvents on the event channel.
    """

    def __init__(
        self,
        stt: STT,
        opts: STTOptions,
        pool: utils.ConnectionPool[TranscribeStreamingClient],
        conn_options: APIConnectOptions = DEFAULT_API_CONNECT_OPTIONS,
    ) -> None:
        super().__init__(stt=stt, conn_options=conn_options, sample_rate=opts.sample_rate)
        self._opts = opts
        self._pool = pool

    async def _run(self) -> None:
        async with self._pool.connection() as client:
            live_config = {
                "language_code": self._opts.language,
                "media_sample_rate_hz": self._opts.sample_rate,
                "media_encoding": self._opts.encoding,
                "vocabulary_name": self._opts.vocabulary_name,
                "session_id": self._opts.session_id,
                "vocab_filter_method": self._opts.vocab_filter_method,
                "vocab_filter_name": self._opts.vocab_filter_name,
                "show_speaker_label": self._opts.show_speaker_label,
                "enable_channel_identification": self._opts.enable_channel_identification,
                "number_of_channels": self._opts.number_of_channels,
                "enable_partial_results_stabilization": self._opts.enable_partial_results_stabilization,  # noqa: E501
                "partial_results_stability": self._opts.partial_results_stability,
                "language_model_name": self._opts.language_model_name,
            }
            # FIX: filter on is_given()/None only. The previous `if v and is_given(v)`
            # also dropped explicitly-set falsy values (e.g. show_speaker_label=False),
            # making it impossible to disable a boolean option explicitly.
            filtered_config = {
                k: v for k, v in live_config.items() if is_given(v) and v is not None
            }
            stream = await client.start_stream_transcription(**filtered_config)

            @utils.log_exceptions(logger=logger)
            async def input_generator():
                # Forward raw PCM frames, then signal end-of-stream so Transcribe
                # flushes its final results.
                async for frame in self._input_ch:
                    if isinstance(frame, rtc.AudioFrame):
                        await stream.input_stream.send_audio_event(
                            audio_chunk=frame.data.tobytes()
                        )
                await stream.input_stream.end_stream()

            @utils.log_exceptions(logger=logger)
            async def handle_transcript_events():
                async for event in stream.output_stream:
                    if isinstance(event, TranscriptEvent):
                        self._process_transcript_event(event)

            tasks = [
                asyncio.create_task(input_generator()),
                asyncio.create_task(handle_transcript_events()),
            ]
            try:
                await asyncio.gather(*tasks)
            finally:
                await utils.aio.gracefully_cancel(*tasks)

    def _process_transcript_event(self, transcript_event: TranscriptEvent):
        """Translate a Transcribe TranscriptEvent into SpeechEvents."""
        for resp in transcript_event.transcript.results:
            # FIX: a result starting at t=0.0 marks the start of a speech segment.
            # The previous guard `resp.start_time and resp.start_time == 0.0` could
            # never be true (0.0 is falsy), so START_OF_SPEECH was never emitted.
            if resp.start_time is not None and resp.start_time == 0.0:
                self._event_ch.send_nowait(
                    stt.SpeechEvent(type=stt.SpeechEventType.START_OF_SPEECH)
                )

            if resp.end_time and resp.end_time > 0.0:
                if resp.is_partial:
                    self._event_ch.send_nowait(
                        stt.SpeechEvent(
                            type=stt.SpeechEventType.INTERIM_TRANSCRIPT,
                            alternatives=[_streaming_recognize_response_to_speech_data(resp)],
                        )
                    )
                else:
                    self._event_ch.send_nowait(
                        stt.SpeechEvent(
                            type=stt.SpeechEventType.FINAL_TRANSCRIPT,
                            alternatives=[_streaming_recognize_response_to_speech_data(resp)],
                        )
                    )

            if not resp.is_partial:
                self._event_ch.send_nowait(
                    stt.SpeechEvent(type=stt.SpeechEventType.END_OF_SPEECH)
                )
Helper class that provides a standard way to create an ABC using inheritance.
Args: sample_rate : int or None, optional The desired sample rate for the audio input. If specified, the audio input will be automatically resampled to match the given sample rate before being processed for Speech-to-Text. If not provided (None), the input will retain its original sample rate.
Ancestors
- livekit.agents.stt.stt.RecognizeStream
- abc.ABC
class TTS (*,
voice: NotGivenOr[str] = NOT_GIVEN,
language: NotGivenOr[TTS_LANGUAGE | str] = NOT_GIVEN,
speech_engine: NotGivenOr[TTS_SPEECH_ENGINE] = NOT_GIVEN,
sample_rate: int = 16000,
region: NotGivenOr[str] = NOT_GIVEN,
api_key: NotGivenOr[str] = NOT_GIVEN,
api_secret: NotGivenOr[str] = NOT_GIVEN,
session: aioboto3.Session | None = None)
Expand source code
class TTS(tts.TTS):
    def __init__(
        self,
        *,
        voice: NotGivenOr[str] = NOT_GIVEN,
        language: NotGivenOr[TTS_LANGUAGE | str] = NOT_GIVEN,
        speech_engine: NotGivenOr[TTS_SPEECH_ENGINE] = NOT_GIVEN,
        sample_rate: int = DEFAULT_SAMPLE_RATE,
        region: NotGivenOr[str] = NOT_GIVEN,
        api_key: NotGivenOr[str] = NOT_GIVEN,
        api_secret: NotGivenOr[str] = NOT_GIVEN,
        session: aioboto3.Session | None = None,
    ) -> None:
        """Create a new instance of AWS Polly TTS.

        ``api_key`` and ``api_secret`` must be set to your AWS access key id and
        secret access key, either via the arguments or the ``AWS_ACCESS_KEY_ID``
        and ``AWS_SECRET_ACCESS_KEY`` environment variables.

        See https://docs.aws.amazon.com/polly/latest/dg/API_SynthesizeSpeech.html
        for more details on AWS Polly TTS.

        Args:
            voice (str, optional): voice ID to use for the synthesis. Defaults to "Ruth".
            language (TTS_LANGUAGE, optional): language code for the Synthesize Speech
                request. Only necessary if using a bilingual voice, such as Aditi, which
                can be used for either Indian English (en-IN) or Hindi (hi-IN).
            speech_engine (TTS_SPEECH_ENGINE, optional): engine to use for the synthesis.
                Defaults to "generative".
            sample_rate (int, optional): audio frequency specified in Hz. Defaults to 16000.
            region (str, optional): region to use for the synthesis. Defaults to "us-east-1".
            api_key (str, optional): AWS access key id.
            api_secret (str, optional): AWS secret access key.
            session (aioboto3.Session, optional): optional aioboto3 session to use.
        """
        super().__init__(
            capabilities=tts.TTSCapabilities(
                streaming=False,
            ),
            sample_rate=sample_rate,
            num_channels=TTS_NUM_CHANNELS,
        )
        self._session = session or get_aws_async_session(
            api_key=api_key if is_given(api_key) else None,
            api_secret=api_secret if is_given(api_secret) else None,
            region=region if is_given(region) else None,
        )
        self._opts = _TTSOptions(
            voice=voice,
            language=language,
            speech_engine=speech_engine,
            sample_rate=sample_rate,
            region=region,
        )

    def synthesize(
        self,
        text: str,
        *,
        conn_options: APIConnectOptions = DEFAULT_API_CONNECT_OPTIONS,
    ) -> ChunkedStream:
        """Synthesize *text* through Polly, returning a chunked audio stream."""
        return ChunkedStream(
            tts=self,
            text=text,
            session=self._session,
            opts=self._opts,
            conn_options=conn_options,
        )
Helper class that provides a standard way to create an ABC using inheritance.
Create a new instance of AWS Polly TTS.
api_key and api_secret must be set to your AWS access key id and secret access key, either using the arguments or by setting the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables.
See https://docs.aws.amazon.com/polly/latest/dg/API_SynthesizeSpeech.html for more details on the AWS Polly TTS.
Args
voice
:TTSModels
, optional- Voice ID to use for the synthesis. Defaults to "Ruth".
language
:TTS_LANGUAGE
, optional- language code for the Synthesize Speech request. This is only necessary if using a bilingual voice, such as Aditi, which can be used for either Indian English (en-IN) or Hindi (hi-IN).
sample_rate : int, optional — The audio frequency specified in Hz. Defaults to 16000.
speech_engine : TTS_SPEECH_ENGINE, optional — The engine to use for the synthesis. Defaults to "generative".
region : str, optional — The region to use for the synthesis. Defaults to "us-east-1".
api_key : str, optional — AWS access key id.
api_secret : str, optional — AWS secret access key.
session : aioboto3.Session, optional — Optional aioboto3 session to use.
Ancestors
- livekit.agents.tts.tts.TTS
- abc.ABC
- EventEmitter
- typing.Generic
Methods
def synthesize(self,
text: str,
*,
conn_options: APIConnectOptions = APIConnectOptions(max_retry=3, retry_interval=2.0, timeout=10.0)) ‑> livekit.plugins.aws.tts.ChunkedStream
Expand source code
def synthesize(
    self,
    text: str,
    *,
    conn_options: APIConnectOptions = DEFAULT_API_CONNECT_OPTIONS,
) -> ChunkedStream:
    """Synthesize *text* with AWS Polly and return a chunked audio stream."""
    return ChunkedStream(
        tts=self,
        text=text,
        session=self._session,
        opts=self._opts,
        conn_options=conn_options,
    )
Inherited members