This example shows how to expose a function tool that plays a local WAV file into the call. The agent reads the file, wraps it in an audio frame, and streams it via `session.say`.
Prerequisites
- Add a `.env` in this directory with your LiveKit credentials:

  ```
  LIVEKIT_URL=your_livekit_url
  LIVEKIT_API_KEY=your_api_key
  LIVEKIT_API_SECRET=your_api_secret
  ```

- Install dependencies:

  ```bash
  pip install "livekit-agents[silero]" python-dotenv
  ```

- Place an `audio.wav` file in the same directory as the script (or generate a test file with the sketch below)
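If you don't have a WAV file handy, one option is to generate a short test tone with Python's standard library. This helper script is not part of the example; it's a minimal sketch that writes one second of 16-bit mono PCM:

```python
# make_test_wav.py - hypothetical helper: generate a 1 s, 440 Hz test tone.
import math
import struct
import wave

SAMPLE_RATE = 16000

with wave.open("audio.wav", "wb") as wav_file:
    wav_file.setnchannels(1)           # mono
    wav_file.setsampwidth(2)           # 16-bit samples
    wav_file.setframerate(SAMPLE_RATE)
    samples = (
        int(12000 * math.sin(2 * math.pi * 440 * n / SAMPLE_RATE))
        for n in range(SAMPLE_RATE)    # one second of audio
    )
    wav_file.writeframes(b"".join(struct.pack("<h", s) for s in samples))
```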
Load the environment, set up logging, and define an AgentServer
Load environment variables, configure logging, and initialize the AgentServer.
```python
import logging
import wave
from pathlib import Path

from dotenv import load_dotenv
from livekit import rtc
from livekit.agents import (
    Agent,
    AgentServer,
    AgentSession,
    JobContext,
    JobProcess,
    RunContext,
    cli,
    function_tool,
    inference,
)
from livekit.plugins import silero

load_dotenv()

logger = logging.getLogger("playing-audio")
logger.setLevel(logging.INFO)

server = AgentServer()
```
Define the agent with audio playback tool
Create a lightweight agent with instructions and a function tool that reads a WAV file, builds an `AudioFrame`, and streams it to the user.
```python
class AudioPlayerAgent(Agent):
    def __init__(self) -> None:
        super().__init__(
            instructions="""You are a helpful assistant communicating through voice.
            Don't use any unpronounceable characters.
            If asked to play audio, use the `play_audio_file` function."""
        )

    @function_tool
    async def play_audio_file(self, context: RunContext):
        """Play a local audio file"""
        audio_path = Path(__file__).parent / "audio.wav"

        with wave.open(str(audio_path), "rb") as wav_file:
            num_channels = wav_file.getnchannels()
            sample_rate = wav_file.getframerate()
            frames = wav_file.readframes(wav_file.getnframes())

            # Wrap the raw PCM data in a single AudioFrame
            audio_frame = rtc.AudioFrame(
                data=frames,
                sample_rate=sample_rate,
                num_channels=num_channels,
                samples_per_channel=wav_file.getnframes(),
            )

        # session.say accepts an async generator of audio frames
        async def audio_generator():
            yield audio_frame

        await self.session.say("Playing audio file", audio=audio_generator())

        return None, "I've played the audio file for you."

    async def on_enter(self):
        self.session.generate_reply()
```
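For longer files, loading the entire clip into one `AudioFrame` holds it all in memory at once. A possible variation is to yield smaller frames from the generator instead. This is a sketch only, assuming 16-bit PCM input and the same `rtc.AudioFrame` fields used above; `chunked_audio_generator` is a hypothetical helper, not part of the example:

```python
# Sketch: stream the WAV in ~100 ms chunks instead of one large frame.
async def chunked_audio_generator(path: Path, chunk_ms: int = 100):
    with wave.open(str(path), "rb") as wav_file:
        sample_rate = wav_file.getframerate()
        num_channels = wav_file.getnchannels()
        frames_per_chunk = sample_rate * chunk_ms // 1000
        while True:
            data = wav_file.readframes(frames_per_chunk)
            if not data:
                break
            # One wave "frame" is one sample per channel, so for 16-bit PCM
            # the sample count is bytes / (2 bytes * channels).
            yield rtc.AudioFrame(
                data=data,
                sample_rate=sample_rate,
                num_channels=num_channels,
                samples_per_channel=len(data) // (2 * num_channels),
            )
```

Its output could replace `audio_generator()` in the `session.say` call above.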
Prewarm VAD for faster connections
Preload the VAD model once per process to reduce connection latency.
```python
def prewarm(proc: JobProcess):
    proc.userdata["vad"] = silero.VAD.load()


server.setup_fnc = prewarm
```
Define the rtc session entrypoint
Create the session with STT/LLM/TTS configuration and start the audio player agent.
```python
@server.rtc_session()
async def entrypoint(ctx: JobContext):
    ctx.log_context_fields = {"room": ctx.room.name}

    session = AgentSession(
        stt=inference.STT(model="deepgram/nova-3-general"),
        llm=inference.LLM(model="openai/gpt-5-mini"),
        tts=inference.TTS(
            model="cartesia/sonic-3",
            voice="9626c31c-bec5-4cca-baa8-f8ba9e84c8bc",
        ),
        vad=ctx.proc.userdata["vad"],
        preemptive_generation=True,
    )

    await session.start(agent=AudioPlayerAgent(), room=ctx.room)
    await ctx.connect()
```
Run the server
Start the agent server with the CLI runner.
```python
if __name__ == "__main__":
    cli.run_app(server)
```
Run it
```bash
python playing_audio.py console
```
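The `console` subcommand runs the agent in a local terminal session. Assuming your `.env` points at a LiveKit deployment, the agents CLI's `dev` mode connects the worker there instead:

```bash
# Connect to the LiveKit server configured in .env rather than the local console
python playing_audio.py dev
```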
How it works
- The agent greets the user on entry.
- The LLM can invoke `play_audio_file` when asked to play audio.
- The tool reads a local WAV file, wraps it in an `AudioFrame`, and streams it via `session.say`.
- A short spoken preamble ("Playing audio file") plays before the audio clip.
- The rest of the media pipeline continues unchanged.
Full example
```python
import logging
import wave
from pathlib import Path

from dotenv import load_dotenv
from livekit import rtc
from livekit.agents import (
    Agent,
    AgentServer,
    AgentSession,
    JobContext,
    JobProcess,
    RunContext,
    cli,
    function_tool,
    inference,
)
from livekit.plugins import silero

load_dotenv()

logger = logging.getLogger("playing-audio")
logger.setLevel(logging.INFO)


class AudioPlayerAgent(Agent):
    def __init__(self) -> None:
        super().__init__(
            instructions="""You are a helpful assistant communicating through voice.
            Don't use any unpronounceable characters.
            If asked to play audio, use the `play_audio_file` function."""
        )

    @function_tool
    async def play_audio_file(self, context: RunContext):
        """Play a local audio file"""
        audio_path = Path(__file__).parent / "audio.wav"

        with wave.open(str(audio_path), "rb") as wav_file:
            num_channels = wav_file.getnchannels()
            sample_rate = wav_file.getframerate()
            frames = wav_file.readframes(wav_file.getnframes())

            audio_frame = rtc.AudioFrame(
                data=frames,
                sample_rate=sample_rate,
                num_channels=num_channels,
                samples_per_channel=wav_file.getnframes(),
            )

        async def audio_generator():
            yield audio_frame

        await self.session.say("Playing audio file", audio=audio_generator())

        return None, "I've played the audio file for you."

    async def on_enter(self):
        self.session.generate_reply()


server = AgentServer()


def prewarm(proc: JobProcess):
    proc.userdata["vad"] = silero.VAD.load()


server.setup_fnc = prewarm


@server.rtc_session()
async def entrypoint(ctx: JobContext):
    ctx.log_context_fields = {"room": ctx.room.name}

    session = AgentSession(
        stt=inference.STT(model="deepgram/nova-3-general"),
        llm=inference.LLM(model="openai/gpt-5-mini"),
        tts=inference.TTS(
            model="cartesia/sonic-3",
            voice="9626c31c-bec5-4cca-baa8-f8ba9e84c8bc",
        ),
        vad=ctx.proc.userdata["vad"],
        preemptive_generation=True,
    )

    await session.start(agent=AudioPlayerAgent(), room=ctx.room)
    await ctx.connect()


if __name__ == "__main__":
    cli.run_app(server)
```