Module livekit.agents.voice.report
Classes
class SessionReport (enable_recording: bool,
job_id: str,
room_id: str,
room: str,
options: AgentSessionOptions,
events: list[AgentEvent],
chat_history: ChatContext,
audio_recording_path: Path | None = None,
audio_recording_started_at: float | None = None,
duration: float | None = None,
started_at: float | None = None,
timestamp: float = <factory>)
Expand source code
@dataclass
class SessionReport:
    """Summary of a single agent session, gathered for reporting.

    Bundles the session identifiers, the effective ``AgentSessionOptions``,
    the emitted agent events and the chat history, plus optional metadata
    about an audio recording of the session.
    """

    enable_recording: bool
    job_id: str
    room_id: str
    room: str
    options: AgentSessionOptions
    events: list[AgentEvent]
    chat_history: ChatContext
    audio_recording_path: Path | None = None
    audio_recording_started_at: float | None = None
    """Timestamp when the audio recording started"""
    duration: float | None = None
    started_at: float | None = None
    """Timestamp when the session started"""
    timestamp: float = field(default_factory=time.time)
    """Timestamp when the session report was created, typically at the end of the session"""

    def to_dict(self) -> dict:
        """Serialize this report into a JSON-friendly dictionary.

        ``metrics_collected`` events are excluded from the payload; the
        chat history carries the authoritative data downstream.
        """
        # metrics are too noisy — Cloud uses the chat_history as the source of truth
        serialized_events = [
            ev.model_dump() for ev in self.events if ev.type != "metrics_collected"
        ]

        recording_path: str | None = None
        if self.audio_recording_path is not None:
            recording_path = str(self.audio_recording_path.absolute())

        opts = self.options
        return {
            "job_id": self.job_id,
            "room_id": self.room_id,
            "room": self.room,
            "events": serialized_events,
            "audio_recording_path": recording_path,
            "audio_recording_started_at": self.audio_recording_started_at,
            "options": {
                "allow_interruptions": opts.allow_interruptions,
                "discard_audio_if_uninterruptible": opts.discard_audio_if_uninterruptible,
                "min_interruption_duration": opts.min_interruption_duration,
                "min_interruption_words": opts.min_interruption_words,
                "min_endpointing_delay": opts.min_endpointing_delay,
                "max_endpointing_delay": opts.max_endpointing_delay,
                "max_tool_steps": opts.max_tool_steps,
                "user_away_timeout": opts.user_away_timeout,
                "min_consecutive_speech_delay": opts.min_consecutive_speech_delay,
                "preemptive_generation": opts.preemptive_generation,
            },
            "chat_history": self.chat_history.to_dict(exclude_timestamp=False),
            "timestamp": self.timestamp,
        }
) Instance variables
var audio_recording_path : pathlib.Path | Nonevar audio_recording_started_at : float | None-
Timestamp when the audio recording started
var chat_history : livekit.agents.llm.chat_context.ChatContextvar duration : float | Nonevar enable_recording : boolvar events : list[livekit.agents.voice.events.UserInputTranscribedEvent | livekit.agents.voice.events.UserStateChangedEvent | livekit.agents.voice.events.AgentStateChangedEvent | livekit.agents.voice.events.AgentFalseInterruptionEvent | livekit.agents.voice.events.MetricsCollectedEvent | livekit.agents.voice.events.ConversationItemAddedEvent | livekit.agents.voice.events.FunctionToolsExecutedEvent | livekit.agents.voice.events.SpeechCreatedEvent | livekit.agents.voice.events.ErrorEvent | livekit.agents.voice.events.CloseEvent]var job_id : strvar options : livekit.agents.voice.agent_session.AgentSessionOptionsvar room : strvar room_id : strvar started_at : float | None-
Timestamp when the session started
var timestamp : float-
Timestamp when the session report was created, typically at the end of the session
Methods
def to_dict(self) ‑> dict
Expand source code
def to_dict(self) -> dict:
    """Serialize the session report into a JSON-friendly dictionary.

    Drops ``metrics_collected`` events from the output; the chat history
    is the authoritative record for that data downstream.
    """
    # metrics are too noisy — Cloud uses the chat_history as the source of truth
    events_payload = [
        event.model_dump()
        for event in self.events
        if event.type != "metrics_collected"
    ]

    # Options are copied attribute-by-attribute under the same key names.
    option_keys = (
        "allow_interruptions",
        "discard_audio_if_uninterruptible",
        "min_interruption_duration",
        "min_interruption_words",
        "min_endpointing_delay",
        "max_endpointing_delay",
        "max_tool_steps",
        "user_away_timeout",
        "min_consecutive_speech_delay",
        "preemptive_generation",
    )

    path = self.audio_recording_path
    return {
        "job_id": self.job_id,
        "room_id": self.room_id,
        "room": self.room,
        "events": events_payload,
        "audio_recording_path": str(path.absolute()) if path else None,
        "audio_recording_started_at": self.audio_recording_started_at,
        "options": {key: getattr(self.options, key) for key in option_keys},
        "chat_history": self.chat_history.to_dict(exclude_timestamp=False),
        "timestamp": self.timestamp,
    }