/**
 * Configuration options for an LLM client backed by the OpenAI API.
 *
 * NOTE(review): per-property semantics below that are not stated in the
 * accompanying reference text are inferred from names — confirm against
 * the implementation before relying on them.
 */
interface LLMOptions {
    // API key for authentication — NOTE(review): fallback behavior when
    // omitted (e.g. env var) is not shown here; confirm.
    apiKey?: string;
    // Base URL of the API endpoint — presumably for proxies or alternative
    // deployments; verify against caller.
    baseURL?: string;
    // Pre-constructed OpenAI client to reuse instead of creating a new
    // one — assumed from the type; confirm.
    client?: OpenAI;
    // Upper bound for the number of tokens that can be generated for a
    // response.
    maxOutputTokens?: number;
    // Arbitrary string key/value metadata — NOTE(review): how/where it is
    // attached to requests is not visible here.
    metadata?: Record<string, string>;
    // Model identifier. The only required option.
    model: string;
    // Whether the model may invoke multiple tools in parallel — assumed
    // from the name; confirm.
    parallelToolCalls?: boolean;
    // Specifies the processing tier (e.g. 'auto', 'default', 'priority',
    // 'flex').
    serviceTier?: string;
    // Whether to store the completion — assumed from the name; confirm.
    store?: boolean;
    // Whether tool schemas are validated strictly — assumed from the
    // name; confirm.
    strictToolSchema?: boolean;
    // Sampling temperature — presumably forwarded to the model; range not
    // documented here.
    temperature?: number;
    // Tool-choice strategy forwarded to the model; see llm.ToolChoice.
    toolChoice?: llm.ToolChoice;
    // Whether to use the WebSocket API. Defaults to true (per the
    // reference text accompanying this declaration).
    useWebSocket?: boolean;
}

Properties

apiKey?: string
baseURL?: string
client?: OpenAI
maxOutputTokens?: number

Upper bound on the number of tokens that can be generated in a response.

metadata?: Record<string, string>
model: string
parallelToolCalls?: boolean
serviceTier?: string

Specifies the processing tier (e.g. 'auto', 'default', 'priority', 'flex').

store?: boolean
strictToolSchema?: boolean
temperature?: number
toolChoice?: llm.ToolChoice
useWebSocket?: boolean

Whether to use the WebSocket API.

Default: `true`