def _build_gemini_ctx(
    chat_ctx: llm.ChatContext, cache_key: Any
) -> tuple[list[types.Content], Optional[types.Content]]:
    """Convert a ChatContext into Gemini Content turns plus an optional system instruction."""
    turns: list[types.Content] = []
    system_instruction: Optional[types.Content] = None
    current_role: Optional[str] = None
    parts: list[types.Part] = []

    for msg in chat_ctx.messages:
        # System messages become the separate system_instruction, not a turn
        if msg.role == "system":
            if isinstance(msg.content, str):
                system_instruction = types.Content(parts=[types.Part(text=msg.content)])
            continue

        # Map ChatContext roles onto Gemini's two-role scheme ("user" / "model")
        if msg.role == "assistant":
            role = "model"
        elif msg.role == "tool":
            role = "user"
        else:
            role = "user"

        # If role changed, finalize previous parts into a turn
        if role != current_role:
            if current_role is not None and parts:
                turns.append(types.Content(role=current_role, parts=parts))
            current_role = role
            parts = []

        # Assistant-initiated tool calls are emitted as function_call parts
        if msg.tool_calls:
            for fnc in msg.tool_calls:
                parts.append(
                    types.Part(
                        function_call=types.FunctionCall(
                            name=fnc.function_info.name,
                            args=fnc.arguments,
                        )
                    )
                )

        if msg.role == "tool":
            # Tool results are sent back as function_response parts
            if msg.content:
                if isinstance(msg.content, dict):
                    parts.append(
                        types.Part(
                            function_response=types.FunctionResponse(
                                name=msg.name,
                                response=msg.content,
                            )
                        )
                    )
                elif isinstance(msg.content, str):
                    parts.append(
                        types.Part(
                            function_response=types.FunctionResponse(
                                name=msg.name,
                                response={"result": msg.content},
                            )
                        )
                    )
        else:
            # Regular user/assistant content: text, dicts (serialized as JSON),
            # or a list mixing text and images
            if msg.content:
                if isinstance(msg.content, str):
                    parts.append(types.Part(text=msg.content))
                elif isinstance(msg.content, dict):
                    parts.append(types.Part(text=json.dumps(msg.content)))
                elif isinstance(msg.content, list):
                    for item in msg.content:
                        if isinstance(item, str):
                            parts.append(types.Part(text=item))
                        elif isinstance(item, llm.ChatImage):
                            parts.append(_build_gemini_image_part(item, cache_key))

    # Finalize last role's parts if any remain
    if current_role is not None and parts:
        turns.append(types.Content(role=current_role, parts=parts))

    return turns, system_instruction
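
This excerpt relies on module-level imports that are not shown here (json, typing's Any/Optional, the Gemini types module, and livekit.agents.llm) as well as the private _build_gemini_image_part helper. The sketch below is a hypothetical usage example rather than part of the plugin: it assumes the livekit-agents 0.x ChatContext.append(role=..., text=...) helper and only illustrates how the returned turns and system instruction would typically be consumed; adjust names to your installed versions.

from livekit.agents import llm

chat_ctx = llm.ChatContext()
chat_ctx.append(role="system", text="You are a concise voice assistant.")
chat_ctx.append(role="user", text="What's the weather like in Tokyo?")

turns, system_instruction = _build_gemini_ctx(chat_ctx, cache_key="demo")

# `turns` is a list of Content objects alternating between "user" and "model" roles;
# `system_instruction` is a separate Content (or None) that is normally supplied to the
# model configuration (e.g. a system_instruction field) rather than appended to the turns.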