Mirror of https://github.com/ollama/ollama-python.git
Synced 2026-05-01 11:48:17 +08:00
fix: handle multiple tool calls in tools examples
The tools.py and async-tools.py examples only sent the last tool call result back to the model, discarding the results of earlier tool calls. This fix appends each tool result as a separate message inside the loop, matching the pattern already used in multi-tool.py and gpt-oss-tools.py.

Fixes #476
parent dbccf192ac
commit 1d29f469ce
examples/async-tools.py

@@ -62,6 +62,9 @@ async def main():
 
   if response.message.tool_calls:
     # There may be multiple tool calls in the response
+    # Add the assistant message with tool calls to the conversation
+    messages.append(response.message)
+
     for tool in response.message.tool_calls:
       # Ensure the function is available, and then call it
       if function_to_call := available_functions.get(tool.function.name):
@@ -71,14 +74,12 @@ async def main():
         print('Function output:', output)
       else:
         print('Function', tool.function.name, 'not found')
         output = 'Function not found'
 
-  # Only needed to chat with the model using the tool call results
-  if response.message.tool_calls:
-    # Add the function response to messages for the model to use
-    messages.append(response.message)
-    messages.append({'role': 'tool', 'content': str(output), 'tool_name': tool.function.name})
+      # Add each tool result as a separate message
+      messages.append({'role': 'tool', 'content': str(output), 'tool_name': tool.function.name})
 
-    # Get final response from model with function outputs
+    # Get final response from model with all tool call results
     final_response = await client.chat('llama3.1', messages=messages)
     print('Final response:', final_response.message.content)
||||
examples/tools.py

@@ -60,6 +60,9 @@ response: ChatResponse = chat(
 
 if response.message.tool_calls:
   # There may be multiple tool calls in the response
+  # Add the assistant message with tool calls to the conversation
+  messages.append(response.message)
+
   for tool in response.message.tool_calls:
     # Ensure the function is available, and then call it
     if function_to_call := available_functions.get(tool.function.name):
@@ -69,14 +72,12 @@ if response.message.tool_calls:
       print('Function output:', output)
     else:
       print('Function', tool.function.name, 'not found')
       output = 'Function not found'
 
-# Only needed to chat with the model using the tool call results
-if response.message.tool_calls:
-  # Add the function response to messages for the model to use
-  messages.append(response.message)
-  messages.append({'role': 'tool', 'content': str(output), 'tool_name': tool.function.name})
+    # Add each tool result as a separate message
+    messages.append({'role': 'tool', 'content': str(output), 'tool_name': tool.function.name})
 
-  # Get final response from model with function outputs
+  # Get final response from model with all tool call results
   final_response = chat('llama3.1', messages=messages)
   print('Final response:', final_response.message.content)
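For reference, a minimal, self-contained sketch of the corrected flow in tools.py. The user prompt and the add_two_numbers tool are illustrative stand-ins, and it assumes a local llama3.1 model with tool-calling support is available; async-tools.py follows the same shape with AsyncClient and await.

# Minimal sketch of the corrected pattern (illustrative names; assumes a
# local 'llama3.1' model that supports tool calling)
from ollama import chat


def add_two_numbers(a: int, b: int) -> int:
  """Add two integers."""
  return a + b


available_functions = {'add_two_numbers': add_two_numbers}
messages = [{'role': 'user', 'content': 'What is 3 + 4? And what is 10 + 20?'}]

response = chat('llama3.1', messages=messages, tools=[add_two_numbers])

if response.message.tool_calls:
  # Keep the assistant message that requested the tool calls
  messages.append(response.message)

  for tool in response.message.tool_calls:
    if function_to_call := available_functions.get(tool.function.name):
      output = function_to_call(**tool.function.arguments)
    else:
      output = 'Function not found'
    # Append every tool result inside the loop, not just the last one
    messages.append({'role': 'tool', 'content': str(output), 'tool_name': tool.function.name})

  final_response = chat('llama3.1', messages=messages)
  print('Final response:', final_response.message.content)

The key difference from the buggy version is that the tool message is appended inside the for loop, so when the model requests two tool calls, both results reach the final chat call.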