From 33488eee066d96fc79881811c88ee548be63cec9 Mon Sep 17 00:00:00 2001
From: Parth Sareen
Date: Wed, 9 Jul 2025 14:23:33 -0700
Subject: [PATCH] types/examples: add tool_name to message and examples (#537)

---
 examples/README.md      |  1 +
 examples/async-tools.py |  2 +-
 examples/multi-tool.py  | 88 +++++++++++++++++++++++++++++++++++++++++
 examples/tools.py       |  2 +-
 ollama/_types.py        |  3 ++
 pyproject.toml          |  1 +
 6 files changed, 95 insertions(+), 2 deletions(-)
 create mode 100644 examples/multi-tool.py

diff --git a/examples/README.md b/examples/README.md
index dbe480b..b14d56f 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -25,6 +25,7 @@ See [ollama/docs/api.md](https://github.com/ollama/ollama/blob/main/docs/api.md)
 
 ### Tools/Function Calling - Call a function with a model
 - [tools.py](tools.py) - Simple example of Tools/Function Calling
 - [async-tools.py](async-tools.py)
+- [multi-tool.py](multi-tool.py) - Using multiple tools, with thinking enabled
 
 ### Multimodal with Images - Chat with a multimodal (image chat) model
diff --git a/examples/async-tools.py b/examples/async-tools.py
index 5578229..16e123d 100644
--- a/examples/async-tools.py
+++ b/examples/async-tools.py
@@ -76,7 +76,7 @@ async def main():
   if response.message.tool_calls:
     # Add the function response to messages for the model to use
     messages.append(response.message)
-    messages.append({'role': 'tool', 'content': str(output), 'name': tool.function.name})
+    messages.append({'role': 'tool', 'content': str(output), 'tool_name': tool.function.name})
 
     # Get final response from model with function outputs
     final_response = await client.chat('llama3.1', messages=messages)
diff --git a/examples/multi-tool.py b/examples/multi-tool.py
new file mode 100644
index 0000000..c2d6257
--- /dev/null
+++ b/examples/multi-tool.py
@@ -0,0 +1,88 @@
+import random
+from typing import Iterator
+
+from ollama import ChatResponse, Client
+
+
+def get_temperature(city: str) -> str:
+  """
+  Get the temperature for a city in Celsius
+
+  Args:
+    city (str): The name of the city
+
+  Returns:
+    str: The current temperature in Celsius
+  """
+  # This is a mock implementation - would need to use a real weather API
+  import random
+
+  if city not in ['London', 'Paris', 'New York', 'Tokyo', 'Sydney']:
+    return 'Unknown city'
+
+  return str(random.randint(0, 35)) + ' degrees Celsius'
+
+
+def get_conditions(city: str) -> str:
+  """
+  Get the weather conditions for a city
+  """
+  if city not in ['London', 'Paris', 'New York', 'Tokyo', 'Sydney']:
+    return 'Unknown city'
+  # This is a mock implementation - would need to use a real weather API
+  conditions = ['sunny', 'cloudy', 'rainy', 'snowy']
+  return random.choice(conditions)
+
+
+available_functions = {
+  'get_temperature': get_temperature,
+  'get_conditions': get_conditions,
+}
+
+
+cities = ['London', 'Paris', 'New York', 'Tokyo', 'Sydney']
+city = random.choice(cities)
+city2 = random.choice(cities)
+messages = [{'role': 'user', 'content': f'What is the temperature in {city}? and what are the weather conditions in {city2}?'}]
+print('----- Prompt:', messages[0]['content'], '\n')
+
+model = 'qwen3'
+client = Client()
+response: Iterator[ChatResponse] = client.chat(model, stream=True, messages=messages, tools=[get_temperature, get_conditions], think=True)
+
+for chunk in response:
+  if chunk.message.thinking:
+    print(chunk.message.thinking, end='', flush=True)
+  if chunk.message.content:
+    print(chunk.message.content, end='', flush=True)
+  if chunk.message.tool_calls:
+    for tool in chunk.message.tool_calls:
+      if function_to_call := available_functions.get(tool.function.name):
+        print('\nCalling function:', tool.function.name, 'with arguments:', tool.function.arguments)
+        output = function_to_call(**tool.function.arguments)
+        print('> Function output:', output, '\n')
+
+        # Add the assistant message and tool call result to the messages
+        messages.append(chunk.message)
+        messages.append({'role': 'tool', 'content': str(output), 'tool_name': tool.function.name})
+      else:
+        print('Function', tool.function.name, 'not found')
+
+print('----- Sending result back to model \n')
+if any(msg.get('role') == 'tool' for msg in messages):
+  res = client.chat(model, stream=True, tools=[get_temperature, get_conditions], messages=messages, think=True)
+  done_thinking = False
+  for chunk in res:
+    if chunk.message.thinking:
+      print(chunk.message.thinking, end='', flush=True)
+    if chunk.message.content:
+      if not done_thinking:
+        print('\n----- Final result:')
+        done_thinking = True
+      print(chunk.message.content, end='', flush=True)
+    if chunk.message.tool_calls:
+      # Model should be explaining the tool calls and the results in this output
+      print('Model returned tool calls:')
+      print(chunk.message.tool_calls)
+else:
+  print('No tool calls returned')
diff --git a/examples/tools.py b/examples/tools.py
index d6f3fcf..86019fd 100644
--- a/examples/tools.py
+++ b/examples/tools.py
@@ -74,7 +74,7 @@ if response.message.tool_calls:
 if response.message.tool_calls:
   # Add the function response to messages for the model to use
   messages.append(response.message)
-  messages.append({'role': 'tool', 'content': str(output), 'name': tool.function.name})
+  messages.append({'role': 'tool', 'content': str(output), 'tool_name': tool.function.name})
 
   # Get final response from model with function outputs
   final_response = chat('llama3.1', messages=messages)
diff --git a/ollama/_types.py b/ollama/_types.py
index f86c06d..b53aea9 100644
--- a/ollama/_types.py
+++ b/ollama/_types.py
@@ -284,6 +284,9 @@ class Message(SubscriptableBaseModel):
   Valid image formats depend on the model. See the model card for more information.
   """
 
+  tool_name: Optional[str] = None
+  'Name of the executed tool.'
+
   class ToolCall(SubscriptableBaseModel):
     """
     Model tool calls.
diff --git a/pyproject.toml b/pyproject.toml
index ca1b992..6e62fc9 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -60,6 +60,7 @@ select = [
   'FLY', # flynt
   'RUF', # ruff-specific rules
 ]
+ignore = ['FBT001'] # Boolean-typed positional argument in function definition
 
 [tool.pytest.ini_options]
 addopts = ['--doctest-modules']
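
Usage note (illustrative sketch, not part of the patch): with the new tool_name field, a tool result can be sent back either as a plain dict carrying a 'tool_name' key, as the examples above do, or as a typed Message object. A minimal sketch, assuming a running Ollama server and a pulled 'llama3.1' model; the user prompt and the tool output string are placeholders:

    from ollama import Message, chat

    # Conversation where the application has already executed a tool
    messages = [
      {'role': 'user', 'content': 'What is the temperature in London?'},
      # tool_name tells the model which tool produced this result
      Message(role='tool', content='15 degrees Celsius', tool_name='get_temperature'),
    ]

    response = chat('llama3.1', messages=messages)
    print(response.message.content)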