examples: update the gpt-oss browser tool examples

This commit is contained in:
ParthSareen 2025-09-24 15:27:53 -07:00
parent 01eb25a71c
commit 2e7c85d184
6 changed files with 37 additions and 432 deletions

View File

@ -36,8 +36,6 @@ See [ollama/docs/api.md](https://github.com/ollama/ollama/blob/main/docs/api.md)
- [gpt-oss-tools.py](gpt-oss-tools.py)
- [gpt-oss-tools-stream.py](gpt-oss-tools-stream.py)
- [gpt-oss-tools-browser.py](gpt-oss-tools-browser.py) - Using browser research tools with gpt-oss
- [gpt-oss-tools-browser-stream.py](gpt-oss-tools-browser-stream.py) - Using browser research tools with gpt-oss, with streaming enabled
### Web search
@ -48,6 +46,7 @@ export OLLAMA_API_KEY="your_api_key_here"
```
- [web-search.py](web-search.py)
- [web-search-gpt-oss.py](web-search-gpt-oss.py) - Using browser research tools with gpt-oss
#### MCP server

View File

@ -1,198 +0,0 @@
# /// script
# requires-python = ">=3.11"
# dependencies = [
# "gpt-oss",
# "ollama",
# "rich",
# ]
# ///
import asyncio
import json
from typing import Iterator, Optional
from gpt_oss.tools.simple_browser import ExaBackend, SimpleBrowserTool
from openai_harmony import Author, Role, TextContent
from openai_harmony import Message as HarmonyMessage
from rich import print
from ollama import Client
from ollama._types import ChatResponse
# Module-level browser tool shared by every async helper below:
# an Exa-backed web search/browse backend wrapped in the gpt-oss
# SimpleBrowserTool, which speaks the Harmony message protocol.
_backend = ExaBackend(source='web')
_browser_tool = SimpleBrowserTool(backend=_backend)
def heading(text):
    """Print *text* underlined with '=' (three characters longer than the text)."""
    underline = '=' * (len(text) + 3)
    print(f'{text}\n{underline}')
async def _browser_search_async(query: str, topn: int = 10, source: str | None = None) -> str:
    """Run a browser.search call through the Harmony tool protocol and return its text."""
    # Translate the plain-Python call into a Harmony user message addressed
    # to the browser.search recipient.
    request = HarmonyMessage(
        author=Author(role=Role.USER),
        content=[TextContent(text=json.dumps({'query': query, 'topn': topn}))],
        recipient='browser.search',
    )
    pieces: list[str] = []
    async for reply in _browser_tool._process(request):
        for part in reply.content or []:
            if isinstance(part, TextContent):
                pieces.append(part.text)
    combined = ''.join(pieces)
    return combined or f'No results for query: {query}'
async def _browser_open_async(id: int | str = -1, cursor: int = -1, loc: int = -1, num_lines: int = -1, *, view_source: bool = False, source: str | None = None) -> str:
    """Run a browser.open call through the Harmony tool protocol and return its text."""
    payload = {
        'id': id,
        'cursor': cursor,
        'loc': loc,
        'num_lines': num_lines,
        'view_source': view_source,
        'source': source,
    }
    request = HarmonyMessage(
        author=Author(role=Role.USER),
        content=[TextContent(text=json.dumps(payload))],
        recipient='browser.open',
    )
    pieces: list[str] = []
    async for reply in _browser_tool._process(request):
        for part in reply.content or []:
            if isinstance(part, TextContent):
                pieces.append(part.text)
    return ''.join(pieces) or f'Could not open: {id}'
async def _browser_find_async(pattern: str, cursor: int = -1) -> str:
    """Run a browser.find call through the Harmony tool protocol and return its text."""
    request = HarmonyMessage(
        author=Author(role=Role.USER),
        content=[TextContent(text=json.dumps({'pattern': pattern, 'cursor': cursor}))],
        recipient='browser.find',
    )
    pieces: list[str] = []
    async for reply in _browser_tool._process(request):
        for part in reply.content or []:
            if isinstance(part, TextContent):
                pieces.append(part.text)
    return ''.join(pieces) or f'Pattern not found: {pattern}'
def browser_search(query: str, topn: int = 10, source: Optional[str] = None) -> str:
    """Synchronous wrapper: run the async browser.search helper to completion."""
    return asyncio.run(_browser_search_async(query=query, topn=topn, source=source))


def browser_open(id: int | str = -1, cursor: int = -1, loc: int = -1, num_lines: int = -1, *, view_source: bool = False, source: Optional[str] = None) -> str:
    """Synchronous wrapper: run the async browser.open helper to completion."""
    return asyncio.run(_browser_open_async(id=id, cursor=cursor, loc=loc, num_lines=num_lines, view_source=view_source, source=source))


def browser_find(pattern: str, cursor: int = -1) -> str:
    """Synchronous wrapper: run the async browser.find helper to completion."""
    return asyncio.run(_browser_find_async(pattern=pattern, cursor=cursor))
# Schema definitions for each browser tool.
# Only the tool *names* are advertised; no parameter schemas are given
# here (gpt-oss recognizes its built-in browser tools by name).
browser_search_schema = {
    'type': 'function',
    'function': {
        'name': 'browser.search',
    },
}

browser_open_schema = {
    'type': 'function',
    'function': {
        'name': 'browser.open',
    },
}

browser_find_schema = {
    'type': 'function',
    'function': {
        'name': 'browser.find',
    },
}

# Maps the tool name the model emits to the local Python implementation.
available_tools = {
    'browser.search': browser_search,
    'browser.open': browser_open,
    'browser.find': browser_find,
}
model = 'gpt-oss:20b'
print('Model: ', model, '\n')

prompt = 'What is Ollama?'
print('You: ', prompt, '\n')

messages = [{'role': 'user', 'content': prompt}]
client = Client()

# gpt-oss can call tools while "thinking":
# loop, running each requested tool and feeding the result back, until the
# model produces a final answer with no further tool calls.
final = True  # flips to False once the final-answer heading has been printed
while True:
    response_stream: Iterator[ChatResponse] = client.chat(
        model=model,
        messages=messages,
        tools=[browser_search_schema, browser_open_schema, browser_find_schema],
        options={'num_ctx': 8192},  # 8192 is the recommended lower limit for the context window
        stream=True,
    )

    tool_calls = []
    thinking = ''
    content = ''
    for chunk in response_stream:
        if chunk.message.tool_calls:
            tool_calls.extend(chunk.message.tool_calls)
        if chunk.message.content:
            # A content chunk with no thinking field marks the start of the
            # model's final answer; print the heading exactly once.
            if not (chunk.message.thinking or chunk.message.thinking == '') and final:
                heading('\n\nFinal result: ')
                final = False
            content += chunk.message.content  # fix: was printed but never accumulated
            print(chunk.message.content, end='', flush=True)
        if chunk.message.thinking:
            thinking += chunk.message.thinking
            print(chunk.message.thinking, end='', flush=True)
    print()

    # fix: the assistant turn was previously recorded only when thinking text
    # was produced, silently dropping tool_calls (and content) otherwise.
    # Record content/thinking/tool_calls like the non-streaming example does.
    if thinking or content or tool_calls:
        messages.append({'role': 'assistant', 'content': content, 'thinking': thinking, 'tool_calls': tool_calls})

    if tool_calls:
        for tool_call in tool_calls:
            tool_name = tool_call.function.name
            args = tool_call.function.arguments or {}
            function_to_call = available_tools.get(tool_name)
            if function_to_call:
                heading(f'\nCalling tool: {tool_name}')
                if args:
                    print(f'Arguments: {args}')
                try:
                    result = function_to_call(**args)
                    # show only the first 200 chars of the (possibly long) page text
                    print(f'Tool result: {result[:200]}')
                    if len(result) > 200:
                        heading('... [truncated]')
                    print()
                    messages.append({'role': 'tool', 'content': result, 'tool_name': tool_name})
                except Exception as e:
                    err = f'Error from {tool_name}: {e}'
                    print(err)
                    messages.append({'role': 'tool', 'content': err, 'tool_name': tool_name})
            else:
                print(f'Tool {tool_name} not found')
    else:
        # no more tool calls, we can stop the loop
        break

View File

@ -1,175 +0,0 @@
# /// script
# requires-python = ">=3.11"
# dependencies = [
# "gpt-oss",
# "ollama",
# "rich",
# ]
# ///
import asyncio
import json
from typing import Optional
from gpt_oss.tools.simple_browser import ExaBackend, SimpleBrowserTool
from openai_harmony import Author, Role, TextContent
from openai_harmony import Message as HarmonyMessage
from ollama import Client
# Shared Exa-backed web browser tool; all async helpers below route their
# Harmony messages through this single SimpleBrowserTool instance.
_backend = ExaBackend(source='web')
_browser_tool = SimpleBrowserTool(backend=_backend)
def heading(text):
    """Print *text* and underline it with '=' three characters longer than the text."""
    print('\n'.join([text, '=' * (len(text) + 3)]))
async def _browser_search_async(query: str, topn: int = 10, source: str | None = None) -> str:
    """Run a browser.search call through the Harmony tool protocol and return its text."""
    # Map the Ollama-style call onto a Harmony user message.
    request = HarmonyMessage(
        author=Author(role=Role.USER),
        content=[TextContent(text=json.dumps({'query': query, 'topn': topn}))],
        recipient='browser.search',
    )
    collected: list[str] = []
    async for reply in _browser_tool._process(request):
        for part in reply.content or []:
            if isinstance(part, TextContent):
                collected.append(part.text)
    return ''.join(collected) or f'No results for query: {query}'
async def _browser_open_async(id: int | str = -1, cursor: int = -1, loc: int = -1, num_lines: int = -1, *, view_source: bool = False, source: str | None = None) -> str:
    """Run a browser.open call through the Harmony tool protocol and return its text."""
    payload = {
        'id': id,
        'cursor': cursor,
        'loc': loc,
        'num_lines': num_lines,
        'view_source': view_source,
        'source': source,
    }
    request = HarmonyMessage(
        author=Author(role=Role.USER),
        content=[TextContent(text=json.dumps(payload))],
        recipient='browser.open',
    )
    collected: list[str] = []
    async for reply in _browser_tool._process(request):
        for part in reply.content or []:
            if isinstance(part, TextContent):
                collected.append(part.text)
    return ''.join(collected) or f'Could not open: {id}'
async def _browser_find_async(pattern: str, cursor: int = -1) -> str:
    """Run a browser.find call through the Harmony tool protocol and return its text."""
    request = HarmonyMessage(
        author=Author(role=Role.USER),
        content=[TextContent(text=json.dumps({'pattern': pattern, 'cursor': cursor}))],
        recipient='browser.find',
    )
    collected: list[str] = []
    async for reply in _browser_tool._process(request):
        for part in reply.content or []:
            if isinstance(part, TextContent):
                collected.append(part.text)
    return ''.join(collected) or f'Pattern not found: {pattern}'
def browser_search(query: str, topn: int = 10, source: Optional[str] = None) -> str:
    """Synchronous wrapper: run the async browser.search helper to completion."""
    return asyncio.run(_browser_search_async(query=query, topn=topn, source=source))


def browser_open(id: int | str = -1, cursor: int = -1, loc: int = -1, num_lines: int = -1, *, view_source: bool = False, source: Optional[str] = None) -> str:
    """Synchronous wrapper: run the async browser.open helper to completion."""
    return asyncio.run(_browser_open_async(id=id, cursor=cursor, loc=loc, num_lines=num_lines, view_source=view_source, source=source))


def browser_find(pattern: str, cursor: int = -1) -> str:
    """Synchronous wrapper: run the async browser.find helper to completion."""
    return asyncio.run(_browser_find_async(pattern=pattern, cursor=cursor))
# Schema definitions for each browser tool.
# Only the tool *names* are advertised; gpt-oss recognizes its built-in
# browser tools by name, so no parameter schemas are supplied.
browser_search_schema = {
    'type': 'function',
    'function': {
        'name': 'browser.search',
    },
}

browser_open_schema = {
    'type': 'function',
    'function': {
        'name': 'browser.open',
    },
}

browser_find_schema = {
    'type': 'function',
    'function': {
        'name': 'browser.find',
    },
}

# Dispatch table: tool name emitted by the model -> local implementation.
available_tools = {
    'browser.search': browser_search,
    'browser.open': browser_open,
    'browser.find': browser_find,
}
# Tool-calling chat loop: keep asking the model, running any tools it
# requests and appending the results, until it answers without tool calls.
model = 'gpt-oss:20b'
print('Model: ', model, '\n')

prompt = 'What is Ollama?'
print('You: ', prompt, '\n')

messages = [{'role': 'user', 'content': prompt}]
client = Client()

while True:
    response = client.chat(
        model=model,
        messages=messages,
        tools=[browser_search_schema, browser_open_schema, browser_find_schema],
        options={'num_ctx': 8192},  # 8192 is the recommended lower limit for the context window
    )

    if hasattr(response.message, 'thinking') and response.message.thinking:
        heading('Thinking')
        print(response.message.thinking.strip() + '\n')
    if hasattr(response.message, 'content') and response.message.content:
        heading('Assistant')
        print(response.message.content.strip() + '\n')

    # add message to chat history (carries content, thinking, and tool_calls)
    messages.append(response.message)

    if response.message.tool_calls:
        for tool_call in response.message.tool_calls:
            tool_name = tool_call.function.name
            args = tool_call.function.arguments or {}
            function_to_call = available_tools.get(tool_name)
            if not function_to_call:
                print(f'Unknown tool: {tool_name}')
                continue
            try:
                result = function_to_call(**args)
                heading(f'Tool: {tool_name}')
                if args:
                    print(f'Arguments: {args}')
                # show only the first 200 chars of the (possibly long) result
                print(result[:200])
                if len(result) > 200:
                    print('... [truncated]')
                print()
                messages.append({'role': 'tool', 'content': result, 'tool_name': tool_name})
            except Exception as e:
                # feed the error back to the model so it can recover
                err = f'Error from {tool_name}: {e}'
                print(err)
                messages.append({'role': 'tool', 'content': err, 'tool_name': tool_name})
    else:
        # break on no more tool calls
        break

View File

@ -1,9 +1,12 @@
from __future__ import annotations
import os
# /// script
# requires-python = ">=3.11"
# dependencies = [
# "ollama",
# ]
# ///
from typing import Any, Dict, List
from gpt_oss_browser_tool_helper import Browser
from web_search_gpt_oss_helper import Browser
from ollama import Client
@ -12,53 +15,6 @@ def main() -> None:
client = Client()
browser = Browser(initial_state=None, client=client)
# Tool schemas
# Tool schemas with explicit JSON-schema parameter definitions
# (this older variant spells out each tool's arguments for the model).
browser_search_schema = {
    'type': 'function',
    'function': {
        'name': 'browser.search',
        'parameters': {
            'type': 'object',
            'properties': {
                'query': {'type': 'string'},
                'topn': {'type': 'integer'},
            },
            'required': ['query'],
        },
    },
}

browser_open_schema = {
    'type': 'function',
    'function': {
        'name': 'browser.open',
        'parameters': {
            'type': 'object',
            'properties': {
                # id may be a numeric link index or a URL string
                'id': {'anyOf': [{'type': 'integer'}, {'type': 'string'}]},
                'cursor': {'type': 'integer'},
                'loc': {'type': 'integer'},
                'num_lines': {'type': 'integer'},
            },
        },
    },
}

browser_find_schema = {
    'type': 'function',
    'function': {
        'name': 'browser.find',
        'parameters': {
            'type': 'object',
            'properties': {
                'pattern': {'type': 'string'},
                'cursor': {'type': 'integer'},
            },
            'required': ['pattern'],
        },
    },
}
def browser_search(query: str, topn: int = 10) -> str:
    # Thin adapter over the shared Browser helper; returns only the
    # rendered page text for the model to read.
    return browser.search(query=query, topn=topn)['pageText']
@ -68,29 +24,50 @@ def main() -> None:
def browser_find(pattern: str, cursor: int = -1, **_: Any) -> str:
    # **_ swallows any unexpected extra arguments the model might pass.
    return browser.find(pattern=pattern, cursor=cursor)['pageText']
# Name-only tool schemas: gpt-oss recognizes its built-in browser tools
# by name, so no parameter schemas are supplied here.
browser_search_schema = {
    'type': 'function',
    'function': {
        'name': 'browser.search',
    },
}

browser_open_schema = {
    'type': 'function',
    'function': {
        'name': 'browser.open',
    },
}

browser_find_schema = {
    'type': 'function',
    'function': {
        'name': 'browser.find',
    },
}

# Dispatch table: tool name emitted by the model -> local implementation.
available_tools = {
    'browser.search': browser_search,
    'browser.open': browser_open,
    'browser.find': browser_find,
}
query = 'What is Ollama.com?'
query = "what is ollama's new engine"
print('Prompt:', query, '\n')
messages: List[Dict[str, Any]] = [{'role': 'user', 'content': query}]
while True:
resp = client.chat(
model='gpt-oss',
model='gpt-oss:120b-cloud',
messages=messages,
tools=[browser_search_schema, browser_open_schema, browser_find_schema],
think=True,
)
if resp.message.thinking:
if resp.message.thinking:
print('Thinking:\n========\n')
print(resp.message.thinking + '\n')
if resp.message.content:
print('Response:\n========\n')
print(resp.message.content + '\n')
@ -103,6 +80,7 @@ def main() -> None:
for tc in resp.message.tool_calls:
tool_name = tc.function.name
args = tc.function.arguments or {}
print(f'Tool name: {tool_name}, args: {args}')
fn = available_tools.get(tool_name)
if not fn:
messages.append({'role': 'tool', 'content': f'Tool {tool_name} not found', 'tool_name': tool_name})
@ -110,6 +88,7 @@ def main() -> None:
try:
result_text = fn(**args)
print('Result: ', result_text[:200] + '...')
except Exception as e:
result_text = f'Error from {tool_name}: {e}'

View File

@ -49,7 +49,7 @@ print('Query: ', query)
messages = [{'role': 'user', 'content': query}]
while True:
response = chat(model='qwen3', messages=messages, tools=[web_search, web_fetch], think=True)
response = chat(model='deepseek-v3.1:671b-cloud', messages=messages, tools=[web_search, web_fetch], think=True)
if response.message.thinking:
print('Thinking: ')
print(response.message.thinking + '\n\n')