Mirror of https://github.com/ollama/ollama-python.git

commit fe91357d4b (parent d7978cb234)
examples: update to use gemma3 (#543)

README.md: 32 changed lines
@@ -5,7 +5,7 @@ The Ollama Python library provides the easiest way to integrate Python 3.8+ proj
 ## Prerequisites

 - [Ollama](https://ollama.com/download) should be installed and running
-- Pull a model to use with the library: `ollama pull <model>` e.g. `ollama pull llama3.2`
+- Pull a model to use with the library: `ollama pull <model>` e.g. `ollama pull gemma3`
 - See [Ollama.com](https://ollama.com/search) for more information on the models available.

 ## Install
@@ -20,7 +20,7 @@ pip install ollama
 from ollama import chat
 from ollama import ChatResponse

-response: ChatResponse = chat(model='llama3.2', messages=[
+response: ChatResponse = chat(model='gemma3', messages=[
   {
     'role': 'user',
     'content': 'Why is the sky blue?',
@@ -41,7 +41,7 @@ Response streaming can be enabled by setting `stream=True`.
 from ollama import chat

 stream = chat(
-    model='llama3.2',
+    model='gemma3',
     messages=[{'role': 'user', 'content': 'Why is the sky blue?'}],
     stream=True,
 )
@@ -61,7 +61,7 @@ client = Client(
   host='http://localhost:11434',
   headers={'x-some-header': 'some-value'}
 )
-response = client.chat(model='llama3.2', messages=[
+response = client.chat(model='gemma3', messages=[
   {
     'role': 'user',
     'content': 'Why is the sky blue?',
@@ -79,7 +79,7 @@ from ollama import AsyncClient

 async def chat():
   message = {'role': 'user', 'content': 'Why is the sky blue?'}
-  response = await AsyncClient().chat(model='llama3.2', messages=[message])
+  response = await AsyncClient().chat(model='gemma3', messages=[message])

 asyncio.run(chat())
 ```
@@ -92,7 +92,7 @@ from ollama import AsyncClient

 async def chat():
   message = {'role': 'user', 'content': 'Why is the sky blue?'}
-  async for part in await AsyncClient().chat(model='llama3.2', messages=[message], stream=True):
+  async for part in await AsyncClient().chat(model='gemma3', messages=[message], stream=True):
     print(part['message']['content'], end='', flush=True)

 asyncio.run(chat())
@@ -105,13 +105,13 @@ The Ollama Python library's API is designed around the [Ollama REST API](https:/
 ### Chat

 ```python
-ollama.chat(model='llama3.2', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}])
+ollama.chat(model='gemma3', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}])
 ```

 ### Generate

 ```python
-ollama.generate(model='llama3.2', prompt='Why is the sky blue?')
+ollama.generate(model='gemma3', prompt='Why is the sky blue?')
 ```

 ### List
@@ -123,49 +123,49 @@ ollama.list()
 ### Show

 ```python
-ollama.show('llama3.2')
+ollama.show('gemma3')
 ```

 ### Create

 ```python
-ollama.create(model='example', from_='llama3.2', system="You are Mario from Super Mario Bros.")
+ollama.create(model='example', from_='gemma3', system="You are Mario from Super Mario Bros.")
 ```

 ### Copy

 ```python
-ollama.copy('llama3.2', 'user/llama3.2')
+ollama.copy('gemma3', 'user/gemma3')
 ```

 ### Delete

 ```python
-ollama.delete('llama3.2')
+ollama.delete('gemma3')
 ```

 ### Pull

 ```python
-ollama.pull('llama3.2')
+ollama.pull('gemma3')
 ```

 ### Push

 ```python
-ollama.push('user/llama3.2')
+ollama.push('user/gemma3')
 ```

 ### Embed

 ```python
-ollama.embed(model='llama3.2', input='The sky is blue because of rayleigh scattering')
+ollama.embed(model='gemma3', input='The sky is blue because of rayleigh scattering')
 ```

 ### Embed (batch)

 ```python
-ollama.embed(model='llama3.2', input=['The sky is blue because of rayleigh scattering', 'Grass is green because of chlorophyll'])
+ollama.embed(model='gemma3', input=['The sky is blue because of rayleigh scattering', 'Grass is green because of chlorophyll'])
 ```

 ### Ps

@@ -12,7 +12,7 @@ async def main():
   ]

   client = AsyncClient()
-  response = await client.chat('llama3.2', messages=messages)
+  response = await client.chat('gemma3', messages=messages)
   print(response['message']['content'])


@@ -5,7 +5,7 @@ import ollama

 async def main():
   client = ollama.AsyncClient()
-  response = await client.generate('llama3.2', 'Why is the sky blue?')
+  response = await client.generate('gemma3', 'Why is the sky blue?')
   print(response['response'])


@@ -7,7 +7,5 @@ messages = [
   },
 ]

-for part in chat('llama3.2', messages=messages, stream=True):
+for part in chat('gemma3', messages=messages, stream=True):
   print(part['message']['content'], end='', flush=True)
-
-print()

@@ -22,7 +22,7 @@ messages = [
 while True:
   user_input = input('Chat with history: ')
   response = chat(
-    'llama3.2',
+    'gemma3',
     messages=[*messages, {'role': 'user', 'content': user_input}],
   )

@@ -7,5 +7,5 @@ messages = [
   },
 ]

-response = chat('llama3.2', messages=messages)
+response = chat('gemma3', messages=messages)
 print(response['message']['content'])

@@ -3,7 +3,7 @@ from ollama import Client
 client = Client()
 response = client.create(
   model='my-assistant',
-  from_='llama3.2',
+  from_='gemma3',
   system='You are mario from Super Mario Bros.',
   stream=False,
 )

@@ -1,4 +1,4 @@
 from ollama import generate

-for part in generate('llama3.2', 'Why is the sky blue?', stream=True):
+for part in generate('gemma3', 'Why is the sky blue?', stream=True):
   print(part['response'], end='', flush=True)

@@ -1,4 +1,4 @@
 from ollama import generate

-response = generate('llama3.2', 'Why is the sky blue?')
+response = generate('gemma3', 'Why is the sky blue?')
 print(response['response'])

@@ -11,7 +11,7 @@ path = input('Please enter the path to the image: ')
 # img = Path(path).read_bytes()

 response = chat(
-  model='llama3.2-vision',
+  model='gemma3',
   messages=[
     {
       'role': 'user',

@@ -1,7 +1,7 @@
 from ollama import ProcessResponse, chat, ps, pull

 # Ensure at least one model is loaded
-response = pull('llama3.2', stream=True)
+response = pull('gemma3', stream=True)
 progress_states = set()
 for progress in response:
   if progress.get('status') in progress_states:
@@ -12,7 +12,7 @@ for progress in response:
 print('\n')

 print('Waiting for model to load... \n')
-chat(model='llama3.2', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}])
+chat(model='gemma3', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}])


 response: ProcessResponse = ps()

@@ -3,7 +3,7 @@ from tqdm import tqdm
 from ollama import pull

 current_digest, bars = '', {}
-for progress in pull('llama3.2', stream=True):
+for progress in pull('gemma3', stream=True):
   digest = progress.get('digest', '')
   if digest != current_digest and current_digest in bars:
     bars[current_digest].close()

@@ -33,7 +33,7 @@ if not path.exists():

 # Set up chat as usual
 response = chat(
-  model='llama3.2-vision',
+  model='gemma3',
   format=ImageDescription.model_json_schema(),  # Pass in the schema for the response
   messages=[
     {
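Taken together, the change is a mechanical model swap: every README snippet and example script that previously defaulted to `llama3.2` (or `llama3.2-vision` in the image examples) now uses `gemma3`. A minimal sketch of the updated default usage, assuming a local Ollama server is running and `ollama pull gemma3` has already completed:

```python
from ollama import chat

# Assumes the Ollama server is reachable at its default address
# and that `ollama pull gemma3` has been run beforehand.
response = chat(model='gemma3', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}])
print(response['message']['content'])
```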