example: update create example (#418)

parent 2cad1f5428
commit 02495ffd77
examples/README.md

@@ -6,6 +6,8 @@ Run the examples in this directory with:
python3 examples/<example>.py
```

See [ollama/docs/api.md](https://github.com/ollama/ollama/blob/main/docs/api.md) for full API documentation

### Chat - Chat with a model
- [chat.py](chat.py)
- [async-chat.py](async-chat.py)
@@ -50,12 +52,8 @@ Requirement: `pip install tqdm`

### Ollama Create - Create a model from a Modelfile

```python
python create.py <model> <modelfile>
```

- [create.py](create.py)

See [ollama/docs/modelfile.md](https://github.com/ollama/ollama/blob/main/docs/modelfile.md) for more information on the Modelfile format.

### Ollama Embed - Generate embeddings with a model
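This hunk drops the usage block and the Modelfile link because the rewritten create.py (below) no longer reads a Modelfile. For scripts built on the old workflow, the removed directives map roughly onto keyword arguments of `Client.create`. A minimal sketch, assuming the `system` and `parameters` keywords accepted by recent ollama-python releases, with `mario` as a hypothetical model name:

```python
from ollama import Client

client = Client()

# Rough equivalent of the old example Modelfile, expressed as structured
# arguments. 'mario' is a hypothetical model name used for illustration.
response = client.create(
  model='mario',
  from_='llama3.2',  # was: FROM llama3.2
  system='You are Mario from super mario bros, acting as an assistant.',  # was: SYSTEM ...
  parameters={'temperature': 1, 'num_ctx': 4096},  # was: PARAMETER lines
  stream=False,
)
print(response.status)
```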
examples/create.py · 33 changed lines · Normal file → Executable file
@@ -1,30 +1,5 @@
-import sys
+from ollama import Client
 
-from ollama import create
-
-
-args = sys.argv[1:]
-if len(args) == 2:
-  # create from local file
-  path = args[1]
-else:
-  print('usage: python create.py <name> <filepath>')
-  sys.exit(1)
-
-# TODO: update to real Modelfile values
-modelfile = f"""
-FROM {path}
-"""
-example_modelfile = """
-FROM llama3.2
-# sets the temperature to 1 [higher is more creative, lower is more coherent]
-PARAMETER temperature 1
-# sets the context window size to 4096, this controls how many tokens the LLM can use as context to generate the next token
-PARAMETER num_ctx 4096
-
-# sets a custom system message to specify the behavior of the chat assistant
-SYSTEM You are Mario from super mario bros, acting as an assistant.
-"""
-
-for response in create(model=args[0], modelfile=modelfile, stream=True):
-  print(response['status'])
+client = Client()
+response = client.create(model='my-assistant', from_='llama3.2', stream=False)
+print(response.status)
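The removed loop printed streaming progress, while the new call uses `stream=False` and returns a single response. The streaming behavior should still be available by passing `stream=True`, which makes `create` yield incremental progress objects. A minimal sketch under that assumption:

```python
from ollama import Client

client = Client()

# With stream=True, create() yields progress updates (each with a .status
# field) instead of returning one final response, mirroring the old loop.
for progress in client.create(model='my-assistant', from_='llama3.2', stream=True):
  print(progress.status)
```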