Mirror of https://github.com/ollama/ollama-python.git
Delete extra async examples
parent 6fa242894f
commit 92a1cd92e3
@@ -33,7 +33,6 @@ python3 <example>.py
 
 ### Ollama List - List all downloaded models and their properties
 - [list.py](list.py)
-- [async-list.py](async-list.py)
 
 ### Ollama ps - Show model status with CPU/GPU usage
@@ -1,21 +0,0 @@
import asyncio
import ollama


async def main():
  client = ollama.AsyncClient()

  response = await client.list()
  for model in response.models:
    if model.details:
      print(f'Name: {model.model}')
      print(f'Size (MB): {(model.size.real / 1024 / 1024):.2f}')
      print(f'Format: {model.details.format}')
      print(f'Family: {model.details.family}')
      print(f'Parameter Size: {model.details.parameter_size}')
      print(f'Quantization Level: {model.details.quantization_level}')
      print('-' * 50)


if __name__ == '__main__':
  asyncio.run(main())
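The file above is the async variant of the list example; the README keeps linking the synchronous [list.py](list.py). For reference, a minimal synchronous sketch of the same listing, assuming the module-level ollama.list() mirrors AsyncClient.list() (an illustration, not the repository's list.py verbatim):

import ollama

# Sketch only: assumes ollama.list() returns the same ListResponse shape
# (models with .model, .size and .details) that the async example above reads.
response = ollama.list()
for model in response.models:
  if model.details:
    print(f'Name: {model.model}')
    print(f'Size (MB): {(model.size.real / 1024 / 1024):.2f}')
    print(f'Format: {model.details.format}')
    print(f'Quantization Level: {model.details.quantization_level}')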
@@ -1,40 +0,0 @@
import asyncio
from ollama import AsyncClient


async def main():
  client = AsyncClient()

  response = await client.pull('llama3.1', stream=True)
  progress_states = set()
  async for progress in response:
    if progress.get('status') in progress_states:
      continue
    progress_states.add(progress.get('status'))
    print(progress.get('status'))

  print('\n')

  response = await client.chat('llama3.1', messages=[{'role': 'user', 'content': 'Hello!'}])
  print(response['message']['content'])

  print('\n')

  response = await client.ps()

  name = response['models'][0]['name']
  size = response['models'][0]['size']
  size_vram = response['models'][0]['size_vram']

  if size == size_vram:
    print(f'{name}: 100% GPU')
  elif not size_vram:
    print(f'{name}: 100% CPU')
  else:
    size_cpu = size - size_vram
    cpu_percent = round(size_cpu / size * 100)
    print(f'{name}: {cpu_percent}% CPU/{100 - cpu_percent}% GPU')


if __name__ == '__main__':
  asyncio.run(main())
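The tail of the deleted example derives the CPU/GPU split from ps: a model whose loaded size equals size_vram is fully on GPU, one whose size_vram is zero is fully on CPU, and otherwise the CPU share is (size - size_vram) / size. A minimal synchronous sketch of the same arithmetic, assuming the synchronous Client exposes ps() with the same response shape (an illustration, not a file from the repository):

from ollama import Client

# Sketch only: assumes Client.ps() mirrors AsyncClient.ps(), returning
# 'models' entries with 'name', 'size' and 'size_vram' fields.
client = Client()
response = client.ps()

for model in response['models']:
  name, size, size_vram = model['name'], model['size'], model['size_vram']
  if size == size_vram:
    print(f'{name}: 100% GPU')
  elif not size_vram:
    print(f'{name}: 100% CPU')
  else:
    cpu_percent = round((size - size_vram) / size * 100)
    print(f'{name}: {cpu_percent}% CPU/{100 - cpu_percent}% GPU')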