docs: add module docstrings to example scripts

Adds descriptive docstrings to 8 example files explaining what each
demonstrates, prerequisites (which model to pull), and how to run it.

Files: chat.py, generate.py, embed.py, tools.py, structured-outputs.py,
chat-stream.py, chat-with-history.py, multimodal-chat.py
This commit is contained in:
Pawan Singh Kapkoti 2026-04-05 08:41:12 +01:00
parent dbccf192ac
commit 3b18b798e5
8 changed files with 95 additions and 0 deletions

View File

@@ -1,3 +1,15 @@
"""Streaming chat response.
Prints tokens as they are generated instead of waiting for the full
response, providing a real-time typing effect.
Prerequisites:
ollama pull gemma3
Usage:
python chat-stream.py
"""
from ollama import chat
messages = [

View File

@@ -1,3 +1,15 @@
"""Multi-turn chat with conversation history.
Sends multiple messages in sequence, maintaining context across turns
so the model can reference earlier parts of the conversation.
Prerequisites:
ollama pull gemma3
Usage:
python chat-with-history.py
"""
from ollama import chat
messages = [

View File

@@ -1,3 +1,14 @@
"""Basic chat completion.
Sends a single user message and prints the model's response.
Prerequisites:
ollama pull gemma3
Usage:
python chat.py
"""
from ollama import chat
messages = [

View File

@@ -1,3 +1,15 @@
"""Generate text embeddings.
Produces a vector embedding for the given input text, useful for
semantic search, clustering, and similarity comparisons.
Prerequisites:
ollama pull llama3.2
Usage:
python embed.py
"""
from ollama import embed
response = embed(model='llama3.2', input='Hello, world!')

View File

@@ -1,3 +1,14 @@
"""Basic text generation.
Generates a response from a prompt without conversation history.
Prerequisites:
ollama pull gemma3
Usage:
python generate.py
"""
from ollama import generate
response = generate('gemma3', 'Why is the sky blue?')

View File

@@ -1,3 +1,15 @@
"""Multimodal chat with image input.
Sends an image alongside a text prompt, allowing the model to describe
or answer questions about the image content.
Prerequisites:
ollama pull gemma3
Usage:
python multimodal-chat.py
"""
from ollama import chat
# from pathlib import Path

View File

@@ -1,3 +1,16 @@
"""Structured JSON output with Pydantic validation.
Forces the model to return JSON conforming to a Pydantic schema,
then validates the response automatically.
Prerequisites:
pip install pydantic
ollama pull llama3.1:8b
Usage:
python structured-outputs.py
"""
from pydantic import BaseModel
from ollama import chat

View File

@@ -1,3 +1,15 @@
"""Tool calling (function calling) with Ollama.
Demonstrates how to define Python functions as tools that the model can
invoke, dispatch the calls, and feed results back into the conversation.
Prerequisites:
ollama pull llama3.1
Usage:
python tools.py
"""
from ollama import ChatResponse, chat