Creates an httpx client. Default parameters are the same as those defined in httpx
"Assumed role of the message. Response messages always have the role 'assistant'." content: str + "Content of the message. Response messages contain message fragments when streaming."