# Copyright (c) 2025 Microsoft Corporation.
# Licensed under the MIT License

"""Base LLM response protocol."""

from typing import Any, Generic, Protocol, TypeVar

from pydantic import BaseModel, Field

T = TypeVar("T", bound=BaseModel, covariant=True)

class ModelOutput(Protocol):
    """Protocol for a model response's output object."""

    @property
    def content(self) -> str:
        """Return the textual content of the output."""
        ...

    @property
    def full_response(self) -> dict[str, Any] | None:
        """Return the complete JSON response returned by the model."""
        ...

class ModelResponse(Protocol, Generic[T]):
    """Protocol for LLM response."""

    @property
    def output(self) -> ModelOutput:
        """Return the output of the response."""
        ...

    @property
    def parsed_response(self) -> T | None:
        """Return the parsed response."""
        ...

    @property
    def history(self) -> list:
        """Return the history of the response."""
        ...

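# Illustrative sketch (not part of the original module): because the classes
# above are structural protocols, a consumer can be typed against
# ModelResponse without depending on any concrete provider class. The
# `summarize_response` helper below is hypothetical, added for illustration.
def summarize_response(response: ModelResponse[BaseModel]) -> str:
    """Return a one-line summary of any protocol-conforming response."""
    raw = response.output.full_response
    raw_keys = sorted(raw) if raw else []
    return f"content={response.output.content!r} raw_keys={raw_keys}"
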
class BaseModelOutput(BaseModel):
    """Base class for LLM output."""

    content: str = Field(..., description="The textual content of the output.")
    """The textual content of the output."""

    full_response: dict[str, Any] | None = Field(
        None, description="The complete JSON response returned by the LLM provider."
    )
    """The complete JSON response returned by the LLM provider."""

class BaseModelResponse(BaseModel, Generic[T]):
    """Base class for a model response."""

    output: BaseModelOutput
    """The output of the model response."""

    parsed_response: T | None = None
    """Parsed response."""

    history: list[Any] = Field(default_factory=list)
    """History of the response."""

    tool_calls: list = Field(default_factory=list)
    """Tool calls required by the model. These will be instances of the LLM tools (with filled parameters)."""

    metrics: Any | None = None
    """Request/response metrics."""

    cache_hit: bool | None = None
    """Whether the response was a cache hit."""
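
# Illustrative usage sketch (not part of the original module): constructing the
# concrete base classes above. `Greeting` is a hypothetical payload model, and
# the full_response dict mimics an assumed provider shape.
if __name__ == "__main__":

    class Greeting(BaseModel):
        """Hypothetical structured payload for parsed_response."""

        message: str

    response = BaseModelResponse[Greeting](
        output=BaseModelOutput(
            content='{"message": "hello"}',
            full_response={"id": "resp-1", "model": "example"},  # assumed shape
        ),
        parsed_response=Greeting(message="hello"),
    )

    # Both the plain text and the raw provider payload remain accessible.
    print(response.output.content)
    print(response.output.full_response)
    if response.parsed_response is not None:
        print(response.parsed_response.message)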