graphrag/graphrag/language_model/response/base.py
Alonso Guevara 7fba9522d4
Some checks are pending
gh-pages / build (push) Waiting to run
Python CI / python-ci (ubuntu-latest, 3.10) (push) Waiting to run
Python CI / python-ci (ubuntu-latest, 3.11) (push) Waiting to run
Python CI / python-ci (windows-latest, 3.10) (push) Waiting to run
Python CI / python-ci (windows-latest, 3.11) (push) Waiting to run
Python Integration Tests / python-ci (ubuntu-latest, 3.10) (push) Waiting to run
Python Integration Tests / python-ci (windows-latest, 3.10) (push) Waiting to run
Python Notebook Tests / python-ci (ubuntu-latest, 3.10) (push) Waiting to run
Python Notebook Tests / python-ci (windows-latest, 3.10) (push) Waiting to run
Python Publish (pypi) / Upload release to PyPI (push) Waiting to run
Python Smoke Tests / python-ci (ubuntu-latest, 3.10) (push) Waiting to run
Python Smoke Tests / python-ci (windows-latest, 3.10) (push) Waiting to run
Spellcheck / spellcheck (push) Waiting to run
Task/raw model answer (#1947)
* Add full_response to llm provider output

* Semver

* Small leftover cleanup

* Add pyi to suppress Pyright errors. full_content is optional

* Format

* Add missing stubs
2025-05-22 08:22:44 -06:00

72 lines
2.0 KiB
Python

# Copyright (c) 2025 Microsoft Corporation.
# Licensed under the MIT License
"""Base llm response protocol."""
from typing import Any, Generic, Protocol, TypeVar
from pydantic import BaseModel, Field
# Covariant type parameter for the parsed/structured response payload.
# Bound to pydantic BaseModel so parsed_response can be validated model data.
T = TypeVar("T", bound=BaseModel, covariant=True)
class ModelOutput(Protocol):
"""Protocol for Model response's output object."""
@property
def content(self) -> str:
"""Return the textual content of the output."""
...
@property
def full_response(self) -> dict[str, Any] | None:
"""Return the complete JSON response returned by the model."""
...
class ModelResponse(Protocol[T]):
    """Protocol for LLM response.

    Generic over ``T``, the pydantic model type of the parsed response.
    ``Protocol[T]`` is the idiomatic shorthand for ``Protocol, Generic[T]``.
    """

    @property
    def output(self) -> ModelOutput:
        """Return the output of the response."""
        ...

    @property
    def parsed_response(self) -> T | None:
        """Return the parsed response, or None when no structured parse exists."""
        ...

    @property
    def history(self) -> list[Any]:
        # list[Any] keeps this consistent with BaseModelResponse.history below.
        """Return the history of the response."""
        ...
class BaseModelOutput(BaseModel):
    """Base class for LLM output."""

    content: str = Field(..., description="The textual content of the output.")
    """The textual content of the output."""

    full_response: dict[str, Any] | None = Field(
        default=None,
        description="The complete JSON response returned by the LLM provider.",
    )
    """The complete JSON response returned by the LLM provider."""
class BaseModelResponse(BaseModel, Generic[T]):
    """Base class for a Model response.

    Generic over ``T``, the pydantic model type used for ``parsed_response``.
    """

    output: BaseModelOutput
    """Output of the response."""
    parsed_response: T | None = None
    """Parsed response."""
    history: list[Any] = Field(default_factory=list)
    """History of the response."""
    # list[Any] (not bare list) for consistency with `history` above.
    tool_calls: list[Any] = Field(default_factory=list)
    """Tool calls required by the Model. These will be instances of the LLM tools (with filled parameters)."""
    metrics: Any | None = None
    """Request/response metrics."""
    cache_hit: bool | None = None
    """Whether the response was a cache hit."""