Mirror of https://github.com/microsoft/graphrag.git
Synced 2026-01-14 09:07:20 +08:00
* Refactor config
  - Add new ModelConfig to represent LLM settings
    - Combines LLMParameters, ParallelizationParameters, encoding_model, and async_mode
  - Add top-level models config that is a list of available LLM ModelConfigs
  - Remove LLMConfig inheritance and delete LLMConfig
    - Replace the inheritance with a model_id reference to the ModelConfig listed in the top-level models config
  - Remove all fallbacks and hydration logic from create_graphrag_config
    - This removes the automatic env variable overrides
  - Support env variables within config files using templating (see the sketch after this commit message)
    - This requires "$" to be escaped with an extra "$", so ".*\\.txt$" becomes ".*\\.txt$$"
  - Update init content to initialize the new config file with the ModelConfig structure
* Use dict of ModelConfig instead of list
* Add model validations and unit tests
* Fix ruff checks
* Add semversioner change
* Fix unit tests
* validate root_dir in pydantic model
* Rename ModelConfig to LanguageModelConfig
* Rename ModelConfigMissingError to LanguageModelConfigMissingError
* Add validation for unexpected API keys
* Allow skipping pydantic validation for testing/mocking purposes.
* Add default lm configs to verb tests
* smoke test
* remove config from flows to fix llm arg mapping
* Fix embedding llm arg mapping
* Remove timestamp from smoke test outputs
* Remove unused "subworkflows" smoke test properties
* Add models to smoke test configs
* Update smoke test output path
* Send logs to logs folder
* Fix output path
* Fix csv test file pattern
* Update placeholder
* Format
* Instantiate default model configs
* Fix unit tests for config defaults
* Fix migration notebook
* Remove create_pipeline_config
* Remove several unused config models
* Remove indexing embedding and input configs
* Move embeddings function to config
* Remove skip_workflows
* Remove skip embeddings in favor of explicit naming
* fix unit test spelling mistake
* self.models[model_id] is already a language model. Remove redundant casting.
* update validation errors to instruct users to rerun graphrag init
* instantiate LanguageModelConfigs with validation
* skip validation in unit tests
* update verb tests to use default model settings instead of skipping validation
* test using llm settings
* cleanup verb tests
* remove unsafe default model config
* remove the ability to skip pydantic validation
* remove None union types when default values are set
* move vector_store from embeddings to top level of config and delete resolve_paths
* update vector store settings
* fix vector store and smoke tests
* fix serializing vector_store settings
* fix vector_store usage
* fix vector_store type
* support cli overrides for loading graphrag config
* rename storage to output
* Add --force flag to init
* Remove run_id and resume, fix Drift config assignment
* Ruff
---------
Co-authored-by: Nathan Evans <github@talkswithnumbers.com>
Co-authored-by: Alonso Guevara <alonsog@microsoft.com>
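
The "$$" escaping described above matches Python's string.Template semantics,
where "$" marks a substitution and "$$" is a literal dollar sign. A minimal
sketch of that behavior (assuming the config loader substitutes environment
variables this way; the loader itself is not shown here, and the variable name
is hypothetical):

    import os
    from string import Template

    os.environ["GRAPHRAG_API_KEY"] = "example-key"  # hypothetical variable name

    raw_config = "api_key: ${GRAPHRAG_API_KEY}\nfile_pattern: .*\\.txt$$"
    rendered = Template(raw_config).substitute(os.environ)
    print(rendered)
    # api_key: example-key
    # file_pattern: .*\.txt$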
81 lines
2.6 KiB
Python
# Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License

import pandas as pd
from pandas.testing import assert_series_equal

import graphrag.config.defaults as defs
from graphrag.index.context import PipelineRunContext
from graphrag.index.run.utils import create_run_context
from graphrag.utils.storage import write_table_to_storage

pd.set_option("display.max_columns", None)

FAKE_API_KEY = "NOT_AN_API_KEY"

DEFAULT_CHAT_MODEL_CONFIG = {
    "api_key": FAKE_API_KEY,
    "type": defs.LLM_TYPE.value,
    "model": defs.LLM_MODEL,
}

DEFAULT_EMBEDDING_MODEL_CONFIG = {
    "api_key": FAKE_API_KEY,
    "type": defs.EMBEDDING_TYPE.value,
    "model": defs.EMBEDDING_MODEL,
}

DEFAULT_MODEL_CONFIG = {
    defs.DEFAULT_CHAT_MODEL_ID: DEFAULT_CHAT_MODEL_CONFIG,
    defs.DEFAULT_EMBEDDING_MODEL_ID: DEFAULT_EMBEDDING_MODEL_CONFIG,
}
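
# How these fixtures are typically consumed (a hedged sketch; the exact
# create_graphrag_config call shape is an assumption based on the refactor
# described in the commit message above, not a guaranteed API):
#
#     from graphrag.config.create_graphrag_config import create_graphrag_config
#
#     config = create_graphrag_config({"models": DEFAULT_MODEL_CONFIG})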


async def create_test_context(storage: list[str] | None = None) -> PipelineRunContext:
    """Create a test context with the given tables loaded into storage."""
    context = create_run_context(None, None, None)

    # always set the input docs
    input = load_test_table("source_documents")
    await write_table_to_storage(input, "input", context.storage)

    if storage:
        for name in storage:
            table = load_test_table(name)
            # normal storage interface insists on bytes
            await write_table_to_storage(table, name, context.storage)

    return context
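
# Example usage (a minimal sketch; "entities" is a hypothetical table name
# standing in for any parquet fixture under tests/verbs/data):
#
#     context = await create_test_context(storage=["entities"])
#     # context.storage now holds "input" plus each requested table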


def load_test_table(output: str) -> pd.DataFrame:
    """Pass in the workflow output (generally the workflow name)."""
    return pd.read_parquet(f"tests/verbs/data/{output}.parquet")


def compare_outputs(
    actual: pd.DataFrame, expected: pd.DataFrame, columns: list[str] | None = None
) -> None:
    """Compare the actual and expected dataframes, optionally specifying columns to compare.

    This uses assert_series_equal since we are sometimes intentionally omitting columns from the actual output.
    """
    cols = expected.columns if columns is None else columns

    assert len(actual) == len(expected), (
        f"Expected: {len(expected)} rows, Actual: {len(actual)} rows"
    )

    for column in cols:
        assert column in actual.columns
        try:
            # dtypes can differ since the test data is read from parquet and our workflow runs in memory
            assert_series_equal(
                actual[column], expected[column], check_dtype=False, check_index=False
            )
        except AssertionError:
            print("Expected:")
            print(expected[column])
            print("Actual:")
            print(actual[column])
            raise
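
# Example usage (a minimal sketch; the workflow invocation and column names are
# hypothetical stand-ins for a real verb test):
#
#     expected = load_test_table("entities")
#     actual = ...  # dataframe produced by the workflow under test
#     compare_outputs(actual, expected, columns=["id", "title"])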