Mirror of https://github.com/microsoft/graphrag.git, synced 2026-01-14 09:07:20 +08:00
* Refactor config
- Add new ModelConfig to represent LLM settings
- Combines LLMParameters, ParallelizationParameters, encoding_model, and async_mode
- Add top level models config that is a list of available LLM ModelConfigs
- Remove LLMConfig inheritance and delete LLMConfig
- Replace the inheritance with a model_id reference to the ModelConfig listed in the top level models config (see the config sketch after the commit message)
- Remove all fallbacks and hydration logic from create_graphrag_config
- This removes the automatic env variable overrides
- Support environment variables within config files using templating
- This requires literal "$" characters to be escaped by doubling them, so ".*\\.txt$" becomes ".*\\.txt$$" (see the templating sketch after the test file)
- Update init content to initialize new config file with the ModelConfig structure
* Use dict of ModelConfig instead of list
* Add model validations and unit tests
* Fix ruff checks
* Add semversioner change
* Fix unit tests
* validate root_dir in pydantic model
* Rename ModelConfig to LanguageModelConfig
* Rename ModelConfigMissingError to LanguageModelConfigMissingError
* Add validation for unexpected API keys
* Allow skipping pydantic validation for testing/mocking purposes.
* Add default lm configs to verb tests
* smoke test
* remove config from flows to fix llm arg mapping
* Fix embedding llm arg mapping
* Remove timestamp from smoke test outputs
* Remove unused "subworkflows" smoke test properties
* Add models to smoke test configs
* Update smoke test output path
* Send logs to logs folder
* Fix output path
* Fix csv test file pattern
* Update placeholder
* Format
* Instantiate default model configs
* Fix unit tests for config defaults
* Fix migration notebook
* Remove create_pipeline_config
* Remove several unused config models
* Remove indexing embedding and input configs
* Move embeddings function to config
* Remove skip_workflows
* Remove skip embeddings in favor of explicit naming
* fix unit test spelling mistake
* self.models[model_id] is already a language model. Remove redundant casting.
* update validation errors to instruct users to rerun graphrag init
* instantiate LanguageModelConfigs with validation
* skip validation in unit tests
* update verb tests to use default model settings instead of skipping validation
* test using llm settings
* cleanup verb tests
* remove unsafe default model config
* remove the ability to skip pydantic validation
* remove None union types when default values are set
* move vector_store from embeddings to top level of config and delete resolve_paths
* update vector store settings
* fix vector store and smoke tests
* fix serializing vector_store settings
* fix vector_store usage
* fix vector_store type
* support cli overrides for loading graphrag config
* rename storage to output
* Add --force flag to init
* Remove run_id and resume, fix Drift config assignment
* Ruff
---------
Co-authored-by: Nathan Evans <github@talkswithnumbers.com>
Co-authored-by: Alonso Guevara <alonsog@microsoft.com>
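The test file reproduced below exercises the refactored shape described above: a top-level "models" dict keyed by model id, with each entry validated as a LanguageModelConfig, and other config sections pointing back at an entry via model_id instead of inheriting LLM settings. The following is a minimal sketch of building such a config programmatically; it uses only identifiers that appear in the test file, while the placeholder API key, the embedding model name, and the commented-out section name are illustrative assumptions, not taken from the repo.

import graphrag.config.defaults as defs
from graphrag.config.create_graphrag_config import create_graphrag_config
from graphrag.config.enums import LLMType

# Minimal sketch of the refactored config shape: a top-level "models" dict
# keyed by model id, each entry validated as a LanguageModelConfig.
settings = {
    "models": {
        defs.DEFAULT_CHAT_MODEL_ID: {
            "type": LLMType.OpenAIChat,
            "model": defs.LLM_MODEL,
            "api_key": "<YOUR_OPENAI_API_KEY>",  # placeholder
        },
        defs.DEFAULT_EMBEDDING_MODEL_ID: {
            "type": LLMType.OpenAIEmbedding,
            "model": "text-embedding-3-small",  # illustrative model name
            "api_key": "<YOUR_OPENAI_API_KEY>",  # placeholder
        },
    },
}

# Other sections no longer inherit LLM settings; they reference an entry above
# by id, e.g. (hypothetical section name, for illustration only):
#   "some_workflow_section": {"model_id": defs.DEFAULT_CHAT_MODEL_ID}

config = create_graphrag_config(settings)

Per the notes above, the vector_store block also now sits at the top level of the config rather than under embeddings; it is omitted here because its fields are not shown in this file.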
169 lines
5.7 KiB
Python
# Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License

import os
from pathlib import Path
from unittest import mock

import pytest
from pydantic import ValidationError

import graphrag.config.defaults as defs
from graphrag.config.create_graphrag_config import create_graphrag_config
from graphrag.config.enums import AzureAuthType, LLMType
from graphrag.config.load_config import load_config
from tests.unit.config.utils import (
    DEFAULT_EMBEDDING_MODEL_CONFIG,
    DEFAULT_MODEL_CONFIG,
    FAKE_API_KEY,
    assert_graphrag_configs,
    get_default_graphrag_config,
)


def test_missing_openai_required_api_key() -> None:
    model_config_missing_api_key = {
        defs.DEFAULT_CHAT_MODEL_ID: {
            "type": LLMType.OpenAIChat,
            "model": defs.LLM_MODEL,
        },
        defs.DEFAULT_EMBEDDING_MODEL_ID: DEFAULT_EMBEDDING_MODEL_CONFIG,
    }

    # API Key required for OpenAIChat
    with pytest.raises(ValidationError):
        create_graphrag_config({"models": model_config_missing_api_key})

    # API Key required for OpenAIEmbedding
    model_config_missing_api_key[defs.DEFAULT_CHAT_MODEL_ID]["type"] = (
        LLMType.OpenAIEmbedding
    )
    with pytest.raises(ValidationError):
        create_graphrag_config({"models": model_config_missing_api_key})


def test_missing_azure_api_key() -> None:
    model_config_missing_api_key = {
        defs.DEFAULT_CHAT_MODEL_ID: {
            "type": LLMType.AzureOpenAIChat,
            "azure_auth_type": AzureAuthType.APIKey,
            "model": defs.LLM_MODEL,
            "api_base": "some_api_base",
            "api_version": "some_api_version",
            "deployment_name": "some_deployment_name",
        },
        defs.DEFAULT_EMBEDDING_MODEL_ID: DEFAULT_EMBEDDING_MODEL_CONFIG,
    }

    with pytest.raises(ValidationError):
        create_graphrag_config({"models": model_config_missing_api_key})

    # API Key not required for managed identity
    model_config_missing_api_key[defs.DEFAULT_CHAT_MODEL_ID]["azure_auth_type"] = (
        AzureAuthType.ManagedIdentity
    )
    create_graphrag_config({"models": model_config_missing_api_key})


def test_conflicting_azure_api_key() -> None:
    model_config_conflicting_api_key = {
        defs.DEFAULT_CHAT_MODEL_ID: {
            "type": LLMType.AzureOpenAIChat,
            "azure_auth_type": AzureAuthType.ManagedIdentity,
            "model": defs.LLM_MODEL,
            "api_base": "some_api_base",
            "api_version": "some_api_version",
            "deployment_name": "some_deployment_name",
            "api_key": "THIS_SHOULD_NOT_BE_SET_WHEN_USING_MANAGED_IDENTITY",
        },
        defs.DEFAULT_EMBEDDING_MODEL_ID: DEFAULT_EMBEDDING_MODEL_CONFIG,
    }

    with pytest.raises(ValidationError):
        create_graphrag_config({"models": model_config_conflicting_api_key})


base_azure_model_config = {
    "type": LLMType.AzureOpenAIChat,
    "azure_auth_type": AzureAuthType.ManagedIdentity,
    "model": defs.LLM_MODEL,
    "api_base": "some_api_base",
    "api_version": "some_api_version",
    "deployment_name": "some_deployment_name",
}


def test_missing_azure_api_base() -> None:
    missing_api_base_config = base_azure_model_config.copy()
    del missing_api_base_config["api_base"]

    with pytest.raises(ValidationError):
        create_graphrag_config({
            "models": {
                defs.DEFAULT_CHAT_MODEL_ID: missing_api_base_config,
                defs.DEFAULT_EMBEDDING_MODEL_ID: DEFAULT_EMBEDDING_MODEL_CONFIG,
            }
        })


def test_missing_azure_api_version() -> None:
    missing_api_version_config = base_azure_model_config.copy()
    del missing_api_version_config["api_version"]

    with pytest.raises(ValidationError):
        create_graphrag_config({
            "models": {
                defs.DEFAULT_CHAT_MODEL_ID: missing_api_version_config,
                defs.DEFAULT_EMBEDDING_MODEL_ID: DEFAULT_EMBEDDING_MODEL_CONFIG,
            }
        })


def test_missing_azure_deployment_name() -> None:
    missing_deployment_name_config = base_azure_model_config.copy()
    del missing_deployment_name_config["deployment_name"]

    with pytest.raises(ValidationError):
        create_graphrag_config({
            "models": {
                defs.DEFAULT_CHAT_MODEL_ID: missing_deployment_name_config,
                defs.DEFAULT_EMBEDDING_MODEL_ID: DEFAULT_EMBEDDING_MODEL_CONFIG,
            }
        })


def test_default_config() -> None:
    expected = get_default_graphrag_config()
    actual = create_graphrag_config({"models": DEFAULT_MODEL_CONFIG})
    assert_graphrag_configs(actual, expected)


@mock.patch.dict(os.environ, {"CUSTOM_API_KEY": FAKE_API_KEY}, clear=True)
def test_load_minimal_config() -> None:
    cwd = Path(__file__).parent
    root_dir = (cwd / "fixtures" / "minimal_config").resolve()
    expected = get_default_graphrag_config(str(root_dir))
    actual = load_config(root_dir=root_dir)
    assert_graphrag_configs(actual, expected)


@mock.patch.dict(os.environ, {"CUSTOM_API_KEY": FAKE_API_KEY}, clear=True)
def test_load_config_with_cli_overrides() -> None:
    cwd = Path(__file__).parent
    root_dir = (cwd / "fixtures" / "minimal_config").resolve()
    output_dir = "some_output_dir"
    expected_output_base_dir = root_dir / output_dir
    expected = get_default_graphrag_config(str(root_dir))
    expected.output.base_dir = str(expected_output_base_dir)
    actual = load_config(
        root_dir=root_dir, cli_overrides={"output.base_dir": output_dir}
    )
    assert_graphrag_configs(actual, expected)


def test_load_config_missing_env_vars() -> None:
    cwd = Path(__file__).parent
    root_dir = (cwd / "fixtures" / "minimal_config_missing_env_var").resolve()
    with pytest.raises(KeyError):
        load_config(root_dir=root_dir)
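For context on the final test's expected KeyError: environment variables are substituted into config files via templating, with literal "$" escaped as "$$" (see the commit notes above). Assuming semantics similar to Python's string.Template (an assumption consistent with the "$$" escaping, not confirmed by this file), the behavior looks roughly like this:

from string import Template

# Raw config text that references an environment variable; a literal "$"
# (e.g. in a regex file pattern) is written as "$$".
raw = 'api_key: ${CUSTOM_API_KEY}\nfile_pattern: ".*\\.csv$$"'

# With the variable present, substitution resolves the placeholder and
# collapses "$$" back to a single "$".
print(Template(raw).substitute({"CUSTOM_API_KEY": "fake-api-key"}))
# api_key: fake-api-key
# file_pattern: ".*\.csv$"

# With the variable absent, string.Template raises KeyError -- the same error
# type that test_load_config_missing_env_vars expects load_config() to surface.
try:
    Template(raw).substitute({})
except KeyError as missing:
    print(f"missing environment variable: {missing}")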