graphrag/tests/verbs/test_create_final_documents.py
Dayenne Souza b94290ec2b
add option to add metadata into text chunks (#1681)
* add new options

* add metadata json into input document

* remove doc change

* add metadata column into text loader

* prepend_metadata

* run fix

* fix tests and patch

* fix test

* add warning for metadata tokens > config size

* fix typo and run fix

* fix test_integration

* fix test

* run check

* rename and fix chunking

* fix

* fix

* fix test verbs

* fix

* fix tests

* fix chunking

* fix index

* fix cosmos test

* fix vars

* fix after PR

* fix
2025-02-12 09:38:03 -08:00

66 lines
1.8 KiB
Python

# Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
from graphrag.callbacks.noop_workflow_callbacks import NoopWorkflowCallbacks
from graphrag.config.create_graphrag_config import create_graphrag_config
from graphrag.index.workflows.create_final_documents import (
run_workflow,
)
from graphrag.utils.storage import load_table_from_storage
from .util import (
DEFAULT_MODEL_CONFIG,
compare_outputs,
create_test_context,
load_test_table,
)
async def test_create_final_documents():
    """Verify the default documents workflow output matches the golden table."""
    # Arrange: build a context seeded with text_units and a default config.
    config = create_graphrag_config({"models": DEFAULT_MODEL_CONFIG})
    context = await create_test_context(
        storage=["text_units"],
    )

    # Act: run the workflow end-to-end with a no-op callback sink.
    await run_workflow(
        config,
        context,
        NoopWorkflowCallbacks(),
    )

    # Assert: the persisted documents table equals the expected fixture.
    produced = await load_table_from_storage("documents", context.storage)
    compare_outputs(produced, load_test_table("documents"))
async def test_create_final_documents_with_metadata_column():
    """Verify the documents workflow when `input.metadata` is configured.

    With a metadata column configured, the output keeps the original
    columns (including "title") and gains a "metadata" column.
    """
    # Arrange: same fixtures as the default test, plus a metadata column.
    config = create_graphrag_config({"models": DEFAULT_MODEL_CONFIG})
    config.input.metadata = ["title"]
    context = await create_test_context(
        storage=["text_units"],
    )

    # Act
    await run_workflow(
        config,
        context,
        NoopWorkflowCallbacks(),
    )
    produced = await load_table_from_storage("documents", context.storage)

    # Assert: the expected fixture has no "metadata" column, so compare only
    # the shared columns, then confirm the extra columns separately.
    compare_outputs(
        produced,
        load_test_table("documents"),
        columns=["id", "human_readable_id", "text", "text_unit_ids"],
    )
    assert len(produced.columns) == 6
    assert "title" in produced.columns
    assert "metadata" in produced.columns