Mirror of https://github.com/microsoft/graphrag.git, synced 2026-01-14 00:57:23 +08:00
Input docs API parameter (#2034)
Some checks failed
gh-pages / build (push) Has been cancelled
Python CI / python-ci (ubuntu-latest, 3.10) (push) Has been cancelled
Python CI / python-ci (ubuntu-latest, 3.11) (push) Has been cancelled
Python CI / python-ci (windows-latest, 3.10) (push) Has been cancelled
Python CI / python-ci (windows-latest, 3.11) (push) Has been cancelled
Python Integration Tests / python-ci (ubuntu-latest, 3.10) (push) Has been cancelled
Python Integration Tests / python-ci (windows-latest, 3.10) (push) Has been cancelled
Python Notebook Tests / python-ci (ubuntu-latest, 3.10) (push) Has been cancelled
Python Notebook Tests / python-ci (windows-latest, 3.10) (push) Has been cancelled
Python Publish (pypi) / Upload release to PyPI (push) Has been cancelled
Python Smoke Tests / python-ci (ubuntu-latest, 3.10) (push) Has been cancelled
Python Smoke Tests / python-ci (windows-latest, 3.10) (push) Has been cancelled
Spellcheck / spellcheck (push) Has been cancelled
* Add optional input_documents to index API
* Semver
* Add input dataframe example notebook
* Format
* Fix docs and notebook
This commit is contained in:
parent 2030f94eb4
commit 1cb20b66f5
@@ -0,0 +1,4 @@
{
    "type": "minor",
    "description": "Add optional input documents to indexing API."
}
194 docs/examples_notebooks/input_documents.ipynb Normal file
@@ -0,0 +1,194 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Copyright (c) 2024 Microsoft Corporation.\n",
    "# Licensed under the MIT License."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Example of indexing from an existing in-memory dataframe\n",
    "\n",
"Newer versions of GraphRAG let you submit a dataframe directly instead of running through the input processing step. This notebook demonstrates with regular or update runs.\n",
|
||||
"\n",
|
||||
"If performing an update, the assumption is that your dataframe contains only the new documents to add to the index."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from pathlib import Path\n",
|
||||
"from pprint import pprint\n",
|
||||
"\n",
|
||||
"import pandas as pd\n",
|
||||
"\n",
|
||||
"import graphrag.api as api\n",
|
||||
"from graphrag.config.load_config import load_config\n",
|
||||
"from graphrag.index.typing.pipeline_run_result import PipelineRunResult"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"PROJECT_DIRECTORY = \"<your project directory>\"\n",
|
||||
"UPDATE = False\n",
|
||||
"FILENAME = \"new_documents.parquet\" if UPDATE else \"<original_documents>.parquet\"\n",
|
||||
"inputs = pd.read_parquet(f\"{PROJECT_DIRECTORY}/input/{FILENAME}\")\n",
|
||||
"# Only the bare minimum for input. These are the same fields that would be present after the load_input_documents workflow\n",
|
||||
"inputs = inputs.loc[:, [\"id\", \"title\", \"text\", \"creation_date\"]]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Generate a `GraphRagConfig` object"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"graphrag_config = load_config(Path(PROJECT_DIRECTORY))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Indexing API\n",
|
||||
"\n",
|
||||
"*Indexing* is the process of ingesting raw text data and constructing a knowledge graph. GraphRAG currently supports plaintext (`.txt`) and `.csv` file formats."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Build an index"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"index_result: list[PipelineRunResult] = await api.build_index(\n",
|
||||
" config=graphrag_config, input_documents=inputs, is_update_run=UPDATE\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# index_result is a list of workflows that make up the indexing pipeline that was run\n",
|
||||
"for workflow_result in index_result:\n",
|
||||
" status = f\"error\\n{workflow_result.errors}\" if workflow_result.errors else \"success\"\n",
|
||||
" print(f\"Workflow Name: {workflow_result.workflow}\\tStatus: {status}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": []
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Query an index\n",
|
||||
"\n",
|
||||
"To query an index, several index files must first be read into memory and passed to the query API. "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"entities = pd.read_parquet(f\"{PROJECT_DIRECTORY}/output/entities.parquet\")\n",
|
||||
"communities = pd.read_parquet(f\"{PROJECT_DIRECTORY}/output/communities.parquet\")\n",
|
||||
"community_reports = pd.read_parquet(\n",
|
||||
" f\"{PROJECT_DIRECTORY}/output/community_reports.parquet\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"response, context = await api.global_search(\n",
|
||||
" config=graphrag_config,\n",
|
||||
" entities=entities,\n",
|
||||
" communities=communities,\n",
|
||||
" community_reports=community_reports,\n",
|
||||
" community_level=2,\n",
|
||||
" dynamic_community_selection=False,\n",
|
||||
" response_type=\"Multiple Paragraphs\",\n",
|
||||
" query=\"What are the top five themes of the dataset?\",\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The response object is the official reponse from graphrag while the context object holds various metadata regarding the querying process used to obtain the final response."
|
||||
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "print(response)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
"Digging into the context a bit more provides users with extremely granular information such as what sources of data (down to the level of text chunks) were ultimately retrieved and used as part of the context sent to the LLM model)."
|
||||
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "pprint(context)  # noqa: T203"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "graphrag",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
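The notebook above reads a prebuilt parquet file, but any dataframe carrying the same four columns should work. As a hedged illustration, here is one way such a frame could be assembled from in-memory strings; the column names come from the notebook, while the UUID ids and ISO timestamp are illustrative assumptions rather than requirements of the API.

from datetime import datetime, timezone
from uuid import uuid4

import pandas as pd

# Raw documents from any source: files on disk, a database, an API, etc.
raw_docs = {
    "report-a.txt": "First document text ...",
    "report-b.txt": "Second document text ...",
}

# Assemble the minimal columns used by the notebook above:
# id, title, text, creation_date.
now = datetime.now(timezone.utc).isoformat()
inputs = pd.DataFrame(
    [
        {"id": str(uuid4()), "title": title, "text": text, "creation_date": now}
        for title, text in raw_docs.items()
    ]
)
print(inputs.head())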
@@ -11,6 +11,8 @@ Backwards compatibility is not guaranteed at this time.
import logging
from typing import Any

import pandas as pd

from graphrag.callbacks.noop_workflow_callbacks import NoopWorkflowCallbacks
from graphrag.callbacks.workflow_callbacks import WorkflowCallbacks
from graphrag.config.enums import IndexingMethod
@@ -18,7 +20,6 @@ from graphrag.config.models.graph_rag_config import GraphRagConfig
from graphrag.index.run.run_pipeline import run_pipeline
from graphrag.index.run.utils import create_callback_chain
from graphrag.index.typing.pipeline_run_result import PipelineRunResult
from graphrag.index.typing.workflow import WorkflowFunction
from graphrag.index.workflows.factory import PipelineFactory
from graphrag.logger.standard_logging import init_loggers

@@ -33,6 +34,7 @@ async def build_index(
    callbacks: list[WorkflowCallbacks] | None = None,
    additional_context: dict[str, Any] | None = None,
    verbose: bool = False,
    input_documents: pd.DataFrame | None = None,
) -> list[PipelineRunResult]:
    """Run the pipeline with the given configuration.

@@ -48,6 +50,8 @@ async def build_index(
        A list of callbacks to register.
    additional_context : dict[str, Any] | None default=None
        Additional context to pass to the pipeline run. This can be accessed in the pipeline state under the 'additional_context' key.
    input_documents : pd.DataFrame | None default=None.
        Override document loading and parsing and supply your own dataframe of documents to index.

    Returns
    -------
@@ -79,6 +83,7 @@ async def build_index(
        callbacks=workflow_callbacks,
        is_update_run=is_update_run,
        additional_context=additional_context,
        input_documents=input_documents,
    ):
        outputs.append(output)
        if output.errors and len(output.errors) > 0:
@@ -91,11 +96,6 @@ async def build_index(
    return outputs


def register_workflow_function(name: str, workflow: WorkflowFunction):
    """Register a custom workflow function. You can then include the name in the settings.yaml workflows list."""
    PipelineFactory.register(name, workflow)


def _get_method(method: IndexingMethod | str, is_update_run: bool) -> str:
    m = method.value if isinstance(method, IndexingMethod) else method
    return f"{m}-update" if is_update_run else m
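Since the docstring entry above is terse, here is a hedged sketch of how the new parameter is meant to be used for an incremental update: only the new documents are supplied, and is_update_run routes them through the update pipeline. The project path and parquet name are placeholders, not part of this diff.

import asyncio
from pathlib import Path

import pandas as pd

import graphrag.api as api
from graphrag.config.load_config import load_config


async def update_index(project_dir: str) -> None:
    config = load_config(Path(project_dir))
    # Pass only the documents to add; the existing ones are already indexed.
    new_docs = pd.read_parquet(f"{project_dir}/input/new_documents.parquet")
    results = await api.build_index(
        config=config,
        input_documents=new_docs,
        is_update_run=True,
    )
    for result in results:
        status = f"error: {result.errors}" if result.errors else "success"
        print(f"{result.workflow}: {status}")


asyncio.run(update_index("<your project directory>"))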
@@ -11,6 +11,8 @@ from collections.abc import AsyncIterable
from dataclasses import asdict
from typing import Any

import pandas as pd

from graphrag.callbacks.workflow_callbacks import WorkflowCallbacks
from graphrag.config.models.graph_rag_config import GraphRagConfig
from graphrag.index.run.utils import create_run_context
@@ -30,6 +32,7 @@ async def run_pipeline(
    callbacks: WorkflowCallbacks,
    is_update_run: bool = False,
    additional_context: dict[str, Any] | None = None,
    input_documents: pd.DataFrame | None = None,
) -> AsyncIterable[PipelineRunResult]:
    """Run all workflows using a simplified pipeline."""
    root_dir = config.root_dir
@@ -60,6 +63,11 @@ async def run_pipeline(

        state["update_timestamp"] = update_timestamp

        # if the user passes in a df directly, write directly to storage so we can skip finding/parsing later
        if input_documents is not None:
            await write_table_to_storage(input_documents, "documents", delta_storage)
            pipeline.remove("load_update_documents")

        context = create_run_context(
            input_storage=input_storage,
            output_storage=delta_storage,
@@ -72,6 +80,11 @@ async def run_pipeline(
    else:
        logger.info("Running standard indexing.")

        # if the user passes in a df directly, write directly to storage so we can skip finding/parsing later
        if input_documents is not None:
            await write_table_to_storage(input_documents, "documents", output_storage)
            pipeline.remove("load_input_documents")

        context = create_run_context(
            input_storage=input_storage,
            output_storage=output_storage,
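The two branches above differ only in where the supplied dataframe is written and which loading workflow is dropped. A hypothetical helper, not part of GraphRAG, that captures just that routing decision for readability:

def input_override_routing(is_update_run: bool) -> tuple[str, str]:
    """Return (storage target, workflow to remove) when input_documents is supplied.

    Illustrative only: run_pipeline writes the dataframe as the "documents"
    table to the chosen storage and removes the matching load workflow.
    """
    if is_update_run:
        return ("delta_storage", "load_update_documents")
    return ("output_storage", "load_input_documents")


# A standard run writes to output storage and skips load_input_documents.
assert input_override_routing(False) == ("output_storage", "load_input_documents")
assert input_override_routing(True) == ("delta_storage", "load_update_documents")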
@@ -21,3 +21,7 @@ class Pipeline:
    def names(self) -> list[str]:
        """Return the names of the workflows in the pipeline."""
        return [name for name, _ in self.workflows]

    def remove(self, name: str) -> None:
        """Remove a workflow from the pipeline by name."""
        self.workflows = [w for w in self.workflows if w[0] != name]
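For reference, the new remove method behaves as in this toy stand-in, which assumes the same workflows shape implied by names() and remove() above (a list of (name, workflow function) tuples); it is not the actual Pipeline class:

from collections.abc import Callable
from dataclasses import dataclass, field


@dataclass
class ToyPipeline:
    # Same shape implied by the diff: a list of (name, workflow function) tuples.
    workflows: list[tuple[str, Callable]] = field(default_factory=list)

    def names(self) -> list[str]:
        """Return the names of the workflows in the pipeline."""
        return [name for name, _ in self.workflows]

    def remove(self, name: str) -> None:
        """Remove a workflow from the pipeline by name."""
        self.workflows = [w for w in self.workflows if w[0] != name]


pipeline = ToyPipeline([("load_input_documents", print), ("extract_graph", print)])
pipeline.remove("load_input_documents")
print(pipeline.names())  # ['extract_graph']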