In [1]:
# Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License.
Index Migration
This notebook maintains data model parity between older indexes and the latest version of GraphRAG. If you have a pre-1.0 index and need to migrate it without re-running the entire pipeline, use this notebook to update only the pieces necessary for alignment.
NOTE: we recommend regenerating your settings.yml with the latest version of GraphRAG using graphrag init (for example, graphrag init --root <your project directory>) and copying your LLM settings into it before running this notebook. This keeps your config aligned with the latest version for the migration, and also ensures you have a default vector store config, which is now required; without it, indexing will fail.
WARNING: this will overwrite your parquet files, so you may want to make a backup first!
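If you want a quick safety net before running anything below, here is a minimal backup sketch. It assumes the default local output directory directly under your project root; adjust the paths if your storage.base_dir points elsewhere.

import shutil
from pathlib import Path

# Hypothetical backup step: copy the existing index output aside so the
# migration can be rolled back. "output" is an assumption based on the
# default storage.base_dir; change it if your settings.yml differs.
project_dir = Path("<your project directory>")
shutil.copytree(project_dir / "output", project_dir / "output_backup", dirs_exist_ok=True)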
In [2]:
# This is the directory that has your settings.yml
# NOTE: much older indexes may have been output with a timestamped directory
# if this is the case, you will need to make sure the storage.base_dir in settings.yml points to it correctly
PROJECT_DIRECTORY = "<your project directory>"
In [3]:
from pathlib import Path
from graphrag.config.load_config import load_config
from graphrag.config.resolve_path import resolve_paths
from graphrag.index.create_pipeline_config import create_pipeline_config
from graphrag.storage.factory import create_storage
# This first block does some config loading, path resolution, and translation that is normally done by the CLI/API when running a full workflow
config = load_config(Path(PROJECT_DIRECTORY))
resolve_paths(config)
pipeline_config = create_pipeline_config(config)
storage = create_storage(pipeline_config.storage)
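Before changing anything, it can help to confirm the resolved storage actually points at your old index. A minimal sanity check, assuming file-based storage where config.storage.base_dir mirrors the storage.base_dir entry in settings.yml after path resolution:

# Sanity check (assumption: file-based storage with parquet output on local disk).
from pathlib import Path

base_dir = Path(config.storage.base_dir)
parquet_files = sorted(p.name for p in base_dir.glob("*.parquet"))
print(f"Found {len(parquet_files)} parquet files in {base_dir}")
for name in parquet_files:
    print(" -", name)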
In [4]:
def remove_columns(df, columns):
    """Remove columns from a DataFrame, suppressing errors."""
    df.drop(labels=columns, axis=1, errors="ignore", inplace=True)
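A tiny illustration of the errors="ignore" behavior: dropping a mix of present and absent columns removes only the ones that actually exist (toy column names, for demonstration only).

import pandas as pd

toy = pd.DataFrame({"keep": [1, 2], "drop_me": [3, 4]})
remove_columns(toy, ["drop_me", "not_even_here"])
print(list(toy.columns))  # ['keep']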
In [5]:
def get_community_parent(nodes):
    """Compute the parent community using the node membership as a lookup."""
    parent_mapping = nodes.loc[:, ["level", "community", "title"]]
    nodes = nodes.loc[:, ["level", "community", "title"]]

    # Create a parent mapping by adding 1 to the level column
    parent_mapping["level"] += 1  # Shift levels for parent relationship
    parent_mapping.rename(columns={"community": "parent"}, inplace=True)

    # Merge the parent information back into the base DataFrame
    nodes = nodes.merge(parent_mapping, on=["level", "title"], how="left")

    # Fill missing parents with -1 (default value)
    nodes["parent"] = nodes["parent"].fillna(-1).astype(int)

    join = (
        nodes.groupby(["community", "level", "parent"])
        .agg({"title": list})
        .reset_index()
    )
    return join[join["community"] > -1].loc[:, ["community", "parent"]]
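As a quick illustration of what this helper returns, consider a toy nodes table (hypothetical titles and community ids, purely for demonstration): nodes in community 0 at level 0 that split into communities 1 and 2 at level 1 give those communities a parent of 0, while the top-level community gets -1.

import pandas as pd

toy_nodes = pd.DataFrame([
    {"level": 0, "community": 0, "title": "A"},
    {"level": 0, "community": 0, "title": "B"},
    {"level": 1, "community": 1, "title": "A"},
    {"level": 1, "community": 2, "title": "B"},
])
print(get_community_parent(toy_nodes))
#    community  parent
# 0          0      -1
# 1          1       0
# 2          2       0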
In [6]:
from uuid import uuid4

from graphrag.utils.storage import load_table_from_storage, write_table_to_storage

# First we'll go through any parquet files that had model changes and update them
# The new data model may have removed excess columns as well, but we will only make the minimal changes required for compatibility

final_documents = await load_table_from_storage(
    "create_final_documents.parquet", storage
)
final_text_units = await load_table_from_storage(
    "create_final_text_units.parquet", storage
)
final_entities = await load_table_from_storage("create_final_entities.parquet", storage)
final_nodes = await load_table_from_storage("create_final_nodes.parquet", storage)
final_relationships = await load_table_from_storage(
    "create_final_relationships.parquet", storage
)
final_communities = await load_table_from_storage(
    "create_final_communities.parquet", storage
)
final_community_reports = await load_table_from_storage(
    "create_final_community_reports.parquet", storage
)

# Documents renames raw_content to text for consistency
if "raw_content" in final_documents.columns:
    final_documents.rename(columns={"raw_content": "text"}, inplace=True)
final_documents["human_readable_id"] = final_documents.index + 1

# Text units just get a human_readable_id for consistency
final_text_units["human_readable_id"] = final_text_units.index + 1

# We renamed "name" to "title" for consistency with the rest of the tables
if "name" in final_entities.columns:
    final_entities.rename(columns={"name": "title"}, inplace=True)
remove_columns(
    final_entities, ["name_embedding", "graph_embedding", "description_embedding"]
)

# Final nodes uses community for joins, which is now an int everywhere
final_nodes["community"] = final_nodes["community"].fillna(-1)
final_nodes["community"] = final_nodes["community"].astype(int)
remove_columns(
    final_nodes,
    [
        "type",
        "description",
        "source_id",
        "graph_embedding",
        "entity_type",
        "top_level_node_id",
        "size",
    ],
)

# Relationships renames "rank" to "combined_degree" to be clear what the default ranking is
if "rank" in final_relationships.columns:
    final_relationships.rename(columns={"rank": "combined_degree"}, inplace=True)

# Compute the parents for each community, to add to communities and reports
parent_df = get_community_parent(final_nodes)

# Communities previously used the "id" field for the Leiden id, but we've moved this to the community field and use a uuid for id like the others
if "community" not in final_communities.columns:
    final_communities["community"] = final_communities["id"].astype(int)
    final_communities["human_readable_id"] = final_communities["community"]
    final_communities["id"] = [str(uuid4()) for _ in range(len(final_communities))]
if "parent" not in final_communities.columns:
    final_communities = final_communities.merge(parent_df, on="community", how="left")
remove_columns(final_communities, ["raw_community"])

# We need int for community and the human_readable_id copy for consistency
final_community_reports["community"] = final_community_reports["community"].astype(int)
final_community_reports["human_readable_id"] = final_community_reports["community"]
if "parent" not in final_community_reports.columns:
    final_community_reports = final_community_reports.merge(
        parent_df, on="community", how="left"
    )

await write_table_to_storage(final_documents, "create_final_documents.parquet", storage)
await write_table_to_storage(
    final_text_units, "create_final_text_units.parquet", storage
)
await write_table_to_storage(final_entities, "create_final_entities.parquet", storage)
await write_table_to_storage(final_nodes, "create_final_nodes.parquet", storage)
await write_table_to_storage(
    final_relationships, "create_final_relationships.parquet", storage
)
await write_table_to_storage(
    final_communities, "create_final_communities.parquet", storage
)
await write_table_to_storage(
    final_community_reports, "create_final_community_reports.parquet", storage
)
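After the rewrite, a quick spot-check of the frames still in memory can confirm the renames and new columns landed before you move on. A minimal sketch (nothing here is required for the migration itself):

# Spot-check the migrated frames already loaded above.
for name, df in [
    ("documents", final_documents),
    ("text_units", final_text_units),
    ("entities", final_entities),
    ("nodes", final_nodes),
    ("relationships", final_relationships),
    ("communities", final_communities),
    ("community_reports", final_community_reports),
]:
    print(f"{name}: {len(df)} rows, columns: {sorted(df.columns)}")

# A few of the fields this migration is expected to have added or renamed.
assert "text" in final_documents.columns
assert "title" in final_entities.columns
assert "parent" in final_communities.columns
assert "parent" in final_community_reports.columns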
In [7]:
from graphrag.cache.factory import create_cache
from graphrag.callbacks.noop_workflow_callbacks import NoopWorkflowCallbacks
from graphrag.index.flows.generate_text_embeddings import generate_text_embeddings

# We only need to re-run the embeddings workflow, to ensure that embeddings for all required search fields are in place
# We'll construct the context and run this function flow directly to avoid everything else

workflow = next(
    (x for x in pipeline_config.workflows if x.name == "generate_text_embeddings"), None
)
config = workflow.config
text_embed = config.get("text_embed", {})
embedded_fields = config.get("embedded_fields", {})
callbacks = NoopWorkflowCallbacks()
cache = create_cache(pipeline_config.cache, PROJECT_DIRECTORY)

await generate_text_embeddings(
    final_documents=None,
    final_relationships=None,
    final_text_units=final_text_units,
    final_entities=final_entities,
    final_community_reports=final_community_reports,
    callbacks=callbacks,
    cache=cache,
    storage=storage,
    text_embed_config=text_embed,
    embedded_fields=embedded_fields,
    snapshot_embeddings_enabled=False,
)
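Because snapshot_embeddings_enabled is False, the embeddings land in your configured vector store rather than in new parquet files. Below is a minimal check you can adapt, assuming the default lancedb vector store that a freshly generated settings.yml points at under the output directory; skip it if you use a different vector store.

# Assumption: default lancedb vector store under <project>/output/lancedb.
# Adjust (or skip) this check if your vector_store config points elsewhere.
from pathlib import Path

lancedb_dir = Path(PROJECT_DIRECTORY) / "output" / "lancedb"
if lancedb_dir.exists():
    print("vector store contents:", sorted(p.name for p in lancedb_dir.iterdir()))
else:
    print("no lancedb directory at", lancedb_dir, "- check your vector_store settings")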