import logging
import time
from typing import Literal

import click
from celery import shared_task

from core.rag.index_processor.constant.index_type import IndexType
from core.rag.index_processor.index_processor_factory import IndexProcessorFactory
from core.rag.models.document import ChildDocument, Document
from extensions.ext_database import db
from models.dataset import Dataset, DocumentSegment
from models.dataset import Document as DatasetDocument

logger = logging.getLogger(__name__)

@shared_task(queue="dataset")
def deal_dataset_vector_index_task(dataset_id: str, action: Literal["remove", "add", "update"]):
    """
    Async task that adds, updates, or removes a dataset's documents in the vector index.

    :param dataset_id: dataset_id
    :param action: one of "remove", "add", or "update"

    Usage: deal_dataset_vector_index_task.delay(dataset_id, action)
    (see the usage sketch at the end of this module)
    """
    logger.info(click.style(f"Start deal dataset vector index: {dataset_id}", fg="green"))
    start_at = time.perf_counter()

    try:
        dataset = db.session.query(Dataset).filter_by(id=dataset_id).first()

        if not dataset:
            raise Exception("Dataset not found")

        # Fall back to the paragraph index when the dataset has no explicit doc form.
        index_type = dataset.doc_form or IndexType.PARAGRAPH_INDEX
        index_processor = IndexProcessorFactory(index_type).init_index_processor()
        if action == "remove":
            # Drop the dataset's vectors; the keyword index is left untouched.
            index_processor.clean(dataset, None, with_keywords=False)
        elif action == "add":
            # Re-index only documents that have finished indexing and are still active.
            dataset_documents = (
                db.session.query(DatasetDocument)
                .where(
                    DatasetDocument.dataset_id == dataset_id,
                    DatasetDocument.indexing_status == "completed",
                    DatasetDocument.enabled == True,
                    DatasetDocument.archived == False,
                )
                .all()
            )

            if dataset_documents:
                # Mark the documents as "indexing" while vectors are being rebuilt.
                dataset_documents_ids = [doc.id for doc in dataset_documents]
                db.session.query(DatasetDocument).where(DatasetDocument.id.in_(dataset_documents_ids)).update(
                    {"indexing_status": "indexing"}, synchronize_session=False
                )
                db.session.commit()

                for dataset_document in dataset_documents:
                    try:
                        # Add this document's segments to the vector index.
                        segments = (
                            db.session.query(DocumentSegment)
                            .where(DocumentSegment.document_id == dataset_document.id, DocumentSegment.enabled == True)
                            .order_by(DocumentSegment.position.asc())
                            .all()
                        )
                        if segments:
                            documents = []
                            for segment in segments:
                                document = Document(
                                    page_content=segment.content,
                                    metadata={
                                        "doc_id": segment.index_node_id,
                                        "doc_hash": segment.index_node_hash,
                                        "document_id": segment.document_id,
                                        "dataset_id": segment.dataset_id,
                                    },
                                )

                                documents.append(document)
                            # Save the vector index and mark the document as completed.
                            index_processor.load(dataset, documents, with_keywords=False)
                            db.session.query(DatasetDocument).where(DatasetDocument.id == dataset_document.id).update(
                                {"indexing_status": "completed"}, synchronize_session=False
                            )
                            db.session.commit()
                    except Exception as e:
                        # Record the failure on the document instead of failing the whole task.
                        db.session.query(DatasetDocument).where(DatasetDocument.id == dataset_document.id).update(
                            {"indexing_status": "error", "error": str(e)}, synchronize_session=False
                        )
                        db.session.commit()
        elif action == "update":
            dataset_documents = (
                db.session.query(DatasetDocument)
                .where(
                    DatasetDocument.dataset_id == dataset_id,
                    DatasetDocument.indexing_status == "completed",
                    DatasetDocument.enabled == True,
                    DatasetDocument.archived == False,
                )
                .all()
            )
            # Rebuild the index for the active documents.
            if dataset_documents:
                # Update document status while vectors are being rebuilt.
                dataset_documents_ids = [doc.id for doc in dataset_documents]
                db.session.query(DatasetDocument).where(DatasetDocument.id.in_(dataset_documents_ids)).update(
                    {"indexing_status": "indexing"}, synchronize_session=False
                )
                db.session.commit()

                # Clean the existing index before re-adding vectors.
                index_processor.clean(dataset, None, with_keywords=False, delete_child_chunks=False)

                for dataset_document in dataset_documents:
                    # Rebuild this document's entries in the vector index.
                    try:
                        segments = (
                            db.session.query(DocumentSegment)
                            .where(DocumentSegment.document_id == dataset_document.id, DocumentSegment.enabled == True)
                            .order_by(DocumentSegment.position.asc())
                            .all()
                        )
                        if segments:
                            documents = []
                            for segment in segments:
                                document = Document(
                                    page_content=segment.content,
                                    metadata={
                                        "doc_id": segment.index_node_id,
                                        "doc_hash": segment.index_node_hash,
                                        "document_id": segment.document_id,
                                        "dataset_id": segment.dataset_id,
                                    },
                                )
                                if dataset_document.doc_form == IndexType.PARENT_CHILD_INDEX:
                                    # Parent-child indexes also carry each segment's child chunks.
                                    child_chunks = segment.get_child_chunks()
                                    if child_chunks:
                                        child_documents = []
                                        for child_chunk in child_chunks:
                                            child_document = ChildDocument(
                                                page_content=child_chunk.content,
                                                metadata={
                                                    "doc_id": child_chunk.index_node_id,
                                                    "doc_hash": child_chunk.index_node_hash,
                                                    "document_id": segment.document_id,
                                                    "dataset_id": segment.dataset_id,
                                                },
                                            )
                                            child_documents.append(child_document)
                                        document.children = child_documents
                                documents.append(document)
                            # Save the vector index and mark the document as completed.
                            index_processor.load(dataset, documents, with_keywords=False)
                            db.session.query(DatasetDocument).where(DatasetDocument.id == dataset_document.id).update(
                                {"indexing_status": "completed"}, synchronize_session=False
                            )
                            db.session.commit()
                    except Exception as e:
                        # Record the failure on the document instead of failing the whole task.
                        db.session.query(DatasetDocument).where(DatasetDocument.id == dataset_document.id).update(
                            {"indexing_status": "error", "error": str(e)}, synchronize_session=False
                        )
                        db.session.commit()
            else:
                # No active documents: just clean the collection.
                index_processor.clean(dataset, None, with_keywords=False, delete_child_chunks=False)

        end_at = time.perf_counter()
        logger.info(click.style(f"Deal dataset vector index: {dataset_id} latency: {end_at - start_at}", fg="green"))
    except Exception:
        logger.exception("Deal dataset vector index failed")
    finally:
        # Always return the session to the connection pool.
        db.session.close()
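
# ---------------------------------------------------------------------------
# Usage sketch (not part of the original file): how application code might
# enqueue this task after a dataset's indexing settings change. Assumes a
# configured Celery broker and a worker consuming the "dataset" queue; the
# dataset id below is a hypothetical placeholder.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # .delay(...) serializes the arguments, pushes the job onto the "dataset"
    # queue, and returns an AsyncResult immediately without waiting.
    result = deal_dataset_vector_index_task.delay("00000000-0000-0000-0000-000000000000", "update")
    print(f"queued task {result.id}")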