updated parallel
@@ -790,6 +790,7 @@ class LightRAG:
 
         logger.info(f"Number of batches to process: {len(docs_batches)}.")
 
+        tasks: list[tuple[str, DocProcessingStatus, dict[str, Any], Any]] = []
         # 3. iterate over batches
         for batch_idx, docs_batch in enumerate(docs_batches):
             # 4. iterate over batch
@@ -825,18 +826,55 @@ class LightRAG:
                     )
                 }
 
-                # Process document (text chunks and full docs) in parallel
-                tasks = [
+                # Prepare async tasks with full context
+                tasks.extend(
+                    [
+                        (
+                            doc_status_id,
+                            status_doc,
+                            chunks,
                             self.chunks_vdb.upsert(chunks),
+                        ),
+                        (
+                            doc_status_id,
+                            status_doc,
+                            chunks,
                             self._process_entity_relation_graph(chunks),
-                    self.full_docs.upsert({doc_id: {"content": status_doc.content}}),
+                        ),
+                        (
+                            doc_status_id,
+                            status_doc,
+                            chunks,
+                            self.full_docs.upsert(
+                                {doc_id: {"content": status_doc.content}}
+                            ),
+                        ),
+                        (
+                            doc_status_id,
+                            status_doc,
+                            chunks,
                             self.text_chunks.upsert(chunks),
+                        ),
                     ]
+                )
+
+            # Execute tasks as they complete
+            for future in asyncio.as_completed([task for _, _, _, task in tasks]):
                 try:
-                    await asyncio.gather(*tasks)
+                    # Wait for the completed task
+                    await future
+
+                    # Retrieve the full context of the completed task
+                    completed_doc_status_id, status_doc, chunks, _ = next(
+                        (doc_id, s_doc, ch, task)
+                        for doc_id, s_doc, ch, task in tasks
+                        if task == future
+                    )
+
+                    # Update status to processed
                     await self.doc_status.upsert(
                         {
-                            doc_status_id: {
+                            completed_doc_status_id: {
                                 "status": DocStatus.PROCESSED,
                                 "chunks_count": len(chunks),
                                 "content": status_doc.content,
@@ -847,13 +885,21 @@ class LightRAG:
                             }
                         }
                     )
-                    await self._insert_done()
+                    logger.info(f"Completed doc_id: {completed_doc_status_id}")
 
                 except Exception as e:
-                    logger.error(f"Failed to process document {doc_id}: {str(e)}")
+                    # Retrieve the context of the failed task
+                    failed_doc_status_id, status_doc, chunks, _ = next(
+                        (doc_id, s_doc, ch, task)
+                        for doc_id, s_doc, ch, task in tasks
+                        if task == future
+                    )
+                    logger.error(
+                        f"Failed to process document {failed_doc_status_id}: {str(e)}"
+                    )
                     await self.doc_status.upsert(
                         {
-                            doc_status_id: {
+                            failed_doc_status_id: {
                                 "status": DocStatus.FAILED,
                                 "error": str(e),
                                 "content": status_doc.content,
@@ -864,8 +910,7 @@ class LightRAG:
                             }
                         }
                     )
-                    continue
-            logger.info(f"Completed batch {batch_idx + 1} of {len(docs_batches)}.")
+            await self._insert_done()
 
     async def _process_entity_relation_graph(self, chunk: dict[str, Any]) -> None:
         try:
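The core pattern in the hunks above is to pair every awaitable with the context needed to report its outcome (doc status id, status record, chunks), then drain the awaitables with asyncio.as_completed so each document's status can be updated as soon as its own work finishes, rather than only after asyncio.gather returns. Below is a minimal, self-contained sketch of that idea; the names (run_with_context, upsert, doc-1) are illustrative and not part of LightRAG. It carries the context through the wrapped coroutine's return value, which is a reliable way to recover it because the plain for-loop form of asyncio.as_completed yields wrapper awaitables rather than the original task objects.

```python
import asyncio


async def run_with_context(doc_id: str, coro):
    # Await the underlying work and hand back the context alongside the
    # result, so the consumer knows which document just finished.
    result = await coro
    return doc_id, result


async def upsert(doc_id: str, delay: float) -> str:
    # Stand-in for a storage call such as a vector-DB or KV upsert.
    await asyncio.sleep(delay)
    return f"stored {doc_id}"


async def main() -> None:
    # Hypothetical documents; in the real pipeline these would be the
    # enqueued docs of one batch.
    work = [
        run_with_context("doc-1", upsert("doc-1", 0.2)),
        run_with_context("doc-2", upsert("doc-2", 0.1)),
    ]

    # Consume results in completion order; doc-2 finishes first here.
    for future in asyncio.as_completed(work):
        doc_id, result = await future
        print(f"completed {doc_id}: {result}")


asyncio.run(main())
```

Per-document error handling can wrap the `await future` in try/except, mirroring the PROCESSED/FAILED status updates in the diff.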
@@ -1326,15 +1326,12 @@ async def _get_edge_data(
         ),
     )
 
-    if not all([n is not None for n in edge_datas]):
-        logger.warning("Some edges are missing, maybe the storage is damaged")
-
     edge_datas = [
         {
             "src_id": k["src_id"],
             "tgt_id": k["tgt_id"],
             "rank": d,
-            "created_at": k.get("__created_at__", None),  # get time metadata from the KV store
+            "created_at": k.get("__created_at__", None),
             **v,
         }
         for k, v, d in zip(results, edge_datas, edge_degree)
@@ -1343,16 +1340,11 @@ async def _get_edge_data(
     edge_datas = sorted(
         edge_datas, key=lambda x: (x["rank"], x["weight"]), reverse=True
     )
-    len_edge_datas = len(edge_datas)
     edge_datas = truncate_list_by_token_size(
         edge_datas,
         key=lambda x: x["description"],
         max_token_size=query_param.max_token_for_global_context,
     )
-    logger.debug(
-        f"Truncate relations from {len_edge_datas} to {len(edge_datas)} (max tokens:{query_param.max_token_for_global_context})"
-    )
-
     use_entities, use_text_units = await asyncio.gather(
         _find_most_related_entities_from_relationships(
             edge_datas, query_param, knowledge_graph_inst
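The truncate_list_by_token_size call above keeps the highest-ranked relations while capping the combined token count of their descriptions. A generic sketch of that kind of budget-based truncation follows; the helper name, the whitespace tokenizer, and the numbers are illustrative assumptions, not LightRAG's actual implementation.

```python
from typing import Any, Callable


def truncate_by_token_budget(
    items: list[dict[str, Any]],
    key: Callable[[dict[str, Any]], str],
    max_token_size: int,
) -> list[dict[str, Any]]:
    """Keep leading items while the summed token count of key(item) stays within budget.

    A whitespace split stands in for a real tokenizer here.
    """
    kept: list[dict[str, Any]] = []
    used = 0
    for item in items:
        tokens = len(key(item).split())
        if used + tokens > max_token_size:
            break
        used += tokens
        kept.append(item)
    return kept


# Usage mirroring the call site in the diff: sort by rank/weight first,
# then truncate on the description field.
edge_datas = [
    {"description": "alpha relates to beta in several documents", "rank": 3, "weight": 1.0},
    {"description": "gamma mentions delta once", "rank": 1, "weight": 0.2},
]
edge_datas = truncate_by_token_budget(
    edge_datas, key=lambda x: x["description"], max_token_size=8
)
print(len(edge_datas))  # 1: the second description no longer fits the 8-token budget
```

Sorting by rank and weight before truncating, as the diff does, spends the token budget on the most relevant relations first.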