Merge pull request #887 from YanSte/paralle

Improved parallel processing
Yannick Stephan
2025-02-19 23:32:57 +01:00
committed by GitHub
2 changed files with 85 additions and 48 deletions
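
The first diff replaces the per-document asyncio.gather(...) call with a flat list of (doc_status_id, status_doc, chunks, awaitable) tuples that is then drained with asyncio.as_completed, so each document's status can be written as soon as its own storage tasks finish rather than only after the whole group returns. A minimal, standalone illustration of that behavioural difference (the coroutine names below are invented for the demo and are not LightRAG APIs):

import asyncio


async def work(name: str, delay: float) -> str:
    # Simulated storage call that takes `delay` seconds.
    await asyncio.sleep(delay)
    return name


async def main() -> None:
    jobs = [("slow", 0.3), ("fast", 0.1), ("medium", 0.2)]

    # gather: results come back in submission order, and nothing can be
    # reported until every awaitable has finished.
    results = await asyncio.gather(*(work(n, d) for n, d in jobs))
    print("gather order:", results)         # ['slow', 'fast', 'medium']

    # as_completed: each future is handed back the moment it finishes,
    # which is what allows the per-document status updates in the diff below.
    finished = []
    for future in asyncio.as_completed([work(n, d) for n, d in jobs]):
        finished.append(await future)
    print("as_completed order:", finished)   # ['fast', 'medium', 'slow']


asyncio.run(main())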


@@ -791,6 +791,7 @@ class LightRAG:
         logger.info(f"Number of batches to process: {len(docs_batches)}.")
+        tasks: list[tuple[str, DocProcessingStatus, dict[str, Any], Any]] = []
         # 3. iterate over batches
         for batch_idx, docs_batch in enumerate(docs_batches):
             # 4. iterate over batch
@@ -826,47 +827,91 @@ class LightRAG:
                     )
                 }
-                # Process document (text chunks and full docs) in parallel
-                tasks = [
-                    self.chunks_vdb.upsert(chunks),
-                    self._process_entity_relation_graph(chunks),
-                    self.full_docs.upsert({doc_id: {"content": status_doc.content}}),
-                    self.text_chunks.upsert(chunks),
-                ]
-                try:
-                    await asyncio.gather(*tasks)
-                    await self.doc_status.upsert(
-                        {
-                            doc_status_id: {
-                                "status": DocStatus.PROCESSED,
-                                "chunks_count": len(chunks),
-                                "content": status_doc.content,
-                                "content_summary": status_doc.content_summary,
-                                "content_length": status_doc.content_length,
-                                "created_at": status_doc.created_at,
-                                "updated_at": datetime.now().isoformat(),
-                            }
-                        }
-                    )
-                    await self._insert_done()
-                except Exception as e:
-                    logger.error(f"Failed to process document {doc_id}: {str(e)}")
-                    await self.doc_status.upsert(
-                        {
-                            doc_status_id: {
-                                "status": DocStatus.FAILED,
-                                "error": str(e),
-                                "content": status_doc.content,
-                                "content_summary": status_doc.content_summary,
-                                "content_length": status_doc.content_length,
-                                "created_at": status_doc.created_at,
-                                "updated_at": datetime.now().isoformat(),
-                            }
-                        }
-                    )
-                    continue
-            logger.info(f"Completed batch {batch_idx + 1} of {len(docs_batches)}.")
+                # Prepare async tasks with full context
+                tasks.extend(
+                    [
+                        (
+                            doc_status_id,
+                            status_doc,
+                            chunks,
+                            self.chunks_vdb.upsert(chunks),
+                        ),
+                        (
+                            doc_status_id,
+                            status_doc,
+                            chunks,
+                            self._process_entity_relation_graph(chunks),
+                        ),
+                        (
+                            doc_status_id,
+                            status_doc,
+                            chunks,
+                            self.full_docs.upsert(
+                                {doc_id: {"content": status_doc.content}}
+                            ),
+                        ),
+                        (
+                            doc_status_id,
+                            status_doc,
+                            chunks,
+                            self.text_chunks.upsert(chunks),
+                        ),
+                    ]
+                )
+        # Execute tasks as they complete
+        for future in asyncio.as_completed([task for _, _, _, task in tasks]):
+            try:
+                # Wait for the completed task
+                await future
+                # Retrieve the full context of the completed task
+                completed_doc_status_id, status_doc, chunks, _ = next(
+                    (doc_id, s_doc, ch, task)
+                    for doc_id, s_doc, ch, task in tasks
+                    if task == future
+                )
+                # Update status to processed
+                await self.doc_status.upsert(
+                    {
+                        completed_doc_status_id: {
+                            "status": DocStatus.PROCESSED,
+                            "chunks_count": len(chunks),
+                            "content": status_doc.content,
+                            "content_summary": status_doc.content_summary,
+                            "content_length": status_doc.content_length,
+                            "created_at": status_doc.created_at,
+                            "updated_at": datetime.now().isoformat(),
+                        }
+                    }
+                )
+                logger.info(f"Completed doc_id: {completed_doc_status_id}")
+            except Exception as e:
+                # Retrieve the context of the failed task
+                failed_doc_status_id, status_doc, chunks, _ = next(
+                    (doc_id, s_doc, ch, task)
+                    for doc_id, s_doc, ch, task in tasks
+                    if task == future
+                )
+                logger.error(
+                    f"Failed to process document {failed_doc_status_id}: {str(e)}"
+                )
+                await self.doc_status.upsert(
+                    {
+                        failed_doc_status_id: {
+                            "status": DocStatus.FAILED,
+                            "error": str(e),
+                            "content": status_doc.content,
+                            "content_summary": status_doc.content_summary,
+                            "content_length": status_doc.content_length,
+                            "created_at": status_doc.created_at,
+                            "updated_at": datetime.now().isoformat(),
+                        }
+                    }
+                )
+        await self._insert_done()
 
     async def _process_entity_relation_graph(self, chunk: dict[str, Any]) -> None:
         try:
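
The merged loop above recovers each task's (doc_status_id, status_doc, chunks) context by scanning the tasks list for the matching awaitable once a future completes. A common alternative for the same idea, sketched here with invented names (DocContext, upsert_chunks) rather than LightRAG's API, is to wrap every awaitable in a small helper coroutine that returns the context together with the outcome, so no lookup is needed when a future finishes:

import asyncio
from dataclasses import dataclass, field
from typing import Any


@dataclass
class DocContext:
    # Hypothetical stand-in for (doc_status_id, status_doc, chunks).
    doc_status_id: str
    chunks: dict[str, Any] = field(default_factory=dict)


async def upsert_chunks(ctx: DocContext) -> int:
    # Hypothetical storage call; pretend it returns the number of chunks written.
    await asyncio.sleep(0.01)
    return len(ctx.chunks)


async def run_with_context(ctx: DocContext, coro):
    # Carry the context alongside the awaitable so it travels with the result.
    try:
        return ctx, await coro, None
    except Exception as exc:
        return ctx, None, exc


async def main() -> None:
    contexts = [
        DocContext("doc-1", {"chunk-a": {}, "chunk-b": {}}),
        DocContext("doc-2", {"chunk-c": {}}),
    ]
    wrapped = [run_with_context(ctx, upsert_chunks(ctx)) for ctx in contexts]

    for future in asyncio.as_completed(wrapped):
        ctx, chunk_count, error = await future
        if error is None:
            print(f"{ctx.doc_status_id}: processed ({chunk_count} chunks)")
        else:
            print(f"{ctx.doc_status_id}: failed ({error})")


asyncio.run(main())

The trade-off is one extra coroutine per task in exchange for constant-time context recovery, and exceptions arrive already paired with the document that raised them.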


@@ -1326,15 +1326,12 @@ async def _get_edge_data(
         ),
     )
-    if not all([n is not None for n in edge_datas]):
-        logger.warning("Some edges are missing, maybe the storage is damaged")
     edge_datas = [
         {
             "src_id": k["src_id"],
             "tgt_id": k["tgt_id"],
             "rank": d,
-            "created_at": k.get("__created_at__", None),  # Get time metadata from the KV store
+            "created_at": k.get("__created_at__", None),
             **v,
         }
         for k, v, d in zip(results, edge_datas, edge_degree)
@@ -1343,16 +1340,11 @@ async def _get_edge_data(
     edge_datas = sorted(
         edge_datas, key=lambda x: (x["rank"], x["weight"]), reverse=True
     )
-    len_edge_datas = len(edge_datas)
     edge_datas = truncate_list_by_token_size(
         edge_datas,
         key=lambda x: x["description"],
         max_token_size=query_param.max_token_for_global_context,
     )
-    logger.debug(
-        f"Truncate relations from {len_edge_datas} to {len(edge_datas)} (max tokens:{query_param.max_token_for_global_context})"
-    )
     use_entities, use_text_units = await asyncio.gather(
         _find_most_related_entities_from_relationships(
             edge_datas, query_param, knowledge_graph_inst
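
The second diff removes a debug log around truncate_list_by_token_size but keeps the call itself, which caps edge_datas at query_param.max_token_for_global_context tokens of description text before the related entities and text units are gathered. As a rough sketch of what such a token-budget helper typically does (a greedy cut on cumulative token count, approximated here with a whitespace split instead of a real tokenizer, so it is not LightRAG's exact implementation):

from typing import Any, Callable


def truncate_by_token_budget(
    items: list[dict[str, Any]],
    key: Callable[[dict[str, Any]], str],
    max_token_size: int,
) -> list[dict[str, Any]]:
    # Keep items in order until the running token count would exceed the budget.
    # len(text.split()) stands in for a real tokenizer such as tiktoken.
    total = 0
    kept: list[dict[str, Any]] = []
    for item in items:
        tokens = len(key(item).split())
        if total + tokens > max_token_size:
            break
        total += tokens
        kept.append(item)
    return kept


edges = [
    {"description": "a " * 30, "rank": 3},  # 30 tokens
    {"description": "b " * 30, "rank": 2},  # 30 tokens
    {"description": "c " * 30, "rank": 1},  # 30 tokens
]
# With a 70-token budget only the first two relations survive.
print(len(truncate_by_token_budget(edges, key=lambda x: x["description"], max_token_size=70)))  # 2

Because edge_datas is sorted by rank and weight just before the call, the cut drops the lowest-ranked relations first.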