Merge branch 'Fix-pipeline-batch' into feat-node-expand

yangdx
2025-03-17 04:02:42 +08:00


@@ -769,7 +769,7 @@ class LightRAG:
         async with pipeline_status_lock:
             # Ensure only one worker is processing documents
             if not pipeline_status.get("busy", False):
-                # First check whether there are any documents that need to be processed
                 processing_docs, failed_docs, pending_docs = await asyncio.gather(
                     self.doc_status.get_docs_by_status(DocStatus.PROCESSING),
                     self.doc_status.get_docs_by_status(DocStatus.FAILED),
@@ -781,12 +781,10 @@ class LightRAG:
                 to_process_docs.update(failed_docs)
                 to_process_docs.update(pending_docs)

-                # If there are no documents to process, return immediately and keep pipeline_status unchanged
                 if not to_process_docs:
                     logger.info("No documents to process")
                     return

-                # There are documents to process, update pipeline_status
                 pipeline_status.update(
                     {
                         "busy": True,
@@ -825,7 +823,7 @@ class LightRAG:
                     for i in range(0, len(to_process_docs), self.max_parallel_insert)
                 ]

-                log_message = f"Number of batches to process: {len(docs_batches)}."
+                log_message = f"Processing {len(to_process_docs)} document(s) in {len(docs_batches)} batches"
                 logger.info(log_message)

                 # Update pipeline status with current batch information
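The reworded log message above reports both the document count and the number of batches produced by the slicing just before it. A tiny self-contained sketch of that batching arithmetic (the document ids and max_parallel_insert value are invented for illustration):

# Standalone illustration of the batch slicing used above; values are made up.
to_process_docs = {"doc-1": "...", "doc-2": "...", "doc-3": "..."}
max_parallel_insert = 2

items = list(to_process_docs.items())
docs_batches = [
    items[i : i + max_parallel_insert]
    for i in range(0, len(items), max_parallel_insert)
]

# Prints: Processing 3 document(s) in 2 batches
print(f"Processing {len(to_process_docs)} document(s) in {len(docs_batches)} batches")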
@@ -834,140 +832,151 @@ class LightRAG:
                 pipeline_status["latest_message"] = log_message
                 pipeline_status["history_messages"].append(log_message)

-                batches: list[Any] = []
-                # 3. iterate over batches
-                for batch_idx, docs_batch in enumerate(docs_batches):
-                    # Update current batch in pipeline status (directly, as it's atomic)
-                    pipeline_status["cur_batch"] += 1
-
-                    async def batch(
-                        batch_idx: int,
-                        docs_batch: list[tuple[str, DocProcessingStatus]],
-                        size_batch: int,
-                    ) -> None:
-                        log_message = (
-                            f"Start processing batch {batch_idx + 1} of {size_batch}."
-                        )
-                        logger.info(log_message)
-                        pipeline_status["latest_message"] = log_message
-                        pipeline_status["history_messages"].append(log_message)
-
-                        # 4. iterate over batch
-                        for doc_id_processing_status in docs_batch:
-                            doc_id, status_doc = doc_id_processing_status
-                            # Generate chunks from document
-                            chunks: dict[str, Any] = {
-                                compute_mdhash_id(dp["content"], prefix="chunk-"): {
-                                    **dp,
-                                    "full_doc_id": doc_id,
-                                }
-                                for dp in self.chunking_func(
-                                    status_doc.content,
-                                    split_by_character,
-                                    split_by_character_only,
-                                    self.chunk_overlap_token_size,
-                                    self.chunk_token_size,
-                                    self.tiktoken_model_name,
-                                )
-                            }
-                            # Process document (text chunks and full docs) in parallel
-                            # Create tasks with references for potential cancellation
-                            doc_status_task = asyncio.create_task(
-                                self.doc_status.upsert(
-                                    {
-                                        doc_id: {
-                                            "status": DocStatus.PROCESSING,
-                                            "updated_at": datetime.now().isoformat(),
-                                            "content": status_doc.content,
-                                            "content_summary": status_doc.content_summary,
-                                            "content_length": status_doc.content_length,
-                                            "created_at": status_doc.created_at,
-                                        }
-                                    }
-                                )
-                            )
-                            chunks_vdb_task = asyncio.create_task(
-                                self.chunks_vdb.upsert(chunks)
-                            )
-                            entity_relation_task = asyncio.create_task(
-                                self._process_entity_relation_graph(
-                                    chunks, pipeline_status, pipeline_status_lock
-                                )
-                            )
-                            full_docs_task = asyncio.create_task(
-                                self.full_docs.upsert(
-                                    {doc_id: {"content": status_doc.content}}
-                                )
-                            )
-                            text_chunks_task = asyncio.create_task(
-                                self.text_chunks.upsert(chunks)
-                            )
-                            tasks = [
-                                doc_status_task,
-                                chunks_vdb_task,
-                                entity_relation_task,
-                                full_docs_task,
-                                text_chunks_task,
-                            ]
-                            try:
-                                await asyncio.gather(*tasks)
-                                await self.doc_status.upsert(
-                                    {
-                                        doc_id: {
-                                            "status": DocStatus.PROCESSED,
-                                            "chunks_count": len(chunks),
-                                            "content": status_doc.content,
-                                            "content_summary": status_doc.content_summary,
-                                            "content_length": status_doc.content_length,
-                                            "created_at": status_doc.created_at,
-                                            "updated_at": datetime.now().isoformat(),
-                                        }
-                                    }
-                                )
-                            except Exception as e:
-                                # Log error and update pipeline status
-                                error_msg = (
-                                    f"Failed to process document {doc_id}: {str(e)}"
-                                )
-                                logger.error(error_msg)
-                                pipeline_status["latest_message"] = error_msg
-                                pipeline_status["history_messages"].append(error_msg)
-                                # Cancel other tasks as they are no longer meaningful
-                                for task in [
-                                    chunks_vdb_task,
-                                    entity_relation_task,
-                                    full_docs_task,
-                                    text_chunks_task,
-                                ]:
-                                    if not task.done():
-                                        task.cancel()
-                                # Update document status to failed
-                                await self.doc_status.upsert(
-                                    {
-                                        doc_id: {
-                                            "status": DocStatus.FAILED,
-                                            "error": str(e),
-                                            "content": status_doc.content,
-                                            "content_summary": status_doc.content_summary,
-                                            "content_length": status_doc.content_length,
-                                            "created_at": status_doc.created_at,
-                                            "updated_at": datetime.now().isoformat(),
-                                        }
-                                    }
-                                )
-                                continue
-
-                        log_message = (
-                            f"Completed batch {batch_idx + 1} of {len(docs_batches)}."
-                        )
-                        logger.info(log_message)
-                        pipeline_status["latest_message"] = log_message
-                        pipeline_status["history_messages"].append(log_message)
-
-                    batches.append(batch(batch_idx, docs_batch, len(docs_batches)))
-
-                await asyncio.gather(*batches)
-                await self._insert_done()
+                async def process_document(
+                    doc_id: str,
+                    status_doc: DocProcessingStatus,
+                    split_by_character: str | None,
+                    split_by_character_only: bool,
+                    pipeline_status: dict,
+                    pipeline_status_lock: asyncio.Lock
+                ) -> None:
+                    """Process single document"""
+                    try:
+                        # Generate chunks from document
+                        chunks: dict[str, Any] = {
+                            compute_mdhash_id(dp["content"], prefix="chunk-"): {
+                                **dp,
+                                "full_doc_id": doc_id,
+                            }
+                            for dp in self.chunking_func(
+                                status_doc.content,
+                                split_by_character,
+                                split_by_character_only,
+                                self.chunk_overlap_token_size,
+                                self.chunk_token_size,
+                                self.tiktoken_model_name,
+                            )
+                        }
+                        # Process document (text chunks and full docs) in parallel
+                        # Create tasks with references for potential cancellation
+                        doc_status_task = asyncio.create_task(
+                            self.doc_status.upsert(
+                                {
+                                    doc_id: {
+                                        "status": DocStatus.PROCESSING,
+                                        "updated_at": datetime.now().isoformat(),
+                                        "content": status_doc.content,
+                                        "content_summary": status_doc.content_summary,
+                                        "content_length": status_doc.content_length,
+                                        "created_at": status_doc.created_at,
+                                    }
+                                }
+                            )
+                        )
+                        chunks_vdb_task = asyncio.create_task(
+                            self.chunks_vdb.upsert(chunks)
+                        )
+                        entity_relation_task = asyncio.create_task(
+                            self._process_entity_relation_graph(
+                                chunks, pipeline_status, pipeline_status_lock
+                            )
+                        )
+                        full_docs_task = asyncio.create_task(
+                            self.full_docs.upsert(
+                                {doc_id: {"content": status_doc.content}}
+                            )
+                        )
+                        text_chunks_task = asyncio.create_task(
+                            self.text_chunks.upsert(chunks)
+                        )
+                        tasks = [
+                            doc_status_task,
+                            chunks_vdb_task,
+                            entity_relation_task,
+                            full_docs_task,
+                            text_chunks_task,
+                        ]
+                        await asyncio.gather(*tasks)
+                        await self.doc_status.upsert(
+                            {
+                                doc_id: {
+                                    "status": DocStatus.PROCESSED,
+                                    "chunks_count": len(chunks),
+                                    "content": status_doc.content,
+                                    "content_summary": status_doc.content_summary,
+                                    "content_length": status_doc.content_length,
+                                    "created_at": status_doc.created_at,
+                                    "updated_at": datetime.now().isoformat(),
+                                }
+                            }
+                        )
+                    except Exception as e:
+                        # Log error and update pipeline status
+                        error_msg = (
+                            f"Failed to process document {doc_id}: {str(e)}"
+                        )
+                        logger.error(error_msg)
+                        async with pipeline_status_lock:
+                            pipeline_status["latest_message"] = error_msg
+                            pipeline_status["history_messages"].append(error_msg)
+                        # Cancel other tasks as they are no longer meaningful
+                        for task in [
+                            chunks_vdb_task,
+                            entity_relation_task,
+                            full_docs_task,
+                            text_chunks_task,
+                        ]:
+                            if not task.done():
+                                task.cancel()
+                        # Update document status to failed
+                        await self.doc_status.upsert(
+                            {
+                                doc_id: {
+                                    "status": DocStatus.FAILED,
+                                    "error": str(e),
+                                    "content": status_doc.content,
+                                    "content_summary": status_doc.content_summary,
+                                    "content_length": status_doc.content_length,
+                                    "created_at": status_doc.created_at,
+                                    "updated_at": datetime.now().isoformat(),
+                                }
+                            }
+                        )
+
+                # 3. iterate over batches
+                total_batches = len(docs_batches)
+                for batch_idx, docs_batch in enumerate(docs_batches):
+                    current_batch = batch_idx + 1
+                    log_message = f"Start processing batch {current_batch} of {total_batches}."
+                    logger.info(log_message)
+                    pipeline_status["cur_batch"] = current_batch
+                    pipeline_status["latest_message"] = log_message
+                    pipeline_status["history_messages"].append(log_message)
+
+                    doc_tasks = []
+                    for doc_id, status_doc in docs_batch:
+                        doc_tasks.append(
+                            process_document(
+                                doc_id,
+                                status_doc,
+                                split_by_character,
+                                split_by_character_only,
+                                pipeline_status,
+                                pipeline_status_lock
+                            )
+                        )
+
+                    # Process documents in one batch parallelly
+                    await asyncio.gather(*doc_tasks)
+                    await self._insert_done()
+
+                    log_message = f"Completed batch {current_batch} of {total_batches}."
+                    logger.info(log_message)
+                    pipeline_status["latest_message"] = log_message
+                    pipeline_status["history_messages"].append(log_message)

                 # Check if there's a pending request to process more documents (with lock)
                 has_pending_request = False
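The net effect of the hunk above: the old per-batch batch(...) coroutines, which walked their documents sequentially and needed continue to skip a failed one, are replaced by one process_document(...) coroutine per document, gathered batch by batch, with failures recorded per document. A compact, self-contained sketch of that concurrency shape (not LightRAG code; process_one and the sample ids are invented stand-ins for the real chunking and upsert work):

import asyncio

# Hedged sketch of the new per-document concurrency model; process_one stands
# in for the real chunk/upsert steps and does not call any LightRAG API.
async def process_one(doc_id: str, results: dict) -> None:
    try:
        await asyncio.sleep(0)  # placeholder for chunking + storage upserts
        results[doc_id] = "processed"
    except Exception as exc:
        # Errors are captured per document, so one failure cannot abort the batch
        results[doc_id] = f"failed: {exc}"

async def run_batches(doc_ids: list[str], batch_size: int) -> dict[str, str]:
    results: dict[str, str] = {}
    batches = [doc_ids[i : i + batch_size] for i in range(0, len(doc_ids), batch_size)]
    for idx, batch in enumerate(batches, start=1):
        # One coroutine per document in the batch, awaited together
        await asyncio.gather(*(process_one(d, results) for d in batch))
        print(f"Completed batch {idx} of {len(batches)}.")
    return results

# Example: asyncio.run(run_batches(["doc-1", "doc-2", "doc-3"], batch_size=2))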
@@ -1042,7 +1051,7 @@ class LightRAG:
         ]
         await asyncio.gather(*tasks)

-        log_message = "All Insert done"
+        log_message = "All data persist to disk"
         logger.info(log_message)

         if pipeline_status is not None and pipeline_status_lock is not None: