Merge pull request #1447 from danielaskdd/improve-pipeline-file-batch
Improve parallel handling for document processing
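
The change replaces the server-side fixed-size batch loop with concurrency that is bounded inside the document pipeline by an asyncio.Semaphore sized from max_parallel_insert. A minimal sketch of that pattern, with illustrative stand-ins (process_document and max_parallel_insert mirror names in the diff; everything else is assumed for the example):

    import asyncio

    async def process_document(doc_id: str, semaphore: asyncio.Semaphore) -> None:
        # Only a bounded number of coroutines run this body at once; the rest
        # wait at the semaphore instead of being grouped into explicit batches.
        async with semaphore:
            await asyncio.sleep(0.1)  # stand-in for chunking and storage upserts
            print(f"processed {doc_id}")

    async def main() -> None:
        max_parallel_insert = 2  # illustrative value
        semaphore = asyncio.Semaphore(max_parallel_insert)
        doc_ids = [f"doc-{i}" for i in range(6)]
        # All tasks are created up front; the semaphore, not a batch loop,
        # limits how many documents are processed concurrently.
        await asyncio.gather(*(process_document(d, semaphore) for d in doc_ids))

    asyncio.run(main())
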
@@ -674,27 +674,9 @@ async def run_scanning_process(rag: LightRAG, doc_manager: DocumentManager):
         if not new_files:
             return
 
-        # Get MAX_PARALLEL_INSERT from global_args
-        max_parallel = global_args.max_parallel_insert
-        # Calculate batch size as 2 * MAX_PARALLEL_INSERT
-        batch_size = 2 * max_parallel
-
-        # Process files in batches
-        for i in range(0, total_files, batch_size):
-            batch_files = new_files[i : i + batch_size]
-            batch_num = i // batch_size + 1
-            total_batches = (total_files + batch_size - 1) // batch_size
-
-            logger.info(
-                f"Processing batch {batch_num}/{total_batches} with {len(batch_files)} files"
-            )
-            await pipeline_index_files(rag, batch_files)
-
-            # Log progress
-            processed = min(i + batch_size, total_files)
-            logger.info(
-                f"Processed {processed}/{total_files} files ({processed/total_files*100:.1f}%)"
-            )
+        # Process all files at once
+        await pipeline_index_files(rag, new_files)
+        logger.info(f"Scanning process completed: {total_files} files Processed.")
 
     except Exception as e:
         logger.error(f"Error during scanning process: {str(e)}")
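
For contrast, the batching removed above sliced the file list into groups of 2 * MAX_PARALLEL_INSERT and indexed one group at a time. A rough, illustrative sketch of that slicing (not the project's code):

    def batches(files: list[str], max_parallel: int) -> list[list[str]]:
        # Old behaviour: fixed-size groups of 2 * max_parallel files.
        batch_size = 2 * max_parallel
        return [files[i : i + batch_size] for i in range(0, len(files), batch_size)]

    print(batches([f"f{i}" for i in range(7)], max_parallel=2))
    # [['f0', 'f1', 'f2', 'f3'], ['f4', 'f5', 'f6']]
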
@@ -841,8 +841,8 @@ class LightRAG:
                         "job_name": "Default Job",
                         "job_start": datetime.now().isoformat(),
                         "docs": 0,
-                        "batchs": 0,
-                        "cur_batch": 0,
+                        "batchs": 0,  # Total number of files to be processed
+                        "cur_batch": 0,  # Number of files already processed
                         "request_pending": False,  # Clear any previous request
                         "latest_message": "",
                     }
@@ -867,18 +867,13 @@ class LightRAG:
                     pipeline_status["history_messages"].append(log_message)
                     break
 
-                # 2. split docs into chunks, insert chunks, update doc status
-                docs_batches = [
-                    list(to_process_docs.items())[i : i + self.max_parallel_insert]
-                    for i in range(0, len(to_process_docs), self.max_parallel_insert)
-                ]
-
-                log_message = f"Processing {len(to_process_docs)} document(s) in {len(docs_batches)} batches"
+                log_message = f"Processing {len(to_process_docs)} document(s)"
                 logger.info(log_message)
 
-                # Update pipeline status with current batch information
+                # Update pipeline_status, batchs now represents the total number of files to be processed
                 pipeline_status["docs"] = len(to_process_docs)
-                pipeline_status["batchs"] = len(docs_batches)
+                pipeline_status["batchs"] = len(to_process_docs)
+                pipeline_status["cur_batch"] = 0
                 pipeline_status["latest_message"] = log_message
                 pipeline_status["history_messages"].append(log_message)
 
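
After this hunk, "batchs" holds the total number of files in the queue and "cur_batch" the number already processed, so a status consumer can report progress directly. A tiny illustrative reader (the key names follow the diff; the percentage formatting is an assumption, not part of the project):

    def progress(pipeline_status: dict) -> str:
        total = pipeline_status.get("batchs", 0)    # total files to be processed
        done = pipeline_status.get("cur_batch", 0)  # files already processed
        pct = done / total * 100 if total else 0.0
        return f"{done}/{total} files ({pct:.1f}%)"

    print(progress({"batchs": 8, "cur_batch": 3}))  # 3/8 files (37.5%)
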
@@ -892,6 +887,11 @@ class LightRAG:
                 job_name = f"{path_prefix}[{total_files} files]"
                 pipeline_status["job_name"] = job_name
 
+                # Create a counter to track the number of processed files
+                processed_count = 0
+                # Create a semaphore to limit the number of concurrent file processing
+                semaphore = asyncio.Semaphore(self.max_parallel_insert)
+
                 async def process_document(
                     doc_id: str,
                     status_doc: DocProcessingStatus,
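
processed_count is shared by every process_document coroutine through nonlocal and is only advanced while pipeline_status_lock is held, so the reported file number stays consistent under concurrency. A self-contained sketch of that counter-under-lock pattern, with illustrative names:

    import asyncio

    async def main() -> None:
        processed_count = 0
        lock = asyncio.Lock()
        semaphore = asyncio.Semaphore(2)

        async def worker(doc_id: str) -> None:
            nonlocal processed_count
            async with semaphore:      # bound how many workers run at once
                async with lock:       # serialize updates to the shared counter
                    processed_count += 1
                    current = processed_count
                print(f"{doc_id} is file {current}")

        await asyncio.gather(*(worker(f"doc-{i}") for i in range(5)))
        print("total processed:", processed_count)

    asyncio.run(main())
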
@@ -899,45 +899,97 @@ class LightRAG:
                     split_by_character_only: bool,
                     pipeline_status: dict,
                     pipeline_status_lock: asyncio.Lock,
+                    semaphore: asyncio.Semaphore,
                 ) -> None:
                     """Process single document"""
-                    try:
-                        # Get file path from status document
-                        file_path = getattr(status_doc, "file_path", "unknown_source")
-
-                        async with pipeline_status_lock:
-                            log_message = f"Processing file: {file_path}"
-                            logger.info(log_message)
-                            pipeline_status["history_messages"].append(log_message)
-                            log_message = f"Processing d-id: {doc_id}"
-                            logger.info(log_message)
-                            pipeline_status["latest_message"] = log_message
-                            pipeline_status["history_messages"].append(log_message)
-
-                        # Generate chunks from document
-                        chunks: dict[str, Any] = {
-                            compute_mdhash_id(dp["content"], prefix="chunk-"): {
-                                **dp,
-                                "full_doc_id": doc_id,
-                                "file_path": file_path,  # Add file path to each chunk
-                            }
-                            for dp in self.chunking_func(
-                                self.tokenizer,
-                                status_doc.content,
-                                split_by_character,
-                                split_by_character_only,
-                                self.chunk_overlap_token_size,
-                                self.chunk_token_size,
-                            )
-                        }
-
-                        # Process document (text chunks and full docs) in parallel
-                        # Create tasks with references for potential cancellation
-                        doc_status_task = asyncio.create_task(
-                            self.doc_status.upsert(
+                    async with semaphore:
+                        nonlocal processed_count
+                        current_file_number = 0
+                        try:
+                            # Get file path from status document
+                            file_path = getattr(
+                                status_doc, "file_path", "unknown_source"
+                            )
+
+                            async with pipeline_status_lock:
+                                # Update processed file count and save current file number
+                                processed_count += 1
+                                current_file_number = (
+                                    processed_count  # Save the current file number
+                                )
+                                pipeline_status["cur_batch"] = processed_count
+
+                                log_message = f"Processing file ({current_file_number}/{total_files}): {file_path}"
+                                logger.info(log_message)
+                                pipeline_status["history_messages"].append(log_message)
+                                log_message = f"Processing d-id: {doc_id}"
+                                logger.info(log_message)
+                                pipeline_status["latest_message"] = log_message
+                                pipeline_status["history_messages"].append(log_message)
+
+                            # Generate chunks from document
+                            chunks: dict[str, Any] = {
+                                compute_mdhash_id(dp["content"], prefix="chunk-"): {
+                                    **dp,
+                                    "full_doc_id": doc_id,
+                                    "file_path": file_path,  # Add file path to each chunk
+                                }
+                                for dp in self.chunking_func(
+                                    self.tokenizer,
+                                    status_doc.content,
+                                    split_by_character,
+                                    split_by_character_only,
+                                    self.chunk_overlap_token_size,
+                                    self.chunk_token_size,
+                                )
+                            }
+
+                            # Process document (text chunks and full docs) in parallel
+                            # Create tasks with references for potential cancellation
+                            doc_status_task = asyncio.create_task(
+                                self.doc_status.upsert(
+                                    {
+                                        doc_id: {
+                                            "status": DocStatus.PROCESSING,
+                                            "chunks_count": len(chunks),
+                                            "content": status_doc.content,
+                                            "content_summary": status_doc.content_summary,
+                                            "content_length": status_doc.content_length,
+                                            "created_at": status_doc.created_at,
+                                            "updated_at": datetime.now().isoformat(),
+                                            "file_path": file_path,
+                                        }
+                                    }
+                                )
+                            )
+                            chunks_vdb_task = asyncio.create_task(
+                                self.chunks_vdb.upsert(chunks)
+                            )
+                            entity_relation_task = asyncio.create_task(
+                                self._process_entity_relation_graph(
+                                    chunks, pipeline_status, pipeline_status_lock
+                                )
+                            )
+                            full_docs_task = asyncio.create_task(
+                                self.full_docs.upsert(
+                                    {doc_id: {"content": status_doc.content}}
+                                )
+                            )
+                            text_chunks_task = asyncio.create_task(
+                                self.text_chunks.upsert(chunks)
+                            )
+                            tasks = [
+                                doc_status_task,
+                                chunks_vdb_task,
+                                entity_relation_task,
+                                full_docs_task,
+                                text_chunks_task,
+                            ]
+                            await asyncio.gather(*tasks)
+                            await self.doc_status.upsert(
                                 {
                                     doc_id: {
-                                        "status": DocStatus.PROCESSING,
+                                        "status": DocStatus.PROCESSED,
                                         "chunks_count": len(chunks),
                                         "content": status_doc.content,
                                         "content_summary": status_doc.content_summary,
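
The chunks mapping above keys each chunk by a content hash (compute_mdhash_id in the project) and attaches the owning document id and source file path. A rough stand-in using hashlib, assuming an MD5-based id, to show the shape of that mapping:

    import hashlib
    from typing import Any

    def mdhash_id(content: str, prefix: str = "") -> str:
        # Assumed stand-in for compute_mdhash_id: prefix + MD5 hex digest of the content.
        return prefix + hashlib.md5(content.encode("utf-8")).hexdigest()

    def build_chunks(
        doc_id: str, file_path: str, pieces: list[dict[str, Any]]
    ) -> dict[str, dict[str, Any]]:
        # Mirrors the comprehension in the hunk: key is the chunk hash, value is the
        # chunk payload plus the full document id and file path.
        return {
            mdhash_id(p["content"], prefix="chunk-"): {
                **p,
                "full_doc_id": doc_id,
                "file_path": file_path,
            }
            for p in pieces
        }

    print(build_chunks("doc-1", "a.txt", [{"content": "hello"}, {"content": "world"}]))
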
@@ -948,112 +1000,67 @@ class LightRAG:
                                     }
                                 }
                             )
-                        )
-                        chunks_vdb_task = asyncio.create_task(
-                            self.chunks_vdb.upsert(chunks)
-                        )
-                        entity_relation_task = asyncio.create_task(
-                            self._process_entity_relation_graph(
-                                chunks, pipeline_status, pipeline_status_lock
-                            )
-                        )
-                        full_docs_task = asyncio.create_task(
-                            self.full_docs.upsert(
-                                {doc_id: {"content": status_doc.content}}
-                            )
-                        )
-                        text_chunks_task = asyncio.create_task(
-                            self.text_chunks.upsert(chunks)
-                        )
-                        tasks = [
-                            doc_status_task,
-                            chunks_vdb_task,
-                            entity_relation_task,
-                            full_docs_task,
-                            text_chunks_task,
-                        ]
-                        await asyncio.gather(*tasks)
-                        await self.doc_status.upsert(
-                            {
-                                doc_id: {
-                                    "status": DocStatus.PROCESSED,
-                                    "chunks_count": len(chunks),
-                                    "content": status_doc.content,
-                                    "content_summary": status_doc.content_summary,
-                                    "content_length": status_doc.content_length,
-                                    "created_at": status_doc.created_at,
-                                    "updated_at": datetime.now().isoformat(),
-                                    "file_path": file_path,
-                                }
-                            }
-                        )
-                    except Exception as e:
-                        # Log error and update pipeline status
-                        error_msg = f"Failed to process document {doc_id}: {traceback.format_exc()}"
-                        logger.error(error_msg)
-                        async with pipeline_status_lock:
-                            pipeline_status["latest_message"] = error_msg
-                            pipeline_status["history_messages"].append(error_msg)
-
-                        # Cancel other tasks as they are no longer meaningful
-                        for task in [
-                            chunks_vdb_task,
-                            entity_relation_task,
-                            full_docs_task,
-                            text_chunks_task,
-                        ]:
-                            if not task.done():
-                                task.cancel()
-                        # Update document status to failed
-                        await self.doc_status.upsert(
-                            {
-                                doc_id: {
-                                    "status": DocStatus.FAILED,
-                                    "error": str(e),
-                                    "content": status_doc.content,
-                                    "content_summary": status_doc.content_summary,
-                                    "content_length": status_doc.content_length,
-                                    "created_at": status_doc.created_at,
-                                    "updated_at": datetime.now().isoformat(),
-                                    "file_path": file_path,
-                                }
-                            }
-                        )
+                            # Call _insert_done after processing each file
+                            await self._insert_done()
+
+                            async with pipeline_status_lock:
+                                log_message = f"Completed processing file {current_file_number}/{total_files}: {file_path}"
+                                logger.info(log_message)
+                                pipeline_status["latest_message"] = log_message
+                                pipeline_status["history_messages"].append(log_message)
+
+                        except Exception as e:
+                            # Log error and update pipeline status
+                            error_msg = f"Failed to process document {doc_id}: {traceback.format_exc()}"
+                            logger.error(error_msg)
+                            async with pipeline_status_lock:
+                                pipeline_status["latest_message"] = error_msg
+                                pipeline_status["history_messages"].append(error_msg)
+
+                            # Cancel other tasks as they are no longer meaningful
+                            for task in [
+                                chunks_vdb_task,
+                                entity_relation_task,
+                                full_docs_task,
+                                text_chunks_task,
+                            ]:
+                                if not task.done():
+                                    task.cancel()
+                            # Update document status to failed
+                            await self.doc_status.upsert(
+                                {
+                                    doc_id: {
+                                        "status": DocStatus.FAILED,
+                                        "error": str(e),
+                                        "content": status_doc.content,
+                                        "content_summary": status_doc.content_summary,
+                                        "content_length": status_doc.content_length,
+                                        "created_at": status_doc.created_at,
+                                        "updated_at": datetime.now().isoformat(),
+                                        "file_path": file_path,
+                                    }
+                                }
+                            )
 
-                # 3. iterate over batches
-                total_batches = len(docs_batches)
-                for batch_idx, docs_batch in enumerate(docs_batches):
-                    current_batch = batch_idx + 1
-                    log_message = (
-                        f"Start processing batch {current_batch} of {total_batches}."
-                    )
-                    logger.info(log_message)
-                    pipeline_status["cur_batch"] = current_batch
-                    pipeline_status["latest_message"] = log_message
-                    pipeline_status["history_messages"].append(log_message)
-
-                    doc_tasks = []
-                    for doc_id, status_doc in docs_batch:
-                        doc_tasks.append(
-                            process_document(
-                                doc_id,
-                                status_doc,
-                                split_by_character,
-                                split_by_character_only,
-                                pipeline_status,
-                                pipeline_status_lock,
-                            )
-                        )
-
-                    # Process documents in one batch parallelly
-                    await asyncio.gather(*doc_tasks)
-                    await self._insert_done()
-
-                    log_message = f"Completed batch {current_batch} of {total_batches}."
-                    logger.info(log_message)
-                    pipeline_status["latest_message"] = log_message
-                    pipeline_status["history_messages"].append(log_message)
+                # Create processing tasks for all documents
+                doc_tasks = []
+                for doc_id, status_doc in to_process_docs.items():
+                    doc_tasks.append(
+                        process_document(
+                            doc_id,
+                            status_doc,
+                            split_by_character,
+                            split_by_character_only,
+                            pipeline_status,
+                            pipeline_status_lock,
+                            semaphore,
+                        )
+                    )
+
+                # Wait for all document processing to complete
+                await asyncio.gather(*doc_tasks)
 
                 # Check if there's a pending request to process more documents (with lock)
                 has_pending_request = False
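
When one storage task raises, the new error path cancels whichever sibling tasks have not finished before marking the document FAILED, since their results are no longer meaningful. A self-contained sketch of that cancel-the-rest pattern (task names follow the diff; the work is simulated):

    import asyncio

    async def slow(name: str) -> str:
        await asyncio.sleep(1)
        return name

    async def failing() -> None:
        await asyncio.sleep(0.1)
        raise RuntimeError("entity extraction failed")

    async def main() -> None:
        chunks_vdb_task = asyncio.create_task(slow("chunks_vdb"))
        text_chunks_task = asyncio.create_task(slow("text_chunks"))
        entity_relation_task = asyncio.create_task(failing())
        try:
            await asyncio.gather(chunks_vdb_task, text_chunks_task, entity_relation_task)
        except Exception as e:
            # gather() does not cancel the other tasks on failure, so do it explicitly.
            for task in [chunks_vdb_task, text_chunks_task, entity_relation_task]:
                if not task.done():
                    task.cancel()
            print("failed:", e)

    asyncio.run(main())
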
@@ -1107,9 +1114,11 @@ class LightRAG:
                 llm_response_cache=self.llm_response_cache,
             )
         except Exception as e:
-            logger.error(
-                f"Failed to extract entities and relationships : {traceback.format_exc()} 。"
-            )
+            error_msg = f"Failed to extract entities and relationships: {str(e)}"
+            logger.error(error_msg)
+            async with pipeline_status_lock:
+                pipeline_status["latest_message"] = error_msg
+                pipeline_status["history_messages"].append(error_msg)
             raise e
 
     async def _insert_done(
|