Use a semaphore to control parallel doc processing instead of batching.

yangdx
2025-04-24 13:45:44 +08:00
parent 47c4520a65
commit 4f68f3e410

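For context, the heart of this change is swapping fixed-size batching for semaphore-bounded concurrency. A minimal sketch of the pattern, assuming illustrative names (run_all, process) around the real max_parallel_insert setting:

import asyncio

async def run_all(docs: list[str], max_parallel_insert: int = 2) -> None:
    # One semaphore shared by every task bounds how many run at once,
    # replacing the old "slice into batches, gather each batch" loop.
    semaphore = asyncio.Semaphore(max_parallel_insert)

    async def process(doc: str) -> None:
        async with semaphore:  # at most max_parallel_insert tasks inside
            await asyncio.sleep(0.1)  # stand-in for chunking and inserting
            print(f"done: {doc}")

    # Schedule every document up front; the semaphore does the throttling.
    await asyncio.gather(*(process(d) for d in docs))

asyncio.run(run_all(["a", "b", "c", "d", "e"]))

Unlike batching, a slow document no longer stalls its whole batch: the next document starts as soon as any slot frees up.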

@@ -841,8 +841,8 @@ class LightRAG:
"job_name": "Default Job",
"job_start": datetime.now().isoformat(),
"docs": 0,
"batchs": 0,
"cur_batch": 0,
"batchs": 0, # 将被重新定义为待处理的文件总数
"cur_batch": 0, # 将被重新定义为当前处理的第几个文件
"request_pending": False, # Clear any previous request
"latest_message": "",
}
@@ -867,18 +867,13 @@ class LightRAG:
pipeline_status["history_messages"].append(log_message)
break
# 2. split docs into chunks, insert chunks, update doc status
docs_batches = [
list(to_process_docs.items())[i : i + self.max_parallel_insert]
for i in range(0, len(to_process_docs), self.max_parallel_insert)
]
log_message = f"Processing {len(to_process_docs)} document(s) in {len(docs_batches)} batches"
log_message = f"Processing {len(to_process_docs)} document(s)"
logger.info(log_message)
# Update pipeline status with current batch information
# Update pipeline_status: batchs now represents the total number of files to process
pipeline_status["docs"] = len(to_process_docs)
pipeline_status["batchs"] = len(docs_batches)
pipeline_status["batchs"] = len(to_process_docs)
pipeline_status["cur_batch"] = 0 # 初始化为0表示当前已处理的文件数量
pipeline_status["latest_message"] = log_message
pipeline_status["history_messages"].append(log_message)
@@ -892,6 +887,11 @@ class LightRAG:
job_name = f"{path_prefix}[{total_files} files]"
pipeline_status["job_name"] = job_name
# Create a counter to track the number of files processed
processed_count = 0
# Create a semaphore to limit the number of files processed concurrently
semaphore = asyncio.Semaphore(self.max_parallel_insert)
async def process_document(
doc_id: str,
status_doc: DocProcessingStatus,
@@ -899,14 +899,25 @@ class LightRAG:
split_by_character_only: bool,
pipeline_status: dict,
pipeline_status_lock: asyncio.Lock,
semaphore: asyncio.Semaphore,
) -> None:
"""Process single document"""
# Use the semaphore to control concurrency
async with semaphore:
nonlocal processed_count
# Capture and store this file's sequence number
current_file_number = 0
try:
# Get file path from status document
file_path = getattr(status_doc, "file_path", "unknown_source")
async with pipeline_status_lock:
log_message = f"Processing file: {file_path}"
# Update the processed-file count and record this file's number
processed_count += 1
current_file_number = processed_count # record this file's sequence number
pipeline_status["cur_batch"] = processed_count
log_message = f"Processing file ({current_file_number}/{total_files}): {file_path}"
logger.info(log_message)
pipeline_status["history_messages"].append(log_message)
log_message = f"Processing d-id: {doc_id}"
@@ -987,6 +998,16 @@ class LightRAG:
}
}
)
# Call _insert_done once after each file is processed
await self._insert_done()
async with pipeline_status_lock:
log_message = f"Completed processing file {current_file_number}/{total_files}: {file_path}"
logger.info(log_message)
pipeline_status["latest_message"] = log_message
pipeline_status["history_messages"].append(log_message)
except Exception as e:
# Log error and update pipeline status
error_msg = f"Failed to process document {doc_id}: {traceback.format_exc()}"
@@ -1021,20 +1042,9 @@ class LightRAG:
}
)
# 3. iterate over batches
total_batches = len(docs_batches)
for batch_idx, docs_batch in enumerate(docs_batches):
current_batch = batch_idx + 1
log_message = (
f"Start processing batch {current_batch} of {total_batches}."
)
logger.info(log_message)
pipeline_status["cur_batch"] = current_batch
pipeline_status["latest_message"] = log_message
pipeline_status["history_messages"].append(log_message)
# Create processing tasks for all documents
doc_tasks = []
for doc_id, status_doc in docs_batch:
for doc_id, status_doc in to_process_docs.items():
doc_tasks.append(
process_document(
doc_id,
@@ -1043,17 +1053,12 @@ class LightRAG:
split_by_character_only,
pipeline_status,
pipeline_status_lock,
semaphore,
)
)
# Process documents in one batch parallelly
# Wait for all documents to finish processing
await asyncio.gather(*doc_tasks)
await self._insert_done()
log_message = f"Completed batch {current_batch} of {total_batches}."
logger.info(log_message)
pipeline_status["latest_message"] = log_message
pipeline_status["history_messages"].append(log_message)
# Check if there's a pending request to process more documents (with lock)
has_pending_request = False