Add pipeline status control for concurrent document indexing processes
• Add shared pipeline status namespace
• Implement concurrent process control
• Add request queuing for pending jobs
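
The gating pattern behind these three points, reduced to a minimal standalone sketch (a plain dict and threading.Lock stand in for the shared-storage namespace and lock added below; try_acquire_pipeline is a hypothetical helper, not code from this commit):

    import threading
    from datetime import datetime

    # Simplified stand-in for the shared "pipeline_status" namespace.
    pipeline_status = {"busy": False, "request_pending": False}
    status_lock = threading.Lock()

    def try_acquire_pipeline(job_name: str) -> bool:
        """Return True if the caller should run the indexing loop itself."""
        with status_lock:
            if not pipeline_status["busy"]:
                pipeline_status.update(
                    {
                        "busy": True,
                        "job_name": job_name,
                        "job_start": datetime.now().isoformat(),
                        "request_pending": False,
                    }
                )
                return True
            # Another worker owns the pipeline: queue a request and back off.
            pipeline_status["request_pending"] = True
            return False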
lightrag/kg/shared_storage.py
@@ -80,6 +80,18 @@ def initialize_share_data(workers: int = 1):
     # Mark as initialized
     _initialized = True
 
+    # Initialize pipeline status for document indexing control
+    pipeline_namespace = get_namespace_data("pipeline_status")
+    pipeline_namespace.update({
+        "busy": False,  # Control concurrent processes
+        "job_name": "Default Job",  # Current job name (indexing files/indexing texts)
+        "job_start": None,  # Job start time
+        "docs": 0,  # Total number of documents to be indexed
+        "batchs": 0,  # Number of batches for processing documents
+        "cur_batch": 0,  # Current processing batch
+        "request_pending": False,  # Flag for pending request for processing
+    })
+
 
 def try_initialize_namespace(namespace: str) -> bool:
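
Once the namespace exists, any other component can read it for progress reporting. A minimal sketch, assuming only the get_namespace_data accessor that appears in this commit (indexing_progress itself is a hypothetical helper, not part of the change):

    from lightrag.kg.shared_storage import get_namespace_data

    def indexing_progress() -> str:
        # Read-only view of the shared pipeline status.
        status = get_namespace_data("pipeline_status")
        if not status.get("busy", False):
            return "idle"
        return (
            f"{status.get('job_name')}: batch {status.get('cur_batch')}/{status.get('batchs')} "
            f"({status.get('docs')} docs, started {status.get('job_start')})"
        )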
lightrag/lightrag.py
@@ -273,8 +273,6 @@ class LightRAG:
 
         from lightrag.kg.shared_storage import (
             initialize_share_data,
-            try_initialize_namespace,
-            get_namespace_data,
         )
         initialize_share_data()
 
@@ -672,117 +670,179 @@ class LightRAG:
         3. Process each chunk for entity and relation extraction
         4. Update the document status
         """
-        # 1. Get all pending, failed, and abnormally terminated processing documents.
-        # Run the asynchronous status retrievals in parallel using asyncio.gather
-        processing_docs, failed_docs, pending_docs = await asyncio.gather(
-            self.doc_status.get_docs_by_status(DocStatus.PROCESSING),
-            self.doc_status.get_docs_by_status(DocStatus.FAILED),
-            self.doc_status.get_docs_by_status(DocStatus.PENDING),
-        )
-
-        to_process_docs: dict[str, DocProcessingStatus] = {}
-        to_process_docs.update(processing_docs)
-        to_process_docs.update(failed_docs)
-        to_process_docs.update(pending_docs)
-
-        if not to_process_docs:
-            logger.info("All documents have been processed or are duplicates")
-            return
-
-        # 2. split docs into chunks, insert chunks, update doc status
-        docs_batches = [
-            list(to_process_docs.items())[i : i + self.max_parallel_insert]
-            for i in range(0, len(to_process_docs), self.max_parallel_insert)
-        ]
-
-        logger.info(f"Number of batches to process: {len(docs_batches)}.")
-
-        batches: list[Any] = []
-        # 3. iterate over batches
-        for batch_idx, docs_batch in enumerate(docs_batches):
-
-            async def batch(
-                batch_idx: int,
-                docs_batch: list[tuple[str, DocProcessingStatus]],
-                size_batch: int,
-            ) -> None:
-                logger.info(f"Start processing batch {batch_idx + 1} of {size_batch}.")
-                # 4. iterate over batch
-                for doc_id_processing_status in docs_batch:
-                    doc_id, status_doc = doc_id_processing_status
-                    # Generate chunks from document
-                    chunks: dict[str, Any] = {
-                        compute_mdhash_id(dp["content"], prefix="chunk-"): {
-                            **dp,
-                            "full_doc_id": doc_id,
-                        }
-                        for dp in self.chunking_func(
-                            status_doc.content,
-                            split_by_character,
-                            split_by_character_only,
-                            self.chunk_overlap_token_size,
-                            self.chunk_token_size,
-                            self.tiktoken_model_name,
-                        )
-                    }
-                    # Process document (text chunks and full docs) in parallel
-                    tasks = [
-                        self.doc_status.upsert(
-                            {
-                                doc_id: {
-                                    "status": DocStatus.PROCESSING,
-                                    "updated_at": datetime.now().isoformat(),
-                                    "content": status_doc.content,
-                                    "content_summary": status_doc.content_summary,
-                                    "content_length": status_doc.content_length,
-                                    "created_at": status_doc.created_at,
-                                }
-                            }
-                        ),
-                        self.chunks_vdb.upsert(chunks),
-                        self._process_entity_relation_graph(chunks),
-                        self.full_docs.upsert(
-                            {doc_id: {"content": status_doc.content}}
-                        ),
-                        self.text_chunks.upsert(chunks),
-                    ]
-                    try:
-                        await asyncio.gather(*tasks)
-                        await self.doc_status.upsert(
-                            {
-                                doc_id: {
-                                    "status": DocStatus.PROCESSED,
-                                    "chunks_count": len(chunks),
-                                    "content": status_doc.content,
-                                    "content_summary": status_doc.content_summary,
-                                    "content_length": status_doc.content_length,
-                                    "created_at": status_doc.created_at,
-                                    "updated_at": datetime.now().isoformat(),
-                                }
-                            }
-                        )
-                    except Exception as e:
-                        logger.error(f"Failed to process document {doc_id}: {str(e)}")
-                        await self.doc_status.upsert(
-                            {
-                                doc_id: {
-                                    "status": DocStatus.FAILED,
-                                    "error": str(e),
-                                    "content": status_doc.content,
-                                    "content_summary": status_doc.content_summary,
-                                    "content_length": status_doc.content_length,
-                                    "created_at": status_doc.created_at,
-                                    "updated_at": datetime.now().isoformat(),
-                                }
-                            }
-                        )
-                        continue
-                logger.info(f"Completed batch {batch_idx + 1} of {len(docs_batches)}.")
-
-            batches.append(batch(batch_idx, docs_batch, len(docs_batches)))
-
-        await asyncio.gather(*batches)
-        await self._insert_done()
+        from lightrag.kg.shared_storage import get_namespace_data, get_storage_lock
+
+        # Get pipeline status shared data and lock
+        pipeline_status = get_namespace_data("pipeline_status")
+        storage_lock = get_storage_lock()
+
+        # Check if another process is already processing the queue
+        process_documents = False
+        with storage_lock:
+            if not pipeline_status.get("busy", False):
+                # No other process is busy, we can process documents
+                pipeline_status.update({
+                    "busy": True,
+                    "job_name": "indexing files",
+                    "job_start": datetime.now().isoformat(),
+                    "docs": 0,
+                    "batchs": 0,
+                    "cur_batch": 0,
+                    "request_pending": False  # Clear any previous request
+                })
+                process_documents = True
+            else:
+                # Another process is busy, just set request flag and return
+                pipeline_status["request_pending"] = True
+                logger.info("Another process is already processing the document queue. Request queued.")
+
+        if not process_documents:
+            return
+
+        try:
+            # Process documents until no more documents or requests
+            while True:
+                # 1. Get all pending, failed, and abnormally terminated processing documents.
+                processing_docs, failed_docs, pending_docs = await asyncio.gather(
+                    self.doc_status.get_docs_by_status(DocStatus.PROCESSING),
+                    self.doc_status.get_docs_by_status(DocStatus.FAILED),
+                    self.doc_status.get_docs_by_status(DocStatus.PENDING),
+                )
+
+                to_process_docs: dict[str, DocProcessingStatus] = {}
+                to_process_docs.update(processing_docs)
+                to_process_docs.update(failed_docs)
+                to_process_docs.update(pending_docs)
+
+                if not to_process_docs:
+                    logger.info("All documents have been processed or are duplicates")
+                    break
+
+                # Update pipeline status with document count (with lock)
+                with storage_lock:
+                    pipeline_status["docs"] = len(to_process_docs)
+
+                # 2. split docs into chunks, insert chunks, update doc status
+                docs_batches = [
+                    list(to_process_docs.items())[i : i + self.max_parallel_insert]
+                    for i in range(0, len(to_process_docs), self.max_parallel_insert)
+                ]
+
+                # Update pipeline status with batch information (directly, as it's atomic)
+                pipeline_status.update({
+                    "batchs": len(docs_batches),
+                    "cur_batch": 0
+                })
+
+                logger.info(f"Number of batches to process: {len(docs_batches)}.")
+
+                batches: list[Any] = []
+                # 3. iterate over batches
+                for batch_idx, docs_batch in enumerate(docs_batches):
+                    # Update current batch in pipeline status (directly, as it's atomic)
+                    pipeline_status["cur_batch"] = batch_idx + 1
+
+                    async def batch(
+                        batch_idx: int,
+                        docs_batch: list[tuple[str, DocProcessingStatus]],
+                        size_batch: int,
+                    ) -> None:
+                        logger.info(f"Start processing batch {batch_idx + 1} of {size_batch}.")
+                        # 4. iterate over batch
+                        for doc_id_processing_status in docs_batch:
+                            doc_id, status_doc = doc_id_processing_status
+                            # Generate chunks from document
+                            chunks: dict[str, Any] = {
+                                compute_mdhash_id(dp["content"], prefix="chunk-"): {
+                                    **dp,
+                                    "full_doc_id": doc_id,
+                                }
+                                for dp in self.chunking_func(
+                                    status_doc.content,
+                                    split_by_character,
+                                    split_by_character_only,
+                                    self.chunk_overlap_token_size,
+                                    self.chunk_token_size,
+                                    self.tiktoken_model_name,
+                                )
+                            }
+                            # Process document (text chunks and full docs) in parallel
+                            tasks = [
+                                self.doc_status.upsert(
+                                    {
+                                        doc_id: {
+                                            "status": DocStatus.PROCESSING,
+                                            "updated_at": datetime.now().isoformat(),
+                                            "content": status_doc.content,
+                                            "content_summary": status_doc.content_summary,
+                                            "content_length": status_doc.content_length,
+                                            "created_at": status_doc.created_at,
+                                        }
+                                    }
+                                ),
+                                self.chunks_vdb.upsert(chunks),
+                                self._process_entity_relation_graph(chunks),
+                                self.full_docs.upsert(
+                                    {doc_id: {"content": status_doc.content}}
+                                ),
+                                self.text_chunks.upsert(chunks),
+                            ]
+                            try:
+                                await asyncio.gather(*tasks)
+                                await self.doc_status.upsert(
+                                    {
+                                        doc_id: {
+                                            "status": DocStatus.PROCESSED,
+                                            "chunks_count": len(chunks),
+                                            "content": status_doc.content,
+                                            "content_summary": status_doc.content_summary,
+                                            "content_length": status_doc.content_length,
+                                            "created_at": status_doc.created_at,
+                                            "updated_at": datetime.now().isoformat(),
+                                        }
+                                    }
+                                )
+                            except Exception as e:
+                                logger.error(f"Failed to process document {doc_id}: {str(e)}")
+                                await self.doc_status.upsert(
+                                    {
+                                        doc_id: {
+                                            "status": DocStatus.FAILED,
+                                            "error": str(e),
+                                            "content": status_doc.content,
+                                            "content_summary": status_doc.content_summary,
+                                            "content_length": status_doc.content_length,
+                                            "created_at": status_doc.created_at,
+                                            "updated_at": datetime.now().isoformat(),
+                                        }
+                                    }
+                                )
+                                continue
+                        logger.info(f"Completed batch {batch_idx + 1} of {len(docs_batches)}.")
+
+                    batches.append(batch(batch_idx, docs_batch, len(docs_batches)))
+
+                await asyncio.gather(*batches)
+                await self._insert_done()
+
+                # Check if there's a pending request to process more documents (with lock)
+                has_pending_request = False
+                with storage_lock:
+                    has_pending_request = pipeline_status.get("request_pending", False)
+                    if has_pending_request:
+                        # Clear the request flag before checking for more documents
+                        pipeline_status["request_pending"] = False
+
+                if not has_pending_request:
+                    break
+
+                logger.info("Processing additional documents due to pending request")
+
+        finally:
+            # Always reset busy status when done or if an exception occurs (with lock)
+            with storage_lock:
+                pipeline_status["busy"] = False
+            logger.info("Document processing pipeline completed")
 
     async def _process_entity_relation_graph(self, chunk: dict[str, Any]) -> None:
         try:
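
The net effect: concurrent callers of the enqueue-processing coroutine no longer race. The first caller becomes the worker; later callers only set request_pending and return, and the worker loops again to drain newly enqueued documents before releasing busy. A rough usage sketch (it assumes a configured LightRAG instance rag and that the method shown above is apipeline_process_enqueue_documents called with its default splitting arguments; neither is visible in this hunk):

    import asyncio

    async def demo(rag) -> None:
        # Both calls target the same document queue. Under the new control,
        # only one of them runs the indexing loop; the other flips
        # "request_pending" and returns, and the running worker picks up
        # the extra documents before clearing "busy".
        await asyncio.gather(
            rag.apipeline_process_enqueue_documents(),
            rag.apipeline_process_enqueue_documents(),
        )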