Remove redundant `set_logger` function and related calls
```diff
@@ -80,24 +80,26 @@ def initialize_share_data(workers: int = 1):
     # Mark as initialized
     _initialized = True

     # Initialize pipeline status for document indexing control
     pipeline_namespace = get_namespace_data("pipeline_status")

-    # 创建一个共享列表对象用于 history_messages
+    # Create a shared list object for history_messages
     history_messages = _manager.list() if is_multiprocess else []

-    pipeline_namespace.update({
-        "busy": False,  # Control concurrent processes
-        "job_name": "Default Job",  # Current job name (indexing files/indexing texts)
-        "job_start": None,  # Job start time
-        "docs": 0,  # Total number of documents to be indexed
-        "batchs": 0,  # Number of batches for processing documents
-        "cur_batch": 0,  # Current processing batch
-        "request_pending": False,  # Flag for pending request for processing
-        "latest_message": "",  # Latest message from pipeline processing
-        "history_messages": history_messages,  # Use the shared list object
-    })
+    pipeline_namespace.update(
+        {
+            "busy": False,  # Control concurrent processes
+            "job_name": "Default Job",  # Current job name (indexing files/indexing texts)
+            "job_start": None,  # Job start time
+            "docs": 0,  # Total number of documents to be indexed
+            "batchs": 0,  # Number of batches for processing documents
+            "cur_batch": 0,  # Current processing batch
+            "request_pending": False,  # Flag for pending request for processing
+            "latest_message": "",  # Latest message from pipeline processing
+            "history_messages": history_messages,  # Use the shared list object
+        }
+    )


 def try_initialize_namespace(namespace: str) -> bool:
```
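The `_manager.list()` branch is what lets `history_messages` survive multiple worker processes: a plain Python list exists only in the process that created it, while a `multiprocessing.Manager` list lives in a manager server process and is reached through proxies. A minimal standalone sketch of that behavior (not code from this commit):

```python
from multiprocessing import Manager, Process

def worker(history):
    # The append goes through a proxy to the manager's server process,
    # so every other process sees it too.
    history.append("batch done")

if __name__ == "__main__":
    manager = Manager()
    history_messages = manager.list()  # shared counterpart of the plain []
    procs = [Process(target=worker, args=(history_messages,)) for _ in range(2)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(list(history_messages))  # ['batch done', 'batch done']
```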
```diff
@@ -45,7 +45,6 @@ from .utils import (
     lazy_external_import,
     limit_async_func_call,
     logger,
-    set_logger,
 )
 from .types import KnowledgeGraph
 from dotenv import load_dotenv
```
```diff
@@ -268,7 +267,6 @@ class LightRAG:

     def __post_init__(self):
         os.makedirs(os.path.dirname(self.log_file_path), exist_ok=True)
-        set_logger(self.log_file_path, self.log_level)
         logger.info(f"Logger initialized for working directory: {self.working_dir}")

         from lightrag.kg.shared_storage import (
```
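With the `set_logger` call removed, `__post_init__` only creates the log directory and logs a message; handler configuration is left to the application that embeds `LightRAG`. A hedged sketch of the caller-side setup this implies (the `lightrag` logger name is an assumption for illustration):

```python
import logging

# Configure handlers once at application startup, before LightRAG()
# is constructed; __post_init__ no longer installs any handlers.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
logging.getLogger("lightrag").setLevel(logging.INFO)  # assumed logger name
```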
```diff
@@ -682,7 +680,7 @@ class LightRAG:
         with storage_lock:
             # Ensure only one worker is processing documents
             if not pipeline_status.get("busy", False):
-                # Cleaning history_messages without breaking it as a shared list object
+                # Cleaning history_messages without breaking it as a shared list object
                 current_history = pipeline_status.get("history_messages", [])
                 if hasattr(current_history, "clear"):
                     current_history.clear()
```
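The in-place `clear()` is deliberate: rebinding `pipeline_status["history_messages"]` to a fresh `[]` would swap the shared proxy for a process-local list that other workers never see, and the `hasattr` guard covers Python versions whose `ListProxy` does not expose `clear()`. A standalone illustration of the same pattern (not commit code):

```python
from multiprocessing import Manager

if __name__ == "__main__":
    manager = Manager()
    status = manager.dict()
    status["history_messages"] = manager.list(["old message"])

    history = status["history_messages"]
    if hasattr(history, "clear"):
        history.clear()   # in place: every process holding the proxy sees []
    else:
        del history[:]    # slice deletion is exposed on ListProxy everywhere

    print(list(status["history_messages"]))  # []
```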
```diff
@@ -336,7 +336,6 @@ async def extract_entities(
     global_config: dict[str, str],
     llm_response_cache: BaseKVStorage | None = None,
 ) -> None:
-
     from lightrag.kg.shared_storage import get_namespace_data

     pipeline_status = get_namespace_data("pipeline_status")
```
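Once fetched, the namespace behaves as a mutable mapping shared across workers, so progress updates made during extraction are visible everywhere. A hedged usage sketch using the keys initialized above (the message text is illustrative, not from this commit):

```python
from lightrag.kg.shared_storage import get_namespace_data

# Keys below are the ones set up in initialize_share_data above.
pipeline_status = get_namespace_data("pipeline_status")
pipeline_status["latest_message"] = "Extracting entities..."
pipeline_status["history_messages"].append("Extracting entities...")
```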
```diff
@@ -68,52 +68,6 @@ logger.setLevel(logging.INFO)
 logging.getLogger("httpx").setLevel(logging.WARNING)


-def set_logger(log_file: str, level: int = logging.DEBUG):
-    """Set up file logging with the specified level.
-
-    Args:
-        log_file: Path to the log file
-        level: Logging level (e.g. logging.DEBUG, logging.INFO)
-    """
-
-    logger.setLevel(level)
-    log_file = os.path.abspath(log_file)
-    formatter = logging.Formatter(
-        "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
-    )
-    has_file_handler = False
-    has_console_handler = False
-
-    for handler in logger.handlers:
-        if isinstance(handler, logging.FileHandler):
-            has_file_handler = True
-        elif isinstance(handler, logging.StreamHandler) and not isinstance(
-            handler, logging.FileHandler
-        ):
-            has_console_handler = True
-
-    if not has_file_handler:
-        from logging.handlers import RotatingFileHandler
-
-        file_handler = RotatingFileHandler(
-            log_file,
-            maxBytes=10 * 1024 * 1024,  # 10MB
-            backupCount=5,
-            encoding="utf-8",
-        )
-        file_handler.setLevel(level)
-        file_handler.setFormatter(formatter)
-        logger.addHandler(file_handler)
-
-    if not has_console_handler:
-        console_handler = logging.StreamHandler()
-        console_handler.setLevel(level)
-        console_handler.setFormatter(formatter)
-        logger.addHandler(console_handler)
-
-    logger.propagate = False
-
-
 class UnlimitedSemaphore:
     """A context manager that allows unlimited access."""
```
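For reference, the core of the deleted helper was a guard against stacking duplicate handlers on repeated calls. A condensed sketch of that idea (`attach_file_handler` is a hypothetical name, not part of the codebase):

```python
import logging
from logging.handlers import RotatingFileHandler

def attach_file_handler(logger: logging.Logger, log_file: str) -> None:
    # Same guard the removed set_logger performed: skip if a file
    # handler is already attached, so repeated calls stay idempotent.
    if any(isinstance(h, logging.FileHandler) for h in logger.handlers):
        return
    handler = RotatingFileHandler(
        log_file, maxBytes=10 * 1024 * 1024, backupCount=5, encoding="utf-8"
    )
    handler.setFormatter(
        logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    )
    logger.addHandler(handler)
```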
```diff
@@ -125,7 +125,6 @@ def main():
             if callable(value):
                 self.cfg.set(key, value)

-
             if hasattr(gunicorn_config, "logconfig_dict"):
                 self.cfg.set(
                     "logconfig_dict", getattr(gunicorn_config, "logconfig_dict")
```
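`logconfig_dict` is Gunicorn's `dictConfig`-style logging setting, so the hook above forwards whatever the imported config module exports under that name. A hedged sketch of such an export (formatter and levels are illustrative, not from this commit):

```python
# Hypothetical gunicorn_config module content; Gunicorn passes this
# dict to logging.config.dictConfig at startup.
logconfig_dict = {
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {
        "default": {"format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s"},
    },
    "handlers": {
        "console": {"class": "logging.StreamHandler", "formatter": "default"},
    },
    "root": {"level": "INFO", "handlers": ["console"]},
}
```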