Remove redundant set_logger function and related calls
@@ -84,20 +84,22 @@ def initialize_share_data(workers: int = 1):
     # Initialize pipeline status for document indexing control
     pipeline_namespace = get_namespace_data("pipeline_status")
 
-    # 创建一个共享列表对象用于 history_messages
+    # Create a shared list object for history_messages
     history_messages = _manager.list() if is_multiprocess else []
 
-    pipeline_namespace.update({
-        "busy": False,  # Control concurrent processes
-        "job_name": "Default Job",  # Current job name (indexing files/indexing texts)
-        "job_start": None,  # Job start time
-        "docs": 0,  # Total number of documents to be indexed
-        "batchs": 0,  # Number of batches for processing documents
-        "cur_batch": 0,  # Current processing batch
-        "request_pending": False,  # Flag for pending request for processing
-        "latest_message": "",  # Latest message from pipeline processing
-        "history_messages": history_messages,  # 使用共享列表对象
-    })
+    pipeline_namespace.update(
+        {
+            "busy": False,  # Control concurrent processes
+            "job_name": "Default Job",  # Current job name (indexing files/indexing texts)
+            "job_start": None,  # Job start time
+            "docs": 0,  # Total number of documents to be indexed
+            "batchs": 0,  # Number of batches for processing documents
+            "cur_batch": 0,  # Current processing batch
+            "request_pending": False,  # Flag for pending request for processing
+            "latest_message": "",  # Latest message from pipeline processing
+            "history_messages": history_messages,  # 使用共享列表对象
+        }
+    )
 
 
 def try_initialize_namespace(namespace: str) -> bool:
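Note on the `_manager.list()` branch above: a `multiprocessing.Manager` list is a proxy object, so appends made in any worker process are visible to every process holding the proxy. A minimal standalone sketch of that sharing pattern (the worker function and the messages are illustrative, not LightRAG code):

    from multiprocessing import Manager, Process

    def worker(history, status):
        # Mutations on manager proxies are forwarded to the manager
        # process, so every worker observes them.
        status["busy"] = True
        history.append("worker: indexing started")

    if __name__ == "__main__":
        manager = Manager()
        history_messages = manager.list()   # shared proxy list
        pipeline_status = manager.dict({"busy": False})
        p = Process(target=worker, args=(history_messages, pipeline_status))
        p.start()
        p.join()
        print(list(history_messages))   # ['worker: indexing started']
        print(pipeline_status["busy"])  # True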
@@ -45,7 +45,6 @@ from .utils import (
     lazy_external_import,
     limit_async_func_call,
     logger,
-    set_logger,
 )
 from .types import KnowledgeGraph
 from dotenv import load_dotenv
@@ -268,7 +267,6 @@ class LightRAG:
 
     def __post_init__(self):
         os.makedirs(os.path.dirname(self.log_file_path), exist_ok=True)
-        set_logger(self.log_file_path, self.log_level)
         logger.info(f"Logger initialized for working directory: {self.working_dir}")
 
         from lightrag.kg.shared_storage import (
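With the `set_logger(...)` call removed from `__post_init__`, the library no longer attaches handlers itself; the embedding application is expected to configure logging before constructing the instance. A minimal sketch of such caller-side setup (the logger name "lightrag" is an assumption based on the shared `logger` imported above):

    import logging

    # Configure root logging once at application startup; the library's
    # module-level logger then inherits these handlers.
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    )
    logging.getLogger("lightrag").setLevel(logging.INFO)  # assumed logger name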
@@ -336,7 +336,6 @@ async def extract_entities(
     global_config: dict[str, str],
     llm_response_cache: BaseKVStorage | None = None,
 ) -> None:
-
     from lightrag.kg.shared_storage import get_namespace_data
 
     pipeline_status = get_namespace_data("pipeline_status")
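The lookup above resolves a process-shared namespace by key. A hypothetical single-process stand-in for `get_namespace_data`, showing how `extract_entities` consumes the status dict it returns (the real implementation lives in lightrag.kg.shared_storage):

    _shared_data: dict[str, dict] = {
        "pipeline_status": {"latest_message": "", "history_messages": []}
    }

    def get_namespace_data(namespace: str) -> dict:
        # Hypothetical: return the mutable dict registered for this namespace.
        return _shared_data[namespace]

    pipeline_status = get_namespace_data("pipeline_status")
    pipeline_status["latest_message"] = "Extracting entities..."
    pipeline_status["history_messages"].append(pipeline_status["latest_message"])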
@@ -68,52 +68,6 @@ logger.setLevel(logging.INFO)
 logging.getLogger("httpx").setLevel(logging.WARNING)
 
 
-def set_logger(log_file: str, level: int = logging.DEBUG):
-    """Set up file logging with the specified level.
-
-    Args:
-        log_file: Path to the log file
-        level: Logging level (e.g. logging.DEBUG, logging.INFO)
-    """
-    logger.setLevel(level)
-    log_file = os.path.abspath(log_file)
-    formatter = logging.Formatter(
-        "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
-    )
-    has_file_handler = False
-    has_console_handler = False
-
-    for handler in logger.handlers:
-        if isinstance(handler, logging.FileHandler):
-            has_file_handler = True
-        elif isinstance(handler, logging.StreamHandler) and not isinstance(
-            handler, logging.FileHandler
-        ):
-            has_console_handler = True
-
-    if not has_file_handler:
-        from logging.handlers import RotatingFileHandler
-
-        file_handler = RotatingFileHandler(
-            log_file,
-            maxBytes=10 * 1024 * 1024,  # 10MB
-            backupCount=5,
-            encoding="utf-8",
-        )
-        file_handler.setLevel(level)
-        file_handler.setFormatter(formatter)
-        logger.addHandler(file_handler)
-
-    if not has_console_handler:
-        console_handler = logging.StreamHandler()
-        console_handler.setLevel(level)
-        console_handler.setFormatter(formatter)
-        logger.addHandler(console_handler)
-
-    logger.propagate = False
-
-
 class UnlimitedSemaphore:
     """A context manager that allows unlimited access."""
 
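Callers that still want the rotating file log this removed helper produced can recreate it at startup with the standard library alone; a sketch under the assumption that the shared logger is named "lightrag" (path and sizes are illustrative, matching the removed defaults):

    import logging
    from logging.handlers import RotatingFileHandler

    logger = logging.getLogger("lightrag")  # assumed logger name
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    )
    # 10 MB per file, five rotated backups, as the removed helper did.
    file_handler = RotatingFileHandler(
        "lightrag.log", maxBytes=10 * 1024 * 1024, backupCount=5, encoding="utf-8"
    )
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.propagate = False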
@@ -125,7 +125,6 @@ def main():
                 if callable(value):
                     self.cfg.set(key, value)
 
-
             if hasattr(gunicorn_config, "logconfig_dict"):
                 self.cfg.set(
                     "logconfig_dict", getattr(gunicorn_config, "logconfig_dict")
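Gunicorn hands `logconfig_dict` to `logging.config.dictConfig`, so forwarding it here lets the config module own logging end to end. A sketch of what a `gunicorn_config` module might expose (contents are illustrative):

    # gunicorn_config.py (hypothetical contents)
    logconfig_dict = {
        "version": 1,
        "disable_existing_loggers": False,
        "formatters": {
            "default": {
                "format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
            }
        },
        "handlers": {
            "console": {"class": "logging.StreamHandler", "formatter": "default"}
        },
        "root": {"level": "INFO", "handlers": ["console"]},
    }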