From 0284469fd490411b437d190f1b54c8a7bdb28487 Mon Sep 17 00:00:00 2001 From: ultrageopro Date: Mon, 3 Feb 2025 11:25:09 +0300 Subject: [PATCH] doc: add information about log_dir parameter --- README.md | 1 + lightrag/lightrag.py | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 950c5c5a..dd570608 100644 --- a/README.md +++ b/README.md @@ -799,6 +799,7 @@ if __name__ == "__main__": | **addon\_params** | `dict` | Additional parameters, e.g., `{"example_number": 1, "language": "Simplified Chinese", "entity_types": ["organization", "person", "geo", "event"], "insert_batch_size": 10}`: sets example limit, output language, and batch size for document processing | `example_number: all examples, language: English, insert_batch_size: 10` | | **convert\_response\_to\_json\_func** | `callable` | Not used | `convert_response_to_json` | | **embedding\_cache\_config** | `dict` | Configuration for question-answer caching. Contains three parameters:
- `enabled`: Boolean value to enable/disable cache lookup functionality. When enabled, the system will check cached responses before generating new answers.
- `similarity_threshold`: Float value (0-1), similarity threshold. When a new question's similarity with a cached question exceeds this threshold, the cached answer will be returned directly without calling the LLM.
- `use_llm_check`: Boolean value to enable/disable LLM similarity verification. When enabled, LLM will be used as a secondary check to verify the similarity between questions before returning cached answers. | Default: `{"enabled": False, "similarity_threshold": 0.95, "use_llm_check": False}` | +| **log\_dir** | `str` | Directory to store logs. | The current working directory (`os.getcwd()`) | ### Error Handling diff --git a/lightrag/lightrag.py b/lightrag/lightrag.py index 712c60bb..42e6e9cc 100644 --- a/lightrag/lightrag.py +++ b/lightrag/lightrag.py @@ -129,7 +129,7 @@ class LightRAG: # logging current_log_level = logger.level log_level: str = field(default=current_log_level) - logging_dir: str = field(default=os.getcwd()) + log_dir: str = field(default=os.getcwd()) # text chunking chunk_token_size: int = 1200 @@ -184,8 +184,8 @@ class LightRAG: chunking_func_kwargs: dict = field(default_factory=dict) def __post_init__(self): - os.makedirs(self.logging_dir, exist_ok=True) - log_file = os.path.join(self.logging_dir, "lightrag.log") + os.makedirs(self.log_dir, exist_ok=True) + log_file = os.path.join(self.log_dir, "lightrag.log") set_logger(log_file) logger.setLevel(self.log_level)