From bee462205291d6b57b33bda2cae2a068dcaada5c Mon Sep 17 00:00:00 2001 From: yangdx Date: Fri, 21 Feb 2025 13:18:26 +0800 Subject: [PATCH 01/25] fix: handle null bytes (0x00) in text processing - Fix PostgreSQL encoding error by properly handling null bytes (0x00) in text processing. - The clean_text function now removes null bytes from all input text during the indexing phase. --- lightrag/lightrag.py | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/lightrag/lightrag.py b/lightrag/lightrag.py index db61788a..72554791 100644 --- a/lightrag/lightrag.py +++ b/lightrag/lightrag.py @@ -474,6 +474,11 @@ class LightRAG: storage_class = lazy_external_import(import_path, storage_name) return storage_class + @staticmethod + def clean_text(text: str) -> str: + """Clean text by removing null bytes (0x00) and whitespace""" + return text.strip().replace('\x00', '') + def insert( self, input: str | list[str], @@ -521,8 +526,13 @@ class LightRAG: ) -> None: update_storage = False try: - doc_key = compute_mdhash_id(full_text.strip(), prefix="doc-") - new_docs = {doc_key: {"content": full_text.strip()}} + # Clean input texts + full_text = self.clean_text(full_text) + text_chunks = [self.clean_text(chunk) for chunk in text_chunks] + + # Process cleaned texts + doc_key = compute_mdhash_id(full_text, prefix="doc-") + new_docs = {doc_key: {"content": full_text}} _add_doc_keys = await self.full_docs.filter_keys(set(doc_key)) new_docs = {k: v for k, v in new_docs.items() if k in _add_doc_keys} @@ -535,11 +545,10 @@ class LightRAG: inserting_chunks: dict[str, Any] = {} for chunk_text in text_chunks: - chunk_text_stripped = chunk_text.strip() - chunk_key = compute_mdhash_id(chunk_text_stripped, prefix="chunk-") + chunk_key = compute_mdhash_id(chunk_text, prefix="chunk-") inserting_chunks[chunk_key] = { - "content": chunk_text_stripped, + "content": chunk_text, "full_doc_id": doc_key, } @@ -576,8 +585,8 @@ class LightRAG: if isinstance(input, str): input = [input] - # 1. Remove duplicate contents from the list - unique_contents = list(set(doc.strip() for doc in input)) + # Clean input text and remove duplicates + unique_contents = list(set(self.clean_text(doc) for doc in input)) # 2. 
Generate document IDs and initial status new_docs: dict[str, Any] = { @@ -779,7 +788,7 @@ class LightRAG: all_chunks_data: dict[str, dict[str, str]] = {} chunk_to_source_map: dict[str, str] = {} for chunk_data in custom_kg.get("chunks", {}): - chunk_content = chunk_data["content"].strip() + chunk_content = self.clean_text(chunk_data["content"]) source_id = chunk_data["source_id"] tokens = len( encode_string_by_tiktoken( From f5bd3f2b16f2d270bdcf6659951db68cd2e4ba8e Mon Sep 17 00:00:00 2001 From: yangdx Date: Fri, 21 Feb 2025 13:23:55 +0800 Subject: [PATCH 02/25] Fix linting --- lightrag/lightrag.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lightrag/lightrag.py b/lightrag/lightrag.py index 72554791..9cd1cfb3 100644 --- a/lightrag/lightrag.py +++ b/lightrag/lightrag.py @@ -477,7 +477,7 @@ class LightRAG: @staticmethod def clean_text(text: str) -> str: """Clean text by removing null bytes (0x00) and whitespace""" - return text.strip().replace('\x00', '') + return text.strip().replace("\x00", "") def insert( self, From 8164c57b7ecf54913b4300bfba73312bd6c414d4 Mon Sep 17 00:00:00 2001 From: yangdx Date: Fri, 21 Feb 2025 14:59:50 +0800 Subject: [PATCH 03/25] Fix linting --- lightrag/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lightrag/utils.py b/lightrag/utils.py index ae7e8dce..ecaed2ab 100644 --- a/lightrag/utils.py +++ b/lightrag/utils.py @@ -59,7 +59,7 @@ logging.getLogger("httpx").setLevel(logging.WARNING) def set_logger(log_file: str, level: int = logging.DEBUG): """Set up file logging with the specified level. - + Args: log_file: Path to the log file level: Logging level (e.g. logging.DEBUG, logging.INFO) From 2d8a262ac37104a27ecc7778498f2dd9c698509e Mon Sep 17 00:00:00 2001 From: yangdx Date: Fri, 21 Feb 2025 16:26:56 +0800 Subject: [PATCH 04/25] Improve entity extraction logging with cleaner summary and verbose debug output MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit • Add count-based summary log message • Move detailed data to verbose debug logs --- lightrag/operate.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/lightrag/operate.py b/lightrag/operate.py index a79192ac..4e235327 100644 --- a/lightrag/operate.py +++ b/lightrag/operate.py @@ -23,6 +23,7 @@ from .utils import ( CacheData, statistic_data, get_conversation_turns, + verbose_debug, ) from .base import ( BaseGraphStorage, @@ -532,7 +533,13 @@ async def extract_entities( logger.info("Didn't extract any relationships") logger.info( - f"New entities or relationships extracted, entities:{all_entities_data}, relationships:{all_relationships_data}" + f"Extracted {len(all_entities_data)} entities and {len(all_relationships_data)} relationships" + ) + verbose_debug( + f"New entities:{all_entities_data}, relationships:{all_relationships_data}" + ) + verbose_debug( + f"New relationships:{all_relationships_data}" ) if entity_vdb is not None: From c95656ca878043476886b487610d37a5a233310e Mon Sep 17 00:00:00 2001 From: yangdx Date: Fri, 21 Feb 2025 16:28:08 +0800 Subject: [PATCH 05/25] feat: improve debug message handling with better truncation and formatting --- lightrag/operate.py | 4 +--- lightrag/utils.py | 18 +++++++++++++++++- 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/lightrag/operate.py b/lightrag/operate.py index 4e235327..772815ce 100644 --- a/lightrag/operate.py +++ b/lightrag/operate.py @@ -538,9 +538,7 @@ async def extract_entities( verbose_debug( f"New 
entities:{all_entities_data}, relationships:{all_relationships_data}" ) - verbose_debug( - f"New relationships:{all_relationships_data}" - ) + verbose_debug(f"New relationships:{all_relationships_data}") if entity_vdb is not None: data_for_vdb = { diff --git a/lightrag/utils.py b/lightrag/utils.py index ecaed2ab..d17ce87d 100644 --- a/lightrag/utils.py +++ b/lightrag/utils.py @@ -25,10 +25,26 @@ VERBOSE_DEBUG = os.getenv("VERBOSE", "false").lower() == "true" def verbose_debug(msg: str, *args, **kwargs): """Function for outputting detailed debug information. When VERBOSE_DEBUG=True, outputs the complete message. - When VERBOSE_DEBUG=False, outputs only the first 30 characters. + When VERBOSE_DEBUG=False, outputs only the first 50 characters. + + Args: + msg: The message format string + *args: Arguments to be formatted into the message + **kwargs: Keyword arguments passed to logger.debug() """ if VERBOSE_DEBUG: logger.debug(msg, *args, **kwargs) + else: + # Format the message with args first + if args: + formatted_msg = msg % args + else: + formatted_msg = msg + # Then truncate the formatted message + truncated_msg = ( + formatted_msg[:50] + "..." if len(formatted_msg) > 50 else formatted_msg + ) + logger.debug(truncated_msg, **kwargs) def set_verbose_debug(enabled: bool): From e8efcc335d9d062072d6bd90ed9eddf65a638d12 Mon Sep 17 00:00:00 2001 From: yangdx Date: Fri, 21 Feb 2025 17:53:01 +0800 Subject: [PATCH 06/25] Add access log filtering to reduce noise from high-frequency API endpoints --- lightrag/api/lightrag_server.py | 68 +++++++++++++++++++++++++++++++-- 1 file changed, 65 insertions(+), 3 deletions(-) diff --git a/lightrag/api/lightrag_server.py b/lightrag/api/lightrag_server.py index f6cee412..b656b67f 100644 --- a/lightrag/api/lightrag_server.py +++ b/lightrag/api/lightrag_server.py @@ -19,20 +19,17 @@ from ascii_colors import ASCIIColors from fastapi.middleware.cors import CORSMiddleware from contextlib import asynccontextmanager from dotenv import load_dotenv - from .utils_api import ( get_api_key_dependency, parse_args, get_default_host, display_splash_screen, ) - from lightrag import LightRAG from lightrag.types import GPTKeywordExtractionFormat from lightrag.api import __api_version__ from lightrag.utils import EmbeddingFunc from lightrag.utils import logger - from .routers.document_routes import ( DocumentManager, create_document_routes, @@ -68,6 +65,38 @@ scan_progress: Dict = { progress_lock = threading.Lock() +class AccessLogFilter(logging.Filter): + def __init__(self): + super().__init__() + # Define paths to be filtered + self.filtered_paths = ["/documents", "/health", "/webui/"] + + def filter(self, record): + try: + if not hasattr(record, "args") or not isinstance(record.args, tuple): + return True + if len(record.args) < 5: + return True + + method = record.args[1] + path = record.args[2] + status = record.args[4] + # print(f"Debug - Method: {method}, Path: {path}, Status: {status}") + # print(f"Debug - Filtered paths: {self.filtered_paths}") + + if ( + method == "GET" + and (status == 200 or status == 304) + and path in self.filtered_paths + ): + return False + + return True + + except Exception: + return True + + def create_app(args): # Set global top_k global global_top_k @@ -409,6 +438,38 @@ def create_app(args): def main(): args = parse_args() import uvicorn + import logging.config + + # Configure uvicorn logging + logging.config.dictConfig( + { + "version": 1, + "disable_existing_loggers": False, + "formatters": { + "default": { + "format": "%(levelname)s: 
%(message)s", + }, + }, + "handlers": { + "default": { + "formatter": "default", + "class": "logging.StreamHandler", + "stream": "ext://sys.stderr", + }, + }, + "loggers": { + "uvicorn.access": { + "handlers": ["default"], + "level": "INFO", + "propagate": False, + }, + }, + } + ) + + # Add filter to uvicorn access logger + uvicorn_access_logger = logging.getLogger("uvicorn.access") + uvicorn_access_logger.addFilter(AccessLogFilter()) app = create_app(args) display_splash_screen(args) @@ -416,6 +477,7 @@ def main(): "app": app, "host": args.host, "port": args.port, + "log_config": None, # Disable default config } if args.ssl: uvicorn_config.update( From 9ba12a4f3128728e400f43bd4f5d4c63edcde09e Mon Sep 17 00:00:00 2001 From: yangdx Date: Fri, 21 Feb 2025 18:53:43 +0800 Subject: [PATCH 07/25] refactor: remove redundant top_k checks in query routes The top_k parameter already has a default value set in the QueryParam class (base.py), making these checks unnecessary. This change simplifies the code while maintaining the same functionality. Changes: Remove top_k check in query_text function Remove top_k check in query_text_stream function --- lightrag/api/routers/query_routes.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/lightrag/api/routers/query_routes.py b/lightrag/api/routers/query_routes.py index b86c170e..10bfe7a8 100644 --- a/lightrag/api/routers/query_routes.py +++ b/lightrag/api/routers/query_routes.py @@ -161,8 +161,6 @@ def create_query_routes(rag, api_key: Optional[str] = None, top_k: int = 60): """ try: param = request.to_query_params(False) - if param.top_k is None: - param.top_k = top_k response = await rag.aquery(request.query, param=param) # If response is a string (e.g. cache hit), return directly @@ -192,8 +190,6 @@ def create_query_routes(rag, api_key: Optional[str] = None, top_k: int = 60): """ try: param = request.to_query_params(True) - if param.top_k is None: - param.top_k = top_k response = await rag.aquery(request.query, param=param) from fastapi.responses import StreamingResponse From a848884a7b4a0ac6608f19b2bfb0170fc6b212fc Mon Sep 17 00:00:00 2001 From: yangdx Date: Fri, 21 Feb 2025 19:34:17 +0800 Subject: [PATCH 08/25] Remove unnesessary CLI arguments, reduce CLI arguments complexity MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit • Move storage config from CLI • Move LLM and embedding binding config from CLI • Remove chunk config from CLI --- lightrag/api/utils_api.py | 148 ++++++++++---------------------------- 1 file changed, 36 insertions(+), 112 deletions(-) diff --git a/lightrag/api/utils_api.py b/lightrag/api/utils_api.py index a24e731e..b24331af 100644 --- a/lightrag/api/utils_api.py +++ b/lightrag/api/utils_api.py @@ -122,47 +122,6 @@ def parse_args() -> argparse.Namespace: description="LightRAG FastAPI Server with separate working and input directories" ) - parser.add_argument( - "--kv-storage", - default=get_env_value( - "LIGHTRAG_KV_STORAGE", DefaultRAGStorageConfig.KV_STORAGE - ), - help=f"KV storage implementation (default: {DefaultRAGStorageConfig.KV_STORAGE})", - ) - parser.add_argument( - "--doc-status-storage", - default=get_env_value( - "LIGHTRAG_DOC_STATUS_STORAGE", DefaultRAGStorageConfig.DOC_STATUS_STORAGE - ), - help=f"Document status storage implementation (default: {DefaultRAGStorageConfig.DOC_STATUS_STORAGE})", - ) - parser.add_argument( - "--graph-storage", - default=get_env_value( - "LIGHTRAG_GRAPH_STORAGE", DefaultRAGStorageConfig.GRAPH_STORAGE - ), - help=f"Graph storage 
implementation (default: {DefaultRAGStorageConfig.GRAPH_STORAGE})", - ) - parser.add_argument( - "--vector-storage", - default=get_env_value( - "LIGHTRAG_VECTOR_STORAGE", DefaultRAGStorageConfig.VECTOR_STORAGE - ), - help=f"Vector storage implementation (default: {DefaultRAGStorageConfig.VECTOR_STORAGE})", - ) - - # Bindings configuration - parser.add_argument( - "--llm-binding", - default=get_env_value("LLM_BINDING", "ollama"), - help="LLM binding to be used. Supported: lollms, ollama, openai (default: from env or ollama)", - ) - parser.add_argument( - "--embedding-binding", - default=get_env_value("EMBEDDING_BINDING", "ollama"), - help="Embedding binding to be used. Supported: lollms, ollama, openai (default: from env or ollama)", - ) - # Server configuration parser.add_argument( "--host", @@ -188,65 +147,6 @@ def parse_args() -> argparse.Namespace: help="Directory containing input documents (default: from env or ./inputs)", ) - # LLM Model configuration - parser.add_argument( - "--llm-binding-host", - default=get_env_value("LLM_BINDING_HOST", None), - help="LLM server host URL. If not provided, defaults based on llm-binding:\n" - + "- ollama: http://localhost:11434\n" - + "- lollms: http://localhost:9600\n" - + "- openai: https://api.openai.com/v1", - ) - - default_llm_api_key = get_env_value("LLM_BINDING_API_KEY", None) - - parser.add_argument( - "--llm-binding-api-key", - default=default_llm_api_key, - help="llm server API key (default: from env or empty string)", - ) - - parser.add_argument( - "--llm-model", - default=get_env_value("LLM_MODEL", "mistral-nemo:latest"), - help="LLM model name (default: from env or mistral-nemo:latest)", - ) - - # Embedding model configuration - parser.add_argument( - "--embedding-binding-host", - default=get_env_value("EMBEDDING_BINDING_HOST", None), - help="Embedding server host URL. 
If not provided, defaults based on embedding-binding:\n" - + "- ollama: http://localhost:11434\n" - + "- lollms: http://localhost:9600\n" - + "- openai: https://api.openai.com/v1", - ) - - default_embedding_api_key = get_env_value("EMBEDDING_BINDING_API_KEY", "") - parser.add_argument( - "--embedding-binding-api-key", - default=default_embedding_api_key, - help="embedding server API key (default: from env or empty string)", - ) - - parser.add_argument( - "--embedding-model", - default=get_env_value("EMBEDDING_MODEL", "bge-m3:latest"), - help="Embedding model name (default: from env or bge-m3:latest)", - ) - - parser.add_argument( - "--chunk_size", - default=get_env_value("CHUNK_SIZE", 1200), - help="chunk chunk size default 1200", - ) - - parser.add_argument( - "--chunk_overlap_size", - default=get_env_value("CHUNK_OVERLAP_SIZE", 100), - help="chunk overlap size default 100", - ) - def timeout_type(value): if value is None or value == "None": return None @@ -272,18 +172,6 @@ def parse_args() -> argparse.Namespace: default=get_env_value("MAX_TOKENS", 32768, int), help="Maximum token size (default: from env or 32768)", ) - parser.add_argument( - "--embedding-dim", - type=int, - default=get_env_value("EMBEDDING_DIM", 1024, int), - help="Embedding dimensions (default: from env or 1024)", - ) - parser.add_argument( - "--max-embed-tokens", - type=int, - default=get_env_value("MAX_EMBED_TOKENS", 8192, int), - help="Maximum embedding token size (default: from env or 8192)", - ) # Logging configuration parser.add_argument( @@ -376,6 +264,42 @@ def parse_args() -> argparse.Namespace: args.working_dir = os.path.abspath(args.working_dir) args.input_dir = os.path.abspath(args.input_dir) + # Inject storage configuration from environment variables + args.kv_storage = get_env_value( + "LIGHTRAG_KV_STORAGE", DefaultRAGStorageConfig.KV_STORAGE + ) + args.doc_status_storage = get_env_value( + "LIGHTRAG_DOC_STATUS_STORAGE", DefaultRAGStorageConfig.DOC_STATUS_STORAGE + ) + args.graph_storage = get_env_value( + "LIGHTRAG_GRAPH_STORAGE", DefaultRAGStorageConfig.GRAPH_STORAGE + ) + args.vector_storage = get_env_value( + "LIGHTRAG_VECTOR_STORAGE", DefaultRAGStorageConfig.VECTOR_STORAGE + ) + + # Inject binding configuration + args.llm_binding = get_env_value("LLM_BINDING", "ollama") + args.embedding_binding = get_env_value("EMBEDDING_BINDING", "ollama") + args.llm_binding_host = get_env_value( + "LLM_BINDING_HOST", get_default_host(args.llm_binding) + ) + args.embedding_binding_host = get_env_value( + "EMBEDDING_BINDING_HOST", get_default_host(args.embedding_binding) + ) + args.llm_binding_api_key = get_env_value("LLM_BINDING_API_KEY", None) + args.embedding_binding_api_key = get_env_value("EMBEDDING_BINDING_API_KEY", "") + + # Inject model configuration + args.llm_model = get_env_value("LLM_MODEL", "mistral-nemo:latest") + args.embedding_model = get_env_value("EMBEDDING_MODEL", "bge-m3:latest") + args.embedding_dim = get_env_value("EMBEDDING_DIM", 1024, int) + args.max_embed_tokens = get_env_value("MAX_EMBED_TOKENS", 8192, int) + + # Inject chunk configuration + args.chunk_size = get_env_value("CHUNK_SIZE", 1200, int) + args.chunk_overlap_size = get_env_value("CHUNK_OVERLAP_SIZE", 100, int) + ollama_server_infos.LIGHTRAG_MODEL = args.simulated_model_name return args From 2fdbbc2062558e8a52ca726399c33095e0426a07 Mon Sep 17 00:00:00 2001 From: yangdx Date: Fri, 21 Feb 2025 20:01:43 +0800 Subject: [PATCH 09/25] Reorder and improve command line argument definitions for better organization - Change verbose to 
store_true action - Move verbose flag to logging section - Move auto-scan flag to end of options --- lightrag/api/utils_api.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/lightrag/api/utils_api.py b/lightrag/api/utils_api.py index b24331af..5d29cb6e 100644 --- a/lightrag/api/utils_api.py +++ b/lightrag/api/utils_api.py @@ -180,6 +180,12 @@ def parse_args() -> argparse.Namespace: choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"], help="Logging level (default: from env or INFO)", ) + parser.add_argument( + "--verbose", + action="store_true", + default=get_env_value("VERBOSE", False, bool), + help="Enable verbose debug output(only valid for DEBUG log-level)", + ) parser.add_argument( "--key", @@ -205,12 +211,6 @@ def parse_args() -> argparse.Namespace: default=get_env_value("SSL_KEYFILE", None), help="Path to SSL private key file (required if --ssl is enabled)", ) - parser.add_argument( - "--auto-scan-at-startup", - action="store_true", - default=False, - help="Enable automatic scanning when the program starts", - ) parser.add_argument( "--history-turns", @@ -252,10 +252,10 @@ def parse_args() -> argparse.Namespace: ) parser.add_argument( - "--verbose", - type=bool, - default=get_env_value("VERBOSE", False, bool), - help="Verbose debug output(default: from env or false)", + "--auto-scan-at-startup", + action="store_true", + default=False, + help="Enable automatic scanning when the program starts", ) args = parser.parse_args() From 411782797b0913b738932f725c911095bca8456c Mon Sep 17 00:00:00 2001 From: yangdx Date: Sat, 22 Feb 2025 10:18:39 +0800 Subject: [PATCH 10/25] Fix linting --- lightrag/lightrag.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/lightrag/lightrag.py b/lightrag/lightrag.py index eca23d4f..67ef3aab 100644 --- a/lightrag/lightrag.py +++ b/lightrag/lightrag.py @@ -591,7 +591,7 @@ class LightRAG: if isinstance(input, str): input = [input] - # Clean input text and remove duplicates + # Clean input text and remove duplicates input = list(set(self.clean_text(doc) for doc in input)) # 1. Validate ids if provided or generate MD5 hash IDs @@ -608,10 +608,7 @@ class LightRAG: contents = {id_: doc for id_, doc in zip(ids, input)} else: # Generate contents dict of MD5 hash IDs and documents - contents = { - compute_mdhash_id(doc, prefix="doc-"): doc - for doc in input - } + contents = {compute_mdhash_id(doc, prefix="doc-"): doc for doc in input} # 2. Remove duplicate contents unique_contents = { From 351c8db849d8124aac020ff592f8e9fc1ff2824d Mon Sep 17 00:00:00 2001 From: yangdx Date: Sat, 22 Feb 2025 10:18:39 +0800 Subject: [PATCH 11/25] Fix linting --- lightrag/lightrag.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/lightrag/lightrag.py b/lightrag/lightrag.py index eca23d4f..67ef3aab 100644 --- a/lightrag/lightrag.py +++ b/lightrag/lightrag.py @@ -591,7 +591,7 @@ class LightRAG: if isinstance(input, str): input = [input] - # Clean input text and remove duplicates + # Clean input text and remove duplicates input = list(set(self.clean_text(doc) for doc in input)) # 1. Validate ids if provided or generate MD5 hash IDs @@ -608,10 +608,7 @@ class LightRAG: contents = {id_: doc for id_, doc in zip(ids, input)} else: # Generate contents dict of MD5 hash IDs and documents - contents = { - compute_mdhash_id(doc, prefix="doc-"): doc - for doc in input - } + contents = {compute_mdhash_id(doc, prefix="doc-"): doc for doc in input} # 2. 
Remove duplicate contents unique_contents = { From f9780830ad10d0293a175ba36bde68bd4024c76e Mon Sep 17 00:00:00 2001 From: yangdx Date: Sat, 22 Feb 2025 10:46:54 +0800 Subject: [PATCH 12/25] Revert: get llm-binding and embedding-binding from cli --- lightrag/api/utils_api.py | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/lightrag/api/utils_api.py b/lightrag/api/utils_api.py index 5d29cb6e..bbb8303c 100644 --- a/lightrag/api/utils_api.py +++ b/lightrag/api/utils_api.py @@ -258,6 +258,22 @@ def parse_args() -> argparse.Namespace: help="Enable automatic scanning when the program starts", ) + # LLM and embedding bindings + parser.add_argument( + "--llm-binding", + type=str, + default=get_env_value("LLM_BINDING", "ollama"), + choices=["lollms", "ollama", "openai", "openai-ollama", "azure_openai"], + help="LLM binding type (default: from env or ollama)", + ) + parser.add_argument( + "--embedding-binding", + type=str, + default=get_env_value("EMBEDDING_BINDING", "ollama"), + choices=["lollms", "ollama", "openai", "azure_openai"], + help="Embedding binding type (default: from env or ollama)", + ) + args = parser.parse_args() # convert relative path to absolute path @@ -277,10 +293,6 @@ def parse_args() -> argparse.Namespace: args.vector_storage = get_env_value( "LIGHTRAG_VECTOR_STORAGE", DefaultRAGStorageConfig.VECTOR_STORAGE ) - - # Inject binding configuration - args.llm_binding = get_env_value("LLM_BINDING", "ollama") - args.embedding_binding = get_env_value("EMBEDDING_BINDING", "ollama") args.llm_binding_host = get_env_value( "LLM_BINDING_HOST", get_default_host(args.llm_binding) ) From e935fed50e24ad527ec0dc8e8ba928082e54ca31 Mon Sep 17 00:00:00 2001 From: yangdx Date: Sat, 22 Feb 2025 13:25:12 +0800 Subject: [PATCH 13/25] Add automatic comment handling in .env files --- lightrag/lightrag.py | 4 ++++ lightrag/operate.py | 5 +++++ lightrag/utils.py | 5 ++++- 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/lightrag/lightrag.py b/lightrag/lightrag.py index 67ef3aab..8ac41721 100644 --- a/lightrag/lightrag.py +++ b/lightrag/lightrag.py @@ -48,6 +48,10 @@ from .utils import ( set_logger, ) from .types import KnowledgeGraph +from dotenv import load_dotenv + +# Load environment variables +load_dotenv(override=True) # TODO: TO REMOVE @Yannick config = configparser.ConfigParser() diff --git a/lightrag/operate.py b/lightrag/operate.py index 772815ce..a0111719 100644 --- a/lightrag/operate.py +++ b/lightrag/operate.py @@ -5,6 +5,7 @@ import json import re from typing import Any, AsyncIterator from collections import Counter, defaultdict + from .utils import ( logger, clean_str, @@ -34,6 +35,10 @@ from .base import ( ) from .prompt import GRAPH_FIELD_SEP, PROMPTS import time +from dotenv import load_dotenv + +# Load environment variables +load_dotenv(override=True) def chunking_by_token_size( diff --git a/lightrag/utils.py b/lightrag/utils.py index d17ce87d..e7217def 100644 --- a/lightrag/utils.py +++ b/lightrag/utils.py @@ -15,8 +15,11 @@ from typing import Any, Callable import xml.etree.ElementTree as ET import numpy as np import tiktoken - from lightrag.prompt import PROMPTS +from dotenv import load_dotenv + +# Load environment variables +load_dotenv(override=True) VERBOSE_DEBUG = os.getenv("VERBOSE", "false").lower() == "true" From d84b90bcd446c02e21fc2897bd026478d7b32862 Mon Sep 17 00:00:00 2001 From: yangdx Date: Sat, 22 Feb 2025 15:27:19 +0800 Subject: [PATCH 14/25] Handle special case of CLI argument openai-ollama --- 
lightrag/api/utils_api.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lightrag/api/utils_api.py b/lightrag/api/utils_api.py index bbb8303c..e32cdc41 100644 --- a/lightrag/api/utils_api.py +++ b/lightrag/api/utils_api.py @@ -293,6 +293,12 @@ def parse_args() -> argparse.Namespace: args.vector_storage = get_env_value( "LIGHTRAG_VECTOR_STORAGE", DefaultRAGStorageConfig.VECTOR_STORAGE ) + + # Handle openai-ollama special case + if args.llm_binding == "openai-ollama": + args.llm_binding = "openai" + args.embedding_binding = "ollama" + args.llm_binding_host = get_env_value( "LLM_BINDING_HOST", get_default_host(args.llm_binding) ) From 637d6756b856fc73b0b316e2ed24c4f6e98a5de5 Mon Sep 17 00:00:00 2001 From: yangdx Date: Sun, 23 Feb 2025 11:54:36 +0800 Subject: [PATCH 15/25] Optimize RAG initialization for openai-ollama --- lightrag/api/lightrag_server.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/lightrag/api/lightrag_server.py b/lightrag/api/lightrag_server.py index b656b67f..673177ac 100644 --- a/lightrag/api/lightrag_server.py +++ b/lightrag/api/lightrag_server.py @@ -314,7 +314,7 @@ def create_app(args): ) # Initialize RAG - if args.llm_binding in ["lollms", "ollama", "openai-ollama"]: + if args.llm_binding in ["lollms", "ollama", "openai"]: rag = LightRAG( working_dir=args.working_dir, llm_model_func=lollms_model_complete @@ -353,12 +353,10 @@ def create_app(args): namespace_prefix=args.namespace_prefix, auto_manage_storages_states=False, ) - else: + else: # azure_openai rag = LightRAG( working_dir=args.working_dir, - llm_model_func=azure_openai_model_complete - if args.llm_binding == "azure_openai" - else openai_alike_model_complete, + llm_model_func=azure_openai_model_complete, chunk_token_size=int(args.chunk_size), chunk_overlap_token_size=int(args.chunk_overlap_size), llm_model_kwargs={ From 460bc3a6aaa0e31a264235452482aed97c95d0c6 Mon Sep 17 00:00:00 2001 From: yangdx Date: Sun, 23 Feb 2025 13:26:38 +0800 Subject: [PATCH 16/25] update README.md and .env.example --- .env.example | 93 ++++++------- lightrag/api/README.md | 297 ++++++++++++++++++++--------------------- 2 files changed, 188 insertions(+), 202 deletions(-) diff --git a/.env.example b/.env.example index f70244e5..b15f5758 100644 --- a/.env.example +++ b/.env.example @@ -16,70 +16,65 @@ # WORKING_DIR= # INPUT_DIR= -### Logging level -LOG_LEVEL=INFO -VERBOSE=False - -### Optional Timeout -TIMEOUT=300 - -# Ollama Emulating Model Tag +### Ollama Emulating Model Tag # OLLAMA_EMULATING_MODEL_TAG=latest -### RAG Configuration -MAX_ASYNC=4 -EMBEDDING_DIM=1024 -MAX_EMBED_TOKENS=8192 -### Settings relative to query -HISTORY_TURNS=3 -COSINE_THRESHOLD=0.2 -TOP_K=60 -MAX_TOKEN_TEXT_CHUNK=4000 -MAX_TOKEN_RELATION_DESC=4000 -MAX_TOKEN_ENTITY_DESC=4000 -### Settings relative to indexing -CHUNK_SIZE=1200 -CHUNK_OVERLAP_SIZE=100 -MAX_TOKENS=32768 -MAX_TOKEN_SUMMARY=500 -SUMMARY_LANGUAGE=English +### Logging level +# LOG_LEVEL=INFO +# VERBOSE=False -### LLM Configuration (Use valid host. 
For local services, you can use host.docker.internal) +### Max async calls for LLM +# MAX_ASYNC=4 +### Optional Timeout for LLM +# TIMEOUT=None # Time out in seconds, None for infinite timeout + +### Settings for RAG query +# HISTORY_TURNS=3 +# COSINE_THRESHOLD=0.2 +# TOP_K=60 +# MAX_TOKEN_TEXT_CHUNK=4000 +# MAX_TOKEN_RELATION_DESC=4000 +# MAX_TOKEN_ENTITY_DESC=4000 + +### Settings for document indexing +# CHUNK_SIZE=1200 +# CHUNK_OVERLAP_SIZE=100 +# MAX_TOKENS=32768 # Max tokens send to LLM for summarization +# MAX_TOKEN_SUMMARY=500 # Max tokens for entity or relations summary +# SUMMARY_LANGUAGE=English +# MAX_EMBED_TOKENS=8192 + +### LLM Configuration (Use valid host. For local services installed with docker, you can use host.docker.internal) +LLM_MODEL=mistral-nemo:latest +LLM_BINDING_API_KEY=your_api_key ### Ollama example LLM_BINDING=ollama -LLM_BINDING_HOST=http://host.docker.internal:11434 -LLM_MODEL=mistral-nemo:latest - +LLM_BINDING_HOST=http://localhost:11434 ### OpenAI alike example # LLM_BINDING=openai -# LLM_MODEL=deepseek-chat -# LLM_BINDING_HOST=https://api.deepseek.com -# LLM_BINDING_API_KEY=your_api_key - -### for OpenAI LLM (LLM_BINDING_API_KEY take priority) -# OPENAI_API_KEY=your_api_key - -### Lollms example +# LLM_BINDING_HOST=https://api.openai.com/v1 +### lollms example # LLM_BINDING=lollms -# LLM_BINDING_HOST=http://host.docker.internal:9600 -# LLM_MODEL=mistral-nemo:latest +# LLM_BINDING_HOST=http://localhost:9600 - -### Embedding Configuration (Use valid host. For local services, you can use host.docker.internal) -# Ollama example -EMBEDDING_BINDING=ollama -EMBEDDING_BINDING_HOST=http://host.docker.internal:11434 +### Embedding Configuration (Use valid host. For local services installed with docker, you can use host.docker.internal) EMBEDDING_MODEL=bge-m3:latest - +EMBEDDING_DIM=1024 +# EMBEDDING_BINDING_API_KEY=your_api_key +### ollama example +EMBEDDING_BINDING=ollama +EMBEDDING_BINDING_HOST=http://localhost:11434 +### OpenAI alike example +# EMBEDDING_BINDING=openai +# LLM_BINDING_HOST=https://api.openai.com/v1 ### Lollms example # EMBEDDING_BINDING=lollms -# EMBEDDING_BINDING_HOST=http://host.docker.internal:9600 -# EMBEDDING_MODEL=bge-m3:latest +# EMBEDDING_BINDING_HOST=http://localhost:9600 ### Optional for Azure (LLM_BINDING_HOST, LLM_BINDING_API_KEY take priority) # AZURE_OPENAI_API_VERSION=2024-08-01-preview # AZURE_OPENAI_DEPLOYMENT=gpt-4o -# AZURE_OPENAI_API_KEY=myapikey +# AZURE_OPENAI_API_KEY=your_api_key # AZURE_OPENAI_ENDPOINT=https://myendpoint.openai.azure.com # AZURE_EMBEDDING_DEPLOYMENT=text-embedding-3-large @@ -138,4 +133,4 @@ MONGODB_GRAPH=false # deprecated (keep for backward compatibility) ### Qdrant QDRANT_URL=http://localhost:16333 -QDRANT_API_KEY=your-api-key # 可选 +# QDRANT_API_KEY=your-api-key diff --git a/lightrag/api/README.md b/lightrag/api/README.md index 69023685..e9dd817b 100644 --- a/lightrag/api/README.md +++ b/lightrag/api/README.md @@ -1,14 +1,14 @@ -## Install with API Support +## Install LightRAG as an API Server -LightRAG provides optional API support through FastAPI servers that add RAG capabilities to existing LLM services. You can install LightRAG with API support in two ways: +LightRAG provides optional API support through FastAPI servers that add RAG capabilities to existing LLM services. You can install LightRAG API Server in two ways: -### 1. Installation from PyPI +### Installation from PyPI ```bash pip install "lightrag-hku[api]" ``` -### 2. 
Installation from Source (Development)
+### Installation from Source (Development)

```bash
# Clone the repository
git clone https://github.com/HKUDS/lightrag.git

# Change to the repository directory
cd lightrag

# Install in editable mode with API support
pip install -e ".[api]"
```

-### Prerequisites
+### Starting API Server with Default Settings
+
+LightRAG requires both LLM and Embedding Model to work together to complete document indexing and querying tasks. LightRAG supports binding to various LLM/Embedding backends:
+
+* ollama
+* lollms
+* openai & openai compatible
+* azure_openai

Before running any of the servers, ensure you have the corresponding backend service running for both llm and embedding.
-The new api allows you to mix different bindings for llm/embeddings.
-For example, you have the possibility to use ollama for the embedding and openai for the llm.
+The LightRAG API Server provides default parameters for LLM and Embedding, allowing users to easily start the service through the command line. These default configurations are:

-#### For LoLLMs Server
-- LoLLMs must be running and accessible
-- Default connection: http://localhost:9600
-- Configure using --llm-binding-host and/or --embedding-binding-host if running on a different host/port
+* Default endpoint of LLM/Embedding backend (LLM_BINDING_HOST or EMBEDDING_BINDING_HOST)

-#### For Ollama Server
-- Ollama must be running and accessible
-- Requires environment variables setup or command line argument provided
-- Environment variables: LLM_BINDING=ollama, LLM_BINDING_HOST, LLM_MODEL
-- Command line arguments: --llm-binding=ollama, --llm-binding-host, --llm-model
-- Default connection is http://localhost:11434 if not priveded
+```
+# for ollama backend
+LLM_BINDING_HOST=http://localhost:11434
+EMBEDDING_BINDING_HOST=http://localhost:11434

-> The default MAX_TOKENS(num_ctx) for Ollama is 32768. If your Ollama server is lacking or GPU memory, set it to a lower value.
+# for lollms backend
+LLM_BINDING_HOST=http://localhost:9600
+EMBEDDING_BINDING_HOST=http://localhost:9600

-#### For OpenAI Alike Server
-- Requires environment variables setup or command line argument provided
-- Environment variables: LLM_BINDING=ollama, LLM_BINDING_HOST, LLM_MODEL, LLM_BINDING_API_KEY
-- Command line arguments: --llm-binding=ollama, --llm-binding-host, --llm-model, --llm-binding-api-key
-- Default connection is https://api.openai.com/v1 if not priveded
+# for openai, openai compatible or azure openai backend
+LLM_BINDING_HOST=https://api.openai.com/v1
+EMBEDDING_BINDING_HOST=http://localhost:9600
+```

-#### For Azure OpenAI Server
+* Default model config
+
+```
+LLM_MODEL=mistral-nemo:latest
+
+EMBEDDING_MODEL=bge-m3:latest
+EMBEDDING_DIM=1024
+MAX_EMBED_TOKENS=8192
+```
+
+* API keys for LLM/Embedding backend
+
+When connecting to a backend that requires an API key, the corresponding environment variables must be provided:
+
+```
+LLM_BINDING_API_KEY=your_api_key
+EMBEDDING_BINDING_API_KEY=your_api_key
+```
+
+* Use command line arguments to choose the LLM/Embedding backend
+
+Use `--llm-binding` to select the LLM backend type, and use `--embedding-binding` to select the embedding backend type. All the supported backend types are:
+
+```
+openai: LLM default type
+ollama: Embedding default type
+lollms:
+azure_openai:
+openai-ollama: select openai for LLM and ollama for embedding (only valid for --llm-binding)
+```
+
+The LightRAG API Server allows you to mix different bindings for llm/embeddings.
For example, you have the possibility to use ollama for the embedding and openai for the llm.With the above default parameters, you can start API Server with simple CLI arguments like these: + +``` +# start with openai llm and ollama embedding +LLM_BINDING_API_KEY=your_api_key Light_server +LLM_BINDING_API_KEY=your_api_key Light_server --llm-binding openai-ollama + +# start with openai llm and openai embedding +LLM_BINDING_API_KEY=your_api_key Light_server --llm-binding openai --embedding-binding openai + +# start with ollama llm and ollama embedding (no apikey is needed) +Light_server --llm-binding ollama --embedding-binding ollama +``` + +### For Azure OpenAI Backend Azure OpenAI API can be created using the following commands in Azure CLI (you need to install Azure CLI first from [https://docs.microsoft.com/en-us/cli/azure/install-azure-cli](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli)): ```bash # Change the resource group name, location and OpenAI resource name as needed @@ -68,13 +115,18 @@ az cognitiveservices account keys list --name $RESOURCE_NAME -g $RESOURCE_GROUP_ The output of the last command will give you the endpoint and the key for the OpenAI API. You can use these values to set the environment variables in the `.env` file. ``` +# Azure OpenAI Configuration in .env LLM_BINDING=azure_openai -LLM_BINDING_HOST=endpoint_of_azure_ai -LLM_MODEL=model_name_of_azure_ai -LLM_BINDING_API_KEY=api_key_of_azure_ai +LLM_BINDING_HOST=your-azure-endpoint +LLM_MODEL=your-model-deployment-name +LLM_BINDING_API_KEY=your-azure-api-key +AZURE_OPENAI_API_VERSION=2024-08-01-preview # optional, defaults to latest version +EMBEDDING_BINDING=azure_openai # if using Azure OpenAI for embeddings +EMBEDDING_MODEL=your-embedding-deployment-name + ``` -### 3. Install Lightrag as a Linux Service +### Install Lightrag as a Linux Service Create a your service file `lightrag.sevice` from the sample file : `lightrag.sevice.example`. Modified the WorkingDirectoryand EexecStart in the service file: @@ -105,40 +157,36 @@ sudo systemctl status lightrag.service sudo systemctl enable lightrag.service ``` +### Automatic Document Indexing +When starting any of the servers with the `--auto-scan-at-startup` parameter, the system will automatically: -## Configuration +1. Scan for new files in the input directory +2. Indexing new documents that aren't already in the database +3. Make all content immediately available for RAG queries -LightRAG can be configured using either command-line arguments or environment variables. When both are provided, command-line arguments take precedence over environment variables. +> The `--input-dir` parameter specify the input directory to scan for. -Default `TOP_K` is set to `60`. Default `COSINE_THRESHOLD` are set to `0.2`. +## API Server Configuration -### Environment Variables +API Server can be config in three way (highest priority first): -You can configure LightRAG using environment variables by creating a `.env` file in your project root directory. A sample file `.env.example` is provided for your convenience. +* Command line arguments +* Enviroment variables or .env file +* Config.ini (Only for storage configuration) -### Config.ini +Most of the configurations come with a default settings, check out details in sample file: `.env.example`. Datastorage configuration can be also set by config.ini. A sample file `config.ini.example` is provided for your convenience. -Datastorage configuration can be also set by config.ini. 
A sample file `config.ini.example` is provided for your convenience.

-### Configuration Priority
-
-The configuration values are loaded in the following order (highest priority first):
-1. Command-line arguments
-2. Environment variables
-3. Config.ini
-4. Defaul values
-
-For example:
-```bash
-# This command-line argument will override both the environment variable and default value
-python lightrag.py --port 8080
-
-# The environment variable will override the default value but not the command-line argument
-PORT=7000 python lightrag.py
-```
-
-> Best practices: you can set your database setting in Config.ini while testing, and you use .env for production.
+LightRAG supports binding to various LLM/Embedding backends:
+
+* ollama
+* lollms
+* openai & openai compatible
+* azure_openai
+
+Use the environment variable `LLM_BINDING` or the CLI argument `--llm-binding` to select the LLM backend type. Use the environment variable `EMBEDDING_BINDING` or the CLI argument `--embedding-binding` to select the embedding backend type.

### Storage Types Supported

@@ -199,7 +247,16 @@ MongoDocStatusStorage MongoDB

### How Select Storage Implementation

-You can select storage implementation by enviroment variables or command line arguments. You can not change storage implementation selection after you add documents to LightRAG. Data migration from one storage implementation to anthor is not supported yet. For further information please read the sample env file or config.ini file.
+You can select the storage implementation by environment variables. You can set the following environment variables to a specific storage implementation name before the first start of the API Server:
+
+```
+LIGHTRAG_KV_STORAGE=PGKVStorage
+LIGHTRAG_VECTOR_STORAGE=PGVectorStorage
+LIGHTRAG_GRAPH_STORAGE=PGGraphStorage
+LIGHTRAG_DOC_STATUS_STORAGE=PGDocStatusStorage
+```
+
+You can not change the storage implementation selection after you add documents to LightRAG. Data migration from one storage implementation to another is not supported yet. For further information please read the sample env file or config.ini file.

### LightRag API Server Comand Line Options

@@ -207,32 +264,22 @@ You can not change storage implementation selection after you add documents to L
| Parameter | Default | Description |
|-----------|---------|-------------|
| --host | 0.0.0.0 | Server host |
| --port | 9621 | Server port |
-| --llm-binding | ollama | LLM binding to be used. Supported: lollms, ollama, openai |
-| --llm-binding-host | (dynamic) | LLM server host URL. Defaults based on binding: http://localhost:11434 (ollama), http://localhost:9600 (lollms), https://api.openai.com/v1 (openai) |
-| --llm-model | mistral-nemo:latest | LLM model name |
-| --llm-binding-api-key | None | API Key for OpenAI Alike LLM |
-| --embedding-binding | ollama | Embedding binding to be used. Supported: lollms, ollama, openai |
-| --embedding-binding-host | (dynamic) | Embedding server host URL. Defaults based on binding: http://localhost:11434 (ollama), http://localhost:9600 (lollms), https://api.openai.com/v1 (openai) |
-| --embedding-model | bge-m3:latest | Embedding model name |
| --working-dir | ./rag_storage | Working directory for RAG storage |
| --input-dir | ./inputs | Directory containing input documents |
| --max-async | 4 | Maximum async operations |
| --max-tokens | 32768 | Maximum token size |
-| --embedding-dim | 1024 | Embedding dimensions |
-| --max-embed-tokens | 8192 | Maximum embedding token size |
| --timeout | None | Timeout in seconds (useful when using slow AI). Use None for infinite timeout |
| --log-level | INFO | Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL) |
-| --verbose | False | Verbose debug output (True, Flase) |
+| --verbose | - | Verbose debug output (True, False) |
| --key | None | API key for authentication. Protects lightrag server against unauthorized access |
| --ssl | False | Enable HTTPS |
| --ssl-certfile | None | Path to SSL certificate file (required if --ssl is enabled) |
| --ssl-keyfile | None | Path to SSL private key file (required if --ssl is enabled) |
| --top-k | 50 | Number of top-k items to retrieve; corresponds to entities in "local" mode and relationships in "global" mode. |
| --cosine-threshold | 0.4 | The cossine threshold for nodes and relations retrieval, works with top-k to control the retrieval of nodes and relations. |
-| --kv-storage | JsonKVStorage | implement-name of KV_STORAGE |
-| --graph-storage | NetworkXStorage | implement-name of GRAPH_STORAGE |
-| --vector-storage | NanoVectorDBStorage | implement-name of VECTOR_STORAGE |
-| --doc-status-storage | JsonDocStatusStorage | implement-name of DOC_STATUS_STORAGE |
+| --llm-binding | ollama | LLM binding type (lollms, ollama, openai, openai-ollama, azure_openai) |
+| --embedding-binding | ollama | Embedding binding type (lollms, ollama, openai, azure_openai) |
+| --auto-scan-at-startup | - | Scan input directory for new files and start indexing |

### Example Usage

@@ -244,57 +291,49 @@ Ollama is the default backend for both llm and embedding, so by default you can
```bash
# Run lightrag with ollama, mistral-nemo:latest for llm, and bge-m3:latest for embedding
lightrag-server

-# Using specific models (ensure they are installed in your ollama instance)
-lightrag-server --llm-model adrienbrault/nous-hermes2theta-llama3-8b:f16 --embedding-model nomic-embed-text --embedding-dim 1024
-
# Using an authentication key
lightrag-server --key my-key
-
-# Using lollms for llm and ollama for embedding
-lightrag-server --llm-binding lollms
```

#### Running a Lightrag server with lollms default local server as llm and embedding backends

```bash
-# Run lightrag with lollms, mistral-nemo:latest for llm, and bge-m3:latest for embedding, use lollms for both llm and embedding
-lightrag-server --llm-binding lollms --embedding-binding lollms
-
-# Using specific models (ensure they are installed in your ollama instance)
-lightrag-server --llm-binding lollms --llm-model adrienbrault/nous-hermes2theta-llama3-8b:f16 --embedding-binding lollms --embedding-model nomic-embed-text --embedding-dim 1024
+# Run lightrag with lollms, mistral-nemo:latest for llm, and bge-m3:latest for embedding
+# Configure LLM_BINDING=lollms and EMBEDDING_BINDING=lollms in .env or config.ini
+lightrag-server

# Using an authentication key
lightrag-server --key my-key
-
-# Using lollms for llm and openai for embedding
-lightrag-server --llm-binding lollms --embedding-binding openai --embedding-model text-embedding-3-small
```

#### Running a Lightrag server with openai server as llm and embedding backends

```bash
-# Run lightrag with lollms, GPT-4o-mini for llm, and text-embedding-3-small for embedding, use openai for both llm and embedding
-lightrag-server --llm-binding openai --llm-model GPT-4o-mini --embedding-binding openai --embedding-model text-embedding-3-small
+# Run lightrag with openai, GPT-4o-mini for llm, and text-embedding-3-small for embedding
+# Configure in .env or config.ini:
+# LLM_BINDING=openai
+# LLM_MODEL=GPT-4o-mini
+# EMBEDDING_BINDING=openai
+# EMBEDDING_MODEL=text-embedding-3-small
+lightrag-server # Using an authentication key -lightrag-server --llm-binding openai --llm-model GPT-4o-mini --embedding-binding openai --embedding-model text-embedding-3-small --key my-key - -# Using lollms for llm and openai for embedding -lightrag-server --llm-binding lollms --embedding-binding openai --embedding-model text-embedding-3-small +lightrag-server --key my-key ``` #### Running a Lightrag server with azure openai server as llm and embedding backends ```bash -# Run lightrag with lollms, GPT-4o-mini for llm, and text-embedding-3-small for embedding, use openai for both llm and embedding -lightrag-server --llm-binding azure_openai --llm-model GPT-4o-mini --embedding-binding openai --embedding-model text-embedding-3-small +# Run lightrag with azure_openai +# Configure in .env or config.ini: +# LLM_BINDING=azure_openai +# LLM_MODEL=your-model +# EMBEDDING_BINDING=azure_openai +# EMBEDDING_MODEL=your-embedding-model +lightrag-server # Using an authentication key -lightrag-server --llm-binding azure_openai --llm-model GPT-4o-mini --embedding-binding azure_openai --embedding-model text-embedding-3-small --key my-key - -# Using lollms for llm and azure_openai for embedding -lightrag-server --llm-binding lollms --embedding-binding azure_openai --embedding-model text-embedding-3-small +lightrag-server --key my-key ``` **Important Notes:** @@ -315,7 +354,18 @@ pip install lightrag-hku ## API Endpoints -All servers (LoLLMs, Ollama, OpenAI and Azure OpenAI) provide the same REST API endpoints for RAG functionality. +All servers (LoLLMs, Ollama, OpenAI and Azure OpenAI) provide the same REST API endpoints for RAG functionality. When API Server is running, visit: + +- Swagger UI: http://localhost:9621/docs +- ReDoc: http://localhost:9621/redoc + +You can test the API endpoints using the provided curl commands or through the Swagger UI interface. Make sure to: + +1. Start the appropriate backend service (LoLLMs, Ollama, or OpenAI) +2. Start the RAG server +3. Upload some documents using the document management endpoints +4. Query the system using the query endpoints +5. Trigger document scan if new files is put into inputs directory ### Query Endpoints @@ -453,62 +503,3 @@ For example, chat message "/mix 唐僧有几个徒弟" will trigger a mix mode q "/bypass" is not a LightRAG query mode, it will tell API Server to pass the query directly to the underlying LLM with chat history. So user can use LLM to answer question base on the chat history. If you are using Open WebUI as front end, you can just switch the model to a normal LLM instead of using /bypass prefix. -## Development - -Contribute to the project: [Guide](contributor-readme.MD) - -### Running in Development Mode - -For LoLLMs: -```bash -uvicorn lollms_lightrag_server:app --reload --port 9621 -``` - -For Ollama: -```bash -uvicorn ollama_lightrag_server:app --reload --port 9621 -``` - -For OpenAI: -```bash -uvicorn openai_lightrag_server:app --reload --port 9621 -``` -For Azure OpenAI: -```bash -uvicorn azure_openai_lightrag_server:app --reload --port 9621 -``` -### API Documentation - -When any server is running, visit: -- Swagger UI: http://localhost:9621/docs -- ReDoc: http://localhost:9621/redoc - -### Testing API Endpoints - -You can test the API endpoints using the provided curl commands or through the Swagger UI interface. Make sure to: -1. Start the appropriate backend service (LoLLMs, Ollama, or OpenAI) -2. Start the RAG server -3. Upload some documents using the document management endpoints -4. 
Query the system using the query endpoints -5. Trigger document scan if new files is put into inputs directory - -### Important Features - -#### Automatic Document Vectorization -When starting any of the servers with the `--input-dir` parameter, the system will automatically: -1. Check for existing vectorized content in the database -2. Only vectorize new documents that aren't already in the database -3. Make all content immediately available for RAG queries - -This intelligent caching mechanism: -- Prevents unnecessary re-vectorization of existing documents -- Reduces startup time for subsequent runs -- Preserves system resources -- Maintains consistency across restarts - -**Important Notes:** -- The `--input-dir` parameter enables automatic document processing at startup -- Documents already in the database are not re-vectorized -- Only new documents in the input directory will be processed -- This optimization significantly reduces startup time for subsequent runs -- The working directory (`--working-dir`) stores the vectorized documents database From 845e914f1bcc8d8cd1543146b319f31318d359a6 Mon Sep 17 00:00:00 2001 From: yangdx Date: Sun, 23 Feb 2025 15:46:47 +0800 Subject: [PATCH 17/25] fix: make ids parameter optional and optimize input text cleaning - Add default None value for ids parameter - Move text cleaning into else branch - Only clean text when auto-generating ids - Preserve original text with custom ids - Improve code readability --- lightrag/lightrag.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/lightrag/lightrag.py b/lightrag/lightrag.py index 67ef3aab..efc49c2a 100644 --- a/lightrag/lightrag.py +++ b/lightrag/lightrag.py @@ -577,7 +577,7 @@ class LightRAG: await self._insert_done() async def apipeline_enqueue_documents( - self, input: str | list[str], ids: list[str] | None + self, input: str | list[str], ids: list[str] | None = None ) -> None: """ Pipeline for Processing Documents @@ -591,9 +591,6 @@ class LightRAG: if isinstance(input, str): input = [input] - # Clean input text and remove duplicates - input = list(set(self.clean_text(doc) for doc in input)) - # 1. 
Validate ids if provided or generate MD5 hash IDs if ids is not None: # Check if the number of IDs matches the number of documents @@ -607,6 +604,8 @@ class LightRAG: # Generate contents dict of IDs provided by user and documents contents = {id_: doc for id_, doc in zip(ids, input)} else: + # Clean input text and remove duplicates + input = list(set(self.clean_text(doc) for doc in input)) # Generate contents dict of MD5 hash IDs and documents contents = {compute_mdhash_id(doc, prefix="doc-"): doc for doc in input} From dbeda8a9ff43eed8937d90ee00f679ccae3b3d42 Mon Sep 17 00:00:00 2001 From: yangdx Date: Sun, 23 Feb 2025 16:12:08 +0800 Subject: [PATCH 18/25] Change scanning logs from INFO to DEBUG level --- lightrag/api/routers/document_routes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lightrag/api/routers/document_routes.py b/lightrag/api/routers/document_routes.py index 25ca24e4..5c742f39 100644 --- a/lightrag/api/routers/document_routes.py +++ b/lightrag/api/routers/document_routes.py @@ -161,7 +161,7 @@ class DocumentManager: """Scan input directory for new files""" new_files = [] for ext in self.supported_extensions: - logging.info(f"Scanning for {ext} files in {self.input_dir}") + logging.debug(f"Scanning for {ext} files in {self.input_dir}") for file_path in self.input_dir.rglob(f"*{ext}"): if file_path not in self.indexed_files: new_files.append(file_path) From df95f251dcaf6d6623d36a9f3f8b8def1c33e74e Mon Sep 17 00:00:00 2001 From: yangdx Date: Sun, 23 Feb 2025 16:42:31 +0800 Subject: [PATCH 19/25] Move server ready message to lifespan --- lightrag/api/lightrag_server.py | 2 ++ lightrag/api/utils_api.py | 2 -- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lightrag/api/lightrag_server.py b/lightrag/api/lightrag_server.py index b656b67f..10a9b52c 100644 --- a/lightrag/api/lightrag_server.py +++ b/lightrag/api/lightrag_server.py @@ -181,6 +181,8 @@ def create_app(args): "Skip document scanning(another scanning is active)" ) + ASCIIColors.green("\nServer is ready to accept connections! 🚀\n") + yield finally: diff --git a/lightrag/api/utils_api.py b/lightrag/api/utils_api.py index a24e731e..8784f265 100644 --- a/lightrag/api/utils_api.py +++ b/lightrag/api/utils_api.py @@ -548,7 +548,5 @@ def display_splash_screen(args: argparse.Namespace) -> None: Make sure to include the X-API-Key header in all your requests. """) - ASCIIColors.green("Server is ready to accept connections! 🚀\n") - # Ensure splash output flush to system log sys.stdout.flush() From 57884f2fb8c259e6c93d417b621c84741f907dea Mon Sep 17 00:00:00 2001 From: yangdx Date: Sun, 23 Feb 2025 16:52:41 +0800 Subject: [PATCH 20/25] Refine LLM settings in env sample file --- .env.example | 6 +++++- lightrag/api/README.md | 3 +-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/.env.example b/.env.example index b15f5758..2b5c284c 100644 --- a/.env.example +++ b/.env.example @@ -45,17 +45,21 @@ # MAX_EMBED_TOKENS=8192 ### LLM Configuration (Use valid host. 
For local services installed with docker, you can use host.docker.internal) +LLM_BINDING=ollama LLM_MODEL=mistral-nemo:latest LLM_BINDING_API_KEY=your_api_key ### Ollama example -LLM_BINDING=ollama LLM_BINDING_HOST=http://localhost:11434 ### OpenAI alike example # LLM_BINDING=openai +# LLM_MODEL=gpt-4o # LLM_BINDING_HOST=https://api.openai.com/v1 +# LLM_BINDING_API_KEY=your_api_key ### lollms example # LLM_BINDING=lollms +# LLM_MODEL=mistral-nemo:latest # LLM_BINDING_HOST=http://localhost:9600 +# LLM_BINDING_API_KEY=your_api_key ### Embedding Configuration (Use valid host. For local services installed with docker, you can use host.docker.internal) EMBEDDING_MODEL=bge-m3:latest diff --git a/lightrag/api/README.md b/lightrag/api/README.md index e9dd817b..d06a8d9e 100644 --- a/lightrag/api/README.md +++ b/lightrag/api/README.md @@ -45,7 +45,7 @@ EMBEDDING_BINDING_HOST=http://localhost:11434 LLM_BINDING_HOST=http://localhost:9600 EMBEDDING_BINDING_HOST=http://localhost:9600 -# for openai, openai compatible or azure openai backend +# for openai, openai compatible or azure openai backend LLM_BINDING_HOST=https://api.openai.com/v1 EMBEDDING_BINDING_HOST=http://localhost:9600 ``` @@ -502,4 +502,3 @@ A query prefix in the query string can determines which LightRAG query mode is u For example, chat message "/mix 唐僧有几个徒弟" will trigger a mix mode query for LighRAG. A chat message without query prefix will trigger a hybrid mode query by default。 "/bypass" is not a LightRAG query mode, it will tell API Server to pass the query directly to the underlying LLM with chat history. So user can use LLM to answer question base on the chat history. If you are using Open WebUI as front end, you can just switch the model to a normal LLM instead of using /bypass prefix. - From 78e0ca7835cd01e961ea673db890e4ec8582f7ca Mon Sep 17 00:00:00 2001 From: yangdx Date: Sun, 23 Feb 2025 17:03:35 +0800 Subject: [PATCH 21/25] Change defautl timeout for LLM to 150s --- .env.example | 2 +- lightrag/api/README.md | 2 +- lightrag/api/utils_api.py | 2 ++ 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.env.example b/.env.example index 2b5c284c..835c1479 100644 --- a/.env.example +++ b/.env.example @@ -26,7 +26,7 @@ ### Max async calls for LLM # MAX_ASYNC=4 ### Optional Timeout for LLM -# TIMEOUT=None # Time out in seconds, None for infinite timeout +# TIMEOUT=150 # Time out in seconds, None for infinite timeout ### Settings for RAG query # HISTORY_TURNS=3 diff --git a/lightrag/api/README.md b/lightrag/api/README.md index d06a8d9e..86f18271 100644 --- a/lightrag/api/README.md +++ b/lightrag/api/README.md @@ -268,7 +268,7 @@ You can not change storage implementation selection after you add documents to L | --input-dir | ./inputs | Directory containing input documents | | --max-async | 4 | Maximum async operations | | --max-tokens | 32768 | Maximum token size | -| --timeout | None | Timeout in seconds (useful when using slow AI). Use None for infinite timeout | +| --timeout | 150 | Timeout in seconds. None for infinite timeout(not recommended) | | --log-level | INFO | Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL) | | --verbose | - | Verbose debug output (True, Flase) | | --key | None | API key for authentication. 
Protects lightrag server against unauthorized access | diff --git a/lightrag/api/utils_api.py b/lightrag/api/utils_api.py index e32cdc41..a78a6e93 100644 --- a/lightrag/api/utils_api.py +++ b/lightrag/api/utils_api.py @@ -148,6 +148,8 @@ def parse_args() -> argparse.Namespace: ) def timeout_type(value): + if value is None: + return 150 if value is None or value == "None": return None return int(value) From 4a9cfdcb10792ac00f1c8c7cd1bc4bb08d600046 Mon Sep 17 00:00:00 2001 From: yangdx Date: Sun, 23 Feb 2025 17:56:10 +0800 Subject: [PATCH 22/25] Update storage configurations in .env.example --- .env.example | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.env.example b/.env.example index 835c1479..e4034def 100644 --- a/.env.example +++ b/.env.example @@ -85,10 +85,10 @@ EMBEDDING_BINDING_HOST=http://localhost:11434 # AZURE_EMBEDDING_API_VERSION=2023-05-15 ### Data storage selection -# LIGHTRAG_KV_STORAGE=PGKVStorage -# LIGHTRAG_VECTOR_STORAGE=PGVectorStorage -# LIGHTRAG_GRAPH_STORAGE=PGGraphStorage -# LIGHTRAG_DOC_STATUS_STORAGE=PGDocStatusStorage +LIGHTRAG_KV_STORAGE=JsonKVStorage +LIGHTRAG_VECTOR_STORAGE=NanoVectorDBStorage +LIGHTRAG_GRAPH_STORAGE=NetworkXStorage +LIGHTRAG_DOC_STATUS_STORAGE=JsonDocStatusStorage ### Oracle Database Configuration ORACLE_DSN=localhost:1521/XEPDB1 From 4202ce8d2f82edfae212f7a45c982330c838460f Mon Sep 17 00:00:00 2001 From: yangdx Date: Sun, 23 Feb 2025 18:47:10 +0800 Subject: [PATCH 23/25] Fix entity_type string formatting(Fix PostgreSQL indexing) - Remove redundant quotes - Use consistent double quotes --- lightrag/operate.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lightrag/operate.py b/lightrag/operate.py index 772815ce..a11fa49e 100644 --- a/lightrag/operate.py +++ b/lightrag/operate.py @@ -296,7 +296,7 @@ async def _merge_edges_then_upsert( node_data={ "source_id": source_id, "description": description, - "entity_type": '"UNKNOWN"', + "entity_type": "UNKNOWN", }, ) description = await _handle_entity_relation_summary( From 9546be326a0c41eae359ed0fec8bd13f06268794 Mon Sep 17 00:00:00 2001 From: yangdx Date: Sun, 23 Feb 2025 19:47:43 +0800 Subject: [PATCH 24/25] Improve entity extraction logging and metrics --- lightrag/operate.py | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/lightrag/operate.py b/lightrag/operate.py index 987fc7bb..e3c7fe33 100644 --- a/lightrag/operate.py +++ b/lightrag/operate.py @@ -381,9 +381,8 @@ async def extract_entities( continue_prompt = PROMPTS["entiti_continue_extraction"] if_loop_prompt = PROMPTS["entiti_if_loop_extraction"] - already_processed = 0 - already_entities = 0 - already_relations = 0 + processed_chunks = 0 + total_chunks = len(ordered_chunks) async def _user_llm_func_with_cache( input_text: str, history_messages: list[dict[str, str]] = None @@ -437,7 +436,7 @@ async def extract_entities( chunk_key_dp (tuple[str, TextChunkSchema]): ("chunck-xxxxxx", {"tokens": int, "content": str, "full_doc_id": str, "chunk_order_index": int}) """ - nonlocal already_processed, already_entities, already_relations + nonlocal processed_chunks chunk_key = chunk_key_dp[0] chunk_dp = chunk_key_dp[1] content = chunk_dp["content"] @@ -494,12 +493,11 @@ async def extract_entities( maybe_edges[(if_relation["src_id"], if_relation["tgt_id"])].append( if_relation ) - already_processed += 1 - already_entities += len(maybe_nodes) - already_relations += len(maybe_edges) - - logger.debug( - f"Processed {already_processed} chunks, {already_entities} 
entities(duplicated), {already_relations} relations(duplicated)\r", + processed_chunks += 1 + entities_count = len(maybe_nodes) + relations_count = len(maybe_edges) + logger.info( + f" Chunk {processed_chunks}/{total_chunks}: extracted {entities_count} entities and {relations_count} relationships (duplicated)" ) return dict(maybe_nodes), dict(maybe_edges) @@ -538,7 +536,7 @@ async def extract_entities( logger.info("Didn't extract any relationships") logger.info( - f"Extracted {len(all_entities_data)} entities and {len(all_relationships_data)} relationships" + f"Extracted {len(all_entities_data)} entities and {len(all_relationships_data)} relationships (duplicated)" ) verbose_debug( f"New entities:{all_entities_data}, relationships:{all_relationships_data}" From 2ebab84b72840a0e08e5f1e1775843995ff92457 Mon Sep 17 00:00:00 2001 From: yangdx Date: Sun, 23 Feb 2025 19:54:12 +0800 Subject: [PATCH 25/25] Fix typo --- lightrag/operate.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lightrag/operate.py b/lightrag/operate.py index e3c7fe33..e3f445bb 100644 --- a/lightrag/operate.py +++ b/lightrag/operate.py @@ -497,7 +497,7 @@ async def extract_entities( entities_count = len(maybe_nodes) relations_count = len(maybe_edges) logger.info( - f" Chunk {processed_chunks}/{total_chunks}: extracted {entities_count} entities and {relations_count} relationships (duplicated)" + f" Chunk {processed_chunks}/{total_chunks}: extracted {entities_count} entities and {relations_count} relationships (deduplicated)" ) return dict(maybe_nodes), dict(maybe_edges) @@ -536,7 +536,7 @@ async def extract_entities( logger.info("Didn't extract any relationships") logger.info( - f"Extracted {len(all_entities_data)} entities and {len(all_relationships_data)} relationships (duplicated)" + f"Extracted {len(all_entities_data)} entities and {len(all_relationships_data)} relationships (deduplicated)" ) verbose_debug( f"New entities:{all_entities_data}, relationships:{all_relationships_data}"