From 2c56141bfd5ab8a1f8d52b77f08dbab23a067ee2 Mon Sep 17 00:00:00 2001
From: yangdx
Date: Fri, 14 Feb 2025 12:34:26 +0800
Subject: [PATCH 1/7] Standardize variable names with other vector database
 implementations (without functional modifications)

---
 lightrag/kg/faiss_impl.py          | 4 ++--
 lightrag/kg/nano_vector_db_impl.py | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/lightrag/kg/faiss_impl.py b/lightrag/kg/faiss_impl.py
index 0dca9e4c..b2090d78 100644
--- a/lightrag/kg/faiss_impl.py
+++ b/lightrag/kg/faiss_impl.py
@@ -27,8 +27,8 @@ class FaissVectorDBStorage(BaseVectorStorage):

     def __post_init__(self):
         # Grab config values if available
-        config = self.global_config.get("vector_db_storage_cls_kwargs", {})
-        cosine_threshold = config.get("cosine_better_than_threshold")
+        kwargs = self.global_config.get("vector_db_storage_cls_kwargs", {})
+        cosine_threshold = kwargs.get("cosine_better_than_threshold")
         if cosine_threshold is None:
             raise ValueError(
                 "cosine_better_than_threshold must be specified in vector_db_storage_cls_kwargs"

diff --git a/lightrag/kg/nano_vector_db_impl.py b/lightrag/kg/nano_vector_db_impl.py
index 2db8f72a..60eed3dc 100644
--- a/lightrag/kg/nano_vector_db_impl.py
+++ b/lightrag/kg/nano_vector_db_impl.py
@@ -79,8 +79,8 @@ class NanoVectorDBStorage(BaseVectorStorage):
         # Initialize lock only for file operations
         self._save_lock = asyncio.Lock()
         # Use global config value if specified, otherwise use default
-        config = self.global_config.get("vector_db_storage_cls_kwargs", {})
-        cosine_threshold = config.get("cosine_better_than_threshold")
+        kwargs = self.global_config.get("vector_db_storage_cls_kwargs", {})
+        cosine_threshold = kwargs.get("cosine_better_than_threshold")
         if cosine_threshold is None:
             raise ValueError(
                 "cosine_better_than_threshold must be specified in vector_db_storage_cls_kwargs"

From 258c7596e6a49eb1533c5e41280bbab89a818902 Mon Sep 17 00:00:00 2001
From: yangdx
Date: Fri, 14 Feb 2025 12:50:43 +0800
Subject: [PATCH 2/7] fix: Improve file path handling and logging for document
 scanning
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

• Convert relative paths to absolute paths
• Add logging for file scanning progress
• Log total number of new files found
• Enhance file scanning feedback
• Improve path resolution safety
---
 lightrag/api/lightrag_server.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/lightrag/api/lightrag_server.py b/lightrag/api/lightrag_server.py
index 1aeff264..ce182bc1 100644
--- a/lightrag/api/lightrag_server.py
+++ b/lightrag/api/lightrag_server.py
@@ -564,6 +564,10 @@ def parse_args() -> argparse.Namespace:

     args = parser.parse_args()

+    # Convert relative paths to absolute paths
+    args.working_dir = os.path.abspath(args.working_dir)
+    args.input_dir = os.path.abspath(args.input_dir)
+
     ollama_server_infos.LIGHTRAG_MODEL = args.simulated_model_name

     return args
@@ -595,6 +599,7 @@ class DocumentManager:
         """Scan input directory for new files"""
         new_files = []
         for ext in self.supported_extensions:
+            logger.info(f"Scanning for {ext} files in {self.input_dir}")
             for file_path in self.input_dir.rglob(f"*{ext}"):
                 if file_path not in self.indexed_files:
                     new_files.append(file_path)
@@ -1198,6 +1203,7 @@ def create_app(args):
             new_files = doc_manager.scan_directory_for_new_files()
             scan_progress["total_files"] = len(new_files)
+            logger.info(f"Found {len(new_files)} new files to index.")
             for file_path in new_files:
                 try:
                     with progress_lock:

From f6058b79b643e8d52386f435b6d9bf4830d06038 Mon Sep 17 00:00:00 2001
From: yangdx
Date: Fri, 14 Feb 2025 13:26:19 +0800
Subject: [PATCH 3/7] Update .env.example with absolute path placeholders

---
 .env.example | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.env.example b/.env.example
index 4b64ecb4..022bd63d 100644
--- a/.env.example
+++ b/.env.example
@@ -12,8 +12,8 @@
 # LIGHTRAG_API_KEY=your-secure-api-key-here

 ### Directory Configuration
-# WORKING_DIR=./rag_storage
-# INPUT_DIR=./inputs
+# WORKING_DIR=<absolute_path_for_working_dir>
+# INPUT_DIR=<absolute_path_for_doc_input_dir>

 ### Logging level
 LOG_LEVEL=INFO

From ad88ba03bf8e531e010f053ecce694d0f343f13a Mon Sep 17 00:00:00 2001
From: yangdx
Date: Sat, 15 Feb 2025 11:07:38 +0800
Subject: [PATCH 4/7] docs: reorganize Ollama emulation API documentation for
 better readability

---
 lightrag/api/README.md | 110 ++++++++++++++++++++++-------------
 1 file changed, 60 insertions(+), 50 deletions(-)

diff --git a/lightrag/api/README.md b/lightrag/api/README.md
index 8e5a61d5..7e4fda7e 100644
--- a/lightrag/api/README.md
+++ b/lightrag/api/README.md
@@ -74,30 +74,38 @@ LLM_MODEL=model_name_of_azure_ai
 LLM_BINDING_API_KEY=api_key_of_azure_ai
 ```

-### About Ollama API
+### 3. Install LightRAG as a Linux Service

-We provide an Ollama-compatible interfaces for LightRAG, aiming to emulate LightRAG as an Ollama chat model. This allows AI chat frontends supporting Ollama, such as Open WebUI, to access LightRAG easily.
+Create your service file `lightrag.service` from the sample file `lightrag.service.example`, then modify the WorkingDirectory and ExecStart entries in the service file:

-#### Choose Query mode in chat
-
-A query prefix in the query string can determines which LightRAG query mode is used to generate the respond for the query. The supported prefixes include:
-
-```
-/local
-/global
-/hybrid
-/naive
-/mix
-/bypass
+```text
+Description=LightRAG Ollama Service
+WorkingDirectory=<lightrag installed directory>
+ExecStart=<lightrag installed directory>/lightrag/api/lightrag-api
 ```

-For example, chat message "/mix 唐僧有几个徒弟" will trigger a mix mode query for LighRAG. A chat message without query prefix will trigger a hybrid mode query by default。
+Modify your service startup script `lightrag-api`, changing the Python virtual environment activation command as needed:

-"/bypass" is not a LightRAG query mode, it will tell API Server to pass the query directly to the underlying LLM with chat history. So user can use LLM to answer question base on the LightRAG query results. (If you are using Open WebUI as front end, you can just switch the model to a normal LLM instead of using /bypass prefix)
+```shell
+#!/bin/bash
+
+# Activate your Python virtual environment
+source /home/netman/lightrag-xyj/venv/bin/activate
+# Start the LightRAG API server
+lightrag-server
+```
+
+Install the LightRAG service. If your system is Ubuntu, the following commands will work:
+
+```shell
+sudo cp lightrag.service /etc/systemd/system/
+sudo systemctl daemon-reload
+sudo systemctl start lightrag.service
+sudo systemctl status lightrag.service
+sudo systemctl enable lightrag.service
+```

-#### Connect Open WebUI to LightRAG
-After starting the lightrag-server, you can add an Ollama-type connection in the Open WebUI admin pannel. And then a model named lightrag:latest will appear in Open WebUI's model management interface. Users can then send queries to LightRAG through the chat interface.

 ## Configuration
@@ -378,7 +386,7 @@ curl -X DELETE "http://localhost:9621/documents"

 #### GET /api/version

-Get Ollama version information
+Get Ollama version information.
 ```bash
 curl http://localhost:9621/api/version
@@ -386,7 +394,7 @@ curl http://localhost:9621/api/version
 ```

 #### GET /api/tags

-Get Ollama available models
+Get Ollama available models.

 ```bash
 curl http://localhost:9621/api/tags
@@ -394,7 +402,7 @@ curl http://localhost:9621/api/tags
 ```

 #### POST /api/chat

-Handle chat completion requests
+Handle chat completion requests. Routes user queries through LightRAG, selecting the query mode based on the query prefix. OpenWebUI session-related requests (used for metadata generation) are detected and forwarded directly to the underlying LLM.

 ```shell
 curl -N -X POST http://localhost:9621/api/chat -H "Content-Type: application/json" -d \
   '{"model":"lightrag:latest","messages":[{"role":"user","content":"猪八戒是谁"}],"stream":true}'
 ```

 > For more information about the Ollama API, please visit: [Ollama API documentation](https://github.com/ollama/ollama/blob/main/docs/api.md)

+#### POST /api/generate
+
+Handle generate completion requests. For compatibility purposes, the request is not processed by LightRAG; it is passed directly to the underlying LLM.
+
 ### Utility Endpoints

 #### GET /health

 Check server health and configuration.

 ```bash
 curl "http://localhost:9621/health"
 ```

+## Ollama Emulation
+
+We provide an Ollama-compatible interface for LightRAG, aiming to emulate LightRAG as an Ollama chat model. This allows AI chat frontends that support Ollama, such as Open WebUI, to access LightRAG easily.
+
+### Connect Open WebUI to LightRAG
+
+After starting lightrag-server, you can add an Ollama-type connection in the Open WebUI admin panel. A model named lightrag:latest will then appear in Open WebUI's model management interface, and users can send queries to LightRAG through the chat interface. Installing LightRAG as a service is recommended for this use case.
+
+Open WebUI uses an LLM for session title and session keyword generation, so the Ollama chat completion API detects OpenWebUI session-related requests and forwards them directly to the underlying LLM.
+
+### Choose Query mode in chat
+
+A query prefix in the query string determines which LightRAG query mode is used to generate the response. The supported prefixes include:
+
+```
+/local
+/global
+/hybrid
+/naive
+/mix
+/bypass
+```
+
+For example, the chat message "/mix 唐僧有几个徒弟" ("How many disciples does Tang Seng have?") triggers a mix mode query in LightRAG. A chat message without a query prefix triggers a hybrid mode query by default.
+
+"/bypass" is not a LightRAG query mode; it tells the API server to pass the query, together with the chat history, directly to the underlying LLM, so the LLM can answer based on the chat history. If you are using Open WebUI as the front end, you can simply switch the model to a normal LLM instead of using the /bypass prefix.
+
 ## Development
+
 Contribute to the project: [Guide](contributor-readme.MD)

 ### Running in Development Mode
@@ -471,33 +511,3 @@ This intelligent caching mechanism:
 - Only new documents in the input directory will be processed
 - This optimization significantly reduces startup time for subsequent runs
 - The working directory (`--working-dir`) stores the vectorized documents database
-
-## Install Lightrag as a Linux Service
-
-Create a your service file `lightrag.sevice` from the sample file : `lightrag.sevice.example`. Modified the WorkingDirectoryand EexecStart in the service file:
-
-```text
-Description=LightRAG Ollama Service
-WorkingDirectory=<lightrag installed directory>
-ExecStart=<lightrag installed directory>/lightrag/api/lightrag-api
-```
-
-Modify your service startup script: `lightrag-api`. Change you python virtual environment activation command as needed:
-
-```shell
-#!/bin/bash
-
-# your python virtual environment activation
-source /home/netman/lightrag-xyj/venv/bin/activate
-# start lightrag api server
-lightrag-server
-```
-
-Install LightRAG service. If your system is Ubuntu, the following commands will work:
-
-```shell
-sudo cp lightrag.service /etc/systemd/system/
-sudo systemctl daemon-reload
-sudo systemctl start lightrag.service
-sudo systemctl status lightrag.service
-sudo systemctl enable lightrag.service
-```

From 0db0419c6dc38651589db6121245acad3df74eeb Mon Sep 17 00:00:00 2001
From: yangdx
Date: Sat, 15 Feb 2025 11:08:54 +0800
Subject: [PATCH 5/7] Fix linting

---
 lightrag/api/README.md | 1 -
 1 file changed, 1 deletion(-)

diff --git a/lightrag/api/README.md b/lightrag/api/README.md
index 7e4fda7e..06510618 100644
--- a/lightrag/api/README.md
+++ b/lightrag/api/README.md
@@ -510,4 +510,3 @@ This intelligent caching mechanism:
 - Only new documents in the input directory will be processed
 - This optimization significantly reduces startup time for subsequent runs
 - The working directory (`--working-dir`) stores the vectorized documents database
-

From 2985d88f976ab63b6ce31d1c9929506e37c288ae Mon Sep 17 00:00:00 2001
From: yangdx
Date: Sat, 15 Feb 2025 11:39:10 +0800
Subject: [PATCH 6/7] refactor: improve CORS and streaming response headers

- Add configurable CORS origins
- Remove duplicate CORS headers
- Add X-Accel-Buffering header
- Update env example file
- Clean up header configurations
---
 .env.example                    | 13 +++++++------
 lightrag/api/lightrag_server.py | 16 +++++++++++-----
 lightrag/api/ollama_api.py      |  8 ++------
 3 files changed, 20 insertions(+), 17 deletions(-)

diff --git a/.env.example b/.env.example
index 022bd63d..2701335a 100644
--- a/.env.example
+++ b/.env.example
@@ -1,12 +1,13 @@
 ### Server Configuration
-#HOST=0.0.0.0
-#PORT=9621
-#NAMESPACE_PREFIX=lightrag # separating data from difference Lightrag instances
+# HOST=0.0.0.0
+# PORT=9621
+# NAMESPACE_PREFIX=lightrag # separate data from different LightRAG instances
+# CORS_ORIGINS=http://localhost:3000,http://localhost:8080

 ### Optional SSL Configuration
-#SSL=true
-#SSL_CERTFILE=/path/to/cert.pem
-#SSL_KEYFILE=/path/to/key.pem
+# SSL=true
+# SSL_CERTFILE=/path/to/cert.pem
+# SSL_KEYFILE=/path/to/key.pem

 ### Security (empty for no api-key is needed)
 # LIGHTRAG_API_KEY=your-secure-api-key-here

diff --git a/lightrag/api/lightrag_server.py b/lightrag/api/lightrag_server.py
index ce182bc1..19552faf 100644
--- a/lightrag/api/lightrag_server.py
+++ b/lightrag/api/lightrag_server.py
@@ -847,10 +847,19 @@ def create_app(args):
         lifespan=lifespan,
     )

+    def get_cors_origins():
+        """Get allowed origins from environment variable
+        Returns a list of allowed origins, defaults to ["*"] if not set
+        """
+        origins_str = os.getenv("CORS_ORIGINS", "*")
+        if origins_str == "*":
+            return ["*"]
+        return [origin.strip() for origin in origins_str.split(",")]
+
     # Add CORS middleware
     app.add_middleware(
         CORSMiddleware,
-        allow_origins=["*"],
+        allow_origins=get_cors_origins(),
         allow_credentials=True,
         allow_methods=["*"],
         allow_headers=["*"],
@@ -1377,10 +1386,7 @@ def create_app(args):
                     "Cache-Control": "no-cache",
                     "Connection": "keep-alive",
                     "Content-Type": "application/x-ndjson",
-                    "Access-Control-Allow-Origin": "*",
-                    "Access-Control-Allow-Methods": "POST, OPTIONS",
-                    "Access-Control-Allow-Headers": "Content-Type",
-                    "X-Accel-Buffering": "no",  # Disable Nginx buffering
+                    "X-Accel-Buffering": "no",  # Ensure streaming responses are handled correctly behind an Nginx proxy
                 },
             )
         except Exception as e:

diff --git a/lightrag/api/ollama_api.py b/lightrag/api/ollama_api.py
index 01a883ca..94703dee 100644
--- a/lightrag/api/ollama_api.py
+++ b/lightrag/api/ollama_api.py
@@ -316,9 +316,7 @@ class OllamaAPI:
                     "Cache-Control": "no-cache",
                     "Connection": "keep-alive",
                     "Content-Type": "application/x-ndjson",
-                    "Access-Control-Allow-Origin": "*",
-                    "Access-Control-Allow-Methods": "POST, OPTIONS",
-                    "Access-Control-Allow-Headers": "Content-Type",
+                    "X-Accel-Buffering": "no",  # Ensure streaming responses are handled correctly behind an Nginx proxy
                 },
             )
         else:
@@ -534,9 +532,7 @@ class OllamaAPI:
                     "Cache-Control": "no-cache",
                     "Connection": "keep-alive",
                     "Content-Type": "application/x-ndjson",
-                    "Access-Control-Allow-Origin": "*",
-                    "Access-Control-Allow-Methods": "POST, OPTIONS",
-                    "Access-Control-Allow-Headers": "Content-Type",
+                    "X-Accel-Buffering": "no",  # Ensure streaming responses are handled correctly behind an Nginx proxy
                 },
             )
         else:

From 8fdbcb0d3f749741daa57dfbd346000f1b4e652f Mon Sep 17 00:00:00 2001
From: yangdx
Date: Sat, 15 Feb 2025 11:46:47 +0800
Subject: [PATCH 7/7] fix: reorganize server info display and add CORS origins
 info
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

• Add CORS origins display
• Move API key status higher in display
• Fix tree symbols for better readability
• Regroup related server info
• Remove redundant line breaks
---
 lightrag/api/lightrag_server.py | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/lightrag/api/lightrag_server.py b/lightrag/api/lightrag_server.py
index 19552faf..97f1156f 100644
--- a/lightrag/api/lightrag_server.py
+++ b/lightrag/api/lightrag_server.py
@@ -159,8 +159,12 @@ def display_splash_screen(args: argparse.Namespace) -> None:
     ASCIIColors.yellow(f"{args.host}")
     ASCIIColors.white("    ├─ Port: ", end="")
     ASCIIColors.yellow(f"{args.port}")
-    ASCIIColors.white("    └─ SSL Enabled: ", end="")
+    ASCIIColors.white("    ├─ CORS Origins: ", end="")
+    ASCIIColors.yellow(f"{os.getenv('CORS_ORIGINS', '*')}")
+    ASCIIColors.white("    ├─ SSL Enabled: ", end="")
     ASCIIColors.yellow(f"{args.ssl}")
+    ASCIIColors.white("    └─ API Key: ", end="")
+    ASCIIColors.yellow("Set" if args.key else "Not Set")
     if args.ssl:
         ASCIIColors.white("    ├─ SSL Cert: ", end="")
         ASCIIColors.yellow(f"{args.ssl_certfile}")
@@ -229,10 +233,8 @@ def display_splash_screen(args: argparse.Namespace) -> None:
     ASCIIColors.yellow(f"{ollama_server_infos.LIGHTRAG_MODEL}")
     ASCIIColors.white("    ├─ Log Level: ", end="")
     ASCIIColors.yellow(f"{args.log_level}")
-    ASCIIColors.white("    ├─ Timeout: ", end="")
+    ASCIIColors.white("    └─ Timeout: ", end="")
     ASCIIColors.yellow(f"{args.timeout if args.timeout else 'None (infinite)'}")
-    ASCIIColors.white("    └─ API Key: ", end="")
-    ASCIIColors.yellow("Set" if args.key else "Not Set")

     # Server Status
     ASCIIColors.green("\n✨ Server starting up...\n")
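
A note for reviewers of patch 6: the new CORS handling is easy to exercise in isolation. The sketch below is not part of the patch series; it copies the `get_cors_origins` helper added to `lightrag_server.py` and wires it into a bare FastAPI app the same way `create_app()` does. The env value is illustrative (taken from the `.env.example` addition), and the snippet assumes only that `fastapi` is installed.

```python
import os

from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware


def get_cors_origins():
    """Copy of the helper added in patch 6: parse CORS_ORIGINS as a
    comma-separated list of allowed origins, defaulting to ["*"] when unset."""
    origins_str = os.getenv("CORS_ORIGINS", "*")
    if origins_str == "*":
        return ["*"]
    return [origin.strip() for origin in origins_str.split(",")]


# Illustrative value matching the .env.example added in patch 6;
# whitespace around commas is tolerated because of the strip().
os.environ["CORS_ORIGINS"] = "http://localhost:3000, http://localhost:8080"
assert get_cors_origins() == ["http://localhost:3000", "http://localhost:8080"]

# Wire the parsed origins into the CORS middleware, as create_app() does.
app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    allow_origins=get_cors_origins(),
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
```

With the middleware owning CORS, the hand-written `Access-Control-Allow-*` headers on the streaming responses became duplicates, which is why patch 6 removes them and keeps only `X-Accel-Buffering: no` for Nginx.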