Merge pull request #925 from danielaskdd/simplify-cli-arguments

Simplify cli arguments
This commit is contained in:
Yannick Stephan
2025-02-23 15:02:58 +01:00
committed by GitHub
9 changed files with 398 additions and 370 deletions

View File

@@ -16,80 +16,79 @@
# WORKING_DIR=<absolute_path_for_working_dir>
# INPUT_DIR=<absolute_path_for_doc_input_dir>
### Logging level
LOG_LEVEL=INFO
VERBOSE=False
### Optional Timeout
TIMEOUT=300
# Ollama Emulating Model Tag
### Ollama Emulating Model Tag
# OLLAMA_EMULATING_MODEL_TAG=latest
### RAG Configuration
MAX_ASYNC=4
EMBEDDING_DIM=1024
MAX_EMBED_TOKENS=8192
### Settings relative to query
HISTORY_TURNS=3
COSINE_THRESHOLD=0.2
TOP_K=60
MAX_TOKEN_TEXT_CHUNK=4000
MAX_TOKEN_RELATION_DESC=4000
MAX_TOKEN_ENTITY_DESC=4000
### Settings relative to indexing
CHUNK_SIZE=1200
CHUNK_OVERLAP_SIZE=100
MAX_TOKENS=32768
MAX_TOKEN_SUMMARY=500
SUMMARY_LANGUAGE=English
### Logging level
# LOG_LEVEL=INFO
# VERBOSE=False
### LLM Configuration (Use valid host. For local services, you can use host.docker.internal)
### Ollama example
### Max async calls for LLM
# MAX_ASYNC=4
### Optional Timeout for LLM
# TIMEOUT=150 # Timeout in seconds, None for infinite timeout
### Settings for RAG query
# HISTORY_TURNS=3
# COSINE_THRESHOLD=0.2
# TOP_K=60
# MAX_TOKEN_TEXT_CHUNK=4000
# MAX_TOKEN_RELATION_DESC=4000
# MAX_TOKEN_ENTITY_DESC=4000
### Settings for document indexing
# CHUNK_SIZE=1200
# CHUNK_OVERLAP_SIZE=100
# MAX_TOKENS=32768 # Max tokens sent to LLM for summarization
# MAX_TOKEN_SUMMARY=500 # Max tokens for entity or relation summaries
# SUMMARY_LANGUAGE=English
# MAX_EMBED_TOKENS=8192
### LLM Configuration (Use valid host. For local services installed with docker, you can use host.docker.internal)
LLM_BINDING=ollama
LLM_BINDING_HOST=http://host.docker.internal:11434
LLM_MODEL=mistral-nemo:latest
LLM_BINDING_API_KEY=your_api_key
### Ollama example
LLM_BINDING_HOST=http://localhost:11434
### OpenAI-compatible example
# LLM_BINDING=openai
# LLM_MODEL=deepseek-chat
# LLM_BINDING_HOST=https://api.deepseek.com
# LLM_MODEL=gpt-4o
# LLM_BINDING_HOST=https://api.openai.com/v1
# LLM_BINDING_API_KEY=your_api_key
### lollms example
# LLM_BINDING=lollms
# LLM_MODEL=mistral-nemo:latest
# LLM_BINDING_HOST=http://localhost:9600
# LLM_BINDING_API_KEY=your_api_key
### For OpenAI LLM (LLM_BINDING_API_KEY takes priority)
# OPENAI_API_KEY=your_api_key
### Lollms example
# LLM_BINDING=lollms
# LLM_BINDING_HOST=http://host.docker.internal:9600
# LLM_MODEL=mistral-nemo:latest
### Embedding Configuration (Use valid host. For local services, you can use host.docker.internal)
# Ollama example
EMBEDDING_BINDING=ollama
EMBEDDING_BINDING_HOST=http://host.docker.internal:11434
### Embedding Configuration (Use valid host. For local services installed with docker, you can use host.docker.internal)
EMBEDDING_MODEL=bge-m3:latest
EMBEDDING_DIM=1024
# EMBEDDING_BINDING_API_KEY=your_api_key
### ollama example
EMBEDDING_BINDING=ollama
EMBEDDING_BINDING_HOST=http://localhost:11434
### OpenAI-compatible example
# EMBEDDING_BINDING=openai
# LLM_BINDING_HOST=https://api.openai.com/v1
### Lollms example
# EMBEDDING_BINDING=lollms
# EMBEDDING_BINDING_HOST=http://host.docker.internal:9600
# EMBEDDING_MODEL=bge-m3:latest
# EMBEDDING_BINDING_HOST=http://localhost:9600
### Optional for Azure (LLM_BINDING_HOST, LLM_BINDING_API_KEY take priority)
# AZURE_OPENAI_API_VERSION=2024-08-01-preview
# AZURE_OPENAI_DEPLOYMENT=gpt-4o
# AZURE_OPENAI_API_KEY=myapikey
# AZURE_OPENAI_API_KEY=your_api_key
# AZURE_OPENAI_ENDPOINT=https://myendpoint.openai.azure.com
# AZURE_EMBEDDING_DEPLOYMENT=text-embedding-3-large
# AZURE_EMBEDDING_API_VERSION=2023-05-15
### Data storage selection
# LIGHTRAG_KV_STORAGE=PGKVStorage
# LIGHTRAG_VECTOR_STORAGE=PGVectorStorage
# LIGHTRAG_GRAPH_STORAGE=PGGraphStorage
# LIGHTRAG_DOC_STATUS_STORAGE=PGDocStatusStorage
LIGHTRAG_KV_STORAGE=JsonKVStorage
LIGHTRAG_VECTOR_STORAGE=NanoVectorDBStorage
LIGHTRAG_GRAPH_STORAGE=NetworkXStorage
LIGHTRAG_DOC_STATUS_STORAGE=JsonDocStatusStorage
### Oracle Database Configuration
ORACLE_DSN=localhost:1521/XEPDB1
@@ -138,4 +137,4 @@ MONGODB_GRAPH=false # deprecated (keep for backward compatibility)
### Qdrant
QDRANT_URL=http://localhost:16333
QDRANT_API_KEY=your-api-key # optional
# QDRANT_API_KEY=your-api-key

View File

@@ -1,14 +1,14 @@
## Install with API Support
## Install LightRAG as an API Server
LightRAG provides optional API support through FastAPI servers that add RAG capabilities to existing LLM services. You can install LightRAG with API support in two ways:
LightRAG provides optional API support through FastAPI servers that add RAG capabilities to existing LLM services. You can install the LightRAG API Server in two ways:
### 1. Installation from PyPI
### Installation from PyPI
```bash
pip install "lightrag-hku[api]"
```
### 2. Installation from Source (Development)
### Installation from Source (Development)
```bash
# Clone the repository
@@ -22,33 +22,80 @@ cd lightrag
pip install -e ".[api]"
```
### Prerequisites
### Starting API Server with Default Settings
LightRAG requires both an LLM and an embedding model to work together to complete document indexing and querying tasks. LightRAG supports binding to various LLM/embedding backends:
* ollama
* lollms
* openai & openai compatible
* azure_openai
Before running any of the servers, ensure you have the corresponding backend service running for both the LLM and the embedding model.
The new API allows you to mix different bindings for LLM/embeddings.
For example, you can use ollama for the embedding and openai for the LLM.
The LightRAG API Server provides default parameters for the LLM and embedding, allowing users to easily start the service from the command line. These default configurations are:
#### For LoLLMs Server
- LoLLMs must be running and accessible
- Default connection: http://localhost:9600
- Configure using --llm-binding-host and/or --embedding-binding-host if running on a different host/port
* Default endpoint of the LLM/Embedding backend (LLM_BINDING_HOST or EMBEDDING_BINDING_HOST)
#### For Ollama Server
- Ollama must be running and accessible
- Requires environment variables to be set or command line arguments to be provided
- Environment variables: LLM_BINDING=ollama, LLM_BINDING_HOST, LLM_MODEL
- Command line arguments: --llm-binding=ollama, --llm-binding-host, --llm-model
- Default connection is http://localhost:11434 if not provided
```
# for lollms backend
LLM_BINDING_HOST=http://localhost:11434
EMBEDDING_BINDING_HOST=http://localhost:11434
> The default MAX_TOKENS (num_ctx) for Ollama is 32768. If your Ollama server is short on GPU memory, set it to a lower value.
# for lollms backend
LLM_BINDING_HOST=http://localhost:9600
EMBEDDING_BINDING_HOST=http://localhost:9600
#### For OpenAI-Compatible Server
- Requires environment variables to be set or command line arguments to be provided
- Environment variables: LLM_BINDING=openai, LLM_BINDING_HOST, LLM_MODEL, LLM_BINDING_API_KEY
- Command line arguments: --llm-binding=openai, --llm-binding-host, --llm-model, --llm-binding-api-key
- Default connection is https://api.openai.com/v1 if not provided
# for openai, openai compatible or azure openai backend
LLM_BINDING_HOST=https://api.openai.com/v1
EMBEDDING_BINDING_HOST=http://localhost:9600
```
#### For Azure OpenAI Server
* Default model config
```
LLM_MODEL=mistral-nemo:latest
EMBEDDING_MODEL=bge-m3:latest
EMBEDDING_DIM=1024
MAX_EMBED_TOKENS=8192
```
* API keys for LLM/Embedding backend
When connecting to a backend that requires an API key, the corresponding environment variables must be provided:
```
LLM_BINDING_API_KEY=your_api_key
EMBEDDING_BINDING_API_KEY=your_api_key
```
* Use command line arguments to choose the LLM/Embedding backend
Use `--llm-binding` to select the LLM backend type, and `--embedding-binding` to select the embedding backend type. The supported backend types are:
```
openai: LLM default type
ollama: Embedding default type
lollms:
azure_openai:
openai-ollama: select openai for LLM and ollama for embedding (only valid for --llm-binding)
```
The LightRAG API Server allows you to mix different bindings for LLM/embeddings. For example, you can use ollama for the embedding and openai for the LLM. With the above default parameters, you can start the API Server with simple CLI arguments like these:
```
# start with openai llm and ollama embedding
LLM_BINDING_API_KEY=your_api_key lightrag-server
LLM_BINDING_API_KEY=your_api_key lightrag-server --llm-binding openai-ollama
# start with openai llm and openai embedding
LLM_BINDING_API_KEY=your_api_key lightrag-server --llm-binding openai --embedding-binding openai
# start with ollama llm and ollama embedding (no API key needed)
lightrag-server --llm-binding ollama --embedding-binding ollama
```
### For Azure OpenAI Backend
Azure OpenAI API can be created using the following commands in Azure CLI (you need to install Azure CLI first from [https://docs.microsoft.com/en-us/cli/azure/install-azure-cli](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli)):
```bash
# Change the resource group name, location and OpenAI resource name as needed
@@ -68,13 +115,18 @@ az cognitiveservices account keys list --name $RESOURCE_NAME -g $RESOURCE_GROUP_
The output of the last command will give you the endpoint and the key for the OpenAI API. You can use these values to set the environment variables in the `.env` file.
```
# Azure OpenAI Configuration in .env
LLM_BINDING=azure_openai
LLM_BINDING_HOST=endpoint_of_azure_ai
LLM_MODEL=model_name_of_azure_ai
LLM_BINDING_API_KEY=api_key_of_azure_ai
LLM_BINDING_HOST=your-azure-endpoint
LLM_MODEL=your-model-deployment-name
LLM_BINDING_API_KEY=your-azure-api-key
AZURE_OPENAI_API_VERSION=2024-08-01-preview # optional, defaults to latest version
EMBEDDING_BINDING=azure_openai # if using Azure OpenAI for embeddings
EMBEDDING_MODEL=your-embedding-deployment-name
```
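With these values in the `.env` file, the server can then be started with the azure_openai bindings selected on the command line; a minimal sketch using the documented flags:

```bash
# Select Azure OpenAI for both LLM and embedding; other settings come from .env
lightrag-server --llm-binding azure_openai --embedding-binding azure_openai
```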
### 3. Install LightRAG as a Linux Service
### Install LightRAG as a Linux Service
Create your service file `lightrag.service` from the sample file `lightrag.service.example`. Modify the WorkingDirectory and ExecStart entries in the service file:
@@ -105,40 +157,36 @@ sudo systemctl status lightrag.service
sudo systemctl enable lightrag.service
```
### Automatic Document Indexing
When starting any of the servers with the `--auto-scan-at-startup` parameter, the system will automatically:
## Configuration
1. Scan for new files in the input directory
2. Index new documents that aren't already in the database
3. Make all content immediately available for RAG queries
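A minimal launch sketch for this behavior (both flags are documented in the options table below; `./inputs` is the default input directory):

```bash
# Scan ./inputs at startup and index any documents not yet in the database
lightrag-server --auto-scan-at-startup --input-dir ./inputs
```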
LightRAG can be configured using either command-line arguments or environment variables. When both are provided, command-line arguments take precedence over environment variables.
> The `--input-dir` parameter specifies the input directory to scan.
Default `TOP_K` is set to `60`. Default `COSINE_THRESHOLD` is set to `0.2`.
## API Server Configuration
### Environment Variables
The API Server can be configured in three ways (highest priority first):
You can configure LightRAG using environment variables by creating a `.env` file in your project root directory. A sample file `.env.example` is provided for your convenience.
* Command line arguments
* Environment variables or .env file
* Config.ini (Only for storage configuration)
### Config.ini
Most configurations come with default settings; check out the details in the sample file `.env.example`. Data storage configuration can also be set by config.ini. A sample file `config.ini.example` is provided for your convenience.
Data storage configuration can also be set by config.ini. A sample file `config.ini.example` is provided for your convenience.
### Supported LLM and Embedding Backends
### Configuration Priority
LightRAG supports binding to various LLM/Embedding backends:
The configuration values are loaded in the following order (highest priority first):
1. Command-line arguments
2. Environment variables
3. Config.ini
4. Default values
* ollama
* lollms
* openai & openai compatible
* azure_openai
For example:
```bash
# This command-line argument will override both the environment variable and default value
python lightrag.py --port 8080
# The environment variable will override the default value but not the command-line argument
PORT=7000 python lightrag.py
```
> Best practice: set your database settings in config.ini while testing, and use .env for production.
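The same precedence applies to the settings documented in `.env.example`; for instance, a sketch using the documented `TIMEOUT` variable and its `--timeout` counterpart:

```bash
# The CLI argument overrides the environment variable: effective timeout is 150s
TIMEOUT=300 lightrag-server --timeout 150

# Without the CLI argument, the environment variable (300s) applies
TIMEOUT=300 lightrag-server
```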
Use the environment variable `LLM_BINDING` or the CLI argument `--llm-binding` to select the LLM backend type. Use the environment variable `EMBEDDING_BINDING` or the CLI argument `--embedding-binding` to select the embedding backend type.
### Storage Types Supported
@@ -199,7 +247,16 @@ MongoDocStatusStorage MongoDB
### How to Select Storage Implementation
You can select a storage implementation by environment variables or command line arguments. You cannot change the storage implementation selection after you add documents to LightRAG. Data migration from one storage implementation to another is not supported yet. For further information, please read the sample env file or config.ini file.
You can select a storage implementation via environment variables. Set the following environment variables to a specific storage implementation name before the first start of the API Server:
```
LIGHTRAG_KV_STORAGE=PGKVStorage
LIGHTRAG_VECTOR_STORAGE=PGVectorStorage
LIGHTRAG_GRAPH_STORAGE=PGGraphStorage
LIGHTRAG_DOC_STATUS_STORAGE=PGDocStatusStorage
```
You cannot change the storage implementation selection after you add documents to LightRAG. Data migration from one storage implementation to another is not supported yet. For further information, please read the sample env file or config.ini file.
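For example, a minimal shell sketch that switches all four stores to the PostgreSQL implementations before the first start (this assumes the PostgreSQL connection settings are already present in your `.env` or config.ini):

```bash
# Select PostgreSQL-backed storage implementations, then start the server
export LIGHTRAG_KV_STORAGE=PGKVStorage
export LIGHTRAG_VECTOR_STORAGE=PGVectorStorage
export LIGHTRAG_GRAPH_STORAGE=PGGraphStorage
export LIGHTRAG_DOC_STATUS_STORAGE=PGDocStatusStorage
lightrag-server
```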
### LightRAG API Server Command Line Options
@@ -207,32 +264,22 @@ You can select storage implementation by enviroment variables or command line a
|-----------|---------|-------------|
| --host | 0.0.0.0 | Server host |
| --port | 9621 | Server port |
| --llm-binding | ollama | LLM binding to be used. Supported: lollms, ollama, openai |
| --llm-binding-host | (dynamic) | LLM server host URL. Defaults based on binding: http://localhost:11434 (ollama), http://localhost:9600 (lollms), https://api.openai.com/v1 (openai) |
| --llm-model | mistral-nemo:latest | LLM model name |
| --llm-binding-api-key | None | API key for OpenAI-compatible LLM |
| --embedding-binding | ollama | Embedding binding to be used. Supported: lollms, ollama, openai |
| --embedding-binding-host | (dynamic) | Embedding server host URL. Defaults based on binding: http://localhost:11434 (ollama), http://localhost:9600 (lollms), https://api.openai.com/v1 (openai) |
| --embedding-model | bge-m3:latest | Embedding model name |
| --working-dir | ./rag_storage | Working directory for RAG storage |
| --input-dir | ./inputs | Directory containing input documents |
| --max-async | 4 | Maximum async operations |
| --max-tokens | 32768 | Maximum token size |
| --embedding-dim | 1024 | Embedding dimensions |
| --max-embed-tokens | 8192 | Maximum embedding token size |
| --timeout | None | Timeout in seconds (useful when using slow AI). Use None for infinite timeout |
| --timeout | 150 | Timeout in seconds; None for infinite timeout (not recommended) |
| --log-level | INFO | Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL) |
| --verbose | False | Verbose debug output (True, False) |
| --verbose | - | Verbose debug output (True, False) |
| --key | None | API key for authentication. Protects lightrag server against unauthorized access |
| --ssl | False | Enable HTTPS |
| --ssl-certfile | None | Path to SSL certificate file (required if --ssl is enabled) |
| --ssl-keyfile | None | Path to SSL private key file (required if --ssl is enabled) |
| --top-k | 50 | Number of top-k items to retrieve; corresponds to entities in "local" mode and relationships in "global" mode. |
| --cosine-threshold | 0.4 | The cosine threshold for nodes and relations retrieval; works with top-k to control the retrieval of nodes and relations. |
| --kv-storage | JsonKVStorage | implement-name of KV_STORAGE |
| --graph-storage | NetworkXStorage | implement-name of GRAPH_STORAGE |
| --vector-storage | NanoVectorDBStorage | implement-name of VECTOR_STORAGE |
| --doc-status-storage | JsonDocStatusStorage | implement-name of DOC_STATUS_STORAGE |
| --llm-binding | ollama | LLM binding type (lollms, ollama, openai, openai-ollama, azure_openai) |
| --embedding-binding | ollama | Embedding binding type (lollms, ollama, openai, azure_openai) |
| --auto-scan-at-startup | - | Scan input directory for new files and start indexing |
### Example Usage
@@ -244,57 +291,49 @@ Ollama is the default backend for both llm and embedding, so by default you can
# Run lightrag with ollama, mistral-nemo:latest for llm, and bge-m3:latest for embedding
lightrag-server
# Using specific models (ensure they are installed in your ollama instance)
lightrag-server --llm-model adrienbrault/nous-hermes2theta-llama3-8b:f16 --embedding-model nomic-embed-text --embedding-dim 1024
# Using an authentication key
lightrag-server --key my-key
# Using lollms for llm and ollama for embedding
lightrag-server --llm-binding lollms
```
#### Running a LightRAG server with lollms default local server as llm and embedding backends
```bash
# Run lightrag with lollms, mistral-nemo:latest for llm, and bge-m3:latest for embedding, use lollms for both llm and embedding
lightrag-server --llm-binding lollms --embedding-binding lollms
# Using specific models (ensure they are installed in your lollms instance)
lightrag-server --llm-binding lollms --llm-model adrienbrault/nous-hermes2theta-llama3-8b:f16 --embedding-binding lollms --embedding-model nomic-embed-text --embedding-dim 1024
# Run lightrag with lollms, mistral-nemo:latest for llm, and bge-m3:latest for embedding
# Configure LLM_BINDING=lollms and EMBEDDING_BINDING=lollms in .env or config.ini
lightrag-server
# Using an authentication key
lightrag-server --key my-key
# Using lollms for llm and openai for embedding
lightrag-server --llm-binding lollms --embedding-binding openai --embedding-model text-embedding-3-small
```
#### Running a LightRAG server with openai server as llm and embedding backends
```bash
# Run lightrag with lollms, GPT-4o-mini for llm, and text-embedding-3-small for embedding, use openai for both llm and embedding
lightrag-server --llm-binding openai --llm-model GPT-4o-mini --embedding-binding openai --embedding-model text-embedding-3-small
# Run lightrag with openai, GPT-4o-mini for llm, and text-embedding-3-small for embedding
# Configure in .env or config.ini:
# LLM_BINDING=openai
# LLM_MODEL=GPT-4o-mini
# EMBEDDING_BINDING=openai
# EMBEDDING_MODEL=text-embedding-3-small
lightrag-server
# Using an authentication key
lightrag-server --llm-binding openai --llm-model GPT-4o-mini --embedding-binding openai --embedding-model text-embedding-3-small --key my-key
# Using lollms for llm and openai for embedding
lightrag-server --llm-binding lollms --embedding-binding openai --embedding-model text-embedding-3-small
lightrag-server --key my-key
```
#### Running a LightRAG server with azure openai server as llm and embedding backends
```bash
# Run lightrag with lollms, GPT-4o-mini for llm, and text-embedding-3-small for embedding, use openai for both llm and embedding
lightrag-server --llm-binding azure_openai --llm-model GPT-4o-mini --embedding-binding openai --embedding-model text-embedding-3-small
# Run lightrag with azure_openai
# Configure in .env or config.ini:
# LLM_BINDING=azure_openai
# LLM_MODEL=your-model
# EMBEDDING_BINDING=azure_openai
# EMBEDDING_MODEL=your-embedding-model
lightrag-server
# Using an authentication key
lightrag-server --llm-binding azure_openai --llm-model GPT-4o-mini --embedding-binding azure_openai --embedding-model text-embedding-3-small --key my-key
# Using lollms for llm and azure_openai for embedding
lightrag-server --llm-binding lollms --embedding-binding azure_openai --embedding-model text-embedding-3-small
lightrag-server --key my-key
```
**Important Notes:**
@@ -315,7 +354,18 @@ pip install lightrag-hku
## API Endpoints
All servers (LoLLMs, Ollama, OpenAI and Azure OpenAI) provide the same REST API endpoints for RAG functionality.
All servers (LoLLMs, Ollama, OpenAI and Azure OpenAI) provide the same REST API endpoints for RAG functionality. When the API Server is running, visit:
- Swagger UI: http://localhost:9621/docs
- ReDoc: http://localhost:9621/redoc
You can test the API endpoints using the provided curl commands or through the Swagger UI interface. Make sure to:
1. Start the appropriate backend service (LoLLMs, Ollama, or OpenAI)
2. Start the RAG server
3. Upload some documents using the document management endpoints
4. Query the system using the query endpoints
5. Trigger a document scan if new files are put into the inputs directory
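As a sketch of step 4 (assuming the default port 9621 and the `/query` endpoint described below; the payload shape shown here is illustrative):

```bash
# Hybrid-mode query against a locally running API Server
curl -X POST http://localhost:9621/query \
  -H "Content-Type: application/json" \
  -d '{"query": "What are the main themes in the documents?", "mode": "hybrid"}'
```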
### Query Endpoints
@@ -452,63 +502,3 @@ A query prefix in the query string determines which LightRAG query mode is used
For example, the chat message "/mix 唐僧有几个徒弟" ("How many disciples does Tang Seng have?") will trigger a mix mode query in LightRAG. A chat message without a query prefix will trigger a hybrid mode query by default.
"/bypass" is not a LightRAG query mode, it will tell API Server to pass the query directly to the underlying LLM with chat history. So user can use LLM to answer question base on the chat history. If you are using Open WebUI as front end, you can just switch the model to a normal LLM instead of using /bypass prefix.
## Development
Contribute to the project: [Guide](contributor-readme.MD)
### Running in Development Mode
For LoLLMs:
```bash
uvicorn lollms_lightrag_server:app --reload --port 9621
```
For Ollama:
```bash
uvicorn ollama_lightrag_server:app --reload --port 9621
```
For OpenAI:
```bash
uvicorn openai_lightrag_server:app --reload --port 9621
```
For Azure OpenAI:
```bash
uvicorn azure_openai_lightrag_server:app --reload --port 9621
```
### API Documentation
When any server is running, visit:
- Swagger UI: http://localhost:9621/docs
- ReDoc: http://localhost:9621/redoc
### Testing API Endpoints
You can test the API endpoints using the provided curl commands or through the Swagger UI interface. Make sure to:
1. Start the appropriate backend service (LoLLMs, Ollama, or OpenAI)
2. Start the RAG server
3. Upload some documents using the document management endpoints
4. Query the system using the query endpoints
5. Trigger a document scan if new files are put into the inputs directory
### Important Features
#### Automatic Document Vectorization
When starting any of the servers with the `--input-dir` parameter, the system will automatically:
1. Check for existing vectorized content in the database
2. Only vectorize new documents that aren't already in the database
3. Make all content immediately available for RAG queries
This intelligent caching mechanism:
- Prevents unnecessary re-vectorization of existing documents
- Reduces startup time for subsequent runs
- Preserves system resources
- Maintains consistency across restarts
**Important Notes:**
- The `--input-dir` parameter enables automatic document processing at startup
- Documents already in the database are not re-vectorized
- Only new documents in the input directory will be processed
- This optimization significantly reduces startup time for subsequent runs
- The working directory (`--working-dir`) stores the vectorized documents database

View File

@@ -19,20 +19,17 @@ from ascii_colors import ASCIIColors
from fastapi.middleware.cors import CORSMiddleware
from contextlib import asynccontextmanager
from dotenv import load_dotenv
from .utils_api import (
get_api_key_dependency,
parse_args,
get_default_host,
display_splash_screen,
)
from lightrag import LightRAG
from lightrag.types import GPTKeywordExtractionFormat
from lightrag.api import __api_version__
from lightrag.utils import EmbeddingFunc
from lightrag.utils import logger
from .routers.document_routes import (
DocumentManager,
create_document_routes,
@@ -68,6 +65,38 @@ scan_progress: Dict = {
progress_lock = threading.Lock()
class AccessLogFilter(logging.Filter):
def __init__(self):
super().__init__()
# Define paths to be filtered
self.filtered_paths = ["/documents", "/health", "/webui/"]
def filter(self, record):
try:
if not hasattr(record, "args") or not isinstance(record.args, tuple):
return True
if len(record.args) < 5:
return True
method = record.args[1]
path = record.args[2]
status = record.args[4]
# print(f"Debug - Method: {method}, Path: {path}, Status: {status}")
# print(f"Debug - Filtered paths: {self.filtered_paths}")
if (
method == "GET"
and (status == 200 or status == 304)
and path in self.filtered_paths
):
return False
return True
except Exception:
return True
def create_app(args):
# Set global top_k
global global_top_k
@@ -152,6 +181,8 @@ def create_app(args):
"Skip document scanning(another scanning is active)"
)
ASCIIColors.green("\nServer is ready to accept connections! 🚀\n")
yield
finally:
@@ -285,7 +316,7 @@ def create_app(args):
)
# Initialize RAG
if args.llm_binding in ["lollms", "ollama", "openai-ollama"]:
if args.llm_binding in ["lollms", "ollama", "openai"]:
rag = LightRAG(
working_dir=args.working_dir,
llm_model_func=lollms_model_complete
@@ -324,12 +355,10 @@ def create_app(args):
namespace_prefix=args.namespace_prefix,
auto_manage_storages_states=False,
)
else:
else: # azure_openai
rag = LightRAG(
working_dir=args.working_dir,
llm_model_func=azure_openai_model_complete
if args.llm_binding == "azure_openai"
else openai_alike_model_complete,
llm_model_func=azure_openai_model_complete,
chunk_token_size=int(args.chunk_size),
chunk_overlap_token_size=int(args.chunk_overlap_size),
llm_model_kwargs={
@@ -409,6 +438,38 @@ def create_app(args):
def main():
args = parse_args()
import uvicorn
import logging.config
# Configure uvicorn logging
logging.config.dictConfig(
{
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"default": {
"format": "%(levelname)s: %(message)s",
},
},
"handlers": {
"default": {
"formatter": "default",
"class": "logging.StreamHandler",
"stream": "ext://sys.stderr",
},
},
"loggers": {
"uvicorn.access": {
"handlers": ["default"],
"level": "INFO",
"propagate": False,
},
},
}
)
# Add filter to uvicorn access logger
uvicorn_access_logger = logging.getLogger("uvicorn.access")
uvicorn_access_logger.addFilter(AccessLogFilter())
app = create_app(args)
display_splash_screen(args)
@@ -416,6 +477,7 @@ def main():
"app": app,
"host": args.host,
"port": args.port,
"log_config": None, # Disable default config
}
if args.ssl:
uvicorn_config.update(

View File

@@ -161,7 +161,7 @@ class DocumentManager:
"""Scan input directory for new files"""
new_files = []
for ext in self.supported_extensions:
logging.info(f"Scanning for {ext} files in {self.input_dir}")
logging.debug(f"Scanning for {ext} files in {self.input_dir}")
for file_path in self.input_dir.rglob(f"*{ext}"):
if file_path not in self.indexed_files:
new_files.append(file_path)

View File

@@ -161,8 +161,6 @@ def create_query_routes(rag, api_key: Optional[str] = None, top_k: int = 60):
"""
try:
param = request.to_query_params(False)
if param.top_k is None:
param.top_k = top_k
response = await rag.aquery(request.query, param=param)
# If response is a string (e.g. cache hit), return directly
@@ -192,8 +190,6 @@ def create_query_routes(rag, api_key: Optional[str] = None, top_k: int = 60):
"""
try:
param = request.to_query_params(True)
if param.top_k is None:
param.top_k = top_k
response = await rag.aquery(request.query, param=param)
from fastapi.responses import StreamingResponse

View File

@@ -122,47 +122,6 @@ def parse_args() -> argparse.Namespace:
description="LightRAG FastAPI Server with separate working and input directories"
)
parser.add_argument(
"--kv-storage",
default=get_env_value(
"LIGHTRAG_KV_STORAGE", DefaultRAGStorageConfig.KV_STORAGE
),
help=f"KV storage implementation (default: {DefaultRAGStorageConfig.KV_STORAGE})",
)
parser.add_argument(
"--doc-status-storage",
default=get_env_value(
"LIGHTRAG_DOC_STATUS_STORAGE", DefaultRAGStorageConfig.DOC_STATUS_STORAGE
),
help=f"Document status storage implementation (default: {DefaultRAGStorageConfig.DOC_STATUS_STORAGE})",
)
parser.add_argument(
"--graph-storage",
default=get_env_value(
"LIGHTRAG_GRAPH_STORAGE", DefaultRAGStorageConfig.GRAPH_STORAGE
),
help=f"Graph storage implementation (default: {DefaultRAGStorageConfig.GRAPH_STORAGE})",
)
parser.add_argument(
"--vector-storage",
default=get_env_value(
"LIGHTRAG_VECTOR_STORAGE", DefaultRAGStorageConfig.VECTOR_STORAGE
),
help=f"Vector storage implementation (default: {DefaultRAGStorageConfig.VECTOR_STORAGE})",
)
# Bindings configuration
parser.add_argument(
"--llm-binding",
default=get_env_value("LLM_BINDING", "ollama"),
help="LLM binding to be used. Supported: lollms, ollama, openai (default: from env or ollama)",
)
parser.add_argument(
"--embedding-binding",
default=get_env_value("EMBEDDING_BINDING", "ollama"),
help="Embedding binding to be used. Supported: lollms, ollama, openai (default: from env or ollama)",
)
# Server configuration
parser.add_argument(
"--host",
@@ -188,66 +147,9 @@ def parse_args() -> argparse.Namespace:
help="Directory containing input documents (default: from env or ./inputs)",
)
# LLM Model configuration
parser.add_argument(
"--llm-binding-host",
default=get_env_value("LLM_BINDING_HOST", None),
help="LLM server host URL. If not provided, defaults based on llm-binding:\n"
+ "- ollama: http://localhost:11434\n"
+ "- lollms: http://localhost:9600\n"
+ "- openai: https://api.openai.com/v1",
)
default_llm_api_key = get_env_value("LLM_BINDING_API_KEY", None)
parser.add_argument(
"--llm-binding-api-key",
default=default_llm_api_key,
help="llm server API key (default: from env or empty string)",
)
parser.add_argument(
"--llm-model",
default=get_env_value("LLM_MODEL", "mistral-nemo:latest"),
help="LLM model name (default: from env or mistral-nemo:latest)",
)
# Embedding model configuration
parser.add_argument(
"--embedding-binding-host",
default=get_env_value("EMBEDDING_BINDING_HOST", None),
help="Embedding server host URL. If not provided, defaults based on embedding-binding:\n"
+ "- ollama: http://localhost:11434\n"
+ "- lollms: http://localhost:9600\n"
+ "- openai: https://api.openai.com/v1",
)
default_embedding_api_key = get_env_value("EMBEDDING_BINDING_API_KEY", "")
parser.add_argument(
"--embedding-binding-api-key",
default=default_embedding_api_key,
help="embedding server API key (default: from env or empty string)",
)
parser.add_argument(
"--embedding-model",
default=get_env_value("EMBEDDING_MODEL", "bge-m3:latest"),
help="Embedding model name (default: from env or bge-m3:latest)",
)
parser.add_argument(
"--chunk_size",
default=get_env_value("CHUNK_SIZE", 1200),
help="chunk chunk size default 1200",
)
parser.add_argument(
"--chunk_overlap_size",
default=get_env_value("CHUNK_OVERLAP_SIZE", 100),
help="chunk overlap size default 100",
)
def timeout_type(value):
if value is None:
return 150
if value is None or value == "None":
return None
return int(value)
@@ -272,18 +174,6 @@ def parse_args() -> argparse.Namespace:
default=get_env_value("MAX_TOKENS", 32768, int),
help="Maximum token size (default: from env or 32768)",
)
parser.add_argument(
"--embedding-dim",
type=int,
default=get_env_value("EMBEDDING_DIM", 1024, int),
help="Embedding dimensions (default: from env or 1024)",
)
parser.add_argument(
"--max-embed-tokens",
type=int,
default=get_env_value("MAX_EMBED_TOKENS", 8192, int),
help="Maximum embedding token size (default: from env or 8192)",
)
# Logging configuration
parser.add_argument(
@@ -292,6 +182,12 @@ def parse_args() -> argparse.Namespace:
choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
help="Logging level (default: from env or INFO)",
)
parser.add_argument(
"--verbose",
action="store_true",
default=get_env_value("VERBOSE", False, bool),
help="Enable verbose debug output(only valid for DEBUG log-level)",
)
parser.add_argument(
"--key",
@@ -317,12 +213,6 @@ def parse_args() -> argparse.Namespace:
default=get_env_value("SSL_KEYFILE", None),
help="Path to SSL private key file (required if --ssl is enabled)",
)
parser.add_argument(
"--auto-scan-at-startup",
action="store_true",
default=False,
help="Enable automatic scanning when the program starts",
)
parser.add_argument(
"--history-turns",
@@ -364,10 +254,26 @@ def parse_args() -> argparse.Namespace:
)
parser.add_argument(
"--verbose",
type=bool,
default=get_env_value("VERBOSE", False, bool),
help="Verbose debug output(default: from env or false)",
"--auto-scan-at-startup",
action="store_true",
default=False,
help="Enable automatic scanning when the program starts",
)
# LLM and embedding bindings
parser.add_argument(
"--llm-binding",
type=str,
default=get_env_value("LLM_BINDING", "ollama"),
choices=["lollms", "ollama", "openai", "openai-ollama", "azure_openai"],
help="LLM binding type (default: from env or ollama)",
)
parser.add_argument(
"--embedding-binding",
type=str,
default=get_env_value("EMBEDDING_BINDING", "ollama"),
choices=["lollms", "ollama", "openai", "azure_openai"],
help="Embedding binding type (default: from env or ollama)",
)
args = parser.parse_args()
@@ -376,6 +282,44 @@ def parse_args() -> argparse.Namespace:
args.working_dir = os.path.abspath(args.working_dir)
args.input_dir = os.path.abspath(args.input_dir)
# Inject storage configuration from environment variables
args.kv_storage = get_env_value(
"LIGHTRAG_KV_STORAGE", DefaultRAGStorageConfig.KV_STORAGE
)
args.doc_status_storage = get_env_value(
"LIGHTRAG_DOC_STATUS_STORAGE", DefaultRAGStorageConfig.DOC_STATUS_STORAGE
)
args.graph_storage = get_env_value(
"LIGHTRAG_GRAPH_STORAGE", DefaultRAGStorageConfig.GRAPH_STORAGE
)
args.vector_storage = get_env_value(
"LIGHTRAG_VECTOR_STORAGE", DefaultRAGStorageConfig.VECTOR_STORAGE
)
# Handle openai-ollama special case
if args.llm_binding == "openai-ollama":
args.llm_binding = "openai"
args.embedding_binding = "ollama"
args.llm_binding_host = get_env_value(
"LLM_BINDING_HOST", get_default_host(args.llm_binding)
)
args.embedding_binding_host = get_env_value(
"EMBEDDING_BINDING_HOST", get_default_host(args.embedding_binding)
)
args.llm_binding_api_key = get_env_value("LLM_BINDING_API_KEY", None)
args.embedding_binding_api_key = get_env_value("EMBEDDING_BINDING_API_KEY", "")
# Inject model configuration
args.llm_model = get_env_value("LLM_MODEL", "mistral-nemo:latest")
args.embedding_model = get_env_value("EMBEDDING_MODEL", "bge-m3:latest")
args.embedding_dim = get_env_value("EMBEDDING_DIM", 1024, int)
args.max_embed_tokens = get_env_value("MAX_EMBED_TOKENS", 8192, int)
# Inject chunk configuration
args.chunk_size = get_env_value("CHUNK_SIZE", 1200, int)
args.chunk_overlap_size = get_env_value("CHUNK_OVERLAP_SIZE", 100, int)
ollama_server_infos.LIGHTRAG_MODEL = args.simulated_model_name
return args
@@ -548,7 +492,5 @@ def display_splash_screen(args: argparse.Namespace) -> None:
Make sure to include the X-API-Key header in all your requests.
""")
ASCIIColors.green("Server is ready to accept connections! 🚀\n")
# Ensure splash output flush to system log
sys.stdout.flush()

View File

@@ -48,6 +48,10 @@ from .utils import (
set_logger,
)
from .types import KnowledgeGraph
from dotenv import load_dotenv
# Load environment variables
load_dotenv(override=True)
# TODO: TO REMOVE @Yannick
config = configparser.ConfigParser()
@@ -473,6 +477,11 @@ class LightRAG:
storage_class = lazy_external_import(import_path, storage_name)
return storage_class
@staticmethod
def clean_text(text: str) -> str:
"""Clean text by removing null bytes (0x00) and whitespace"""
return text.strip().replace("\x00", "")
def insert(
self,
input: str | list[str],
@@ -524,8 +533,13 @@ class LightRAG:
) -> None:
update_storage = False
try:
doc_key = compute_mdhash_id(full_text.strip(), prefix="doc-")
new_docs = {doc_key: {"content": full_text.strip()}}
# Clean input texts
full_text = self.clean_text(full_text)
text_chunks = [self.clean_text(chunk) for chunk in text_chunks]
# Process cleaned texts
doc_key = compute_mdhash_id(full_text, prefix="doc-")
new_docs = {doc_key: {"content": full_text}}
_add_doc_keys = await self.full_docs.filter_keys({doc_key})
new_docs = {k: v for k, v in new_docs.items() if k in _add_doc_keys}
@@ -538,11 +552,10 @@ class LightRAG:
inserting_chunks: dict[str, Any] = {}
for chunk_text in text_chunks:
chunk_text_stripped = chunk_text.strip()
chunk_key = compute_mdhash_id(chunk_text_stripped, prefix="chunk-")
chunk_key = compute_mdhash_id(chunk_text, prefix="chunk-")
inserting_chunks[chunk_key] = {
"content": chunk_text_stripped,
"content": chunk_text,
"full_doc_id": doc_key,
}
@@ -593,13 +606,12 @@ class LightRAG:
raise ValueError("IDs must be unique")
# Generate contents dict of IDs provided by user and documents
contents = {id_: doc.strip() for id_, doc in zip(ids, input)}
contents = {id_: doc for id_, doc in zip(ids, input)}
else:
# Clean input text and remove duplicates
input = list(set(self.clean_text(doc) for doc in input))
# Generate contents dict of MD5 hash IDs and documents
contents = {
compute_mdhash_id(doc.strip(), prefix="doc-"): doc.strip()
for doc in input
}
contents = {compute_mdhash_id(doc, prefix="doc-"): doc for doc in input}
# 2. Remove duplicate contents
unique_contents = {
@@ -807,7 +819,7 @@ class LightRAG:
all_chunks_data: dict[str, dict[str, str]] = {}
chunk_to_source_map: dict[str, str] = {}
for chunk_data in custom_kg.get("chunks", {}):
chunk_content = chunk_data["content"].strip()
chunk_content = self.clean_text(chunk_data["content"])
source_id = chunk_data["source_id"]
tokens = len(
encode_string_by_tiktoken(

View File

@@ -5,6 +5,7 @@ import json
import re
from typing import Any, AsyncIterator
from collections import Counter, defaultdict
from .utils import (
logger,
clean_str,
@@ -23,6 +24,7 @@ from .utils import (
CacheData,
statistic_data,
get_conversation_turns,
verbose_debug,
)
from .base import (
BaseGraphStorage,
@@ -33,6 +35,10 @@ from .base import (
)
from .prompt import GRAPH_FIELD_SEP, PROMPTS
import time
from dotenv import load_dotenv
# Load environment variables
load_dotenv(override=True)
def chunking_by_token_size(
@@ -295,7 +301,7 @@ async def _merge_edges_then_upsert(
node_data={
"source_id": source_id,
"description": description,
"entity_type": '"UNKNOWN"',
"entity_type": "UNKNOWN",
},
)
description = await _handle_entity_relation_summary(
@@ -375,9 +381,8 @@ async def extract_entities(
continue_prompt = PROMPTS["entiti_continue_extraction"]
if_loop_prompt = PROMPTS["entiti_if_loop_extraction"]
already_processed = 0
already_entities = 0
already_relations = 0
processed_chunks = 0
total_chunks = len(ordered_chunks)
async def _user_llm_func_with_cache(
input_text: str, history_messages: list[dict[str, str]] = None
@@ -431,7 +436,7 @@ async def extract_entities(
chunk_key_dp (tuple[str, TextChunkSchema]):
("chunck-xxxxxx", {"tokens": int, "content": str, "full_doc_id": str, "chunk_order_index": int})
"""
nonlocal already_processed, already_entities, already_relations
nonlocal processed_chunks
chunk_key = chunk_key_dp[0]
chunk_dp = chunk_key_dp[1]
content = chunk_dp["content"]
@@ -488,12 +493,11 @@ async def extract_entities(
maybe_edges[(if_relation["src_id"], if_relation["tgt_id"])].append(
if_relation
)
already_processed += 1
already_entities += len(maybe_nodes)
already_relations += len(maybe_edges)
logger.debug(
f"Processed {already_processed} chunks, {already_entities} entities(duplicated), {already_relations} relations(duplicated)\r",
processed_chunks += 1
entities_count = len(maybe_nodes)
relations_count = len(maybe_edges)
logger.info(
f" Chunk {processed_chunks}/{total_chunks}: extracted {entities_count} entities and {relations_count} relationships (deduplicated)"
)
return dict(maybe_nodes), dict(maybe_edges)
@@ -532,8 +536,12 @@ async def extract_entities(
logger.info("Didn't extract any relationships")
logger.info(
f"New entities or relationships extracted, entities:{all_entities_data}, relationships:{all_relationships_data}"
f"Extracted {len(all_entities_data)} entities and {len(all_relationships_data)} relationships (deduplicated)"
)
verbose_debug(
f"New entities:{all_entities_data}, relationships:{all_relationships_data}"
)
verbose_debug(f"New relationships:{all_relationships_data}")
if entity_vdb is not None:
data_for_vdb = {

View File

@@ -15,8 +15,11 @@ from typing import Any, Callable
import xml.etree.ElementTree as ET
import numpy as np
import tiktoken
from lightrag.prompt import PROMPTS
from dotenv import load_dotenv
# Load environment variables
load_dotenv(override=True)
VERBOSE_DEBUG = os.getenv("VERBOSE", "false").lower() == "true"
@@ -25,10 +28,26 @@ VERBOSE_DEBUG = os.getenv("VERBOSE", "false").lower() == "true"
def verbose_debug(msg: str, *args, **kwargs):
"""Function for outputting detailed debug information.
When VERBOSE_DEBUG=True, outputs the complete message.
When VERBOSE_DEBUG=False, outputs only the first 30 characters.
When VERBOSE_DEBUG=False, outputs only the first 50 characters.
Args:
msg: The message format string
*args: Arguments to be formatted into the message
**kwargs: Keyword arguments passed to logger.debug()
"""
if VERBOSE_DEBUG:
logger.debug(msg, *args, **kwargs)
else:
# Format the message with args first
if args:
formatted_msg = msg % args
else:
formatted_msg = msg
# Then truncate the formatted message
truncated_msg = (
formatted_msg[:50] + "..." if len(formatted_msg) > 50 else formatted_msg
)
logger.debug(truncated_msg, **kwargs)
def set_verbose_debug(enabled: bool):