Merge branch 'main' into linux-service

This commit is contained in:
yangdx
2025-01-17 19:58:33 +08:00
11 changed files with 1383 additions and 379 deletions

60
.env.example Normal file

@@ -0,0 +1,60 @@
# Server Configuration
HOST=0.0.0.0
PORT=9621
# Directory Configuration
WORKING_DIR=/app/data/rag_storage
INPUT_DIR=/app/data/inputs
# LLM Configuration (Use valid host. For local services, you can use host.docker.internal)
# Ollama example
LLM_BINDING=ollama
LLM_BINDING_HOST=http://host.docker.internal:11434
LLM_MODEL=mistral-nemo:latest
# Lollms example (uncomment to use instead of Ollama)
# LLM_BINDING=lollms
# LLM_BINDING_HOST=http://host.docker.internal:9600
# LLM_MODEL=mistral-nemo:latest
# Embedding Configuration (Use valid host. For local services, you can use host.docker.internal)
# Ollama example
EMBEDDING_BINDING=ollama
EMBEDDING_BINDING_HOST=http://host.docker.internal:11434
EMBEDDING_MODEL=bge-m3:latest
# Lollms example (uncomment to use instead of Ollama)
# EMBEDDING_BINDING=lollms
# EMBEDDING_BINDING_HOST=http://host.docker.internal:9600
# EMBEDDING_MODEL=bge-m3:latest
# RAG Configuration
MAX_ASYNC=4
MAX_TOKENS=32768
EMBEDDING_DIM=1024
MAX_EMBED_TOKENS=8192
# Security (empty for no key)
LIGHTRAG_API_KEY=your-secure-api-key-here
# Logging
LOG_LEVEL=INFO
# Optional SSL Configuration
#SSL=true
#SSL_CERTFILE=/path/to/cert.pem
#SSL_KEYFILE=/path/to/key.pem
# Optional Timeout
#TIMEOUT=30
# Optional for Azure
# AZURE_OPENAI_API_VERSION=2024-08-01-preview
# AZURE_OPENAI_DEPLOYMENT=gpt-4o
# AZURE_OPENAI_API_KEY=myapikey
# AZURE_OPENAI_ENDPOINT=https://myendpoint.openai.azure.com
# AZURE_EMBEDDING_DEPLOYMENT=text-embedding-3-large
# AZURE_EMBEDDING_API_VERSION=2023-05-15

41
Dockerfile Normal file

@@ -0,0 +1,41 @@
# Build stage
FROM python:3.11-slim as builder
WORKDIR /app
# Install build dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
build-essential \
&& rm -rf /var/lib/apt/lists/*
# Copy only requirements files first to leverage Docker cache
COPY requirements.txt .
COPY lightrag/api/requirements.txt ./lightrag/api/
# Install dependencies
RUN pip install --user --no-cache-dir -r requirements.txt
RUN pip install --user --no-cache-dir -r lightrag/api/requirements.txt
# Final stage
FROM python:3.11-slim
WORKDIR /app
# Copy only necessary files from builder
COPY --from=builder /root/.local /root/.local
COPY ./lightrag ./lightrag
COPY setup.py .
COPY .env .
RUN pip install .
# Make sure scripts in .local are usable
ENV PATH=/root/.local/bin:$PATH
# Create necessary directories
RUN mkdir -p /app/data/rag_storage /app/data/inputs
# Expose the default port
EXPOSE 9621
# Set entrypoint
ENTRYPOINT ["python", "-m", "lightrag.api.lightrag_server"]
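If you prefer plain `docker` over docker-compose, a minimal sketch of building and running this image (the `lightrag` tag, port mapping, and host data paths are illustrative assumptions; the docker-compose.yml below remains the supported path):
```bash
# Build the image from this Dockerfile
docker build -t lightrag .

# Run it with the default port exposed and the data directories mounted from the host
docker run -d --name lightrag \
  -p 9621:9621 \
  -v "$(pwd)/data/rag_storage:/app/data/rag_storage" \
  -v "$(pwd)/data/inputs:/app/data/inputs" \
  --env-file .env \
  lightrag
```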

338
README.md

@@ -922,342 +922,10 @@ def extract_queries(file_path):
```
</details>
## API
LightRAG can be installed with API support to serve a FastAPI interface for data upload, indexing, RAG operations, rescanning of the input folder, and more.
The documentation can be found [here](https://github.com/ParisNeo/LightRAG/blob/main/docs/LightRagAPI.md).
## Install with API Support
LightRAG provides optional API support through FastAPI servers that add RAG capabilities to existing LLM services. You can install LightRAG with API support in two ways:
### 1. Installation from PyPI
```bash
pip install "lightrag-hku[api]"
```
### 2. Installation from Source (Development)
```bash
# Clone the repository
git clone https://github.com/HKUDS/lightrag.git
# Change to the repository directory
cd lightrag
# Install in editable mode with API support
pip install -e ".[api]"
```
### Prerequisites
Before running any of the servers, make sure the corresponding backend services are running for both the LLM and the embedding model.
The API allows you to mix bindings for the LLM and embeddings; for example, you can use Ollama for the embeddings and OpenAI for the LLM.
#### For LoLLMs Server
- LoLLMs must be running and accessible
- Default connection: http://localhost:9600
- Configure using --llm-binding-host and/or --embedding-binding-host if running on a different host/port
#### For Ollama Server
- Ollama must be running and accessible
- Default connection: http://localhost:11434
- Configure using --llm-binding-host and/or --embedding-binding-host if running on a different host/port
#### For OpenAI Server
- Requires valid OpenAI API credentials set in environment variables
- OPENAI_API_KEY must be set
#### For Azure OpenAI Server
Azure OpenAI API can be created using the following commands in Azure CLI (you need to install Azure CLI first from [https://docs.microsoft.com/en-us/cli/azure/install-azure-cli](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli)):
```bash
# Change the resource group name, location and OpenAI resource name as needed
RESOURCE_GROUP_NAME=LightRAG
LOCATION=swedencentral
RESOURCE_NAME=LightRAG-OpenAI
az login
az group create --name $RESOURCE_GROUP_NAME --location $LOCATION
az cognitiveservices account create --name $RESOURCE_NAME --resource-group $RESOURCE_GROUP_NAME --kind OpenAI --sku S0 --location swedencentral
az cognitiveservices account deployment create --resource-group $RESOURCE_GROUP_NAME --model-format OpenAI --name $RESOURCE_NAME --deployment-name gpt-4o --model-name gpt-4o --model-version "2024-08-06" --sku-capacity 100 --sku-name "Standard"
az cognitiveservices account deployment create --resource-group $RESOURCE_GROUP_NAME --model-format OpenAI --name $RESOURCE_NAME --deployment-name text-embedding-3-large --model-name text-embedding-3-large --model-version "1" --sku-capacity 80 --sku-name "Standard"
az cognitiveservices account show --name $RESOURCE_NAME --resource-group $RESOURCE_GROUP_NAME --query "properties.endpoint"
az cognitiveservices account keys list --name $RESOURCE_NAME -g $RESOURCE_GROUP_NAME
```
The output of the last command will give you the endpoint and the key for the OpenAI API. You can use these values to set the environment variables in the `.env` file.
### Configuration Options
Each server has its own specific configuration options:
#### LightRag Server Options
| Parameter | Default | Description |
|-----------|---------|-------------|
| --host | 0.0.0.0 | Server host |
| --port | 9621 | Server port |
| --llm-binding | ollama | LLM binding to be used. Supported: lollms, ollama, openai |
| --llm-binding-host | (dynamic) | LLM server host URL. Defaults based on binding: http://localhost:11434 (ollama), http://localhost:9600 (lollms), https://api.openai.com/v1 (openai) |
| --llm-model | mistral-nemo:latest | LLM model name |
| --embedding-binding | ollama | Embedding binding to be used. Supported: lollms, ollama, openai |
| --embedding-binding-host | (dynamic) | Embedding server host URL. Defaults based on binding: http://localhost:11434 (ollama), http://localhost:9600 (lollms), https://api.openai.com/v1 (openai) |
| --embedding-model | bge-m3:latest | Embedding model name |
| --working-dir | ./rag_storage | Working directory for RAG storage |
| --input-dir | ./inputs | Directory containing input documents |
| --max-async | 4 | Maximum async operations |
| --max-tokens | 32768 | Maximum token size |
| --embedding-dim | 1024 | Embedding dimensions |
| --max-embed-tokens | 8192 | Maximum embedding token size |
| --timeout | None | Timeout in seconds (useful when using slow AI). Use None for infinite timeout |
| --log-level | INFO | Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL) |
| --key | None | API key for authentication. Protects lightrag server against unauthorized access |
| --ssl | False | Enable HTTPS |
| --ssl-certfile | None | Path to SSL certificate file (required if --ssl is enabled) |
| --ssl-keyfile | None | Path to SSL private key file (required if --ssl is enabled) |
For protecting the server using an authentication key, you can also use an environment variable named `LIGHTRAG_API_KEY`.
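For example, a quick sketch of key-based protection (the key value is a placeholder; authenticated clients send it in the `X-API-Key` header):
```bash
# Start the server with the key read from the environment
export LIGHTRAG_API_KEY=my-secure-api-key
lightrag-server

# Call a protected endpoint with the matching header
curl -X POST "http://localhost:9621/query" \
     -H "X-API-Key: my-secure-api-key" \
     -H "Content-Type: application/json" \
     -d '{"query": "Your question here", "mode": "hybrid"}'
```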
### Example Usage
#### Running a Lightrag server with ollama default local server as llm and embedding backends
Ollama is the default backend for both the LLM and embeddings, so you can run lightrag-server with no parameters and the defaults will be used. Make sure Ollama is installed and running, and that the default models have already been pulled into your Ollama instance.
```bash
# Run lightrag with ollama, mistral-nemo:latest for llm, and bge-m3:latest for embedding
lightrag-server
# Using specific models (ensure they are installed in your ollama instance)
lightrag-server --llm-model adrienbrault/nous-hermes2theta-llama3-8b:f16 --embedding-model nomic-embed-text --embedding-dim 1024
# Using an authentication key
lightrag-server --key my-key
# Using lollms for llm and ollama for embedding
lightrag-server --llm-binding lollms
```
#### Running a Lightrag server with lollms default local server as llm and embedding backends
```bash
# Run lightrag with lollms, mistral-nemo:latest for llm, and bge-m3:latest for embedding, use lollms for both llm and embedding
lightrag-server --llm-binding lollms --embedding-binding lollms
# Using specific models (ensure they are installed in your lollms instance)
lightrag-server --llm-binding lollms --llm-model adrienbrault/nous-hermes2theta-llama3-8b:f16 --embedding-binding lollms --embedding-model nomic-embed-text --embedding-dim 1024
# Using an authentication key
lightrag-server --key my-key
# Using lollms for llm and openai for embedding
lightrag-server --llm-binding lollms --embedding-binding openai --embedding-model text-embedding-3-small
```
#### Running a Lightrag server with openai server as llm and embedding backends
```bash
# Run lightrag with openai, gpt-4o-mini for llm, and text-embedding-3-small for embedding
lightrag-server --llm-binding openai --llm-model gpt-4o-mini --embedding-binding openai --embedding-model text-embedding-3-small
# Using an authentication key
lightrag-server --llm-binding openai --llm-model gpt-4o-mini --embedding-binding openai --embedding-model text-embedding-3-small --key my-key
# Using lollms for llm and openai for embedding
lightrag-server --llm-binding lollms --embedding-binding openai --embedding-model text-embedding-3-small
```
#### Running a Lightrag server with azure openai server as llm and embedding backends
```bash
# Run lightrag with azure_openai, gpt-4o-mini for llm, and text-embedding-3-small for embedding, use azure_openai for both llm and embedding
lightrag-server --llm-binding azure_openai --llm-model gpt-4o-mini --embedding-binding azure_openai --embedding-model text-embedding-3-small
# Using an authentication key
lightrag-server --llm-binding azure_openai --llm-model gpt-4o-mini --embedding-binding azure_openai --embedding-model text-embedding-3-small --key my-key
# Using lollms for llm and azure_openai for embedding
lightrag-server --llm-binding lollms --embedding-binding azure_openai --embedding-model text-embedding-3-small
```
**Important Notes:**
- For LoLLMs: Make sure the specified models are installed in your LoLLMs instance
- For Ollama: Make sure the specified models are installed in your Ollama instance
- For OpenAI: Ensure you have set up your OPENAI_API_KEY environment variable
- For Azure OpenAI: Build and configure your server as stated in the Prerequisites section
For help on any server, use the --help flag:
```bash
lightrag-server --help
```
Note: If you don't need the API functionality, you can install the base package without API support using:
```bash
pip install lightrag-hku
```
## API Endpoints
All servers (LoLLMs, Ollama, OpenAI and Azure OpenAI) provide the same REST API endpoints for RAG functionality.
### Query Endpoints
#### POST /query
Query the RAG system with options for different search modes.
```bash
curl -X POST "http://localhost:9621/query" \
-H "Content-Type: application/json" \
-d '{"query": "Your question here", "mode": "hybrid", ""}'
```
#### POST /query/stream
Stream responses from the RAG system.
```bash
curl -X POST "http://localhost:9621/query/stream" \
-H "Content-Type: application/json" \
-d '{"query": "Your question here", "mode": "hybrid"}'
```
### Document Management Endpoints
#### POST /documents/text
Insert text directly into the RAG system.
```bash
curl -X POST "http://localhost:9621/documents/text" \
-H "Content-Type: application/json" \
-d '{"text": "Your text content here", "description": "Optional description"}'
```
#### POST /documents/file
Upload a single file to the RAG system.
```bash
curl -X POST "http://localhost:9621/documents/file" \
-F "file=@/path/to/your/document.txt" \
-F "description=Optional description"
```
#### POST /documents/batch
Upload multiple files at once.
```bash
curl -X POST "http://localhost:9621/documents/batch" \
-F "files=@/path/to/doc1.txt" \
-F "files=@/path/to/doc2.txt"
```
#### DELETE /documents
Clear all documents from the RAG system.
```bash
curl -X DELETE "http://localhost:9621/documents"
```
### Utility Endpoints
#### GET /health
Check server health and configuration.
```bash
curl "http://localhost:9621/health"
```
## Development
Contribute to the project: [Guide](contributor-readme.MD)
### Running in Development Mode
For LoLLMs:
```bash
uvicorn lollms_lightrag_server:app --reload --port 9621
```
For Ollama:
```bash
uvicorn ollama_lightrag_server:app --reload --port 9621
```
For OpenAI:
```bash
uvicorn openai_lightrag_server:app --reload --port 9621
```
For Azure OpenAI:
```bash
uvicorn azure_openai_lightrag_server:app --reload --port 9621
```
### API Documentation
When any server is running, visit:
- Swagger UI: http://localhost:9621/docs
- ReDoc: http://localhost:9621/redoc
### Testing API Endpoints
You can test the API endpoints using the provided curl commands or through the Swagger UI interface. Make sure to:
1. Start the appropriate backend service (LoLLMs, Ollama, or OpenAI)
2. Start the RAG server
3. Upload some documents using the document management endpoints
4. Query the system using the query endpoints
### Important Features
#### Automatic Document Vectorization
When starting any of the servers with the `--input-dir` parameter, the system will automatically:
1. Scan the specified directory for documents
2. Check for existing vectorized content in the database
3. Only vectorize new documents that aren't already in the database
4. Make all content immediately available for RAG queries
This intelligent caching mechanism:
- Prevents unnecessary re-vectorization of existing documents
- Reduces startup time for subsequent runs
- Preserves system resources
- Maintains consistency across restarts
### Example Usage
#### LoLLMs RAG Server
```bash
# Start server with automatic document vectorization
# Only new documents will be vectorized, existing ones will be loaded from cache
lollms-lightrag-server --input-dir ./my_documents --port 8080
```
#### Ollama RAG Server
```bash
# Start server with automatic document vectorization
# Previously vectorized documents will be loaded from the database
ollama-lightrag-server --input-dir ./my_documents --port 8080
```
#### OpenAI RAG Server
```bash
# Start server with automatic document vectorization
# Existing documents are retrieved from cache, only new ones are processed
openai-lightrag-server --input-dir ./my_documents --port 9624
```
#### Azure OpenAI RAG Server
```bash
# Start server with automatic document vectorization
# Existing documents are retrieved from cache, only new ones are processed
azure-openai-lightrag-server --input-dir ./my_documents --port 9624
```
**Important Notes:**
- The `--input-dir` parameter enables automatic document processing at startup
- Documents already in the database are not re-vectorized
- Only new documents in the input directory will be processed
- This optimization significantly reduces startup time for subsequent runs
- The working directory (`--working-dir`) stores the vectorized documents database
## Star History

22
docker-compose.yml Normal file

@@ -0,0 +1,22 @@
version: '3.8'
services:
lightrag:
build: .
ports:
- "${PORT:-9621}:9621"
volumes:
- ./data/rag_storage:/app/data/rag_storage
- ./data/inputs:/app/data/inputs
env_file:
- .env
environment:
- TZ=UTC
restart: unless-stopped
networks:
- lightrag_net
extra_hosts:
- "host.docker.internal:host-gateway"
networks:
lightrag_net:
driver: bridge

176
docs/DockerDeployment.md Normal file

@@ -0,0 +1,176 @@
# LightRAG
A lightweight Knowledge Graph Retrieval-Augmented Generation system with multiple LLM backend support.
## 🚀 Installation
### Prerequisites
- Python 3.10+
- Git
- Docker (optional for Docker deployment)
### Native Installation
1. Clone the repository:
```bash
# Linux/MacOS
git clone https://github.com/ParisNeo/LightRAG.git
cd LightRAG
```
```powershell
# Windows PowerShell
git clone https://github.com/ParisNeo/LightRAG.git
cd LightRAG
```
2. Configure your environment:
```bash
# Linux/MacOS
cp .env.example .env
# Edit .env with your preferred configuration
```
```powershell
# Windows PowerShell
Copy-Item .env.example .env
# Edit .env with your preferred configuration
```
3. Create and activate virtual environment:
```bash
# Linux/MacOS
python -m venv venv
source venv/bin/activate
```
```powershell
# Windows PowerShell
python -m venv venv
.\venv\Scripts\Activate
```
4. Install dependencies:
```bash
# Both platforms
pip install -r requirements.txt
```
## 🐳 Docker Deployment
Docker instructions work the same on all platforms with Docker Desktop installed.
1. Build and start the container:
```bash
docker-compose up -d
```
### Configuration Options
LightRAG can be configured using environment variables in the `.env` file:
#### Server Configuration
- `HOST`: Server host (default: 0.0.0.0)
- `PORT`: Server port (default: 9621)
#### LLM Configuration
- `LLM_BINDING`: LLM backend to use (lollms/ollama/openai)
- `LLM_BINDING_HOST`: LLM server host URL
- `LLM_MODEL`: Model name to use
#### Embedding Configuration
- `EMBEDDING_BINDING`: Embedding backend (lollms/ollama/openai)
- `EMBEDDING_BINDING_HOST`: Embedding server host URL
- `EMBEDDING_MODEL`: Embedding model name
#### RAG Configuration
- `MAX_ASYNC`: Maximum async operations
- `MAX_TOKENS`: Maximum token size
- `EMBEDDING_DIM`: Embedding dimensions
- `MAX_EMBED_TOKENS`: Maximum embedding token size
#### Security
- `LIGHTRAG_API_KEY`: API key for authentication
### Data Storage Paths
The system uses the following paths for data storage:
```
data/
├── rag_storage/ # RAG data persistence
└── inputs/ # Input documents
```
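When bind-mounting these paths as in the provided docker-compose file, you may want to create them on the host first; a small sketch (paths relative to the project root):
```bash
# Create the host-side directories mounted into the container
mkdir -p data/rag_storage data/inputs
```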
### Example Deployments
1. Using with Ollama:
```env
LLM_BINDING=ollama
LLM_BINDING_HOST=http://host.docker.internal:11434
LLM_MODEL=mistral
EMBEDDING_BINDING=ollama
EMBEDDING_BINDING_HOST=http://host.docker.internal:11434
EMBEDDING_MODEL=bge-m3
```
Note: services running on the Docker host cannot be reached via `localhost` from inside the container; use `host.docker.internal` instead, which is mapped via `extra_hosts` in the docker-compose file and resolves to the host machine.
2. Using with OpenAI:
```env
LLM_BINDING=openai
LLM_MODEL=gpt-3.5-turbo
EMBEDDING_BINDING=openai
EMBEDDING_MODEL=text-embedding-ada-002
OPENAI_API_KEY=your-api-key
```
### API Usage
Once deployed, you can interact with the API at `http://localhost:9621`
Example query using PowerShell:
```powershell
$headers = @{
"X-API-Key" = "your-api-key"
"Content-Type" = "application/json"
}
$body = @{
query = "your question here"
} | ConvertTo-Json
Invoke-RestMethod -Uri "http://localhost:9621/query" -Method Post -Headers $headers -Body $body
```
Example query using curl:
```bash
curl -X POST "http://localhost:9621/query" \
-H "X-API-Key: your-api-key" \
-H "Content-Type: application/json" \
-d '{"query": "your question here"}'
```
## 🔒 Security
Remember to:
1. Set a strong API key in production
2. Use SSL in production environments
3. Configure proper network security
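A sketch combining the first two points for a native deployment with the documented flags (certificate paths and key value are placeholders, and `lightrag-server` is assumed to be on your PATH):
```bash
# Serve over HTTPS with API-key authentication
lightrag-server \
  --key my-strong-api-key \
  --ssl \
  --ssl-certfile /path/to/cert.pem \
  --ssl-keyfile /path/to/key.pem
```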
## 📦 Updates
To update the Docker container:
```bash
docker-compose pull
docker-compose up -d --build
```
To update native installation:
```bash
# Linux/MacOS
git pull
source venv/bin/activate
pip install -r requirements.txt
```
```powershell
# Windows PowerShell
git pull
.\venv\Scripts\Activate
pip install -r requirements.txt
```

361
docs/LightRagAPI.md Normal file

@@ -0,0 +1,361 @@
## Install with API Support
LightRAG provides optional API support through FastAPI servers that add RAG capabilities to existing LLM services. You can install LightRAG with API support in two ways:
### 1. Installation from PyPI
```bash
pip install "lightrag-hku[api]"
```
### 2. Installation from Source (Development)
```bash
# Clone the repository
git clone https://github.com/HKUDS/lightrag.git
# Change to the repository directory
cd lightrag
# Install in editable mode with API support
pip install -e ".[api]"
```
### Prerequisites
Before running any of the servers, make sure the corresponding backend services are running for both the LLM and the embedding model.
The API allows you to mix bindings for the LLM and embeddings; for example, you can use Ollama for the embeddings and OpenAI for the LLM.
#### For LoLLMs Server
- LoLLMs must be running and accessible
- Default connection: http://localhost:9600
- Configure using --llm-binding-host and/or --embedding-binding-host if running on a different host/port
#### For Ollama Server
- Ollama must be running and accessible
- Default connection: http://localhost:11434
- Configure using --llm-binding-host and/or --embedding-binding-host if running on a different host/port
#### For OpenAI Server
- Requires valid OpenAI API credentials set in environment variables
- OPENAI_API_KEY must be set
#### For Azure OpenAI Server
Azure OpenAI API can be created using the following commands in Azure CLI (you need to install Azure CLI first from [https://docs.microsoft.com/en-us/cli/azure/install-azure-cli](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli)):
```bash
# Change the resource group name, location and OpenAI resource name as needed
RESOURCE_GROUP_NAME=LightRAG
LOCATION=swedencentral
RESOURCE_NAME=LightRAG-OpenAI
az login
az group create --name $RESOURCE_GROUP_NAME --location $LOCATION
az cognitiveservices account create --name $RESOURCE_NAME --resource-group $RESOURCE_GROUP_NAME --kind OpenAI --sku S0 --location swedencentral
az cognitiveservices account deployment create --resource-group $RESOURCE_GROUP_NAME --model-format OpenAI --name $RESOURCE_NAME --deployment-name gpt-4o --model-name gpt-4o --model-version "2024-08-06" --sku-capacity 100 --sku-name "Standard"
az cognitiveservices account deployment create --resource-group $RESOURCE_GROUP_NAME --model-format OpenAI --name $RESOURCE_NAME --deployment-name text-embedding-3-large --model-name text-embedding-3-large --model-version "1" --sku-capacity 80 --sku-name "Standard"
az cognitiveservices account show --name $RESOURCE_NAME --resource-group $RESOURCE_GROUP_NAME --query "properties.endpoint"
az cognitiveservices account keys list --name $RESOURCE_NAME -g $RESOURCE_GROUP_NAME
```
The output of the last command will give you the endpoint and the key for the OpenAI API. You can use these values to set the environment variables in the `.env` file.
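For instance, a sketch of recording those values in `.env` (variable names follow the provided `.env.example`; replace the placeholder values with the endpoint and key returned above):
```bash
# Append the Azure OpenAI settings used by the azure_openai binding
cat >> .env <<'EOF'
AZURE_OPENAI_API_VERSION=2024-08-01-preview
AZURE_OPENAI_DEPLOYMENT=gpt-4o
AZURE_OPENAI_API_KEY=myapikey
AZURE_OPENAI_ENDPOINT=https://myendpoint.openai.azure.com
AZURE_EMBEDDING_DEPLOYMENT=text-embedding-3-large
AZURE_EMBEDDING_API_VERSION=2023-05-15
EOF
```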
## Configuration
LightRAG can be configured using either command-line arguments or environment variables. When both are provided, command-line arguments take precedence over environment variables.
### Environment Variables
You can configure LightRAG using environment variables by creating a `.env` file in your project root directory. Here's a complete example of available environment variables:
```env
# Server Configuration
HOST=0.0.0.0
PORT=9621
# Directory Configuration
WORKING_DIR=/app/data/rag_storage
INPUT_DIR=/app/data/inputs
# LLM Configuration
LLM_BINDING=ollama
LLM_BINDING_HOST=http://localhost:11434
LLM_MODEL=mistral-nemo:latest
# Embedding Configuration
EMBEDDING_BINDING=ollama
EMBEDDING_BINDING_HOST=http://localhost:11434
EMBEDDING_MODEL=bge-m3:latest
# RAG Configuration
MAX_ASYNC=4
MAX_TOKENS=32768
EMBEDDING_DIM=1024
MAX_EMBED_TOKENS=8192
# Security
LIGHTRAG_API_KEY=
# Logging
LOG_LEVEL=INFO
# Optional SSL Configuration
#SSL=true
#SSL_CERTFILE=/path/to/cert.pem
#SSL_KEYFILE=/path/to/key.pem
# Optional Timeout
#TIMEOUT=30
```
### Configuration Priority
The configuration values are loaded in the following order (highest priority first):
1. Command-line arguments
2. Environment variables
3. Default values
For example:
```bash
# This command-line argument will override both the environment variable and default value
python lightrag.py --port 8080
# The environment variable will override the default value but not the command-line argument
PORT=7000 python lightrag.py
```
#### LightRag Server Options
| Parameter | Default | Description |
|-----------|---------|-------------|
| --host | 0.0.0.0 | Server host |
| --port | 9621 | Server port |
| --llm-binding | ollama | LLM binding to be used. Supported: lollms, ollama, openai |
| --llm-binding-host | (dynamic) | LLM server host URL. Defaults based on binding: http://localhost:11434 (ollama), http://localhost:9600 (lollms), https://api.openai.com/v1 (openai) |
| --llm-model | mistral-nemo:latest | LLM model name |
| --embedding-binding | ollama | Embedding binding to be used. Supported: lollms, ollama, openai |
| --embedding-binding-host | (dynamic) | Embedding server host URL. Defaults based on binding: http://localhost:11434 (ollama), http://localhost:9600 (lollms), https://api.openai.com/v1 (openai) |
| --embedding-model | bge-m3:latest | Embedding model name |
| --working-dir | ./rag_storage | Working directory for RAG storage |
| --input-dir | ./inputs | Directory containing input documents |
| --max-async | 4 | Maximum async operations |
| --max-tokens | 32768 | Maximum token size |
| --embedding-dim | 1024 | Embedding dimensions |
| --max-embed-tokens | 8192 | Maximum embedding token size |
| --timeout | None | Timeout in seconds (useful when using slow AI). Use None for infinite timeout |
| --log-level | INFO | Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL) |
| --key | None | API key for authentication. Protects lightrag server against unauthorized access |
| --ssl | False | Enable HTTPS |
| --ssl-certfile | None | Path to SSL certificate file (required if --ssl is enabled) |
| --ssl-keyfile | None | Path to SSL private key file (required if --ssl is enabled) |
For protecting the server using an authentication key, you can also use an environment variable named `LIGHTRAG_API_KEY`.
### Example Usage
#### Running a Lightrag server with ollama default local server as llm and embedding backends
Ollama is the default backend for both the LLM and embeddings, so you can run lightrag-server with no parameters and the defaults will be used. Make sure Ollama is installed and running, and that the default models have already been pulled into your Ollama instance.
```bash
# Run lightrag with ollama, mistral-nemo:latest for llm, and bge-m3:latest for embedding
lightrag-server
# Using specific models (ensure they are installed in your ollama instance)
lightrag-server --llm-model adrienbrault/nous-hermes2theta-llama3-8b:f16 --embedding-model nomic-embed-text --embedding-dim 1024
# Using an authentication key
lightrag-server --key my-key
# Using lollms for llm and ollama for embedding
lightrag-server --llm-binding lollms
```
#### Running a Lightrag server with lollms default local server as llm and embedding backends
```bash
# Run lightrag with lollms, mistral-nemo:latest for llm, and bge-m3:latest for embedding, use lollms for both llm and embedding
lightrag-server --llm-binding lollms --embedding-binding lollms
# Using specific models (ensure they are installed in your lollms instance)
lightrag-server --llm-binding lollms --llm-model adrienbrault/nous-hermes2theta-llama3-8b:f16 --embedding-binding lollms --embedding-model nomic-embed-text --embedding-dim 1024
# Using an authentication key
lightrag-server --key my-key
# Using lollms for llm and openai for embedding
lightrag-server --llm-binding lollms --embedding-binding openai --embedding-model text-embedding-3-small
```
#### Running a Lightrag server with openai server as llm and embedding backends
```bash
# Run lightrag with openai, gpt-4o-mini for llm, and text-embedding-3-small for embedding
lightrag-server --llm-binding openai --llm-model gpt-4o-mini --embedding-binding openai --embedding-model text-embedding-3-small
# Using an authentication key
lightrag-server --llm-binding openai --llm-model gpt-4o-mini --embedding-binding openai --embedding-model text-embedding-3-small --key my-key
# Using lollms for llm and openai for embedding
lightrag-server --llm-binding lollms --embedding-binding openai --embedding-model text-embedding-3-small
```
#### Running a Lightrag server with azure openai server as llm and embedding backends
```bash
# Run lightrag with azure_openai, gpt-4o-mini for llm, and text-embedding-3-small for embedding, use azure_openai for both llm and embedding
lightrag-server --llm-binding azure_openai --llm-model gpt-4o-mini --embedding-binding azure_openai --embedding-model text-embedding-3-small
# Using an authentication key
lightrag-server --llm-binding azure_openai --llm-model gpt-4o-mini --embedding-binding azure_openai --embedding-model text-embedding-3-small --key my-key
# Using lollms for llm and azure_openai for embedding
lightrag-server --llm-binding lollms --embedding-binding azure_openai --embedding-model text-embedding-3-small
```
**Important Notes:**
- For LoLLMs: Make sure the specified models are installed in your LoLLMs instance
- For Ollama: Make sure the specified models are installed in your Ollama instance
- For OpenAI: Ensure you have set up your OPENAI_API_KEY environment variable
- For Azure OpenAI: Build and configure your server as stated in the Prerequisites section
For help on any server, use the --help flag:
```bash
lightrag-server --help
```
Note: If you don't need the API functionality, you can install the base package without API support using:
```bash
pip install lightrag-hku
```
## API Endpoints
All servers (LoLLMs, Ollama, OpenAI and Azure OpenAI) provide the same REST API endpoints for RAG functionality.
### Query Endpoints
#### POST /query
Query the RAG system with options for different search modes.
```bash
curl -X POST "http://localhost:9621/query" \
-H "Content-Type: application/json" \
-d '{"query": "Your question here", "mode": "hybrid", ""}'
```
#### POST /query/stream
Stream responses from the RAG system.
```bash
curl -X POST "http://localhost:9621/query/stream" \
-H "Content-Type: application/json" \
-d '{"query": "Your question here", "mode": "hybrid"}'
```
### Document Management Endpoints
#### POST /documents/text
Insert text directly into the RAG system.
```bash
curl -X POST "http://localhost:9621/documents/text" \
-H "Content-Type: application/json" \
-d '{"text": "Your text content here", "description": "Optional description"}'
```
#### POST /documents/file
Upload a single file to the RAG system.
```bash
curl -X POST "http://localhost:9621/documents/file" \
-F "file=@/path/to/your/document.txt" \
-F "description=Optional description"
```
#### POST /documents/batch
Upload multiple files at once.
```bash
curl -X POST "http://localhost:9621/documents/batch" \
-F "files=@/path/to/doc1.txt" \
-F "files=@/path/to/doc2.txt"
```
#### DELETE /documents
Clear all documents from the RAG system.
```bash
curl -X DELETE "http://localhost:9621/documents"
```
### Utility Endpoints
#### GET /health
Check server health and configuration.
```bash
curl "http://localhost:9621/health"
```
## Development
Contribute to the project: [Guide](contributor-readme.MD)
### Running in Development Mode
For LoLLMs:
```bash
uvicorn lollms_lightrag_server:app --reload --port 9621
```
For Ollama:
```bash
uvicorn ollama_lightrag_server:app --reload --port 9621
```
For OpenAI:
```bash
uvicorn openai_lightrag_server:app --reload --port 9621
```
For Azure OpenAI:
```bash
uvicorn azure_openai_lightrag_server:app --reload --port 9621
```
### API Documentation
When any server is running, visit:
- Swagger UI: http://localhost:9621/docs
- ReDoc: http://localhost:9621/redoc
### Testing API Endpoints
You can test the API endpoints using the provided curl commands or through the Swagger UI interface. Make sure to:
1. Start the appropriate backend service (LoLLMs, Ollama, or OpenAI)
2. Start the RAG server
3. Upload some documents using the document management endpoints
4. Query the system using the query endpoints
### Important Features
#### Automatic Document Vectorization
When starting any of the servers with the `--input-dir` parameter, the system will automatically:
1. Scan the specified directory for documents
2. Check for existing vectorized content in the database
3. Only vectorize new documents that aren't already in the database
4. Make all content immediately available for RAG queries
This intelligent caching mechanism:
- Prevents unnecessary re-vectorization of existing documents
- Reduces startup time for subsequent runs
- Preserves system resources
- Maintains consistency across restarts
**Important Notes:**
- The `--input-dir` parameter enables automatic document processing at startup
- Documents already in the database are not re-vectorized
- Only new documents in the input directory will be processed
- This optimization significantly reduces startup time for subsequent runs
- The working directory (`--working-dir`) stores the vectorized documents database
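A small sketch of relying on this behaviour (directory and file names are illustrative):
```bash
# First start: every document in ./inputs is scanned and vectorized
lightrag-server --input-dir ./inputs

# Later: add a new document and restart the server;
# only the new file is vectorized, everything else is loaded from the existing database
cp new_report.txt ./inputs/
lightrag-server --input-dir ./inputs
```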


@@ -0,0 +1,115 @@
## API Server Implementation
LightRAG also provides a FastAPI-based server implementation for RESTful API access to RAG operations. This allows you to run LightRAG as a service and interact with it through HTTP requests.
### Setting Up the API Server
<details>
<summary>Click to expand setup instructions</summary>
1. First, make sure you have the required dependencies:
```bash
pip install fastapi uvicorn pydantic
```
2. Set your environment variables:
```bash
export RAG_DIR="your_index_directory" # Optional: Defaults to "index_default"
export OPENAI_BASE_URL="Your OpenAI API base URL" # Optional: Defaults to "https://api.openai.com/v1"
export OPENAI_API_KEY="Your OpenAI API key" # Required
export LLM_MODEL="Your LLM model" # Optional: Defaults to "gpt-4o-mini"
export EMBEDDING_MODEL="Your embedding model" # Optional: Defaults to "text-embedding-3-large"
```
3. Run the API server:
```bash
python examples/lightrag_api_openai_compatible_demo.py
```
The server will start at `http://0.0.0.0:8020`.
</details>
### API Endpoints
The API server provides the following endpoints:
#### 1. Query Endpoint
<details>
<summary>Click to view query endpoint details</summary>
- **URL:** `/query`
- **Method:** POST
- **Body:**
```json
{
"query": "Your question here",
"mode": "hybrid", // Can be "naive", "local", "global", or "hybrid"
"only_need_context": true // Optional: Defaults to false, if true, only the referenced context will be returned, otherwise the llm answer will be returned
}
```
- **Example:**
```bash
curl -X POST "http://127.0.0.1:8020/query" \
-H "Content-Type: application/json" \
-d '{"query": "What are the main themes?", "mode": "hybrid"}'
```
</details>
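To retrieve only the referenced context without generating an LLM answer, a sketch using the `only_need_context` flag described above:
```bash
curl -X POST "http://127.0.0.1:8020/query" \
     -H "Content-Type: application/json" \
     -d '{"query": "What are the main themes?", "mode": "hybrid", "only_need_context": true}'
```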
#### 2. Insert Text Endpoint
<details>
<summary>Click to view insert text endpoint details</summary>
- **URL:** `/insert`
- **Method:** POST
- **Body:**
```json
{
"text": "Your text content here"
}
```
- **Example:**
```bash
curl -X POST "http://127.0.0.1:8020/insert" \
-H "Content-Type: application/json" \
-d '{"text": "Content to be inserted into RAG"}'
```
</details>
#### 3. Insert File Endpoint
<details>
<summary>Click to view insert file endpoint details</summary>
- **URL:** `/insert_file`
- **Method:** POST
- **Body:**
```json
{
"file_path": "path/to/your/file.txt"
}
```
- **Example:**
```bash
curl -X POST "http://127.0.0.1:8020/insert_file" \
-H "Content-Type: application/json" \
-d '{"file_path": "./book.txt"}'
```
</details>
#### 4. Health Check Endpoint
<details>
<summary>Click to view health check endpoint details</summary>
- **URL:** `/health`
- **Method:** GET
- **Example:**
```bash
curl -X GET "http://127.0.0.1:8020/health"
```
</details>
### Configuration
The API server can be configured using environment variables:
- `RAG_DIR`: Directory where the RAG index is stored (default: "index_default")
- API keys and base URLs for your specific LLM and embedding model providers should be configured in the code

361
lightrag/api/README.md Normal file

@@ -0,0 +1,361 @@
## Install with API Support
LightRAG provides optional API support through FastAPI servers that add RAG capabilities to existing LLM services. You can install LightRAG with API support in two ways:
### 1. Installation from PyPI
```bash
pip install "lightrag-hku[api]"
```
### 2. Installation from Source (Development)
```bash
# Clone the repository
git clone https://github.com/HKUDS/lightrag.git
# Change to the repository directory
cd lightrag
# Install in editable mode with API support
pip install -e ".[api]"
```
### Prerequisites
Before running any of the servers, make sure the corresponding backend services are running for both the LLM and the embedding model.
The API allows you to mix bindings for the LLM and embeddings; for example, you can use Ollama for the embeddings and OpenAI for the LLM.
#### For LoLLMs Server
- LoLLMs must be running and accessible
- Default connection: http://localhost:9600
- Configure using --llm-binding-host and/or --embedding-binding-host if running on a different host/port
#### For Ollama Server
- Ollama must be running and accessible
- Default connection: http://localhost:11434
- Configure using --llm-binding-host and/or --embedding-binding-host if running on a different host/port
#### For OpenAI Server
- Requires valid OpenAI API credentials set in environment variables
- OPENAI_API_KEY must be set
#### For Azure OpenAI Server
Azure OpenAI API can be created using the following commands in Azure CLI (you need to install Azure CLI first from [https://docs.microsoft.com/en-us/cli/azure/install-azure-cli](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli)):
```bash
# Change the resource group name, location and OpenAI resource name as needed
RESOURCE_GROUP_NAME=LightRAG
LOCATION=swedencentral
RESOURCE_NAME=LightRAG-OpenAI
az login
az group create --name $RESOURCE_GROUP_NAME --location $LOCATION
az cognitiveservices account create --name $RESOURCE_NAME --resource-group $RESOURCE_GROUP_NAME --kind OpenAI --sku S0 --location swedencentral
az cognitiveservices account deployment create --resource-group $RESOURCE_GROUP_NAME --model-format OpenAI --name $RESOURCE_NAME --deployment-name gpt-4o --model-name gpt-4o --model-version "2024-08-06" --sku-capacity 100 --sku-name "Standard"
az cognitiveservices account deployment create --resource-group $RESOURCE_GROUP_NAME --model-format OpenAI --name $RESOURCE_NAME --deployment-name text-embedding-3-large --model-name text-embedding-3-large --model-version "1" --sku-capacity 80 --sku-name "Standard"
az cognitiveservices account show --name $RESOURCE_NAME --resource-group $RESOURCE_GROUP_NAME --query "properties.endpoint"
az cognitiveservices account keys list --name $RESOURCE_NAME -g $RESOURCE_GROUP_NAME
```
The output of the last command will give you the endpoint and the key for the OpenAI API. You can use these values to set the environment variables in the `.env` file.
## Configuration
LightRAG can be configured using either command-line arguments or environment variables. When both are provided, command-line arguments take precedence over environment variables.
### Environment Variables
You can configure LightRAG using environment variables by creating a `.env` file in your project root directory. Here's a complete example of available environment variables:
```env
# Server Configuration
HOST=0.0.0.0
PORT=9621
# Directory Configuration
WORKING_DIR=/app/data/rag_storage
INPUT_DIR=/app/data/inputs
# LLM Configuration
LLM_BINDING=ollama
LLM_BINDING_HOST=http://localhost:11434
LLM_MODEL=mistral-nemo:latest
# Embedding Configuration
EMBEDDING_BINDING=ollama
EMBEDDING_BINDING_HOST=http://localhost:11434
EMBEDDING_MODEL=bge-m3:latest
# RAG Configuration
MAX_ASYNC=4
MAX_TOKENS=32768
EMBEDDING_DIM=1024
MAX_EMBED_TOKENS=8192
# Security
LIGHTRAG_API_KEY=
# Logging
LOG_LEVEL=INFO
# Optional SSL Configuration
#SSL=true
#SSL_CERTFILE=/path/to/cert.pem
#SSL_KEYFILE=/path/to/key.pem
# Optional Timeout
#TIMEOUT=30
```
### Configuration Priority
The configuration values are loaded in the following order (highest priority first):
1. Command-line arguments
2. Environment variables
3. Default values
For example:
```bash
# This command-line argument will override both the environment variable and default value
python lightrag.py --port 8080
# The environment variable will override the default value but not the command-line argument
PORT=7000 python lightrag.py
```
#### LightRag Server Options
| Parameter | Default | Description |
|-----------|---------|-------------|
| --host | 0.0.0.0 | Server host |
| --port | 9621 | Server port |
| --llm-binding | ollama | LLM binding to be used. Supported: lollms, ollama, openai |
| --llm-binding-host | (dynamic) | LLM server host URL. Defaults based on binding: http://localhost:11434 (ollama), http://localhost:9600 (lollms), https://api.openai.com/v1 (openai) |
| --llm-model | mistral-nemo:latest | LLM model name |
| --embedding-binding | ollama | Embedding binding to be used. Supported: lollms, ollama, openai |
| --embedding-binding-host | (dynamic) | Embedding server host URL. Defaults based on binding: http://localhost:11434 (ollama), http://localhost:9600 (lollms), https://api.openai.com/v1 (openai) |
| --embedding-model | bge-m3:latest | Embedding model name |
| --working-dir | ./rag_storage | Working directory for RAG storage |
| --input-dir | ./inputs | Directory containing input documents |
| --max-async | 4 | Maximum async operations |
| --max-tokens | 32768 | Maximum token size |
| --embedding-dim | 1024 | Embedding dimensions |
| --max-embed-tokens | 8192 | Maximum embedding token size |
| --timeout | None | Timeout in seconds (useful when using slow AI). Use None for infinite timeout |
| --log-level | INFO | Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL) |
| --key | None | API key for authentication. Protects lightrag server against unauthorized access |
| --ssl | False | Enable HTTPS |
| --ssl-certfile | None | Path to SSL certificate file (required if --ssl is enabled) |
| --ssl-keyfile | None | Path to SSL private key file (required if --ssl is enabled) |
For protecting the server using an authentication key, you can also use an environment variable named `LIGHTRAG_API_KEY`.
### Example Usage
#### Running a Lightrag server with ollama default local server as llm and embedding backends
Ollama is the default backend for both the LLM and embeddings, so you can run lightrag-server with no parameters and the defaults will be used. Make sure Ollama is installed and running, and that the default models have already been pulled into your Ollama instance.
```bash
# Run lightrag with ollama, mistral-nemo:latest for llm, and bge-m3:latest for embedding
lightrag-server
# Using specific models (ensure they are installed in your ollama instance)
lightrag-server --llm-model adrienbrault/nous-hermes2theta-llama3-8b:f16 --embedding-model nomic-embed-text --embedding-dim 1024
# Using an authentication key
lightrag-server --key my-key
# Using lollms for llm and ollama for embedding
lightrag-server --llm-binding lollms
```
#### Running a Lightrag server with lollms default local server as llm and embedding backends
```bash
# Run lightrag with lollms, mistral-nemo:latest for llm, and bge-m3:latest for embedding, use lollms for both llm and embedding
lightrag-server --llm-binding lollms --embedding-binding lollms
# Using specific models (ensure they are installed in your lollms instance)
lightrag-server --llm-binding lollms --llm-model adrienbrault/nous-hermes2theta-llama3-8b:f16 --embedding-binding lollms --embedding-model nomic-embed-text --embedding-dim 1024
# Using an authentication key
lightrag-server --key my-key
# Using lollms for llm and openai for embedding
lightrag-server --llm-binding lollms --embedding-binding openai --embedding-model text-embedding-3-small
```
#### Running a Lightrag server with openai server as llm and embedding backends
```bash
# Run lightrag with openai, gpt-4o-mini for llm, and text-embedding-3-small for embedding
lightrag-server --llm-binding openai --llm-model gpt-4o-mini --embedding-binding openai --embedding-model text-embedding-3-small
# Using an authentication key
lightrag-server --llm-binding openai --llm-model gpt-4o-mini --embedding-binding openai --embedding-model text-embedding-3-small --key my-key
# Using lollms for llm and openai for embedding
lightrag-server --llm-binding lollms --embedding-binding openai --embedding-model text-embedding-3-small
```
#### Running a Lightrag server with azure openai server as llm and embedding backends
```bash
# Run lightrag with azure_openai, gpt-4o-mini for llm, and text-embedding-3-small for embedding, use azure_openai for both llm and embedding
lightrag-server --llm-binding azure_openai --llm-model gpt-4o-mini --embedding-binding azure_openai --embedding-model text-embedding-3-small
# Using an authentication key
lightrag-server --llm-binding azure_openai --llm-model gpt-4o-mini --embedding-binding azure_openai --embedding-model text-embedding-3-small --key my-key
# Using lollms for llm and azure_openai for embedding
lightrag-server --llm-binding lollms --embedding-binding azure_openai --embedding-model text-embedding-3-small
```
**Important Notes:**
- For LoLLMs: Make sure the specified models are installed in your LoLLMs instance
- For Ollama: Make sure the specified models are installed in your Ollama instance
- For OpenAI: Ensure you have set up your OPENAI_API_KEY environment variable
- For Azure OpenAI: Build and configure your server as stated in the Prerequisites section
For help on any server, use the --help flag:
```bash
lightrag-server --help
```
Note: If you don't need the API functionality, you can install the base package without API support using:
```bash
pip install lightrag-hku
```
## API Endpoints
All servers (LoLLMs, Ollama, OpenAI and Azure OpenAI) provide the same REST API endpoints for RAG functionality.
### Query Endpoints
#### POST /query
Query the RAG system with options for different search modes.
```bash
curl -X POST "http://localhost:9621/query" \
-H "Content-Type: application/json" \
-d '{"query": "Your question here", "mode": "hybrid", ""}'
```
#### POST /query/stream
Stream responses from the RAG system.
```bash
curl -X POST "http://localhost:9621/query/stream" \
-H "Content-Type: application/json" \
-d '{"query": "Your question here", "mode": "hybrid"}'
```
### Document Management Endpoints
#### POST /documents/text
Insert text directly into the RAG system.
```bash
curl -X POST "http://localhost:9621/documents/text" \
-H "Content-Type: application/json" \
-d '{"text": "Your text content here", "description": "Optional description"}'
```
#### POST /documents/file
Upload a single file to the RAG system.
```bash
curl -X POST "http://localhost:9621/documents/file" \
-F "file=@/path/to/your/document.txt" \
-F "description=Optional description"
```
#### POST /documents/batch
Upload multiple files at once.
```bash
curl -X POST "http://localhost:9621/documents/batch" \
-F "files=@/path/to/doc1.txt" \
-F "files=@/path/to/doc2.txt"
```
#### DELETE /documents
Clear all documents from the RAG system.
```bash
curl -X DELETE "http://localhost:9621/documents"
```
### Utility Endpoints
#### GET /health
Check server health and configuration.
```bash
curl "http://localhost:9621/health"
```
## Development
Contribute to the project: [Guide](contributor-readme.MD)
### Running in Development Mode
For LoLLMs:
```bash
uvicorn lollms_lightrag_server:app --reload --port 9621
```
For Ollama:
```bash
uvicorn ollama_lightrag_server:app --reload --port 9621
```
For OpenAI:
```bash
uvicorn openai_lightrag_server:app --reload --port 9621
```
For Azure OpenAI:
```bash
uvicorn azure_openai_lightrag_server:app --reload --port 9621
```
### API Documentation
When any server is running, visit:
- Swagger UI: http://localhost:9621/docs
- ReDoc: http://localhost:9621/redoc
### Testing API Endpoints
You can test the API endpoints using the provided curl commands or through the Swagger UI interface. Make sure to:
1. Start the appropriate backend service (LoLLMs, Ollama, or OpenAI)
2. Start the RAG server
3. Upload some documents using the document management endpoints
4. Query the system using the query endpoints
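A condensed sketch of steps 2-4 with the Ollama defaults (the document path and query are placeholders):
```bash
# 2. Start the RAG server in the background
lightrag-server &

# 3. Upload a document
curl -X POST "http://localhost:9621/documents/file" \
     -F "file=@./my_document.txt"

# 4. Query the indexed content
curl -X POST "http://localhost:9621/query" \
     -H "Content-Type: application/json" \
     -d '{"query": "Summarize the uploaded document", "mode": "hybrid"}'
```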
### Important Features
#### Automatic Document Vectorization
When starting any of the servers with the `--input-dir` parameter, the system will automatically:
1. Scan the specified directory for documents
2. Check for existing vectorized content in the database
3. Only vectorize new documents that aren't already in the database
4. Make all content immediately available for RAG queries
This intelligent caching mechanism:
- Prevents unnecessary re-vectorization of existing documents
- Reduces startup time for subsequent runs
- Preserves system resources
- Maintains consistency across restarts
**Important Notes:**
- The `--input-dir` parameter enables automatic document processing at startup
- Documents already in the database are not re-vectorized
- Only new documents in the input directory will be processed
- This optimization significantly reduces startup time for subsequent runs
- The working directory (`--working-dir`) stores the vectorized documents database

1
lightrag/api/__init__.py Normal file

@@ -0,0 +1 @@
__api_version__ = "1.0.0"


@@ -7,23 +7,27 @@ from lightrag.llm import lollms_model_complete, lollms_embed
from lightrag.llm import ollama_model_complete, ollama_embed
from lightrag.llm import openai_complete_if_cache, openai_embedding
from lightrag.llm import azure_openai_complete_if_cache, azure_openai_embedding
from lightrag.api import __api_version__
from lightrag.utils import EmbeddingFunc
from typing import Optional, List, Union, Any
from enum import Enum
from pathlib import Path
import shutil
import aiofiles
from ascii_colors import trace_exception, ASCIIColors
import os
from fastapi import Depends, Security
from fastapi.security import APIKeyHeader
from fastapi.middleware.cors import CORSMiddleware
from contextlib import asynccontextmanager
from starlette.status import HTTP_403_FORBIDDEN
import pipmaster as pm
from dotenv import load_dotenv
def get_default_host(binding_type: str) -> str:
default_hosts = {
@@ -37,73 +41,256 @@ def get_default_host(binding_type: str) -> str:
) # fallback to ollama if unknown
def get_env_value(env_key: str, default: Any, value_type: type = str) -> Any:
"""
Get value from environment variable with type conversion
Args:
env_key (str): Environment variable key
default (Any): Default value if env variable is not set
value_type (type): Type to convert the value to
Returns:
Any: Converted value from environment or default
"""
value = os.getenv(env_key)
if value is None:
return default
if value_type is bool:
return value.lower() in ("true", "1", "yes")
try:
return value_type(value)
except ValueError:
return default
def display_splash_screen(args: argparse.Namespace) -> None:
"""
Display a colorful splash screen showing LightRAG server configuration
Args:
args: Parsed command line arguments
"""
# Banner
ASCIIColors.cyan(f"""
╔══════════════════════════════════════════════════════════════╗
║ 🚀 LightRAG Server v{__api_version__}
║ Fast, Lightweight RAG Server Implementation ║
╚══════════════════════════════════════════════════════════════╝
""")
# Server Configuration
ASCIIColors.magenta("\n📡 Server Configuration:")
ASCIIColors.white(" ├─ Host: ", end="")
ASCIIColors.yellow(f"{args.host}")
ASCIIColors.white(" ├─ Port: ", end="")
ASCIIColors.yellow(f"{args.port}")
ASCIIColors.white(" ├─ SSL Enabled: ", end="")
ASCIIColors.yellow(f"{args.ssl}")
if args.ssl:
ASCIIColors.white(" ├─ SSL Cert: ", end="")
ASCIIColors.yellow(f"{args.ssl_certfile}")
ASCIIColors.white(" └─ SSL Key: ", end="")
ASCIIColors.yellow(f"{args.ssl_keyfile}")
# Directory Configuration
ASCIIColors.magenta("\n📂 Directory Configuration:")
ASCIIColors.white(" ├─ Working Directory: ", end="")
ASCIIColors.yellow(f"{args.working_dir}")
ASCIIColors.white(" └─ Input Directory: ", end="")
ASCIIColors.yellow(f"{args.input_dir}")
# LLM Configuration
ASCIIColors.magenta("\n🤖 LLM Configuration:")
ASCIIColors.white(" ├─ Binding: ", end="")
ASCIIColors.yellow(f"{args.llm_binding}")
ASCIIColors.white(" ├─ Host: ", end="")
ASCIIColors.yellow(f"{args.llm_binding_host}")
ASCIIColors.white(" └─ Model: ", end="")
ASCIIColors.yellow(f"{args.llm_model}")
# Embedding Configuration
ASCIIColors.magenta("\n📊 Embedding Configuration:")
ASCIIColors.white(" ├─ Binding: ", end="")
ASCIIColors.yellow(f"{args.embedding_binding}")
ASCIIColors.white(" ├─ Host: ", end="")
ASCIIColors.yellow(f"{args.embedding_binding_host}")
ASCIIColors.white(" ├─ Model: ", end="")
ASCIIColors.yellow(f"{args.embedding_model}")
ASCIIColors.white(" └─ Dimensions: ", end="")
ASCIIColors.yellow(f"{args.embedding_dim}")
# RAG Configuration
ASCIIColors.magenta("\n⚙️ RAG Configuration:")
ASCIIColors.white(" ├─ Max Async Operations: ", end="")
ASCIIColors.yellow(f"{args.max_async}")
ASCIIColors.white(" ├─ Max Tokens: ", end="")
ASCIIColors.yellow(f"{args.max_tokens}")
ASCIIColors.white(" └─ Max Embed Tokens: ", end="")
ASCIIColors.yellow(f"{args.max_embed_tokens}")
# System Configuration
ASCIIColors.magenta("\n🛠️ System Configuration:")
ASCIIColors.white(" ├─ Log Level: ", end="")
ASCIIColors.yellow(f"{args.log_level}")
ASCIIColors.white(" ├─ Timeout: ", end="")
ASCIIColors.yellow(f"{args.timeout if args.timeout else 'None (infinite)'}")
ASCIIColors.white(" └─ API Key: ", end="")
ASCIIColors.yellow("Set" if args.key else "Not Set")
# Server Status
ASCIIColors.green("\n✨ Server starting up...\n")
# Server Access Information
protocol = "https" if args.ssl else "http"
if args.host == "0.0.0.0":
ASCIIColors.magenta("\n🌐 Server Access Information:")
ASCIIColors.white(" ├─ Local Access: ", end="")
ASCIIColors.yellow(f"{protocol}://localhost:{args.port}")
ASCIIColors.white(" ├─ Remote Access: ", end="")
ASCIIColors.yellow(f"{protocol}://<your-ip-address>:{args.port}")
ASCIIColors.white(" ├─ API Documentation (local): ", end="")
ASCIIColors.yellow(f"{protocol}://localhost:{args.port}/docs")
ASCIIColors.white(" └─ Alternative Documentation (local): ", end="")
ASCIIColors.yellow(f"{protocol}://localhost:{args.port}/redoc")
ASCIIColors.yellow("\n📝 Note:")
ASCIIColors.white(""" Since the server is running on 0.0.0.0:
- Use 'localhost' or '127.0.0.1' for local access
- Use your machine's IP address for remote access
- To find your IP address:
• Windows: Run 'ipconfig' in terminal
• Linux/Mac: Run 'ifconfig' or 'ip addr' in terminal
""")
else:
base_url = f"{protocol}://{args.host}:{args.port}"
ASCIIColors.magenta("\n🌐 Server Access Information:")
ASCIIColors.white(" ├─ Base URL: ", end="")
ASCIIColors.yellow(f"{base_url}")
ASCIIColors.white(" ├─ API Documentation: ", end="")
ASCIIColors.yellow(f"{base_url}/docs")
ASCIIColors.white(" └─ Alternative Documentation: ", end="")
ASCIIColors.yellow(f"{base_url}/redoc")
# Usage Examples
ASCIIColors.magenta("\n📚 Quick Start Guide:")
ASCIIColors.cyan("""
1. Access the Swagger UI:
Open your browser and navigate to the API documentation URL above
2. API Authentication:""")
if args.key:
ASCIIColors.cyan(""" Add the following header to your requests:
X-API-Key: <your-api-key>
""")
else:
ASCIIColors.cyan(" No authentication required\n")
ASCIIColors.cyan(""" 3. Basic Operations:
- POST /upload_document: Upload new documents to RAG
- POST /query: Query your document collection
- GET /collections: List available collections
4. Monitor the server:
- Check server logs for detailed operation information
- Use healthcheck endpoint: GET /health
""")
# Security Notice
if args.key:
ASCIIColors.yellow("\n⚠️ Security Notice:")
ASCIIColors.white(""" API Key authentication is enabled.
Make sure to include the X-API-Key header in all your requests.
""")
ASCIIColors.green("Server is ready to accept connections! 🚀\n")
def parse_args() -> argparse.Namespace:
"""
Parse command line arguments with environment variable fallback
Returns:
argparse.Namespace: Parsed arguments
"""
# Load environment variables from .env file
load_dotenv()
parser = argparse.ArgumentParser(
    description="LightRAG FastAPI Server with separate working and input directories"
)
# Bindings (with env var support)
parser.add_argument(
    "--llm-binding",
    default=get_env_value("LLM_BINDING", "ollama"),
    help="LLM binding to be used. Supported: lollms, ollama, openai (default: from env or ollama)",
)
parser.add_argument(
    "--embedding-binding",
    default=get_env_value("EMBEDDING_BINDING", "ollama"),
    help="Embedding binding to be used. Supported: lollms, ollama, openai (default: from env or ollama)",
)
# Parse temporary args for host defaults
temp_args, _ = parser.parse_known_args()
# Server configuration
parser.add_argument(
    "--host",
    default=get_env_value("HOST", "0.0.0.0"),
    help="Server host (default: from env or 0.0.0.0)",
)
parser.add_argument(
    "--port",
    type=int,
    default=get_env_value("PORT", 9621, int),
    help="Server port (default: from env or 9621)",
)
# Directory configuration
parser.add_argument(
    "--working-dir",
    default=get_env_value("WORKING_DIR", "./rag_storage"),
    help="Working directory for RAG storage (default: from env or ./rag_storage)",
)
parser.add_argument(
    "--input-dir",
    default=get_env_value("INPUT_DIR", "./inputs"),
    help="Directory containing input documents (default: from env or ./inputs)",
)
# LLM Model configuration
default_llm_host = get_env_value(
    "LLM_BINDING_HOST", get_default_host(temp_args.llm_binding)
)
parser.add_argument(
    "--llm-binding-host",
    default=default_llm_host,
    help=f"llm server host URL (default: from env or {default_llm_host})",
)
parser.add_argument(
    "--llm-model",
    default=get_env_value("LLM_MODEL", "mistral-nemo:latest"),
    help="LLM model name (default: from env or mistral-nemo:latest)",
)
# Embedding model configuration
default_embedding_host = get_env_value(
    "EMBEDDING_BINDING_HOST", get_default_host(temp_args.embedding_binding)
)
parser.add_argument(
    "--embedding-binding-host",
    default=default_embedding_host,
    help=f"embedding server host URL (default: from env or {default_embedding_host})",
)
parser.add_argument(
    "--embedding-model",
    default=get_env_value("EMBEDDING_MODEL", "bge-m3:latest"),
    help="Embedding model name (default: from env or bge-m3:latest)",
)
def timeout_type(value):
@@ -113,63 +300,74 @@ def parse_args():
parser.add_argument(
    "--timeout",
    default=get_env_value("TIMEOUT", None, timeout_type),
    type=timeout_type,
    help="Timeout in seconds (useful when using slow AI). Use None for infinite timeout",
)
# RAG configuration
parser.add_argument(
    "--max-async",
    type=int,
    default=get_env_value("MAX_ASYNC", 4, int),
    help="Maximum async operations (default: from env or 4)",
)
parser.add_argument(
    "--max-tokens",
    type=int,
    default=get_env_value("MAX_TOKENS", 32768, int),
    help="Maximum token size (default: from env or 32768)",
)
parser.add_argument(
    "--embedding-dim",
    type=int,
    default=get_env_value("EMBEDDING_DIM", 1024, int),
    help="Embedding dimensions (default: from env or 1024)",
)
parser.add_argument(
    "--max-embed-tokens",
    type=int,
    default=get_env_value("MAX_EMBED_TOKENS", 8192, int),
    help="Maximum embedding token size (default: from env or 8192)",
)
# Logging configuration
parser.add_argument(
    "--log-level",
    default=get_env_value("LOG_LEVEL", "INFO"),
    choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
    help="Logging level (default: from env or INFO)",
)
parser.add_argument(
    "--key",
    type=str,
    default=get_env_value("LIGHTRAG_API_KEY", None),
    help="API key for authentication. This protects the lightrag server against unauthorized access",
)
# Optional https parameters
parser.add_argument(
    "--ssl",
    action="store_true",
    default=get_env_value("SSL", False, bool),
    help="Enable HTTPS (default: from env or False)",
)
parser.add_argument(
    "--ssl-certfile",
    default=get_env_value("SSL_CERTFILE", None),
    help="Path to SSL certificate file (required if --ssl is enabled)",
)
parser.add_argument(
    "--ssl-keyfile",
    default=get_env_value("SSL_KEYFILE", None),
    help="Path to SSL private key file (required if --ssl is enabled)",
)
args = parser.parse_args()
display_splash_screen(args)
return args
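Every default above now routes through a `get_env_value` helper, which reads the named variable from the environment (including the `.env` file loaded by `load_dotenv()`) and falls back to the hard-coded value when the variable is absent. The helper's implementation is not shown in this diff; the following is only a minimal sketch, assuming the `(key, default, value_type)` call signature used above, and the repository's actual helper may differ:

```python
import os

def get_env_value(env_key: str, default, value_type=str):
    """Return env_key from the environment, falling back to default (sketch only)."""
    raw = os.environ.get(env_key)
    if raw is None or raw == "":
        return default
    if value_type is bool:
        # Accept common truthy spellings so that e.g. SSL=true in .env enables HTTPS
        return raw.strip().lower() in ("1", "true", "yes", "on")
    try:
        return value_type(raw)
    except ValueError:
        return default
```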
class DocumentManager:
@@ -435,9 +633,10 @@ def create_app(args):
else:
    logging.warning(f"No content extracted from file: {file_path}")

@asynccontextmanager
async def lifespan(app: FastAPI):
    """Lifespan context manager for startup and shutdown events"""
    # Startup logic
    try:
        new_files = doc_manager.scan_directory()
        for file_path in new_files:
@@ -448,7 +647,6 @@ def create_app(args):
logging.error(f"Error indexing file {file_path}: {str(e)}") logging.error(f"Error indexing file {file_path}: {str(e)}")
logging.info(f"Indexed {len(new_files)} documents from {args.input_dir}") logging.info(f"Indexed {len(new_files)} documents from {args.input_dir}")
except Exception as e: except Exception as e:
logging.error(f"Error during startup indexing: {str(e)}") logging.error(f"Error during startup indexing: {str(e)}")
@@ -521,6 +719,7 @@ def create_app(args):
else:
    return QueryResponse(response=response)
except Exception as e:
    trace_exception(e)
    raise HTTPException(status_code=500, detail=str(e))

@app.post("/query/stream", dependencies=[Depends(optional_api_key)])
View File
@@ -743,7 +743,7 @@ async def zhipu_complete_if_cache(
messages = []
if not system_prompt:
    system_prompt = "You are a helpful assistant. Note that sensitive words in the content should be replaced with ***"
# Add system prompt if provided
if system_prompt: