From 2c3ff234e948860676d20eda38fd6c95b69db490 Mon Sep 17 00:00:00 2001 From: Saifeddine ALOUI Date: Thu, 16 Jan 2025 22:14:16 +0100 Subject: [PATCH 01/15] Moving extended api documentation to new doc folder --- README.md | 335 ----------------------------------------- docs/LightRagAPI.md | 302 +++++++++++++++++++++++++++++++++++++ lightrag/api/README.md | 302 +++++++++++++++++++++++++++++++++++++ 3 files changed, 604 insertions(+), 335 deletions(-) create mode 100644 docs/LightRagAPI.md create mode 100644 lightrag/api/README.md diff --git a/README.md b/README.md index 8a0da666..e302d708 100644 --- a/README.md +++ b/README.md @@ -921,342 +921,7 @@ def extract_queries(file_path): ``` -## Install with API Support -LightRAG provides optional API support through FastAPI servers that add RAG capabilities to existing LLM services. You can install LightRAG with API support in two ways: - -### 1. Installation from PyPI - -```bash -pip install "lightrag-hku[api]" -``` - -### 2. Installation from Source (Development) - -```bash -# Clone the repository -git clone https://github.com/HKUDS/lightrag.git - -# Change to the repository directory -cd lightrag - -# Install in editable mode with API support -pip install -e ".[api]" -``` - -### Prerequisites - -Before running any of the servers, ensure you have the corresponding backend service running for both llm and embedding. -The new api allows you to mix different bindings for llm/embeddings. -For example, you have the possibility to use ollama for the embedding and openai for the llm. - -#### For LoLLMs Server -- LoLLMs must be running and accessible -- Default connection: http://localhost:9600 -- Configure using --llm-binding-host and/or --embedding-binding-host if running on a different host/port - -#### For Ollama Server -- Ollama must be running and accessible -- Default connection: http://localhost:11434 -- Configure using --ollama-host if running on a different host/port - -#### For OpenAI Server -- Requires valid OpenAI API credentials set in environment variables -- OPENAI_API_KEY must be set - -#### For Azure OpenAI Server -Azure OpenAI API can be created using the following commands in Azure CLI (you need to install Azure CLI first from [https://docs.microsoft.com/en-us/cli/azure/install-azure-cli](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli)): -```bash -# Change the resource group name, location and OpenAI resource name as needed -RESOURCE_GROUP_NAME=LightRAG -LOCATION=swedencentral -RESOURCE_NAME=LightRAG-OpenAI - -az login -az group create --name $RESOURCE_GROUP_NAME --location $LOCATION -az cognitiveservices account create --name $RESOURCE_NAME --resource-group $RESOURCE_GROUP_NAME --kind OpenAI --sku S0 --location swedencentral -az cognitiveservices account deployment create --resource-group $RESOURCE_GROUP_NAME --model-format OpenAI --name $RESOURCE_NAME --deployment-name gpt-4o --model-name gpt-4o --model-version "2024-08-06" --sku-capacity 100 --sku-name "Standard" -az cognitiveservices account deployment create --resource-group $RESOURCE_GROUP_NAME --model-format OpenAI --name $RESOURCE_NAME --deployment-name text-embedding-3-large --model-name text-embedding-3-large --model-version "1" --sku-capacity 80 --sku-name "Standard" -az cognitiveservices account show --name $RESOURCE_NAME --resource-group $RESOURCE_GROUP_NAME --query "properties.endpoint" -az cognitiveservices account keys list --name $RESOURCE_NAME -g $RESOURCE_GROUP_NAME - -``` -The output of the last command will give you the endpoint and the key for the 
OpenAI API. You can use these values to set the environment variables in the `.env` file. - - - -### Configuration Options - -Each server has its own specific configuration options: - -#### LightRag Server Options - -| Parameter | Default | Description | -|-----------|---------|-------------| -| --host | 0.0.0.0 | Server host | -| --port | 9621 | Server port | -| --llm-binding | ollama | LLM binding to be used. Supported: lollms, ollama, openai | -| --llm-binding-host | (dynamic) | LLM server host URL. Defaults based on binding: http://localhost:11434 (ollama), http://localhost:9600 (lollms), https://api.openai.com/v1 (openai) | -| --llm-model | mistral-nemo:latest | LLM model name | -| --embedding-binding | ollama | Embedding binding to be used. Supported: lollms, ollama, openai | -| --embedding-binding-host | (dynamic) | Embedding server host URL. Defaults based on binding: http://localhost:11434 (ollama), http://localhost:9600 (lollms), https://api.openai.com/v1 (openai) | -| --embedding-model | bge-m3:latest | Embedding model name | -| --working-dir | ./rag_storage | Working directory for RAG storage | -| --input-dir | ./inputs | Directory containing input documents | -| --max-async | 4 | Maximum async operations | -| --max-tokens | 32768 | Maximum token size | -| --embedding-dim | 1024 | Embedding dimensions | -| --max-embed-tokens | 8192 | Maximum embedding token size | -| --timeout | None | Timeout in seconds (useful when using slow AI). Use None for infinite timeout | -| --log-level | INFO | Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL) | -| --key | None | API key for authentication. Protects lightrag server against unauthorized access | -| --ssl | False | Enable HTTPS | -| --ssl-certfile | None | Path to SSL certificate file (required if --ssl is enabled) | -| --ssl-keyfile | None | Path to SSL private key file (required if --ssl is enabled) | - - - -For protecting the server using an authentication key, you can also use an environment variable named `LIGHTRAG_API_KEY`. -### Example Usage - -#### Running a Lightrag server with ollama default local server as llm and embedding backends - -Ollama is the default backend for both llm and embedding, so by default you can run lightrag-server with no parameters and the default ones will be used. Make sure ollama is installed and is running and default models are already installed on ollama. 
- -```bash -# Run lightrag with ollama, mistral-nemo:latest for llm, and bge-m3:latest for embedding -lightrag-server - -# Using specific models (ensure they are installed in your ollama instance) -lightrag-server --llm-model adrienbrault/nous-hermes2theta-llama3-8b:f16 --embedding-model nomic-embed-text --embedding-dim 1024 - -# Using an authentication key -lightrag-server --key my-key - -# Using lollms for llm and ollama for embedding -lightrag-server --llm-binding lollms -``` - -#### Running a Lightrag server with lollms default local server as llm and embedding backends - -```bash -# Run lightrag with lollms, mistral-nemo:latest for llm, and bge-m3:latest for embedding, use lollms for both llm and embedding -lightrag-server --llm-binding lollms --embedding-binding lollms - -# Using specific models (ensure they are installed in your ollama instance) -lightrag-server --llm-binding lollms --llm-model adrienbrault/nous-hermes2theta-llama3-8b:f16 --embedding-binding lollms --embedding-model nomic-embed-text --embedding-dim 1024 - -# Using an authentication key -lightrag-server --key my-key - -# Using lollms for llm and openai for embedding -lightrag-server --llm-binding lollms --embedding-binding openai --embedding-model text-embedding-3-small -``` - - -#### Running a Lightrag server with openai server as llm and embedding backends - -```bash -# Run lightrag with lollms, GPT-4o-mini for llm, and text-embedding-3-small for embedding, use openai for both llm and embedding -lightrag-server --llm-binding openai --llm-model GPT-4o-mini --embedding-binding openai --embedding-model text-embedding-3-small - -# Using an authentication key -lightrag-server --llm-binding openai --llm-model GPT-4o-mini --embedding-binding openai --embedding-model text-embedding-3-small --key my-key - -# Using lollms for llm and openai for embedding -lightrag-server --llm-binding lollms --embedding-binding openai --embedding-model text-embedding-3-small -``` - -#### Running a Lightrag server with azure openai server as llm and embedding backends - -```bash -# Run lightrag with lollms, GPT-4o-mini for llm, and text-embedding-3-small for embedding, use openai for both llm and embedding -lightrag-server --llm-binding azure_openai --llm-model GPT-4o-mini --embedding-binding openai --embedding-model text-embedding-3-small - -# Using an authentication key -lightrag-server --llm-binding azure_openai --llm-model GPT-4o-mini --embedding-binding azure_openai --embedding-model text-embedding-3-small --key my-key - -# Using lollms for llm and azure_openai for embedding -lightrag-server --llm-binding lollms --embedding-binding azure_openai --embedding-model text-embedding-3-small -``` - -**Important Notes:** -- For LoLLMs: Make sure the specified models are installed in your LoLLMs instance -- For Ollama: Make sure the specified models are installed in your Ollama instance -- For OpenAI: Ensure you have set up your OPENAI_API_KEY environment variable -- For Azure OpenAI: Build and configure your server as stated in the Prequisites section - -For help on any server, use the --help flag: -```bash -lightrag-server --help -``` - -Note: If you don't need the API functionality, you can install the base package without API support using: -```bash -pip install lightrag-hku -``` - -## API Endpoints - -All servers (LoLLMs, Ollama, OpenAI and Azure OpenAI) provide the same REST API endpoints for RAG functionality. - -### Query Endpoints - -#### POST /query -Query the RAG system with options for different search modes. 
- -```bash -curl -X POST "http://localhost:9621/query" \ - -H "Content-Type: application/json" \ - -d '{"query": "Your question here", "mode": "hybrid", ""}' -``` - -#### POST /query/stream -Stream responses from the RAG system. - -```bash -curl -X POST "http://localhost:9621/query/stream" \ - -H "Content-Type: application/json" \ - -d '{"query": "Your question here", "mode": "hybrid"}' -``` - -### Document Management Endpoints - -#### POST /documents/text -Insert text directly into the RAG system. - -```bash -curl -X POST "http://localhost:9621/documents/text" \ - -H "Content-Type: application/json" \ - -d '{"text": "Your text content here", "description": "Optional description"}' -``` - -#### POST /documents/file -Upload a single file to the RAG system. - -```bash -curl -X POST "http://localhost:9621/documents/file" \ - -F "file=@/path/to/your/document.txt" \ - -F "description=Optional description" -``` - -#### POST /documents/batch -Upload multiple files at once. - -```bash -curl -X POST "http://localhost:9621/documents/batch" \ - -F "files=@/path/to/doc1.txt" \ - -F "files=@/path/to/doc2.txt" -``` - -#### DELETE /documents -Clear all documents from the RAG system. - -```bash -curl -X DELETE "http://localhost:9621/documents" -``` - -### Utility Endpoints - -#### GET /health -Check server health and configuration. - -```bash -curl "http://localhost:9621/health" -``` - -## Development -Contribute to the project: [Guide](contributor-readme.MD) - -### Running in Development Mode - -For LoLLMs: -```bash -uvicorn lollms_lightrag_server:app --reload --port 9621 -``` - -For Ollama: -```bash -uvicorn ollama_lightrag_server:app --reload --port 9621 -``` - -For OpenAI: -```bash -uvicorn openai_lightrag_server:app --reload --port 9621 -``` -For Azure OpenAI: -```bash -uvicorn azure_openai_lightrag_server:app --reload --port 9621 -``` -### API Documentation - -When any server is running, visit: -- Swagger UI: http://localhost:9621/docs -- ReDoc: http://localhost:9621/redoc - -### Testing API Endpoints - -You can test the API endpoints using the provided curl commands or through the Swagger UI interface. Make sure to: -1. Start the appropriate backend service (LoLLMs, Ollama, or OpenAI) -2. Start the RAG server -3. Upload some documents using the document management endpoints -4. Query the system using the query endpoints - -### Important Features - -#### Automatic Document Vectorization -When starting any of the servers with the `--input-dir` parameter, the system will automatically: -1. Scan the specified directory for documents -2. Check for existing vectorized content in the database -3. Only vectorize new documents that aren't already in the database -4. 
Make all content immediately available for RAG queries
-
-This intelligent caching mechanism:
-- Prevents unnecessary re-vectorization of existing documents
-- Reduces startup time for subsequent runs
-- Preserves system resources
-- Maintains consistency across restarts
-
-### Example Usage
-
-#### LoLLMs RAG Server
-
-```bash
-# Start server with automatic document vectorization
-# Only new documents will be vectorized, existing ones will be loaded from cache
-lollms-lightrag-server --input-dir ./my_documents --port 8080
-```
-
-#### Ollama RAG Server
-
-```bash
-# Start server with automatic document vectorization
-# Previously vectorized documents will be loaded from the database
-ollama-lightrag-server --input-dir ./my_documents --port 8080
-```
-
-#### OpenAI RAG Server
-
-```bash
-# Start server with automatic document vectorization
-# Existing documents are retrieved from cache, only new ones are processed
-openai-lightrag-server --input-dir ./my_documents --port 9624
-```
-
-#### Azure OpenAI RAG Server
-
-```bash
-# Start server with automatic document vectorization
-# Existing documents are retrieved from cache, only new ones are processed
-azure-openai-lightrag-server --input-dir ./my_documents --port 9624
-```
-
-**Important Notes:**
-- The `--input-dir` parameter enables automatic document processing at startup
-- Documents already in the database are not re-vectorized
-- Only new documents in the input directory will be processed
-- This optimization significantly reduces startup time for subsequent runs
-- The working directory (`--working-dir`) stores the vectorized documents database

## Star History

diff --git a/docs/LightRagAPI.md b/docs/LightRagAPI.md
new file mode 100644
index 00000000..588d6164
--- /dev/null
+++ b/docs/LightRagAPI.md
@@ -0,0 +1,302 @@
+## Install with API Support
+
+LightRAG provides optional API support through FastAPI servers that add RAG capabilities to existing LLM services. You can install LightRAG with API support in two ways:
+
+### 1. Installation from PyPI
+
+```bash
+pip install "lightrag-hku[api]"
+```
+
+### 2. Installation from Source (Development)
+
+```bash
+# Clone the repository
+git clone https://github.com/HKUDS/lightrag.git
+
+# Change to the repository directory
+cd lightrag
+
+# Install in editable mode with API support
+pip install -e ".[api]"
+```
+
+### Prerequisites
+
+Before running any of the servers, ensure you have the corresponding backend service running for both the LLM and the embedding model.
+The new API allows you to mix different bindings for the LLM and the embeddings.
+For example, you can use Ollama for the embeddings and OpenAI for the LLM.
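+
+As a quick illustration (a sketch only, assuming OPENAI_API_KEY is set and the listed embedding model has already been pulled into a local Ollama instance), such a mixed setup could be launched as:
+
+```bash
+# Hypothetical mixed-binding example: openai serves the llm, ollama serves the embeddings
+lightrag-server --llm-binding openai --llm-model gpt-4o-mini --embedding-binding ollama --embedding-model bge-m3:latest
+```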
+
+#### For LoLLMs Server
+- LoLLMs must be running and accessible
+- Default connection: http://localhost:9600
+- Configure using --llm-binding-host and/or --embedding-binding-host if running on a different host/port
+
+#### For Ollama Server
+- Ollama must be running and accessible
+- Default connection: http://localhost:11434
+- Configure using --llm-binding-host and/or --embedding-binding-host if running on a different host/port
+
+#### For OpenAI Server
+- Requires valid OpenAI API credentials set in environment variables
+- OPENAI_API_KEY must be set
+
+#### For Azure OpenAI Server
+An Azure OpenAI resource can be created using the following commands in Azure CLI (you need to install Azure CLI first from [https://docs.microsoft.com/en-us/cli/azure/install-azure-cli](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli)):
+```bash
+# Change the resource group name, location and OpenAI resource name as needed
+RESOURCE_GROUP_NAME=LightRAG
+LOCATION=swedencentral
+RESOURCE_NAME=LightRAG-OpenAI
+
+az login
+az group create --name $RESOURCE_GROUP_NAME --location $LOCATION
+az cognitiveservices account create --name $RESOURCE_NAME --resource-group $RESOURCE_GROUP_NAME --kind OpenAI --sku S0 --location $LOCATION
+az cognitiveservices account deployment create --resource-group $RESOURCE_GROUP_NAME --model-format OpenAI --name $RESOURCE_NAME --deployment-name gpt-4o --model-name gpt-4o --model-version "2024-08-06" --sku-capacity 100 --sku-name "Standard"
+az cognitiveservices account deployment create --resource-group $RESOURCE_GROUP_NAME --model-format OpenAI --name $RESOURCE_NAME --deployment-name text-embedding-3-large --model-name text-embedding-3-large --model-version "1" --sku-capacity 80 --sku-name "Standard"
+az cognitiveservices account show --name $RESOURCE_NAME --resource-group $RESOURCE_GROUP_NAME --query "properties.endpoint"
+az cognitiveservices account keys list --name $RESOURCE_NAME -g $RESOURCE_GROUP_NAME
+
+```
+The output of the last command will give you the endpoint and the key for the OpenAI API. You can use these values to set the environment variables in the `.env` file.
+
+### Configuration Options
+
+Each server has its own specific configuration options:
+
+#### LightRag Server Options
+
+| Parameter | Default | Description |
+|-----------|---------|-------------|
+| --host | 0.0.0.0 | Server host |
+| --port | 9621 | Server port |
+| --llm-binding | ollama | LLM binding to be used. Supported: lollms, ollama, openai |
+| --llm-binding-host | (dynamic) | LLM server host URL. Defaults based on binding: http://localhost:11434 (ollama), http://localhost:9600 (lollms), https://api.openai.com/v1 (openai) |
+| --llm-model | mistral-nemo:latest | LLM model name |
+| --embedding-binding | ollama | Embedding binding to be used. Supported: lollms, ollama, openai |
+| --embedding-binding-host | (dynamic) | Embedding server host URL. Defaults based on binding: http://localhost:11434 (ollama), http://localhost:9600 (lollms), https://api.openai.com/v1 (openai) |
+| --embedding-model | bge-m3:latest | Embedding model name |
+| --working-dir | ./rag_storage | Working directory for RAG storage |
+| --input-dir | ./inputs | Directory containing input documents |
+| --max-async | 4 | Maximum async operations |
+| --max-tokens | 32768 | Maximum token size |
+| --embedding-dim | 1024 | Embedding dimensions |
+| --max-embed-tokens | 8192 | Maximum embedding token size |
+| --timeout | None | Timeout in seconds (useful when using slow AI). Use None for infinite timeout |
+| --log-level | INFO | Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL) |
+| --key | None | API key for authentication. Protects the lightrag server against unauthorized access |
+| --ssl | False | Enable HTTPS |
+| --ssl-certfile | None | Path to SSL certificate file (required if --ssl is enabled) |
+| --ssl-keyfile | None | Path to SSL private key file (required if --ssl is enabled) |
+
+To protect the server with an authentication key, you can also use an environment variable named `LIGHTRAG_API_KEY`.
+
+### Example Usage
+
+#### Running a Lightrag server with ollama default local server as llm and embedding backends
+
+Ollama is the default backend for both the LLM and the embeddings, so you can run lightrag-server with no parameters and the defaults will be used. Make sure Ollama is installed and running, and that the default models are already pulled into your Ollama instance.
+
+```bash
+# Run lightrag with ollama, mistral-nemo:latest for llm, and bge-m3:latest for embedding
+lightrag-server
+
+# Using specific models (ensure they are installed in your ollama instance)
+lightrag-server --llm-model adrienbrault/nous-hermes2theta-llama3-8b:f16 --embedding-model nomic-embed-text --embedding-dim 1024
+
+# Using an authentication key
+lightrag-server --key my-key
+
+# Using lollms for llm and ollama for embedding
+lightrag-server --llm-binding lollms
+```
+
+#### Running a Lightrag server with lollms default local server as llm and embedding backends
+
+```bash
+# Run lightrag with lollms, mistral-nemo:latest for llm, and bge-m3:latest for embedding, use lollms for both llm and embedding
+lightrag-server --llm-binding lollms --embedding-binding lollms
+
+# Using specific models (ensure they are installed in your lollms instance)
+lightrag-server --llm-binding lollms --llm-model adrienbrault/nous-hermes2theta-llama3-8b:f16 --embedding-binding lollms --embedding-model nomic-embed-text --embedding-dim 1024
+
+# Using an authentication key
+lightrag-server --key my-key
+
+# Using lollms for llm and openai for embedding
+lightrag-server --llm-binding lollms --embedding-binding openai --embedding-model text-embedding-3-small
+```
+
+#### Running a Lightrag server with openai server as llm and embedding backends
+
+```bash
+# Run lightrag with openai, gpt-4o-mini for llm, and text-embedding-3-small for embedding, use openai for both llm and embedding
+lightrag-server --llm-binding openai --llm-model gpt-4o-mini --embedding-binding openai --embedding-model text-embedding-3-small
+
+# Using an authentication key
+lightrag-server --llm-binding openai --llm-model gpt-4o-mini --embedding-binding openai --embedding-model text-embedding-3-small --key my-key
+
+# Using lollms for llm and openai for embedding
+lightrag-server --llm-binding lollms --embedding-binding openai --embedding-model text-embedding-3-small
+```
+
+#### Running a Lightrag server with azure openai server as llm and embedding backends
+
+```bash
+# Run lightrag with azure_openai, gpt-4o-mini for llm, and text-embedding-3-small for embedding, use azure_openai for both llm and embedding
+lightrag-server --llm-binding azure_openai --llm-model gpt-4o-mini --embedding-binding azure_openai --embedding-model text-embedding-3-small
+
+# Using an authentication key
+lightrag-server --llm-binding azure_openai --llm-model gpt-4o-mini --embedding-binding azure_openai --embedding-model text-embedding-3-small --key my-key
+
+# Using lollms for llm and azure_openai for embedding
+lightrag-server --llm-binding lollms --embedding-binding azure_openai --embedding-model text-embedding-3-small
+```
+
+**Important Notes:**
+- For LoLLMs: Make sure the specified models are installed in your LoLLMs instance
+- For Ollama: Make sure the specified models are installed in your Ollama instance
+- For OpenAI: Ensure you have set up your OPENAI_API_KEY environment variable
+- For Azure OpenAI: Build and configure your server as stated in the Prerequisites section
+
+For help on any server, use the --help flag:
+```bash
+lightrag-server --help
+```
+
+Note: If you don't need the API functionality, you can install the base package without API support using:
+```bash
+pip install lightrag-hku
+```
+
+## API Endpoints
+
+All servers (LoLLMs, Ollama, OpenAI and Azure OpenAI) provide the same REST API endpoints for RAG functionality.
+
+### Query Endpoints
+
+#### POST /query
+Query the RAG system with options for different search modes.
+
+```bash
+curl -X POST "http://localhost:9621/query" \
+     -H "Content-Type: application/json" \
+     -d '{"query": "Your question here", "mode": "hybrid"}'
+```
+
+#### POST /query/stream
+Stream responses from the RAG system.
+
+```bash
+curl -X POST "http://localhost:9621/query/stream" \
+     -H "Content-Type: application/json" \
+     -d '{"query": "Your question here", "mode": "hybrid"}'
+```
+
+### Document Management Endpoints
+
+#### POST /documents/text
+Insert text directly into the RAG system.
+
+```bash
+curl -X POST "http://localhost:9621/documents/text" \
+     -H "Content-Type: application/json" \
+     -d '{"text": "Your text content here", "description": "Optional description"}'
+```
+
+#### POST /documents/file
+Upload a single file to the RAG system.
+
+```bash
+curl -X POST "http://localhost:9621/documents/file" \
+     -F "file=@/path/to/your/document.txt" \
+     -F "description=Optional description"
+```
+
+#### POST /documents/batch
+Upload multiple files at once.
+
+```bash
+curl -X POST "http://localhost:9621/documents/batch" \
+     -F "files=@/path/to/doc1.txt" \
+     -F "files=@/path/to/doc2.txt"
+```
+
+#### DELETE /documents
+Clear all documents from the RAG system.
+
+```bash
+curl -X DELETE "http://localhost:9621/documents"
+```
+
+### Utility Endpoints
+
+#### GET /health
+Check server health and configuration.
+
+```bash
+curl "http://localhost:9621/health"
+```
+
+## Development
+Contribute to the project: [Guide](contributor-readme.MD)
+
+### Running in Development Mode
+
+For LoLLMs:
+```bash
+uvicorn lollms_lightrag_server:app --reload --port 9621
+```
+
+For Ollama:
+```bash
+uvicorn ollama_lightrag_server:app --reload --port 9621
+```
+
+For OpenAI:
+```bash
+uvicorn openai_lightrag_server:app --reload --port 9621
+```
+
+For Azure OpenAI:
+```bash
+uvicorn azure_openai_lightrag_server:app --reload --port 9621
+```
+
+### API Documentation
+
+When any server is running, visit:
+- Swagger UI: http://localhost:9621/docs
+- ReDoc: http://localhost:9621/redoc
+
+### Testing API Endpoints
+
+You can test the API endpoints using the provided curl commands or through the Swagger UI interface. Make sure to:
+1. Start the appropriate backend service (LoLLMs, Ollama, or OpenAI)
+2. Start the RAG server
+3. Upload some documents using the document management endpoints
+4. Query the system using the query endpoints
+
+### Important Features
+
+#### Automatic Document Vectorization
+When starting any of the servers with the `--input-dir` parameter, the system will automatically:
+1. Scan the specified directory for documents
+2. Check for existing vectorized content in the database
+3. Only vectorize new documents that aren't already in the database
+4. Make all content immediately available for RAG queries
+
+This intelligent caching mechanism:
+- Prevents unnecessary re-vectorization of existing documents
+- Reduces startup time for subsequent runs
+- Preserves system resources
+- Maintains consistency across restarts
+
+**Important Notes:**
+- The `--input-dir` parameter enables automatic document processing at startup
+- Documents already in the database are not re-vectorized
+- Only new documents in the input directory will be processed
+- This optimization significantly reduces startup time for subsequent runs
+- The working directory (`--working-dir`) stores the vectorized documents database
\ No newline at end of file
diff --git a/lightrag/api/README.md b/lightrag/api/README.md
new file mode 100644
index 00000000..588d6164
--- /dev/null
+++ b/lightrag/api/README.md
@@ -0,0 +1,302 @@
+## Install with API Support
+
+LightRAG provides optional API support through FastAPI servers that add RAG capabilities to existing LLM services. You can install LightRAG with API support in two ways:
+
+### 1. Installation from PyPI
+
+```bash
+pip install "lightrag-hku[api]"
+```
+
+### 2. Installation from Source (Development)
+
+```bash
+# Clone the repository
+git clone https://github.com/HKUDS/lightrag.git
+
+# Change to the repository directory
+cd lightrag
+
+# Install in editable mode with API support
+pip install -e ".[api]"
+```
+
+### Prerequisites
+
+Before running any of the servers, ensure you have the corresponding backend service running for both the LLM and the embedding model.
+The new API allows you to mix different bindings for the LLM and the embeddings.
+For example, you can use Ollama for the embeddings and OpenAI for the LLM.
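+
+As a quick illustration (a sketch only, assuming OPENAI_API_KEY is set and the listed embedding model has already been pulled into a local Ollama instance), such a mixed setup could be launched as:
+
+```bash
+# Hypothetical mixed-binding example: openai serves the llm, ollama serves the embeddings
+lightrag-server --llm-binding openai --llm-model gpt-4o-mini --embedding-binding ollama --embedding-model bge-m3:latest
+```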
+
+#### For LoLLMs Server
+- LoLLMs must be running and accessible
+- Default connection: http://localhost:9600
+- Configure using --llm-binding-host and/or --embedding-binding-host if running on a different host/port
+
+#### For Ollama Server
+- Ollama must be running and accessible
+- Default connection: http://localhost:11434
+- Configure using --llm-binding-host and/or --embedding-binding-host if running on a different host/port
+
+#### For OpenAI Server
+- Requires valid OpenAI API credentials set in environment variables
+- OPENAI_API_KEY must be set
+
+#### For Azure OpenAI Server
+An Azure OpenAI resource can be created using the following commands in Azure CLI (you need to install Azure CLI first from [https://docs.microsoft.com/en-us/cli/azure/install-azure-cli](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli)):
+```bash
+# Change the resource group name, location and OpenAI resource name as needed
+RESOURCE_GROUP_NAME=LightRAG
+LOCATION=swedencentral
+RESOURCE_NAME=LightRAG-OpenAI
+
+az login
+az group create --name $RESOURCE_GROUP_NAME --location $LOCATION
+az cognitiveservices account create --name $RESOURCE_NAME --resource-group $RESOURCE_GROUP_NAME --kind OpenAI --sku S0 --location $LOCATION
+az cognitiveservices account deployment create --resource-group $RESOURCE_GROUP_NAME --model-format OpenAI --name $RESOURCE_NAME --deployment-name gpt-4o --model-name gpt-4o --model-version "2024-08-06" --sku-capacity 100 --sku-name "Standard"
+az cognitiveservices account deployment create --resource-group $RESOURCE_GROUP_NAME --model-format OpenAI --name $RESOURCE_NAME --deployment-name text-embedding-3-large --model-name text-embedding-3-large --model-version "1" --sku-capacity 80 --sku-name "Standard"
+az cognitiveservices account show --name $RESOURCE_NAME --resource-group $RESOURCE_GROUP_NAME --query "properties.endpoint"
+az cognitiveservices account keys list --name $RESOURCE_NAME -g $RESOURCE_GROUP_NAME
+
+```
+The output of the last command will give you the endpoint and the key for the OpenAI API. You can use these values to set the environment variables in the `.env` file.
+
+### Configuration Options
+
+Each server has its own specific configuration options:
+
+#### LightRag Server Options
+
+| Parameter | Default | Description |
+|-----------|---------|-------------|
+| --host | 0.0.0.0 | Server host |
+| --port | 9621 | Server port |
+| --llm-binding | ollama | LLM binding to be used. Supported: lollms, ollama, openai |
+| --llm-binding-host | (dynamic) | LLM server host URL. Defaults based on binding: http://localhost:11434 (ollama), http://localhost:9600 (lollms), https://api.openai.com/v1 (openai) |
+| --llm-model | mistral-nemo:latest | LLM model name |
+| --embedding-binding | ollama | Embedding binding to be used. Supported: lollms, ollama, openai |
+| --embedding-binding-host | (dynamic) | Embedding server host URL. Defaults based on binding: http://localhost:11434 (ollama), http://localhost:9600 (lollms), https://api.openai.com/v1 (openai) |
+| --embedding-model | bge-m3:latest | Embedding model name |
+| --working-dir | ./rag_storage | Working directory for RAG storage |
+| --input-dir | ./inputs | Directory containing input documents |
+| --max-async | 4 | Maximum async operations |
+| --max-tokens | 32768 | Maximum token size |
+| --embedding-dim | 1024 | Embedding dimensions |
+| --max-embed-tokens | 8192 | Maximum embedding token size |
+| --timeout | None | Timeout in seconds (useful when using slow AI). Use None for infinite timeout |
+| --log-level | INFO | Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL) |
+| --key | None | API key for authentication. Protects the lightrag server against unauthorized access |
+| --ssl | False | Enable HTTPS |
+| --ssl-certfile | None | Path to SSL certificate file (required if --ssl is enabled) |
+| --ssl-keyfile | None | Path to SSL private key file (required if --ssl is enabled) |
+
+To protect the server with an authentication key, you can also use an environment variable named `LIGHTRAG_API_KEY`.
+
+### Example Usage
+
+#### Running a Lightrag server with ollama default local server as llm and embedding backends
+
+Ollama is the default backend for both the LLM and the embeddings, so you can run lightrag-server with no parameters and the defaults will be used. Make sure Ollama is installed and running, and that the default models are already pulled into your Ollama instance.
+
+```bash
+# Run lightrag with ollama, mistral-nemo:latest for llm, and bge-m3:latest for embedding
+lightrag-server
+
+# Using specific models (ensure they are installed in your ollama instance)
+lightrag-server --llm-model adrienbrault/nous-hermes2theta-llama3-8b:f16 --embedding-model nomic-embed-text --embedding-dim 1024
+
+# Using an authentication key
+lightrag-server --key my-key
+
+# Using lollms for llm and ollama for embedding
+lightrag-server --llm-binding lollms
+```
+
+#### Running a Lightrag server with lollms default local server as llm and embedding backends
+
+```bash
+# Run lightrag with lollms, mistral-nemo:latest for llm, and bge-m3:latest for embedding, use lollms for both llm and embedding
+lightrag-server --llm-binding lollms --embedding-binding lollms
+
+# Using specific models (ensure they are installed in your lollms instance)
+lightrag-server --llm-binding lollms --llm-model adrienbrault/nous-hermes2theta-llama3-8b:f16 --embedding-binding lollms --embedding-model nomic-embed-text --embedding-dim 1024
+
+# Using an authentication key
+lightrag-server --key my-key
+
+# Using lollms for llm and openai for embedding
+lightrag-server --llm-binding lollms --embedding-binding openai --embedding-model text-embedding-3-small
+```
+
+#### Running a Lightrag server with openai server as llm and embedding backends
+
+```bash
+# Run lightrag with openai, gpt-4o-mini for llm, and text-embedding-3-small for embedding, use openai for both llm and embedding
+lightrag-server --llm-binding openai --llm-model gpt-4o-mini --embedding-binding openai --embedding-model text-embedding-3-small
+
+# Using an authentication key
+lightrag-server --llm-binding openai --llm-model gpt-4o-mini --embedding-binding openai --embedding-model text-embedding-3-small --key my-key
+
+# Using lollms for llm and openai for embedding
+lightrag-server --llm-binding lollms --embedding-binding openai --embedding-model text-embedding-3-small
+```
+
+#### Running a Lightrag server with azure openai server as llm and embedding backends
+
+```bash
+# Run lightrag with azure_openai, gpt-4o-mini for llm, and text-embedding-3-small for embedding, use azure_openai for both llm and embedding
+lightrag-server --llm-binding azure_openai --llm-model gpt-4o-mini --embedding-binding azure_openai --embedding-model text-embedding-3-small
+
+# Using an authentication key
+lightrag-server --llm-binding azure_openai --llm-model gpt-4o-mini --embedding-binding azure_openai --embedding-model text-embedding-3-small --key my-key
+
+# Using lollms for llm and azure_openai for embedding
+lightrag-server --llm-binding lollms --embedding-binding azure_openai --embedding-model text-embedding-3-small
+```
+
+**Important Notes:**
+- For LoLLMs: Make sure the specified models are installed in your LoLLMs instance
+- For Ollama: Make sure the specified models are installed in your Ollama instance
+- For OpenAI: Ensure you have set up your OPENAI_API_KEY environment variable
+- For Azure OpenAI: Build and configure your server as stated in the Prerequisites section
+
+For help on any server, use the --help flag:
+```bash
+lightrag-server --help
+```
+
+Note: If you don't need the API functionality, you can install the base package without API support using:
+```bash
+pip install lightrag-hku
+```
+
+## API Endpoints
+
+All servers (LoLLMs, Ollama, OpenAI and Azure OpenAI) provide the same REST API endpoints for RAG functionality.
+
+### Query Endpoints
+
+#### POST /query
+Query the RAG system with options for different search modes.
+
+```bash
+curl -X POST "http://localhost:9621/query" \
+     -H "Content-Type: application/json" \
+     -d '{"query": "Your question here", "mode": "hybrid"}'
+```
+
+#### POST /query/stream
+Stream responses from the RAG system.
+
+```bash
+curl -X POST "http://localhost:9621/query/stream" \
+     -H "Content-Type: application/json" \
+     -d '{"query": "Your question here", "mode": "hybrid"}'
+```
+
+### Document Management Endpoints
+
+#### POST /documents/text
+Insert text directly into the RAG system.
+
+```bash
+curl -X POST "http://localhost:9621/documents/text" \
+     -H "Content-Type: application/json" \
+     -d '{"text": "Your text content here", "description": "Optional description"}'
+```
+
+#### POST /documents/file
+Upload a single file to the RAG system.
+
+```bash
+curl -X POST "http://localhost:9621/documents/file" \
+     -F "file=@/path/to/your/document.txt" \
+     -F "description=Optional description"
+```
+
+#### POST /documents/batch
+Upload multiple files at once.
+
+```bash
+curl -X POST "http://localhost:9621/documents/batch" \
+     -F "files=@/path/to/doc1.txt" \
+     -F "files=@/path/to/doc2.txt"
+```
+
+#### DELETE /documents
+Clear all documents from the RAG system.
+
+```bash
+curl -X DELETE "http://localhost:9621/documents"
+```
+
+### Utility Endpoints
+
+#### GET /health
+Check server health and configuration.
+
+```bash
+curl "http://localhost:9621/health"
+```
+
+## Development
+Contribute to the project: [Guide](contributor-readme.MD)
+
+### Running in Development Mode
+
+For LoLLMs:
+```bash
+uvicorn lollms_lightrag_server:app --reload --port 9621
+```
+
+For Ollama:
+```bash
+uvicorn ollama_lightrag_server:app --reload --port 9621
+```
+
+For OpenAI:
+```bash
+uvicorn openai_lightrag_server:app --reload --port 9621
+```
+
+For Azure OpenAI:
+```bash
+uvicorn azure_openai_lightrag_server:app --reload --port 9621
+```
+
+### API Documentation
+
+When any server is running, visit:
+- Swagger UI: http://localhost:9621/docs
+- ReDoc: http://localhost:9621/redoc
+
+### Testing API Endpoints
+
+You can test the API endpoints using the provided curl commands or through the Swagger UI interface. Make sure to:
+1. Start the appropriate backend service (LoLLMs, Ollama, or OpenAI)
+2. Start the RAG server
+3. Upload some documents using the document management endpoints
+4. Query the system using the query endpoints
+
+### Important Features
+
+#### Automatic Document Vectorization
+When starting any of the servers with the `--input-dir` parameter, the system will automatically:
+1. Scan the specified directory for documents
+2. Check for existing vectorized content in the database
+3. Only vectorize new documents that aren't already in the database
+4. Make all content immediately available for RAG queries
+
+This intelligent caching mechanism:
+- Prevents unnecessary re-vectorization of existing documents
+- Reduces startup time for subsequent runs
+- Preserves system resources
+- Maintains consistency across restarts
+
+**Important Notes:**
+- The `--input-dir` parameter enables automatic document processing at startup
+- Documents already in the database are not re-vectorized
+- Only new documents in the input directory will be processed
+- This optimization significantly reduces startup time for subsequent runs
+- The working directory (`--working-dir`) stores the vectorized documents database
\ No newline at end of file

From cbd02bbbab108d46deb1f8ccb4da5dd4fb5fe36a Mon Sep 17 00:00:00 2001
From: Saifeddine ALOUI
Date: Thu, 16 Jan 2025 22:16:34 +0100
Subject: [PATCH 02/15] Added link to documentation from main README.md

---
 README.md | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/README.md b/README.md
index e302d708..3ebbe290 100644
--- a/README.md
+++ b/README.md
@@ -921,7 +921,10 @@ def extract_queries(file_path):
```

+## API
+LightRAG can be installed with API support, serving a FastAPI interface for data upload, indexing, RAG operations, rescanning of the input folder, and more.
+The documentation can be found [here](https://github.com/ParisNeo/LightRAG/blob/main/docs/LightRagAPI.md).

## Star History

From b2e7c75f5a943e7163769642c4fc835980b0e94e Mon Sep 17 00:00:00 2001
From: Saifeddine ALOUI
Date: Thu, 16 Jan 2025 22:28:28 +0100
Subject: [PATCH 03/15] Added Docker container setup

---
 .env.example             |  37 +++++++++
 Dockerfile               |  38 +++++++++
 docker-compose.yml       |  21 +++++
 docs/DockerDeployment.md | 174 +++++++++++++++++++++++++++++++++++++++
 4 files changed, 270 insertions(+)
 create mode 100644 .env.example
 create mode 100644 Dockerfile
 create mode 100644 docker-compose.yml
 create mode 100644 docs/DockerDeployment.md

diff --git a/.env.example b/.env.example
new file mode 100644
index 00000000..b7e9f7ab
--- /dev/null
+++ b/.env.example
@@ -0,0 +1,37 @@
+# Server Configuration
+HOST=0.0.0.0
+PORT=9621
+
+# Directory Configuration
+WORKING_DIR=/app/data/rag_storage
+INPUT_DIR=/app/data/inputs
+
+# LLM Configuration
+LLM_BINDING=ollama
+LLM_BINDING_HOST=http://localhost:11434
+LLM_MODEL=mistral-nemo:latest
+
+# Embedding Configuration
+EMBEDDING_BINDING=ollama
+EMBEDDING_BINDING_HOST=http://localhost:11434
+EMBEDDING_MODEL=bge-m3:latest
+
+# RAG Configuration
+MAX_ASYNC=4
+MAX_TOKENS=32768
+EMBEDDING_DIM=1024
+MAX_EMBED_TOKENS=8192
+
+# Security (empty for no key)
+LIGHTRAG_API_KEY=your-secure-api-key-here
+
+# Logging
+LOG_LEVEL=INFO
+
+# Optional SSL Configuration
+#SSL=true
+#SSL_CERTFILE=/path/to/cert.pem
+#SSL_KEYFILE=/path/to/key.pem
+
+# Optional Timeout
+#TIMEOUT=30

diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 00000000..eab315aa
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,38 @@
+# Build stage
+FROM python:3.11-slim AS builder
+
+WORKDIR /app
+
+# Install build dependencies
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    build-essential \
+    && rm -rf /var/lib/apt/lists/*
+
+# Copy only requirements files first to leverage Docker cache
+COPY requirements.txt .
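+# (Note: the API server keeps its extra dependencies in lightrag/api/requirements.txt;
+# it is copied separately below so each requirements file invalidates the cache independently)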
+COPY lightrag/api/requirements.txt ./lightrag/api/ + +# Install dependencies +RUN pip install --user --no-cache-dir -r requirements.txt +RUN pip install --user --no-cache-dir -r lightrag/api/requirements.txt + +# Final stage +FROM python:3.11-slim + +WORKDIR /app + +# Copy only necessary files from builder +COPY --from=builder /root/.local /root/.local +COPY . . + +# Make sure scripts in .local are usable +ENV PATH=/root/.local/bin:$PATH + +# Create necessary directories +RUN mkdir -p /app/data/rag_storage /app/data/inputs + +# Expose the default port +EXPOSE 9621 + +# Set entrypoint +ENTRYPOINT ["python", "-m", "lightrag.api.lightrag_server"] diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 00000000..5c851a60 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,21 @@ +version: '3.8' + +services: + lightrag: + build: . + ports: + - "${PORT:-9621}:9621" + volumes: + - ./data/rag_storage:/app/data/rag_storage + - ./data/inputs:/app/data/inputs + env_file: + - .env + environment: + - TZ=UTC + restart: unless-stopped + networks: + - lightrag_net + +networks: + lightrag_net: + driver: bridge diff --git a/docs/DockerDeployment.md b/docs/DockerDeployment.md new file mode 100644 index 00000000..fa9120c0 --- /dev/null +++ b/docs/DockerDeployment.md @@ -0,0 +1,174 @@ +# LightRAG + +A lightweight Knowledge Graph Retrieval-Augmented Generation system with multiple LLM backend support. + +## šŸš€ Installation + +### Prerequisites +- Python 3.10+ +- Git +- Docker (optional for Docker deployment) + +### Native Installation + +1. Clone the repository: +```bash +# Linux/MacOS +git clone https://github.com/ParisNeo/LightRAG.git +cd LightRAG +``` +```powershell +# Windows PowerShell +git clone https://github.com/ParisNeo/LightRAG.git +cd LightRAG +``` + +2. Configure your environment: +```bash +# Linux/MacOS +cp .env.example .env +# Edit .env with your preferred configuration +``` +```powershell +# Windows PowerShell +Copy-Item .env.example .env +# Edit .env with your preferred configuration +``` + +3. Create and activate virtual environment: +```bash +# Linux/MacOS +python -m venv venv +source venv/bin/activate +``` +```powershell +# Windows PowerShell +python -m venv venv +.\venv\Scripts\Activate +``` + +4. Install dependencies: +```bash +# Both platforms +pip install -r requirements.txt +``` + +## 🐳 Docker Deployment + +Docker instructions work the same on all platforms with Docker Desktop installed. + +1. 
Build and start the container: +```bash +docker-compose up -d +``` + +### Configuration Options + +LightRAG can be configured using environment variables in the `.env` file: + +#### Server Configuration +- `HOST`: Server host (default: 0.0.0.0) +- `PORT`: Server port (default: 9621) + +#### LLM Configuration +- `LLM_BINDING`: LLM backend to use (lollms/ollama/openai) +- `LLM_BINDING_HOST`: LLM server host URL +- `LLM_MODEL`: Model name to use + +#### Embedding Configuration +- `EMBEDDING_BINDING`: Embedding backend (lollms/ollama/openai) +- `EMBEDDING_BINDING_HOST`: Embedding server host URL +- `EMBEDDING_MODEL`: Embedding model name + +#### RAG Configuration +- `MAX_ASYNC`: Maximum async operations +- `MAX_TOKENS`: Maximum token size +- `EMBEDDING_DIM`: Embedding dimensions +- `MAX_EMBED_TOKENS`: Maximum embedding token size + +#### Security +- `LIGHTRAG_API_KEY`: API key for authentication + +### Data Storage Paths + +The system uses the following paths for data storage: +``` +data/ +ā”œā”€ā”€ rag_storage/ # RAG data persistence +└── inputs/ # Input documents +``` + +### Example Deployments + +1. Using with Ollama: +```env +LLM_BINDING=ollama +LLM_BINDING_HOST=http://localhost:11434 +LLM_MODEL=mistral +EMBEDDING_BINDING=ollama +EMBEDDING_BINDING_HOST=http://localhost:11434 +EMBEDDING_MODEL=bge-m3 +``` + +2. Using with OpenAI: +```env +LLM_BINDING=openai +LLM_MODEL=gpt-3.5-turbo +EMBEDDING_BINDING=openai +EMBEDDING_MODEL=text-embedding-ada-002 +OPENAI_API_KEY=your-api-key +``` + +### API Usage + +Once deployed, you can interact with the API at `http://localhost:9621` + +Example query using PowerShell: +```powershell +$headers = @{ + "X-API-Key" = "your-api-key" + "Content-Type" = "application/json" +} +$body = @{ + query = "your question here" +} | ConvertTo-Json + +Invoke-RestMethod -Uri "http://localhost:9621/query" -Method Post -Headers $headers -Body $body +``` + +Example query using curl: +```bash +curl -X POST "http://localhost:9621/query" \ + -H "X-API-Key: your-api-key" \ + -H "Content-Type: application/json" \ + -d '{"query": "your question here"}' +``` + +## šŸ”’ Security + +Remember to: +1. Set a strong API key in production +2. Use SSL in production environments +3. 
Configure proper network security + +## šŸ“¦ Updates + +To update the Docker container: +```bash +docker-compose pull +docker-compose up -d --build +``` + +To update native installation: +```bash +# Linux/MacOS +git pull +source venv/bin/activate +pip install -r requirements.txt +``` +```powershell +# Windows PowerShell +git pull +.\venv\Scripts\Activate +pip install -r requirements.txt +``` \ No newline at end of file From ea566d815dfed438cbb1f2bd16745278ecae9070 Mon Sep 17 00:00:00 2001 From: Saifeddine ALOUI Date: Thu, 16 Jan 2025 23:21:50 +0100 Subject: [PATCH 04/15] Added environment variables control of all lightrag server parameters preparing for the usage in docker --- lightrag/api/lightrag_server.py | 131 ++++++++++++++++++++++---------- lightrag/api/requirements.txt | 1 + 2 files changed, 91 insertions(+), 41 deletions(-) diff --git a/lightrag/api/lightrag_server.py b/lightrag/api/lightrag_server.py index 0d154b38..cec3c089 100644 --- a/lightrag/api/lightrag_server.py +++ b/lightrag/api/lightrag_server.py @@ -20,6 +20,7 @@ import os from fastapi import Depends, Security from fastapi.security import APIKeyHeader from fastapi.middleware.cors import CORSMiddleware +from contextlib import asynccontextmanager from starlette.status import HTTP_403_FORBIDDEN import pipmaster as pm @@ -36,74 +37,112 @@ def get_default_host(binding_type: str) -> str: binding_type, "http://localhost:11434" ) # fallback to ollama if unknown +from dotenv import load_dotenv +import os -def parse_args(): +def get_env_value(env_key: str, default: Any, value_type: type = str) -> Any: + """ + Get value from environment variable with type conversion + + Args: + env_key (str): Environment variable key + default (Any): Default value if env variable is not set + value_type (type): Type to convert the value to + + Returns: + Any: Converted value from environment or default + """ + value = os.getenv(env_key) + if value is None: + return default + + if value_type == bool: + return value.lower() in ('true', '1', 'yes') + try: + return value_type(value) + except ValueError: + return default + +def parse_args() -> argparse.Namespace: + """ + Parse command line arguments with environment variable fallback + + Returns: + argparse.Namespace: Parsed arguments + """ + # Load environment variables from .env file + load_dotenv() + parser = argparse.ArgumentParser( description="LightRAG FastAPI Server with separate working and input directories" ) - # Start by the bindings + # Bindings (with env var support) parser.add_argument( "--llm-binding", - default="ollama", - help="LLM binding to be used. Supported: lollms, ollama, openai (default: ollama)", + default=get_env_value("LLM_BINDING", "ollama"), + help="LLM binding to be used. Supported: lollms, ollama, openai (default: from env or ollama)", ) parser.add_argument( "--embedding-binding", - default="ollama", - help="Embedding binding to be used. Supported: lollms, ollama, openai (default: ollama)", + default=get_env_value("EMBEDDING_BINDING", "ollama"), + help="Embedding binding to be used. 
Supported: lollms, ollama, openai (default: from env or ollama)", ) - # Parse just these arguments first + # Parse temporary args for host defaults temp_args, _ = parser.parse_known_args() - # Add remaining arguments with dynamic defaults for hosts # Server configuration parser.add_argument( - "--host", default="0.0.0.0", help="Server host (default: 0.0.0.0)" + "--host", + default=get_env_value("HOST", "0.0.0.0"), + help="Server host (default: from env or 0.0.0.0)" ) parser.add_argument( - "--port", type=int, default=9621, help="Server port (default: 9621)" + "--port", + type=int, + default=get_env_value("PORT", 9621, int), + help="Server port (default: from env or 9621)" ) # Directory configuration parser.add_argument( "--working-dir", - default="./rag_storage", - help="Working directory for RAG storage (default: ./rag_storage)", + default=get_env_value("WORKING_DIR", "./rag_storage"), + help="Working directory for RAG storage (default: from env or ./rag_storage)", ) parser.add_argument( "--input-dir", - default="./inputs", - help="Directory containing input documents (default: ./inputs)", + default=get_env_value("INPUT_DIR", "./inputs"), + help="Directory containing input documents (default: from env or ./inputs)", ) # LLM Model configuration - default_llm_host = get_default_host(temp_args.llm_binding) + default_llm_host = get_env_value("LLM_BINDING_HOST", get_default_host(temp_args.llm_binding)) parser.add_argument( "--llm-binding-host", default=default_llm_host, - help=f"llm server host URL (default: {default_llm_host})", + help=f"llm server host URL (default: from env or {default_llm_host})", ) parser.add_argument( "--llm-model", - default="mistral-nemo:latest", - help="LLM model name (default: mistral-nemo:latest)", + default=get_env_value("LLM_MODEL", "mistral-nemo:latest"), + help="LLM model name (default: from env or mistral-nemo:latest)", ) # Embedding model configuration - default_embedding_host = get_default_host(temp_args.embedding_binding) + default_embedding_host = get_env_value("EMBEDDING_BINDING_HOST", get_default_host(temp_args.embedding_binding)) parser.add_argument( "--embedding-binding-host", default=default_embedding_host, - help=f"embedding server host URL (default: {default_embedding_host})", + help=f"embedding server host URL (default: from env or {default_embedding_host})", ) parser.add_argument( "--embedding-model", - default="bge-m3:latest", - help="Embedding model name (default: bge-m3:latest)", + default=get_env_value("EMBEDDING_MODEL", "bge-m3:latest"), + help="Embedding model name (default: from env or bge-m3:latest)", ) def timeout_type(value): @@ -113,62 +152,70 @@ def parse_args(): parser.add_argument( "--timeout", - default=None, + default=get_env_value("TIMEOUT", None, timeout_type), type=timeout_type, help="Timeout in seconds (useful when using slow AI). 
Use None for infinite timeout", ) + # RAG configuration parser.add_argument( - "--max-async", type=int, default=4, help="Maximum async operations (default: 4)" + "--max-async", + type=int, + default=get_env_value("MAX_ASYNC", 4, int), + help="Maximum async operations (default: from env or 4)" ) parser.add_argument( "--max-tokens", type=int, - default=32768, - help="Maximum token size (default: 32768)", + default=get_env_value("MAX_TOKENS", 32768, int), + help="Maximum token size (default: from env or 32768)", ) parser.add_argument( "--embedding-dim", type=int, - default=1024, - help="Embedding dimensions (default: 1024)", + default=get_env_value("EMBEDDING_DIM", 1024, int), + help="Embedding dimensions (default: from env or 1024)", ) parser.add_argument( "--max-embed-tokens", type=int, - default=8192, - help="Maximum embedding token size (default: 8192)", + default=get_env_value("MAX_EMBED_TOKENS", 8192, int), + help="Maximum embedding token size (default: from env or 8192)", ) # Logging configuration parser.add_argument( "--log-level", - default="INFO", + default=get_env_value("LOG_LEVEL", "INFO"), choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"], - help="Logging level (default: INFO)", + help="Logging level (default: from env or INFO)", ) parser.add_argument( "--key", type=str, + default=get_env_value("LIGHTRAG_API_KEY", None), help="API key for authentication. This protects lightrag server against unauthorized access", - default=None, ) # Optional https parameters parser.add_argument( - "--ssl", action="store_true", help="Enable HTTPS (default: False)" + "--ssl", + action="store_true", + default=get_env_value("SSL", False, bool), + help="Enable HTTPS (default: from env or False)" ) parser.add_argument( "--ssl-certfile", - default=None, + default=get_env_value("SSL_CERTFILE", None), help="Path to SSL certificate file (required if --ssl is enabled)", ) parser.add_argument( "--ssl-keyfile", - default=None, + default=get_env_value("SSL_KEYFILE", None), help="Path to SSL private key file (required if --ssl is enabled)", ) + return parser.parse_args() @@ -434,10 +481,12 @@ def create_app(args): logging.info(f"Successfully indexed file: {file_path}") else: logging.warning(f"No content extracted from file: {file_path}") - - @app.on_event("startup") - async def startup_event(): - """Index all files in input directory during startup""" + + + @asynccontextmanager + async def lifespan(app: FastAPI): + """Lifespan context manager for startup and shutdown events""" + # Startup logic try: new_files = doc_manager.scan_directory() for file_path in new_files: @@ -448,7 +497,6 @@ def create_app(args): logging.error(f"Error indexing file {file_path}: {str(e)}") logging.info(f"Indexed {len(new_files)} documents from {args.input_dir}") - except Exception as e: logging.error(f"Error during startup indexing: {str(e)}") @@ -521,6 +569,7 @@ def create_app(args): else: return QueryResponse(response=response) except Exception as e: + trace_exception(e) raise HTTPException(status_code=500, detail=str(e)) @app.post("/query/stream", dependencies=[Depends(optional_api_key)]) diff --git a/lightrag/api/requirements.txt b/lightrag/api/requirements.txt index 9154809c..0d5b82f6 100644 --- a/lightrag/api/requirements.txt +++ b/lightrag/api/requirements.txt @@ -16,3 +16,4 @@ torch tqdm transformers uvicorn +python-dotenv \ No newline at end of file From d8309c81d5200f1177dea3fd4efc88a1e18a4b7a Mon Sep 17 00:00:00 2001 From: Saifeddine ALOUI Date: Thu, 16 Jan 2025 23:22:57 +0100 Subject: [PATCH 05/15] Fixed typing 
error --- lightrag/api/lightrag_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lightrag/api/lightrag_server.py b/lightrag/api/lightrag_server.py index cec3c089..705a4673 100644 --- a/lightrag/api/lightrag_server.py +++ b/lightrag/api/lightrag_server.py @@ -9,7 +9,7 @@ from lightrag.llm import openai_complete_if_cache, openai_embedding from lightrag.llm import azure_openai_complete_if_cache, azure_openai_embedding from lightrag.utils import EmbeddingFunc -from typing import Optional, List, Union +from typing import Optional, List, Union, Any from enum import Enum from pathlib import Path import shutil From c5e027aa9aa8d548d0a5eb52b555e13773936048 Mon Sep 17 00:00:00 2001 From: Saifeddine ALOUI Date: Fri, 17 Jan 2025 00:42:22 +0100 Subject: [PATCH 06/15] Added documentation about used environment variables --- docs/LightRagAPI.md | 63 +++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 61 insertions(+), 2 deletions(-) diff --git a/docs/LightRagAPI.md b/docs/LightRagAPI.md index 588d6164..15b5185d 100644 --- a/docs/LightRagAPI.md +++ b/docs/LightRagAPI.md @@ -61,10 +61,69 @@ az cognitiveservices account keys list --name $RESOURCE_NAME -g $RESOURCE_GROUP_ The output of the last command will give you the endpoint and the key for the OpenAI API. You can use these values to set the environment variables in the `.env` file. +## Configuration -### Configuration Options +LightRAG can be configured using either command-line arguments or environment variables. When both are provided, command-line arguments take precedence over environment variables. -Each server has its own specific configuration options: +### Environment Variables + +You can configure LightRAG using environment variables by creating a `.env` file in your project root directory. Here's a complete example of available environment variables: + +```env +# Server Configuration +HOST=0.0.0.0 +PORT=9621 + +# Directory Configuration +WORKING_DIR=/app/data/rag_storage +INPUT_DIR=/app/data/inputs + +# LLM Configuration +LLM_BINDING=ollama +LLM_BINDING_HOST=http://localhost:11434 +LLM_MODEL=mistral-nemo:latest + +# Embedding Configuration +EMBEDDING_BINDING=ollama +EMBEDDING_BINDING_HOST=http://localhost:11434 +EMBEDDING_MODEL=bge-m3:latest + +# RAG Configuration +MAX_ASYNC=4 +MAX_TOKENS=32768 +EMBEDDING_DIM=1024 +MAX_EMBED_TOKENS=8192 + +# Security +LIGHTRAG_API_KEY= + +# Logging +LOG_LEVEL=INFO + +# Optional SSL Configuration +#SSL=true +#SSL_CERTFILE=/path/to/cert.pem +#SSL_KEYFILE=/path/to/key.pem + +# Optional Timeout +#TIMEOUT=30 +``` + +### Configuration Priority + +The configuration values are loaded in the following order (highest priority first): +1. Command-line arguments +2. Environment variables +3. 
Default values + +For example: +```bash +# This command-line argument will override both the environment variable and default value +python lightrag.py --port 8080 + +# The environment variable will override the default value but not the command-line argument +PORT=7000 python lightrag.py +``` #### LightRag Server Options From b8c0631e9990bf71f62d84bff0e6b34fbe9ed1f4 Mon Sep 17 00:00:00 2001 From: Saifeddine ALOUI Date: Fri, 17 Jan 2025 00:49:17 +0100 Subject: [PATCH 07/15] Enhanced documentation --- lightrag/api/README.md | 63 ++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 61 insertions(+), 2 deletions(-) diff --git a/lightrag/api/README.md b/lightrag/api/README.md index 588d6164..15b5185d 100644 --- a/lightrag/api/README.md +++ b/lightrag/api/README.md @@ -61,10 +61,69 @@ az cognitiveservices account keys list --name $RESOURCE_NAME -g $RESOURCE_GROUP_ The output of the last command will give you the endpoint and the key for the OpenAI API. You can use these values to set the environment variables in the `.env` file. +## Configuration -### Configuration Options +LightRAG can be configured using either command-line arguments or environment variables. When both are provided, command-line arguments take precedence over environment variables. -Each server has its own specific configuration options: +### Environment Variables + +You can configure LightRAG using environment variables by creating a `.env` file in your project root directory. Here's a complete example of available environment variables: + +```env +# Server Configuration +HOST=0.0.0.0 +PORT=9621 + +# Directory Configuration +WORKING_DIR=/app/data/rag_storage +INPUT_DIR=/app/data/inputs + +# LLM Configuration +LLM_BINDING=ollama +LLM_BINDING_HOST=http://localhost:11434 +LLM_MODEL=mistral-nemo:latest + +# Embedding Configuration +EMBEDDING_BINDING=ollama +EMBEDDING_BINDING_HOST=http://localhost:11434 +EMBEDDING_MODEL=bge-m3:latest + +# RAG Configuration +MAX_ASYNC=4 +MAX_TOKENS=32768 +EMBEDDING_DIM=1024 +MAX_EMBED_TOKENS=8192 + +# Security +LIGHTRAG_API_KEY= + +# Logging +LOG_LEVEL=INFO + +# Optional SSL Configuration +#SSL=true +#SSL_CERTFILE=/path/to/cert.pem +#SSL_KEYFILE=/path/to/key.pem + +# Optional Timeout +#TIMEOUT=30 +``` + +### Configuration Priority + +The configuration values are loaded in the following order (highest priority first): +1. Command-line arguments +2. Environment variables +3. 
Default values + +For example: +```bash +# This command-line argument will override both the environment variable and default value +python lightrag.py --port 8080 + +# The environment variable will override the default value but not the command-line argument +PORT=7000 python lightrag.py +``` #### LightRag Server Options From 65a44a4644f4f0e25b772f23db0d64f3db3a641a Mon Sep 17 00:00:00 2001 From: Saifeddine ALOUI Date: Fri, 17 Jan 2025 00:53:49 +0100 Subject: [PATCH 08/15] Added api version and Configuration details at startup as well as more useful information --- lightrag/api/__init__.py | 1 + lightrag/api/lightrag_server.py | 87 ++++++++++++++++++++++++++++++++- 2 files changed, 86 insertions(+), 2 deletions(-) create mode 100644 lightrag/api/__init__.py diff --git a/lightrag/api/__init__.py b/lightrag/api/__init__.py new file mode 100644 index 00000000..3b7d00a1 --- /dev/null +++ b/lightrag/api/__init__.py @@ -0,0 +1 @@ +__api_version__ = "1.0.0" diff --git a/lightrag/api/lightrag_server.py b/lightrag/api/lightrag_server.py index 705a4673..c8425aea 100644 --- a/lightrag/api/lightrag_server.py +++ b/lightrag/api/lightrag_server.py @@ -7,6 +7,7 @@ from lightrag.llm import lollms_model_complete, lollms_embed from lightrag.llm import ollama_model_complete, ollama_embed from lightrag.llm import openai_complete_if_cache, openai_embedding from lightrag.llm import azure_openai_complete_if_cache, azure_openai_embedding +from lightrag.api import __api_version__ from lightrag.utils import EmbeddingFunc from typing import Optional, List, Union, Any @@ -14,7 +15,7 @@ from enum import Enum from pathlib import Path import shutil import aiofiles -from ascii_colors import trace_exception +from ascii_colors import trace_exception, ASCIIColors import os from fastapi import Depends, Security @@ -63,6 +64,85 @@ def get_env_value(env_key: str, default: Any, value_type: type = str) -> Any: except ValueError: return default +def display_splash_screen(args: argparse.Namespace) -> None: + """ + Display a colorful splash screen showing LightRAG server configuration + + Args: + args: Parsed command line arguments + """ + # Banner + ASCIIColors.cyan(f""" + ╔══════════════════════════════════════════════════════════════╗ + ā•‘ šŸš€ LightRAG Server v{__api_version__} ā•‘ + ā•‘ Fast, Lightweight RAG Server Implementation ā•‘ + ā•šā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā•ā• + """) + + # Server Configuration + ASCIIColors.magenta("\nšŸ“” Server Configuration:") + ASCIIColors.white(f" ā”œā”€ Host: ", end="") + ASCIIColors.yellow(f"{args.host}") + ASCIIColors.white(f" ā”œā”€ Port: ", end="") + ASCIIColors.yellow(f"{args.port}") + ASCIIColors.white(f" ā”œā”€ SSL Enabled: ", end="") + ASCIIColors.yellow(f"{args.ssl}") + if args.ssl: + ASCIIColors.white(f" ā”œā”€ SSL Cert: ", end="") + ASCIIColors.yellow(f"{args.ssl_certfile}") + ASCIIColors.white(f" └─ SSL Key: ", end="") + ASCIIColors.yellow(f"{args.ssl_keyfile}") + + # Directory Configuration + ASCIIColors.magenta("\nšŸ“‚ Directory Configuration:") + ASCIIColors.white(f" ā”œā”€ Working Directory: ", end="") + ASCIIColors.yellow(f"{args.working_dir}") + ASCIIColors.white(f" └─ Input Directory: ", end="") + ASCIIColors.yellow(f"{args.input_dir}") + + # LLM Configuration + ASCIIColors.magenta("\nšŸ¤– LLM Configuration:") + ASCIIColors.white(f" ā”œā”€ Binding: ", end="") + ASCIIColors.yellow(f"{args.llm_binding}") + 
ASCIIColors.white(f"    ā”œā”€ Host: ", end="")
+    ASCIIColors.yellow(f"{args.llm_binding_host}")
+    ASCIIColors.white(f"    └─ Model: ", end="")
+    ASCIIColors.yellow(f"{args.llm_model}")
+
+    # Embedding Configuration
+    ASCIIColors.magenta("\nšŸ“Š Embedding Configuration:")
+    ASCIIColors.white(f"    ā”œā”€ Binding: ", end="")
+    ASCIIColors.yellow(f"{args.embedding_binding}")
+    ASCIIColors.white(f"    ā”œā”€ Host: ", end="")
+    ASCIIColors.yellow(f"{args.embedding_binding_host}")
+    ASCIIColors.white(f"    ā”œā”€ Model: ", end="")
+    ASCIIColors.yellow(f"{args.embedding_model}")
+    ASCIIColors.white(f"    └─ Dimensions: ", end="")
+    ASCIIColors.yellow(f"{args.embedding_dim}")
+
+    # RAG Configuration
+    ASCIIColors.magenta("\nāš™ļø RAG Configuration:")
+    ASCIIColors.white(f"    ā”œā”€ Max Async Operations: ", end="")
+    ASCIIColors.yellow(f"{args.max_async}")
+    ASCIIColors.white(f"    ā”œā”€ Max Tokens: ", end="")
+    ASCIIColors.yellow(f"{args.max_tokens}")
+    ASCIIColors.white(f"    └─ Max Embed Tokens: ", end="")
+    ASCIIColors.yellow(f"{args.max_embed_tokens}")
+
+    # System Configuration
+    ASCIIColors.magenta("\nšŸ› ļø System Configuration:")
+    ASCIIColors.white(f"    ā”œā”€ Log Level: ", end="")
+    ASCIIColors.yellow(f"{args.log_level}")
+    ASCIIColors.white(f"    ā”œā”€ Timeout: ", end="")
+    ASCIIColors.yellow(f"{args.timeout if args.timeout else 'None (infinite)'}")
+    ASCIIColors.white(f"    └─ API Key: ", end="")
+    ASCIIColors.yellow("Set" if args.key else "Not Set")
+
+    # Server Status
+    ASCIIColors.green("\n✨ Server starting up...\n")
+
+
 def parse_args() -> argparse.Namespace:
     """
     Parse command line arguments with environment variable fallback
@@ -216,7 +296,10 @@ def parse_args() -> argparse.Namespace:
         help="Path to SSL private key file (required if --ssl is enabled)",
     )
 
-    return parser.parse_args()
+    args = parser.parse_args()
+    display_splash_screen(args)
+
+    return args
 
 
 class DocumentManager:

From 84f7f1504697d9c12b3fe43f255c5df8c2d992b6 Mon Sep 17 00:00:00 2001
From: Saifeddine ALOUI
Date: Fri, 17 Jan 2025 00:54:24 +0100
Subject: [PATCH 09/15] Added optional Azure configuration

---
 .env.example                    | 10 ++++++
 lightrag/api/lightrag_server.py | 64 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 74 insertions(+)

diff --git a/.env.example b/.env.example
index b7e9f7ab..8e9f76b0 100644
--- a/.env.example
+++ b/.env.example
@@ -35,3 +35,13 @@ LOG_LEVEL=INFO
 
 # Optional Timeout
 #TIMEOUT=30
+
+
+# Optional for Azure
+# AZURE_OPENAI_API_VERSION=2024-08-01-preview
+# AZURE_OPENAI_DEPLOYMENT=gpt-4o
+# AZURE_OPENAI_API_KEY=myapikey
+# AZURE_OPENAI_ENDPOINT=https://myendpoint.openai.azure.com
+
+# AZURE_EMBEDDING_DEPLOYMENT=text-embedding-3-large
+# AZURE_EMBEDDING_API_VERSION=2023-05-15
\ No newline at end of file
diff --git a/lightrag/api/lightrag_server.py b/lightrag/api/lightrag_server.py
index c8425aea..149f35fb 100644
--- a/lightrag/api/lightrag_server.py
+++ b/lightrag/api/lightrag_server.py
@@ -141,6 +141,70 @@ def display_splash_screen(args: argparse.Namespace) -> None:
     # Server Status
     ASCIIColors.green("\n✨ Server starting up...\n")
 
+    # Server Access Information
+    protocol = "https" if args.ssl else "http"
+    if args.host == "0.0.0.0":
+        ASCIIColors.magenta("\n🌐 Server Access Information:")
+        ASCIIColors.white("    ā”œā”€ Local Access: ", end="")
+        ASCIIColors.yellow(f"{protocol}://localhost:{args.port}")
+        ASCIIColors.white("    ā”œā”€ Remote Access: ", end="")
+        ASCIIColors.yellow(f"{protocol}://<your-ip>:{args.port}")
+        ASCIIColors.white("    ā”œā”€ API Documentation (local): ", end="")
+
ASCIIColors.yellow(f"{protocol}://localhost:{args.port}/docs") + ASCIIColors.white(" └─ Alternative Documentation (local): ", end="") + ASCIIColors.yellow(f"{protocol}://localhost:{args.port}/redoc") + + ASCIIColors.yellow("\nšŸ“ Note:") + ASCIIColors.white(""" Since the server is running on 0.0.0.0: + - Use 'localhost' or '127.0.0.1' for local access + - Use your machine's IP address for remote access + - To find your IP address: + • Windows: Run 'ipconfig' in terminal + • Linux/Mac: Run 'ifconfig' or 'ip addr' in terminal + """) + else: + base_url = f"{protocol}://{args.host}:{args.port}" + ASCIIColors.magenta("\n🌐 Server Access Information:") + ASCIIColors.white(" ā”œā”€ Base URL: ", end="") + ASCIIColors.yellow(f"{base_url}") + ASCIIColors.white(" ā”œā”€ API Documentation: ", end="") + ASCIIColors.yellow(f"{base_url}/docs") + ASCIIColors.white(" └─ Alternative Documentation: ", end="") + ASCIIColors.yellow(f"{base_url}/redoc") + + # Usage Examples + ASCIIColors.magenta("\nšŸ“š Quick Start Guide:") + ASCIIColors.cyan(""" + 1. Access the Swagger UI: + Open your browser and navigate to the API documentation URL above + + 2. API Authentication:""") + if args.key: + ASCIIColors.cyan(""" Add the following header to your requests: + X-API-Key: + """) + else: + ASCIIColors.cyan(" No authentication required\n") + + ASCIIColors.cyan(""" 3. Basic Operations: + - POST /upload_document: Upload new documents to RAG + - POST /query: Query your document collection + - GET /collections: List available collections + + 4. Monitor the server: + - Check server logs for detailed operation information + - Use healthcheck endpoint: GET /health + """) + + # Security Notice + if args.key: + ASCIIColors.yellow("\nāš ļø Security Notice:") + ASCIIColors.white(""" API Key authentication is enabled. + Make sure to include the X-API-Key header in all your requests. + """) + + ASCIIColors.green("Server is ready to accept connections! šŸš€\n") + def parse_args() -> argparse.Namespace: From c91b57196d4884912e3e6dd25f971598ac2fd90a Mon Sep 17 00:00:00 2001 From: Saifeddine ALOUI Date: Fri, 17 Jan 2025 01:18:28 +0100 Subject: [PATCH 10/15] Fixed docker access to ollama and lollms --- .env.example | 21 +++++++++++++++++---- docker-compose.yml | 5 +++-- 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/.env.example b/.env.example index 8e9f76b0..2055a7f8 100644 --- a/.env.example +++ b/.env.example @@ -6,14 +6,27 @@ PORT=9621 WORKING_DIR=/app/data/rag_storage INPUT_DIR=/app/data/inputs -# LLM Configuration +# LLM Configuration (Use valid host. For local services, you can use host.docker.internal) +# Ollama example LLM_BINDING=ollama -LLM_BINDING_HOST=http://localhost:11434 +LLM_BINDING_HOST=http://host.docker.internal:11434 LLM_MODEL=mistral-nemo:latest -# Embedding Configuration +# Lollms example +LLM_BINDING=lollms +LLM_BINDING_HOST=http://host.docker.internal:9600 +LLM_MODEL=mistral-nemo:latest + + +# Embedding Configuration (Use valid host. 
For local services, you can use host.docker.internal) +# Ollama example EMBEDDING_BINDING=ollama -EMBEDDING_BINDING_HOST=http://localhost:11434 +EMBEDDING_BINDING_HOST=http://host.docker.internal:11434 +EMBEDDING_MODEL=bge-m3:latest + +# Lollms example +EMBEDDING_BINDING=lollms +EMBEDDING_BINDING_HOST=http://host.docker.internal:9600 EMBEDDING_MODEL=bge-m3:latest # RAG Configuration diff --git a/docker-compose.yml b/docker-compose.yml index 5c851a60..ab9a4f97 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -15,7 +15,8 @@ services: restart: unless-stopped networks: - lightrag_net - + extra_hosts: + - "host.docker.internal:host-gateway" networks: lightrag_net: - driver: bridge + driver: bridge \ No newline at end of file From 5fe28d31e952743bb048678ec6d507f181d01cff Mon Sep 17 00:00:00 2001 From: Saifeddine ALOUI Date: Fri, 17 Jan 2025 01:36:16 +0100 Subject: [PATCH 11/15] Fixed linting --- .env.example | 2 +- docker-compose.yml | 2 +- docs/DockerDeployment.md | 4 +- docs/LightRagAPI.md | 2 +- lightrag/api/README.md | 2 +- lightrag/api/lightrag_server.py | 90 +++++++++++++++++---------------- lightrag/api/requirements.txt | 2 +- 7 files changed, 54 insertions(+), 50 deletions(-) diff --git a/.env.example b/.env.example index 2055a7f8..7d5c0fe5 100644 --- a/.env.example +++ b/.env.example @@ -57,4 +57,4 @@ LOG_LEVEL=INFO # AZURE_OPENAI_ENDPOINT=https://myendpoint.openai.azure.com # AZURE_EMBEDDING_DEPLOYMENT=text-embedding-3-large -# AZURE_EMBEDDING_API_VERSION=2023-05-15 \ No newline at end of file +# AZURE_EMBEDDING_API_VERSION=2023-05-15 diff --git a/docker-compose.yml b/docker-compose.yml index ab9a4f97..a4713279 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -19,4 +19,4 @@ services: - "host.docker.internal:host-gateway" networks: lightrag_net: - driver: bridge \ No newline at end of file + driver: bridge diff --git a/docs/DockerDeployment.md b/docs/DockerDeployment.md index fa9120c0..ab3fd854 100644 --- a/docs/DockerDeployment.md +++ b/docs/DockerDeployment.md @@ -5,7 +5,7 @@ A lightweight Knowledge Graph Retrieval-Augmented Generation system with multipl ## šŸš€ Installation ### Prerequisites -- Python 3.10+ +- Python 3.10+ - Git - Docker (optional for Docker deployment) @@ -171,4 +171,4 @@ pip install -r requirements.txt git pull .\venv\Scripts\Activate pip install -r requirements.txt -``` \ No newline at end of file +``` diff --git a/docs/LightRagAPI.md b/docs/LightRagAPI.md index 15b5185d..6c82be66 100644 --- a/docs/LightRagAPI.md +++ b/docs/LightRagAPI.md @@ -358,4 +358,4 @@ This intelligent caching mechanism: - Documents already in the database are not re-vectorized - Only new documents in the input directory will be processed - This optimization significantly reduces startup time for subsequent runs -- The working directory (`--working-dir`) stores the vectorized documents database \ No newline at end of file +- The working directory (`--working-dir`) stores the vectorized documents database diff --git a/lightrag/api/README.md b/lightrag/api/README.md index 15b5185d..6c82be66 100644 --- a/lightrag/api/README.md +++ b/lightrag/api/README.md @@ -358,4 +358,4 @@ This intelligent caching mechanism: - Documents already in the database are not re-vectorized - Only new documents in the input directory will be processed - This optimization significantly reduces startup time for subsequent runs -- The working directory (`--working-dir`) stores the vectorized documents database \ No newline at end of file +- The working directory (`--working-dir`) stores the 
vectorized documents database diff --git a/lightrag/api/lightrag_server.py b/lightrag/api/lightrag_server.py index 149f35fb..ae47bb47 100644 --- a/lightrag/api/lightrag_server.py +++ b/lightrag/api/lightrag_server.py @@ -38,36 +38,38 @@ def get_default_host(binding_type: str) -> str: binding_type, "http://localhost:11434" ) # fallback to ollama if unknown + from dotenv import load_dotenv -import os + def get_env_value(env_key: str, default: Any, value_type: type = str) -> Any: """ Get value from environment variable with type conversion - + Args: env_key (str): Environment variable key default (Any): Default value if env variable is not set value_type (type): Type to convert the value to - + Returns: Any: Converted value from environment or default """ value = os.getenv(env_key) if value is None: return default - + if value_type == bool: - return value.lower() in ('true', '1', 'yes') + return value.lower() in ("true", "1", "yes") try: return value_type(value) except ValueError: return default + def display_splash_screen(args: argparse.Namespace) -> None: """ Display a colorful splash screen showing LightRAG server configuration - + Args: args: Parsed command line arguments """ @@ -81,61 +83,61 @@ def display_splash_screen(args: argparse.Namespace) -> None: # Server Configuration ASCIIColors.magenta("\nšŸ“” Server Configuration:") - ASCIIColors.white(f" ā”œā”€ Host: ", end="") + ASCIIColors.white(" ā”œā”€ Host: ", end="") ASCIIColors.yellow(f"{args.host}") - ASCIIColors.white(f" ā”œā”€ Port: ", end="") + ASCIIColors.white(" ā”œā”€ Port: ", end="") ASCIIColors.yellow(f"{args.port}") - ASCIIColors.white(f" ā”œā”€ SSL Enabled: ", end="") + ASCIIColors.white(" ā”œā”€ SSL Enabled: ", end="") ASCIIColors.yellow(f"{args.ssl}") if args.ssl: - ASCIIColors.white(f" ā”œā”€ SSL Cert: ", end="") + ASCIIColors.white(" ā”œā”€ SSL Cert: ", end="") ASCIIColors.yellow(f"{args.ssl_certfile}") - ASCIIColors.white(f" └─ SSL Key: ", end="") + ASCIIColors.white(" └─ SSL Key: ", end="") ASCIIColors.yellow(f"{args.ssl_keyfile}") # Directory Configuration ASCIIColors.magenta("\nšŸ“‚ Directory Configuration:") - ASCIIColors.white(f" ā”œā”€ Working Directory: ", end="") + ASCIIColors.white(" ā”œā”€ Working Directory: ", end="") ASCIIColors.yellow(f"{args.working_dir}") - ASCIIColors.white(f" └─ Input Directory: ", end="") + ASCIIColors.white(" └─ Input Directory: ", end="") ASCIIColors.yellow(f"{args.input_dir}") # LLM Configuration ASCIIColors.magenta("\nšŸ¤– LLM Configuration:") - ASCIIColors.white(f" ā”œā”€ Binding: ", end="") + ASCIIColors.white(" ā”œā”€ Binding: ", end="") ASCIIColors.yellow(f"{args.llm_binding}") - ASCIIColors.white(f" ā”œā”€ Host: ", end="") + ASCIIColors.white(" ā”œā”€ Host: ", end="") ASCIIColors.yellow(f"{args.llm_binding_host}") - ASCIIColors.white(f" └─ Model: ", end="") + ASCIIColors.white(" └─ Model: ", end="") ASCIIColors.yellow(f"{args.llm_model}") # Embedding Configuration ASCIIColors.magenta("\nšŸ“Š Embedding Configuration:") - ASCIIColors.white(f" ā”œā”€ Binding: ", end="") + ASCIIColors.white(" ā”œā”€ Binding: ", end="") ASCIIColors.yellow(f"{args.embedding_binding}") - ASCIIColors.white(f" ā”œā”€ Host: ", end="") + ASCIIColors.white(" ā”œā”€ Host: ", end="") ASCIIColors.yellow(f"{args.embedding_binding_host}") - ASCIIColors.white(f" ā”œā”€ Model: ", end="") + ASCIIColors.white(" ā”œā”€ Model: ", end="") ASCIIColors.yellow(f"{args.embedding_model}") - ASCIIColors.white(f" └─ Dimensions: ", end="") + ASCIIColors.white(" └─ Dimensions: ", end="") 
ASCIIColors.yellow(f"{args.embedding_dim}") # RAG Configuration ASCIIColors.magenta("\nāš™ļø RAG Configuration:") - ASCIIColors.white(f" ā”œā”€ Max Async Operations: ", end="") + ASCIIColors.white(" ā”œā”€ Max Async Operations: ", end="") ASCIIColors.yellow(f"{args.max_async}") - ASCIIColors.white(f" ā”œā”€ Max Tokens: ", end="") + ASCIIColors.white(" ā”œā”€ Max Tokens: ", end="") ASCIIColors.yellow(f"{args.max_tokens}") - ASCIIColors.white(f" └─ Max Embed Tokens: ", end="") + ASCIIColors.white(" └─ Max Embed Tokens: ", end="") ASCIIColors.yellow(f"{args.max_embed_tokens}") # System Configuration ASCIIColors.magenta("\nšŸ› ļø System Configuration:") - ASCIIColors.white(f" ā”œā”€ Log Level: ", end="") + ASCIIColors.white(" ā”œā”€ Log Level: ", end="") ASCIIColors.yellow(f"{args.log_level}") - ASCIIColors.white(f" ā”œā”€ Timeout: ", end="") + ASCIIColors.white(" ā”œā”€ Timeout: ", end="") ASCIIColors.yellow(f"{args.timeout if args.timeout else 'None (infinite)'}") - ASCIIColors.white(f" └─ API Key: ", end="") + ASCIIColors.white(" └─ API Key: ", end="") ASCIIColors.yellow("Set" if args.key else "Not Set") # Server Status @@ -153,7 +155,7 @@ def display_splash_screen(args: argparse.Namespace) -> None: ASCIIColors.yellow(f"{protocol}://localhost:{args.port}/docs") ASCIIColors.white(" └─ Alternative Documentation (local): ", end="") ASCIIColors.yellow(f"{protocol}://localhost:{args.port}/redoc") - + ASCIIColors.yellow("\nšŸ“ Note:") ASCIIColors.white(""" Since the server is running on 0.0.0.0: - Use 'localhost' or '127.0.0.1' for local access @@ -174,10 +176,10 @@ def display_splash_screen(args: argparse.Namespace) -> None: # Usage Examples ASCIIColors.magenta("\nšŸ“š Quick Start Guide:") - ASCIIColors.cyan(""" + ASCIIColors.cyan(""" 1. Access the Swagger UI: Open your browser and navigate to the API documentation URL above - + 2. API Authentication:""") if args.key: ASCIIColors.cyan(""" Add the following header to your requests: @@ -185,12 +187,12 @@ def display_splash_screen(args: argparse.Namespace) -> None: """) else: ASCIIColors.cyan(" No authentication required\n") - + ASCIIColors.cyan(""" 3. Basic Operations: - POST /upload_document: Upload new documents to RAG - POST /query: Query your document collection - GET /collections: List available collections - + 4. Monitor the server: - Check server logs for detailed operation information - Use healthcheck endpoint: GET /health @@ -202,21 +204,20 @@ def display_splash_screen(args: argparse.Namespace) -> None: ASCIIColors.white(""" API Key authentication is enabled. Make sure to include the X-API-Key header in all your requests. """) - - ASCIIColors.green("Server is ready to accept connections! šŸš€\n") + ASCIIColors.green("Server is ready to accept connections! 
šŸš€\n") def parse_args() -> argparse.Namespace: """ Parse command line arguments with environment variable fallback - + Returns: argparse.Namespace: Parsed arguments """ # Load environment variables from .env file load_dotenv() - + parser = argparse.ArgumentParser( description="LightRAG FastAPI Server with separate working and input directories" ) @@ -240,13 +241,13 @@ def parse_args() -> argparse.Namespace: parser.add_argument( "--host", default=get_env_value("HOST", "0.0.0.0"), - help="Server host (default: from env or 0.0.0.0)" + help="Server host (default: from env or 0.0.0.0)", ) parser.add_argument( "--port", type=int, default=get_env_value("PORT", 9621, int), - help="Server port (default: from env or 9621)" + help="Server port (default: from env or 9621)", ) # Directory configuration @@ -262,7 +263,9 @@ def parse_args() -> argparse.Namespace: ) # LLM Model configuration - default_llm_host = get_env_value("LLM_BINDING_HOST", get_default_host(temp_args.llm_binding)) + default_llm_host = get_env_value( + "LLM_BINDING_HOST", get_default_host(temp_args.llm_binding) + ) parser.add_argument( "--llm-binding-host", default=default_llm_host, @@ -276,7 +279,9 @@ def parse_args() -> argparse.Namespace: ) # Embedding model configuration - default_embedding_host = get_env_value("EMBEDDING_BINDING_HOST", get_default_host(temp_args.embedding_binding)) + default_embedding_host = get_env_value( + "EMBEDDING_BINDING_HOST", get_default_host(temp_args.embedding_binding) + ) parser.add_argument( "--embedding-binding-host", default=default_embedding_host, @@ -306,7 +311,7 @@ def parse_args() -> argparse.Namespace: "--max-async", type=int, default=get_env_value("MAX_ASYNC", 4, int), - help="Maximum async operations (default: from env or 4)" + help="Maximum async operations (default: from env or 4)", ) parser.add_argument( "--max-tokens", @@ -347,7 +352,7 @@ def parse_args() -> argparse.Namespace: "--ssl", action="store_true", default=get_env_value("SSL", False, bool), - help="Enable HTTPS (default: from env or False)" + help="Enable HTTPS (default: from env or False)", ) parser.add_argument( "--ssl-certfile", @@ -628,8 +633,7 @@ def create_app(args): logging.info(f"Successfully indexed file: {file_path}") else: logging.warning(f"No content extracted from file: {file_path}") - - + @asynccontextmanager async def lifespan(app: FastAPI): """Lifespan context manager for startup and shutdown events""" diff --git a/lightrag/api/requirements.txt b/lightrag/api/requirements.txt index 0d5b82f6..f68b0e19 100644 --- a/lightrag/api/requirements.txt +++ b/lightrag/api/requirements.txt @@ -9,6 +9,7 @@ ollama openai pipmaster python-dotenv +python-dotenv python-multipart tenacity tiktoken @@ -16,4 +17,3 @@ torch tqdm transformers uvicorn -python-dotenv \ No newline at end of file From 52ca5ea6aa5f59dedd4de7931f8a32a040c16942 Mon Sep 17 00:00:00 2001 From: Saifeddine ALOUI Date: Fri, 17 Jan 2025 01:37:12 +0100 Subject: [PATCH 12/15] removed repeated dependency --- lightrag/api/requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/lightrag/api/requirements.txt b/lightrag/api/requirements.txt index f68b0e19..9154809c 100644 --- a/lightrag/api/requirements.txt +++ b/lightrag/api/requirements.txt @@ -9,7 +9,6 @@ ollama openai pipmaster python-dotenv -python-dotenv python-multipart tenacity tiktoken From 58f1058198075cac0bc18b3dcf810902d7397469 Mon Sep 17 00:00:00 2001 From: Saifeddine ALOUI Date: Fri, 17 Jan 2025 02:03:02 +0100 Subject: [PATCH 13/15] added some explanation to document --- docs/DockerDeployment.md 
| 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/docs/DockerDeployment.md b/docs/DockerDeployment.md
index ab3fd854..b728add6 100644
--- a/docs/DockerDeployment.md
+++ b/docs/DockerDeployment.md
@@ -103,13 +103,15 @@ data/
 1. Using with Ollama:
 ```env
 LLM_BINDING=ollama
-LLM_BINDING_HOST=http://localhost:11434
+LLM_BINDING_HOST=http://host.docker.internal:11434
 LLM_MODEL=mistral
 
 EMBEDDING_BINDING=ollama
-EMBEDDING_BINDING_HOST=http://localhost:11434
+EMBEDDING_BINDING_HOST=http://host.docker.internal:11434
 EMBEDDING_MODEL=bge-m3
 ```
 
+Note: a container cannot reach services on the host through `localhost`. Use `host.docker.internal` instead; it is mapped to the host gateway by the `extra_hosts` entry in the Docker Compose file, which lets the container access services running on your machine.
+
 2. Using with OpenAI:
 ```env
 LLM_BINDING=openai

From 5bb9d9f0c0c3c49f49e8413ba2df27b1b3662366 Mon Sep 17 00:00:00 2001
From: Saifeddine ALOUI
Date: Fri, 17 Jan 2025 02:31:23 +0100
Subject: [PATCH 14/15] Removed unnecessary files from docker

---
 Dockerfile | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/Dockerfile b/Dockerfile
index eab315aa..08276099 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -23,8 +23,11 @@ WORKDIR /app
 
 # Copy only necessary files from builder
 COPY --from=builder /root/.local /root/.local
-COPY . .
+COPY ./lightrag ./lightrag
+COPY setup.py .
+COPY .env .
 
+RUN pip install .
 # Make sure scripts in .local are usable
 ENV PATH=/root/.local/bin:$PATH

From 6813742a8606c330101ac7465a9671647404be4a Mon Sep 17 00:00:00 2001
From: Saifeddine ALOUI
Date: Fri, 17 Jan 2025 02:34:29 +0100
Subject: [PATCH 15/15] Fixed some linting issues

---
 lightrag/api/lightrag_server.py | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/lightrag/api/lightrag_server.py b/lightrag/api/lightrag_server.py
index ae47bb47..b898277a 100644
--- a/lightrag/api/lightrag_server.py
+++ b/lightrag/api/lightrag_server.py
@@ -26,6 +26,8 @@ from contextlib import asynccontextmanager
 from starlette.status import HTTP_403_FORBIDDEN
 import pipmaster as pm
 
+from dotenv import load_dotenv
+
 
 def get_default_host(binding_type: str) -> str:
     default_hosts = {
@@ -39,9 +41,6 @@ def get_default_host(binding_type: str) -> str:
     ) # fallback to ollama if unknown
 
 
-from dotenv import load_dotenv
-
-
 def get_env_value(env_key: str, default: Any, value_type: type = str) -> Any:
     """
     Get value from environment variable with type conversion
@@ -58,7 +57,7 @@ def get_env_value(env_key: str, default: Any, value_type: type = str) -> Any:
     if value is None:
         return default
 
-    if value_type == bool:
+    if value_type is bool:
         return value.lower() in ("true", "1", "yes")
     try:
         return value_type(value)
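
A note on the final hunk above: the lint fix has to test the type object by identity, `value_type is bool`, because `isinstance(value_type, bool)` asks whether the type object itself is a bool instance and is therefore always false. With that branch dead, a value such as `SSL=false` would be converted with `bool("false")`, which is `True` for any non-empty string. Below is a minimal, self-contained sketch of the corrected helper with illustrative values; the assertions are examples for this note, not part of the patch:

```python
import os
from typing import Any


def get_env_value(env_key: str, default: Any, value_type: type = str) -> Any:
    """Return the environment value converted to value_type, or default if unset/invalid."""
    value = os.getenv(env_key)
    if value is None:
        return default
    if value_type is bool:  # identity test on the type object; isinstance() would always be False here
        return value.lower() in ("true", "1", "yes")
    try:
        return value_type(value)
    except ValueError:
        return default


# Demonstration with hypothetical values:
os.environ["MAX_ASYNC"] = "8"
os.environ["SSL"] = "false"
assert get_env_value("MAX_ASYNC", 4, int) == 8       # env var overrides the hard-coded default
assert get_env_value("SSL", False, bool) is False    # only "true"/"1"/"yes" parse as True
assert get_env_value("TIMEOUT", None, int) is None   # unset (assuming TIMEOUT is not in your env)
```

Because every argparse default in the series is seeded with `get_env_value(...)`, command-line flags still override environment variables, which in turn override the hard-coded defaults, matching the priority order documented above.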
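For context on the startup-hook change earlier in the series (the move from `@app.on_event("startup")` to a `lifespan` context manager): FastAPI deprecated the `on_event` hooks in favor of a single async context manager that wraps the application's lifetime. The sketch below shows only the general pattern; the print statements stand in for LightRAG's directory-scanning and indexing logic, and the `FastAPI(lifespan=...)` wiring is the standard mechanism even though that line falls outside the hunks shown above:

```python
from contextlib import asynccontextmanager

from fastapi import FastAPI


@asynccontextmanager
async def lifespan(app: FastAPI):
    # Startup logic: runs once, before the first request is served.
    # In LightRAG this is where the input directory is scanned and indexed.
    print("Indexing input documents...")
    yield  # the application handles requests while suspended here
    # Shutdown logic: runs once, after the server stops accepting requests.
    print("Cleaning up resources...")


app = FastAPI(title="LightRAG API", lifespan=lifespan)
```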
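Finally, since the series wires API-key protection through `--key` and `LIGHTRAG_API_KEY`, here is a hedged sketch of querying a protected server from Python. The `X-API-Key` header name and the `response` field are taken from the splash-screen text and the `QueryResponse` handler above; the exact request schema is an assumption and should be verified against the server's generated `/docs` page:

```python
import requests  # third-party HTTP client: pip install requests

BASE_URL = "http://localhost:9621"  # default host/port from the tables above
API_KEY = "your-api-key"  # placeholder; must match LIGHTRAG_API_KEY on the server

resp = requests.post(
    f"{BASE_URL}/query",
    headers={"X-API-Key": API_KEY},
    # "query"/"mode" follow the documented query endpoint; confirm the schema in /docs
    json={"query": "Summarize the indexed documents", "mode": "hybrid"},
    timeout=60,
)
resp.raise_for_status()
print(resp.json()["response"])  # QueryResponse(response=...) per the handler above
```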