Merge pull request #1224 from danielaskdd/main
Remove the comments at the end of the environment variable lines in the .env file
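Many dotenv-style loaders keep everything after `=` as part of the value, so a trailing `# comment` can silently corrupt a setting. A minimal sketch of the failure mode this change guards against (illustrative only, not code from this PR):

```python
import os

# Simulate what a naive .env loader yields when a value line carries a
# trailing comment: the comment text becomes part of the value.
os.environ["WORKERS"] = "2  # Number of worker processes"

try:
    workers = int(os.environ["WORKERS"])
except ValueError as exc:
    print(f"startup would fail: {exc}")  # int() cannot parse the comment

# With the comment moved to its own line, parsing succeeds.
os.environ["WORKERS"] = "2"
print(int(os.environ["WORKERS"]))  # -> 2
```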
env.example (61 changed lines)
@@ -3,9 +3,11 @@
 ### Server Configuration
 # HOST=0.0.0.0
 # PORT=9621
-# WORKERS=1
-# NAMESPACE_PREFIX=lightrag # separating data from difference Lightrag instances
-# MAX_GRAPH_NODES=1000 # Max nodes return from grap retrieval
+# WORKERS=2
+### Separating data from different LightRAG instances
+# NAMESPACE_PREFIX=lightrag
+### Max nodes returned from graph retrieval
+# MAX_GRAPH_NODES=1000
 # CORS_ORIGINS=http://localhost:3000,http://localhost:8080

 ### Optional SSL Configuration
@@ -13,7 +15,7 @@
 # SSL_CERTFILE=/path/to/cert.pem
 # SSL_KEYFILE=/path/to/key.pem

-### Directory Configuration
+### Directory Configuration (defaults to current working directory)
 # WORKING_DIR=<absolute_path_for_working_dir>
 # INPUT_DIR=<absolute_path_for_doc_input_dir>

@@ -23,9 +25,10 @@
 ### Logging level
 # LOG_LEVEL=INFO
 # VERBOSE=False
-# LOG_DIR=/path/to/log/directory # Log file directory path, defaults to current working directory
-# LOG_MAX_BYTES=10485760 # Log file max size in bytes, defaults to 10MB
-# LOG_BACKUP_COUNT=5 # Number of backup files to keep, defaults to 5
+# LOG_MAX_BYTES=10485760
+# LOG_BACKUP_COUNT=5
+### Logfile location (defaults to current working directory)
+# LOG_DIR=/path/to/log/directory

 ### Settings for RAG query
 # HISTORY_TURNS=3
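For reference, LOG_MAX_BYTES and LOG_BACKUP_COUNT in the hunk above map directly onto the size and backup parameters of Python's standard rotating log handler; a minimal sketch (illustrative, not LightRAG's actual logging setup):

```python
import logging
from logging.handlers import RotatingFileHandler

# A rotating logfile driven by LOG_* style settings: rotate at ~10 MB
# and keep five backups, matching the defaults shown above.
handler = RotatingFileHandler(
    "lightrag.log",        # would live under LOG_DIR in practice
    maxBytes=10_485_760,   # LOG_MAX_BYTES
    backupCount=5,         # LOG_BACKUP_COUNT
)
logging.getLogger("lightrag").addHandler(handler)
```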
@@ -36,28 +39,33 @@
 # MAX_TOKEN_ENTITY_DESC=4000

 ### Settings for document indexing
-ENABLE_LLM_CACHE_FOR_EXTRACT=true # Enable LLM cache for entity extraction
+ENABLE_LLM_CACHE_FOR_EXTRACT=true
 SUMMARY_LANGUAGE=English
 # CHUNK_SIZE=1200
 # CHUNK_OVERLAP_SIZE=100
-# MAX_TOKEN_SUMMARY=500 # Max tokens for entity or relations summary
-# MAX_PARALLEL_INSERT=2 # Number of parallel processing documents in one patch
+### Max tokens for entity or relation summaries
+# MAX_TOKEN_SUMMARY=500
+### Number of documents processed in parallel in one batch
+# MAX_PARALLEL_INSERT=2

-# EMBEDDING_BATCH_NUM=32 # num of chunks send to Embedding in one request
-# EMBEDDING_FUNC_MAX_ASYNC=16 # Max concurrency requests for Embedding
+### Number of chunks sent to the embedding service in a single request
+# EMBEDDING_BATCH_NUM=32
+### Max concurrent requests to the embedding service
+# EMBEDDING_FUNC_MAX_ASYNC=16
 # MAX_EMBED_TOKENS=8192

-### LLM Configuration (Use valid host. For local services installed with docker, you can use host.docker.internal)
+### LLM Configuration
 TIMEOUT=150 # Timeout in seconds for LLM, None for infinite timeout
 TEMPERATURE=0.5
 MAX_ASYNC=4 # Max concurrent requests of LLM
 MAX_TOKENS=32768 # Max tokens sent to LLM (less than context size of the model)

+### Ollama example (for local services installed with Docker, you can use host.docker.internal as the host)
 LLM_BINDING=ollama
 LLM_MODEL=mistral-nemo:latest
 LLM_BINDING_API_KEY=your_api_key
-### Ollama example
 LLM_BINDING_HOST=http://localhost:11434

 ### OpenAI alike example
 # LLM_BINDING=openai
 # LLM_MODEL=gpt-4o
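Settings such as MAX_ASYNC and EMBEDDING_FUNC_MAX_ASYNC in the hunk above are concurrency caps. A minimal sketch of how such a cap is typically enforced with an asyncio semaphore (hypothetical names, not LightRAG's internals):

```python
import asyncio
import os

MAX_ASYNC = int(os.getenv("MAX_ASYNC", "4"))

async def main() -> None:
    # A MAX_ASYNC-style cap: at most MAX_ASYNC requests in flight at once.
    sem = asyncio.Semaphore(MAX_ASYNC)

    async def call_llm(prompt: str) -> str:
        async with sem:                # waits while MAX_ASYNC calls are active
            await asyncio.sleep(0.1)   # stand-in for a real LLM request
            return f"answer to: {prompt}"

    answers = await asyncio.gather(*(call_llm(f"q{i}") for i in range(10)))
    print(len(answers))               # -> 10, processed 4 at a time

asyncio.run(main())
```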
@@ -103,9 +111,10 @@ ORACLE_DSN=localhost:1521/XEPDB1
 ORACLE_USER=your_username
 ORACLE_PASSWORD='your_password'
 ORACLE_CONFIG_DIR=/path/to/oracle/config
-#ORACLE_WALLET_LOCATION=/path/to/wallet # optional
-#ORACLE_WALLET_PASSWORD='your_password' # optional
-#ORACLE_WORKSPACE=default # separating all data from difference Lightrag instances(deprecated, use NAMESPACE_PREFIX in future)
+#ORACLE_WALLET_LOCATION=/path/to/wallet
+#ORACLE_WALLET_PASSWORD='your_password'
+### Separating all data from different LightRAG instances (deprecated; use NAMESPACE_PREFIX in the future)
+#ORACLE_WORKSPACE=default

 ### TiDB Configuration
 TIDB_HOST=localhost
@@ -113,7 +122,8 @@ TIDB_PORT=4000
 TIDB_USER=your_username
 TIDB_PASSWORD='your_password'
 TIDB_DATABASE=your_database
-#TIDB_WORKSPACE=default # separating all data from difference Lightrag instances(deprecated, use NAMESPACE_PREFIX in future)
+### Separating all data from different LightRAG instances (deprecated; use NAMESPACE_PREFIX in the future)
+#TIDB_WORKSPACE=default

 ### PostgreSQL Configuration
 POSTGRES_HOST=localhost
@@ -121,7 +131,8 @@ POSTGRES_PORT=5432
 POSTGRES_USER=your_username
 POSTGRES_PASSWORD='your_password'
 POSTGRES_DATABASE=your_database
-#POSTGRES_WORKSPACE=default # separating all data from difference Lightrag instances(deprecated, use NAMESPACE_PREFIX in future)
+### Separating all data from different LightRAG instances (deprecated; use NAMESPACE_PREFIX in the future)
+#POSTGRES_WORKSPACE=default

 ### Independent AGM Configuration (not for AGM embedded in PostgreSQL)
 AGE_POSTGRES_DB=
@@ -130,8 +141,9 @@ AGE_POSTGRES_PASSWORD=
 AGE_POSTGRES_HOST=
 # AGE_POSTGRES_PORT=8529

+### Separating all data from different LightRAG instances (deprecated; use NAMESPACE_PREFIX in the future)
 # AGE Graph Name (applies to PostgreSQL and independent AGM)
-# AGE_GRAPH_NAME=lightrag # deprecated, use NAME_SPACE_PREFIX instead
+# AGE_GRAPH_NAME=lightrag

 ### Neo4j Configuration
 NEO4J_URI=neo4j+s://xxxxxxxx.databases.neo4j.io
@@ -141,7 +153,8 @@ NEO4J_PASSWORD='your_password'
 ### MongoDB Configuration
 MONGO_URI=mongodb://root:root@localhost:27017/
 MONGO_DATABASE=LightRAG
-MONGODB_GRAPH=false # deprecated (keep for backward compatibility)
+### Separating all data from different LightRAG instances (deprecated; use NAMESPACE_PREFIX in the future)
+# MONGODB_GRAPH=false

 ### Milvus Configuration
 MILVUS_URI=http://localhost:19530
@@ -158,9 +171,9 @@ QDRANT_URL=http://localhost:16333
 REDIS_URI=redis://localhost:6379

 ### For JWT Auth
-AUTH_ACCOUNTS='admin:admin123,user1:pass456' # username:password,username:password
-TOKEN_SECRET=Your-Key-For-LightRAG-API-Server # JWT key
-TOKEN_EXPIRE_HOURS=4 # expire duration
+# AUTH_ACCOUNTS='admin:admin123,user1:pass456'
+# TOKEN_SECRET=Your-Key-For-LightRAG-API-Server
+# TOKEN_EXPIRE_HOURS=4

 ### API-Key to access LightRAG Server API
 # LIGHTRAG_API_KEY=your-secure-api-key-here
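The AUTH_ACCOUNTS value above packs several `username:password` pairs into a single variable. A hypothetical parser for that format (not the server's actual code):

```python
def parse_auth_accounts(raw: str) -> dict[str, str]:
    # Split comma-separated "username:password" pairs into a dict.
    accounts: dict[str, str] = {}
    for pair in raw.split(","):
        username, _, password = pair.strip().partition(":")
        if username and password:
            accounts[username] = password
    return accounts

print(parse_auth_accounts("admin:admin123,user1:pass456"))
# -> {'admin': 'admin123', 'user1': 'pass456'}
```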
@@ -52,7 +52,8 @@ LLM_BINDING=openai
 LLM_MODEL=gpt-4o
 LLM_BINDING_HOST=https://api.openai.com/v1
 LLM_BINDING_API_KEY=your_api_key
-MAX_TOKENS=32768 # Max tokens sent to the LLM (less than the model's context size)
+### Max tokens sent to the LLM (less than the model's context size)
+MAX_TOKENS=32768

 EMBEDDING_BINDING=ollama
 EMBEDDING_BINDING_HOST=http://localhost:11434
@@ -68,7 +69,8 @@ LLM_BINDING=ollama
 LLM_MODEL=mistral-nemo:latest
 LLM_BINDING_HOST=http://localhost:11434
 # LLM_BINDING_API_KEY=your_api_key
-MAX_TOKENS=8192 # Max tokens sent to the LLM (based on your Ollama server capacity)
+### Max tokens sent to the LLM (based on your Ollama server capacity)
+MAX_TOKENS=8192

 EMBEDDING_BINDING=ollama
 EMBEDDING_BINDING_HOST=http://localhost:11434
@@ -117,9 +119,12 @@ The LightRAG Server can run in `Gunicorn + Uvicorn` preload mode. Guni
 Although the LightRAG Server uses a single worker process for the document indexing pipeline, Uvicorn's async task support lets multiple files be processed in parallel. The bottleneck of document indexing speed mainly lies with the LLM. If your LLM supports high concurrency, you can accelerate document indexing by increasing the concurrency level of the LLM. Below are several environment variables related to concurrent processing, along with their default values:

 ```
-WORKERS=2 # Number of worker processes, no greater than (2 x number of cores) + 1
-MAX_PARALLEL_INSERT=2 # Number of files processed in parallel in one batch
-MAX_ASYNC=4 # Max concurrent LLM requests
+### Number of worker processes, no greater than (2 x number of cores) + 1
+WORKERS=2
+### Number of files processed in parallel in one batch
+MAX_PARALLEL_INSERT=2
+# Max concurrent LLM requests
+MAX_ASYNC=4
 ```

 ### Install LightRAG as a Linux Service
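The `(2 x number of cores) + 1` bound in the hunk above is the usual Gunicorn worker heuristic; computing it for the current machine (a throwaway sketch, not part of the project):

```python
import os

# Upper bound for WORKERS suggested by the (2 x cores) + 1 guideline.
cores = os.cpu_count() or 1
print(f"WORKERS should not exceed {2 * cores + 1}")
```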
@@ -201,9 +206,9 @@ The LightRAG API Server uses JWT authentication based on the HS256 algorithm. To enable secur

 ```bash
 # JWT auth
-AUTH_ACCOUNTS='admin:admin123,user1:pass456' # login accounts and passwords
-TOKEN_SECRET=your-key # JWT secret
-TOKEN_EXPIRE_HOURS=4 # expiry duration
+AUTH_ACCOUNTS='admin:admin123,user1:pass456'
+TOKEN_SECRET='your-key'
+TOKEN_EXPIRE_HOURS=4
 ```

 > Currently only one administrator account and password can be configured. A full account system has not yet been developed or implemented.
@@ -237,8 +242,11 @@ LLM_BINDING=azure_openai
 LLM_BINDING_HOST=your-azure-endpoint
 LLM_MODEL=your-model-deployment-name
 LLM_BINDING_API_KEY=your-azure-api-key
-AZURE_OPENAI_API_VERSION=2024-08-01-preview # optional, defaults to the latest version
-EMBEDDING_BINDING=azure_openai # if using Azure OpenAI for embeddings
+### The API version is optional and defaults to the latest version
+AZURE_OPENAI_API_VERSION=2024-08-01-preview
+
+### If using Azure OpenAI for embeddings
+EMBEDDING_BINDING=azure_openai
 EMBEDDING_MODEL=your-embedding-deployment-name
 ```

@@ -361,7 +369,47 @@ LIGHTRAG_DOC_STATUS_STORAGE=PGDocStatusStorage
 | --embedding-binding | ollama | Embedding binding type (lollms, ollama, openai, azure_openai) |
 | auto-scan-at-startup | - | Scan the input directory for new files and start indexing |

-### Example Usage
+### .env File Example

+```bash
+### Server Configuration
+# HOST=0.0.0.0
+PORT=9621
+WORKERS=2
+
+### Settings for document indexing
+ENABLE_LLM_CACHE_FOR_EXTRACT=true
+SUMMARY_LANGUAGE=Chinese
+MAX_PARALLEL_INSERT=2
+
+### LLM Configuration (Use valid host. For local services installed with docker, you can use host.docker.internal)
+TIMEOUT=200
+TEMPERATURE=0.0
+MAX_ASYNC=4
+MAX_TOKENS=32768
+
+LLM_BINDING=openai
+LLM_MODEL=gpt-4o-mini
+LLM_BINDING_HOST=https://api.openai.com/v1
+LLM_BINDING_API_KEY=your-api-key
+
+### Embedding Configuration (Use valid host. For local services installed with docker, you can use host.docker.internal)
+EMBEDDING_MODEL=bge-m3:latest
+EMBEDDING_DIM=1024
+EMBEDDING_BINDING=ollama
+EMBEDDING_BINDING_HOST=http://localhost:11434
+
+### For JWT Auth
+# AUTH_ACCOUNTS='admin:admin123,user1:pass456'
+# TOKEN_SECRET=your-key-for-LightRAG-API-Server-xxx
+# TOKEN_EXPIRE_HOURS=48
+
+# LIGHTRAG_API_KEY=your-secure-api-key-here-123
+# WHITELIST_PATHS=/api/*
+# WHITELIST_PATHS=/health,/api/*
+```

 #### Running the LightRAG server with Ollama's default local server as LLM and embedding backends

@@ -52,7 +52,8 @@ LLM_BINDING=openai
 LLM_MODEL=gpt-4o
 LLM_BINDING_HOST=https://api.openai.com/v1
 LLM_BINDING_API_KEY=your_api_key
-MAX_TOKENS=32768 # Max tokens send to LLM (less than model context size)
+### Max tokens sent to LLM (less than the model context size)
+MAX_TOKENS=32768

 EMBEDDING_BINDING=ollama
 EMBEDDING_BINDING_HOST=http://localhost:11434
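MAX_TOKENS here is a send budget rather than a generation limit: the prompt must fit inside the model's context window. A rough sketch of enforcing such a budget (assumes the third-party `tiktoken` package and the cl100k_base encoding; LightRAG may count tokens differently):

```python
import tiktoken

enc = tiktoken.get_encoding("cl100k_base")

def clip_to_budget(text: str, max_tokens: int = 32768) -> str:
    # Truncate the text so it stays within a MAX_TOKENS-style budget.
    tokens = enc.encode(text)
    return enc.decode(tokens[:max_tokens])

print(len(enc.encode(clip_to_budget("word " * 100_000))))  # <= 32768
```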
@@ -68,7 +69,8 @@ LLM_BINDING=ollama
 LLM_MODEL=mistral-nemo:latest
 LLM_BINDING_HOST=http://localhost:11434
 # LLM_BINDING_API_KEY=your_api_key
-MAX_TOKENS=8192 # Max tokens send to LLM (base on your Ollama Server capacity)
+### Max tokens sent to LLM (based on your Ollama server capacity)
+MAX_TOKENS=8192

 EMBEDDING_BINDING=ollama
 EMBEDDING_BINDING_HOST=http://localhost:11434
@@ -121,9 +123,12 @@ The LightRAG Server can operate in the `Gunicorn + Uvicorn` preload mode. Gunico
 Though the LightRAG Server uses one worker to process the document indexing pipeline, multiple files can be processed in parallel thanks to Uvicorn's async task support. The bottleneck of document indexing speed mainly lies with the LLM. If your LLM supports high concurrency, you can accelerate document indexing by increasing the concurrency level of the LLM. Below are several environment variables related to concurrent processing, along with their default values:

 ```
-WORKERS=2 # Num of worker processes, not greater than (2 x number_of_cores) + 1
-MAX_PARALLEL_INSERT=2 # Num of parallel files to process in one batch
-MAX_ASYNC=4 # Max concurrent requests of LLM
+### Num of worker processes, not greater than (2 x number_of_cores) + 1
+WORKERS=2
+### Num of parallel files to process in one batch
+MAX_PARALLEL_INSERT=2
+### Max concurrent requests of LLM
+MAX_ASYNC=4
 ```

 ### Install LightRAG as a Linux Service
@@ -207,9 +212,9 @@ LightRAG API Server implements JWT-based authentication using the HS256 algorithm. T

 ```bash
 # For JWT auth
-AUTH_ACCOUNTS='admin:admin123,user1:pass456' # login name and password, separated by comma
-TOKEN_SECRET=your-key # JWT key
-TOKEN_EXPIRE_HOURS=4 # expire duration
+AUTH_ACCOUNTS='admin:admin123,user1:pass456'
+TOKEN_SECRET='your-key'
+TOKEN_EXPIRE_HOURS=4
 ```

 > Currently, only the configuration of an administrator account and password is supported. A comprehensive account system is yet to be developed and implemented.
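In the hunk above, TOKEN_SECRET signs HS256 tokens and TOKEN_EXPIRE_HOURS bounds their lifetime. A minimal sketch of that flow with the PyJWT package (assumed behavior, not the server's actual implementation):

```python
import datetime as dt

import jwt  # the PyJWT package

SECRET = "your-key"   # TOKEN_SECRET
EXPIRE_HOURS = 4      # TOKEN_EXPIRE_HOURS

def issue_token(username: str) -> str:
    payload = {
        "sub": username,
        "exp": dt.datetime.now(dt.timezone.utc) + dt.timedelta(hours=EXPIRE_HOURS),
    }
    return jwt.encode(payload, SECRET, algorithm="HS256")

def verify_token(token: str) -> str:
    # Raises jwt.ExpiredSignatureError / jwt.InvalidTokenError on failure.
    claims = jwt.decode(token, SECRET, algorithms=["HS256"])
    return claims["sub"]

print(verify_token(issue_token("admin")))  # -> admin
```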
@@ -246,10 +251,12 @@ LLM_BINDING=azure_openai
 LLM_BINDING_HOST=your-azure-endpoint
 LLM_MODEL=your-model-deployment-name
 LLM_BINDING_API_KEY=your-azure-api-key
-AZURE_OPENAI_API_VERSION=2024-08-01-preview # optional, defaults to latest version
-EMBEDDING_BINDING=azure_openai # if using Azure OpenAI for embeddings
-EMBEDDING_MODEL=your-embedding-deployment-name
+### The API version is optional and defaults to the latest version
+AZURE_OPENAI_API_VERSION=2024-08-01-preview
+
+### If using Azure OpenAI for embeddings
+EMBEDDING_BINDING=azure_openai
+EMBEDDING_MODEL=your-embedding-deployment-name
 ```

@@ -373,76 +380,48 @@ You cannot change the storage implementation selection after you add documents to L
 | --embedding-binding | ollama | Embedding binding type (lollms, ollama, openai, azure_openai) |
 | auto-scan-at-startup | - | Scan input directory for new files and start indexing |

-### Example Usage
-
-#### Running a Lightrag server with ollama default local server as llm and embedding backends
-
-Ollama is the default backend for both llm and embedding, so by default you can run lightrag-server with no parameters and the default ones will be used. Make sure ollama is installed and is running and default models are already installed on ollama.
+### .env Examples

 ```bash
-# Run lightrag with ollama, mistral-nemo:latest for llm, and bge-m3:latest for embedding
-lightrag-server
-
-# Using an authentication key
-lightrag-server --key my-key
+### Server Configuration
+# HOST=0.0.0.0
+PORT=9621
+WORKERS=2
+
+### Settings for document indexing
+ENABLE_LLM_CACHE_FOR_EXTRACT=true
+SUMMARY_LANGUAGE=Chinese
+MAX_PARALLEL_INSERT=2
+
+### LLM Configuration (Use valid host. For local services installed with docker, you can use host.docker.internal)
+TIMEOUT=200
+TEMPERATURE=0.0
+MAX_ASYNC=4
+MAX_TOKENS=32768
+
+LLM_BINDING=openai
+LLM_MODEL=gpt-4o-mini
+LLM_BINDING_HOST=https://api.openai.com/v1
+LLM_BINDING_API_KEY=your-api-key
+
+### Embedding Configuration (Use valid host. For local services installed with docker, you can use host.docker.internal)
+EMBEDDING_MODEL=bge-m3:latest
+EMBEDDING_DIM=1024
+EMBEDDING_BINDING=ollama
+EMBEDDING_BINDING_HOST=http://localhost:11434
+
+### For JWT Auth
+# AUTH_ACCOUNTS='admin:admin123,user1:pass456'
+# TOKEN_SECRET=your-key-for-LightRAG-API-Server-xxx
+# TOKEN_EXPIRE_HOURS=48
+
+# LIGHTRAG_API_KEY=your-secure-api-key-here-123
+# WHITELIST_PATHS=/api/*
+# WHITELIST_PATHS=/health,/api/*
 ```

-#### Running a Lightrag server with lollms default local server as llm and embedding backends
-
-```bash
-# Run lightrag with lollms, mistral-nemo:latest for llm, and bge-m3:latest for embedding
-# Configure LLM_BINDING=lollms and EMBEDDING_BINDING=lollms in .env or config.ini
-lightrag-server
-
-# Using an authentication key
-lightrag-server --key my-key
-```
-
-#### Running a Lightrag server with openai server as llm and embedding backends
-
-```bash
-# Run lightrag with openai, GPT-4o-mini for llm, and text-embedding-3-small for embedding
-# Configure in .env or config.ini:
-# LLM_BINDING=openai
-# LLM_MODEL=GPT-4o-mini
-# EMBEDDING_BINDING=openai
-# EMBEDDING_MODEL=text-embedding-3-small
-lightrag-server
-
-# Using an authentication key
-lightrag-server --key my-key
-```
-
-#### Running a Lightrag server with azure openai server as llm and embedding backends
-
-```bash
-# Run lightrag with azure_openai
-# Configure in .env or config.ini:
-# LLM_BINDING=azure_openai
-# LLM_MODEL=your-model
-# EMBEDDING_BINDING=azure_openai
-# EMBEDDING_MODEL=your-embedding-model
-lightrag-server
-
-# Using an authentication key
-lightrag-server --key my-key
-```
-
-**Important Notes:**
-- For LoLLMs: Make sure the specified models are installed in your LoLLMs instance
-- For Ollama: Make sure the specified models are installed in your Ollama instance
-- For OpenAI: Ensure you have set up your OPENAI_API_KEY environment variable
-- For Azure OpenAI: Build and configure your server as stated in the Prequisites section
-
-For help on any server, use the --help flag:
-```bash
-lightrag-server --help
-```
-
-Note: If you don't need the API functionality, you can install the base package without API support using:
-```bash
-pip install lightrag-hku
-```

 ## API Endpoints

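The WHITELIST_PATHS entries in the example above are comma-separated path patterns that bypass API-key checks. A hypothetical matcher for that format (not the server's actual code):

```python
from fnmatch import fnmatch

def is_whitelisted(path: str, raw: str = "/health,/api/*") -> bool:
    # True if the request path matches any comma-separated pattern.
    return any(fnmatch(path, pattern.strip()) for pattern in raw.split(","))

print(is_whitelisted("/health"))     # True
print(is_whitelisted("/api/query"))  # True
print(is_whitelisted("/admin"))      # False
```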
@@ -591,7 +591,7 @@ async def extract_entities(
 processed_chunks += 1
 entities_count = len(maybe_nodes)
 relations_count = len(maybe_edges)
-log_message = f" Chunk {processed_chunks}/{total_chunks}: extracted {entities_count} entities and {relations_count} relationships (deduplicated)"
+log_message = f" Chk {processed_chunks}/{total_chunks}: extracted {entities_count} Ent + {relations_count} Rel (deduplicated)"
 logger.info(log_message)
 if pipeline_status is not None:
     async with pipeline_status_lock:
@@ -656,7 +656,7 @@ async def extract_entities(
 pipeline_status["latest_message"] = log_message
 pipeline_status["history_messages"].append(log_message)

-log_message = f"Extracted {len(all_entities_data)} entities and {len(all_relationships_data)} relationships (deduplicated)"
+log_message = f"Extracted {len(all_entities_data)} entities + {len(all_relationships_data)} relationships (deduplicated)"
 logger.info(log_message)
 if pipeline_status is not None:
     async with pipeline_status_lock: