Update env sample file
env.example
@@ -1,12 +1,25 @@
### This is a sample .env file

### Server Configuration
# HOST=0.0.0.0
# PORT=9621
HOST=0.0.0.0
PORT=9621
WEBUI_TITLE='My Graph KB'
WEBUI_DESCRIPTION="Simple and Fast Graph Based RAG System"
OLLAMA_EMULATING_MODEL_TAG=latest
# WORKERS=2
# CORS_ORIGINS=http://localhost:3000,http://localhost:8080
WEBUI_TITLE='Graph RAG Engine'
WEBUI_DESCRIPTION="Simple and Fast Graph Based RAG System"

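As a quick sanity check, the values above can be loaded with the standard python-dotenv package and printed before the server is started; only HOST, PORT and WEBUI_TITLE come from this file, the rest is an illustrative sketch:

    # Minimal sketch: read .env (copied from env.example) and show the server settings.
    import os
    from dotenv import load_dotenv  # pip install python-dotenv

    load_dotenv()  # loads .env from the current working directory

    host = os.getenv("HOST", "0.0.0.0")
    port = int(os.getenv("PORT", "9621"))
    title = os.getenv("WEBUI_TITLE", "Graph RAG Engine")
    print(f"Web UI '{title}' would listen on {host}:{port}")
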
### Login Configuration
# AUTH_ACCOUNTS='admin:admin123,user1:pass456'
# TOKEN_SECRET=Your-Key-For-LightRAG-API-Server
# TOKEN_EXPIRE_HOURS=48
# GUEST_TOKEN_EXPIRE_HOURS=24
# JWT_ALGORITHM=HS256

### API-Key to access LightRAG Server API
# LIGHTRAG_API_KEY=your-secure-api-key-here
# WHITELIST_PATHS=/health,/api/*

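For illustration, a sketch of what an HS256 token signed with TOKEN_SECRET could look like, using the PyJWT package; the claim names in the payload are assumptions, only TOKEN_SECRET, TOKEN_EXPIRE_HOURS and JWT_ALGORITHM come from this file:

    # Hypothetical token round-trip with PyJWT; the payload fields are assumptions.
    import datetime
    import jwt  # pip install PyJWT

    secret = "Your-Key-For-LightRAG-API-Server"
    expires = datetime.datetime.utcnow() + datetime.timedelta(hours=48)
    token = jwt.encode({"sub": "admin", "exp": expires}, secret, algorithm="HS256")
    print(jwt.decode(token, secret, algorithms=["HS256"]))
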
### Optional SSL Configuration
# SSL=true
@@ -14,11 +27,10 @@ WEBUI_DESCRIPTION="Simple and Fast Graph Based RAG System"
# SSL_KEYFILE=/path/to/key.pem

### Directory Configuration (defaults to current working directory)
# WORKING_DIR=<absolute_path_for_working_dir>
### Should be set if deployed by Docker (set by the Dockerfile instead of .env)
### Default values are ./inputs and ./rag_storage
# INPUT_DIR=<absolute_path_for_doc_input_dir>

### Ollama Emulating Model Tag
# OLLAMA_EMULATING_MODEL_TAG=latest
# WORKING_DIR=<absolute_path_for_working_dir>

### Max nodes returned from graph retrieval
# MAX_GRAPH_NODES=1000

@@ -39,82 +51,57 @@ WEBUI_DESCRIPTION="Simple and Fast Graph Based RAG System"
# MAX_TOKEN_RELATION_DESC=4000
# MAX_TOKEN_ENTITY_DESC=4000

### Settings for document indexing
### Entity and relation summarization configuration
### Language: English, Chinese, French, German ...
SUMMARY_LANGUAGE=English
### Number of duplicated entities/edges to trigger LLM re-summary on merge (at least 3 is recommended)
# FORCE_LLM_SUMMARY_ON_MERGE=6
### Max tokens for entity/relation descriptions after merge
# MAX_TOKEN_SUMMARY=500

### Number of documents processed in parallel (less than MAX_ASYNC/2 is recommended)
# MAX_PARALLEL_INSERT=2
### Chunk size for document splitting, 500~1500 is recommended
# CHUNK_SIZE=1200
# CHUNK_OVERLAP_SIZE=100

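CHUNK_SIZE and CHUNK_OVERLAP_SIZE are measured in tokens. A rough sketch of overlapping token-window splitting with the tiktoken package; the cl100k_base encoding is an assumption, and LightRAG's own splitter may differ in detail:

    # Rough token-window chunking sketch; the encoding choice is an assumption.
    import tiktoken

    def chunk_text(text, chunk_size=1200, overlap=100):
        enc = tiktoken.get_encoding("cl100k_base")
        tokens = enc.encode(text)
        step = chunk_size - overlap
        return [enc.decode(tokens[i:i + chunk_size]) for i in range(0, len(tokens), step)]

    print(len(chunk_text("some long document text ... " * 500)))
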
### Number of documents processed in parallel in one batch
# MAX_PARALLEL_INSERT=2

### Max tokens for entity/relation descriptions after merge
# MAX_TOKEN_SUMMARY=500
### Number of entities/edges to trigger LLM re-summary on merge (at least 3 is recommended)
# FORCE_LLM_SUMMARY_ON_MERGE=6

### LLM Configuration
ENABLE_LLM_CACHE=true
ENABLE_LLM_CACHE_FOR_EXTRACT=true
### Timeout in seconds for LLM; None for infinite timeout
TIMEOUT=240
### Some models like o1-mini require temperature to be set to 1
TEMPERATURE=0
### Max concurrent requests to the LLM
MAX_ASYNC=4
### Max tokens sent to LLM for entity/relation summaries (less than the model's context size)
MAX_TOKENS=32768
### LLM Binding type: openai, ollama, lollms
LLM_BINDING=openai
LLM_MODEL=gpt-4o
LLM_BINDING_HOST=https://api.openai.com/v1
LLM_BINDING_API_KEY=your_api_key

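A quick connectivity check for the OpenAI binding above, using the openai Python package; the prompt is arbitrary and this is not LightRAG's internal call path:

    # Confirm that LLM_BINDING_HOST / LLM_BINDING_API_KEY / LLM_MODEL work together.
    from openai import OpenAI  # pip install openai

    client = OpenAI(base_url="https://api.openai.com/v1", api_key="your_api_key")
    reply = client.chat.completions.create(
        model="gpt-4o",
        max_tokens=16,
        messages=[{"role": "user", "content": "Reply with OK"}],
    )
    print(reply.choices[0].message.content)
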
### Embedding Configuration
### Embedding Binding type: openai, ollama, lollms
EMBEDDING_BINDING=ollama
EMBEDDING_MODEL=bge-m3:latest
EMBEDDING_DIM=1024
EMBEDDING_BINDING_API_KEY=your_api_key
# If the embedding service is deployed within the same Docker stack, use host.docker.internal instead of localhost
EMBEDDING_BINDING_HOST=http://localhost:11434
### Number of chunks sent to Embedding in a single request
# EMBEDDING_BATCH_NUM=32
### Max concurrent requests for Embedding
# EMBEDDING_FUNC_MAX_ASYNC=16
### Maximum tokens sent to Embedding for each chunk (no longer in use?)
# MAX_EMBED_TOKENS=8192

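EMBEDDING_DIM has to match the vector size the model actually returns. A hedged check against a local Ollama instance, assuming the standard /api/embeddings endpoint; other bindings expose different endpoints:

    # Probe the embedding size of bge-m3 served by Ollama; expect 1024.
    import requests

    resp = requests.post(
        "http://localhost:11434/api/embeddings",
        json={"model": "bge-m3:latest", "prompt": "dimension probe"},
        timeout=30,
    )
    resp.raise_for_status()
    print(len(resp.json()["embedding"]))
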
### LLM Configuration
### Timeout in seconds for LLM; None for infinite timeout
TIMEOUT=150
### Some models like o1-mini require temperature to be set to 1
TEMPERATURE=0.5
### Max concurrent requests to the LLM
MAX_ASYNC=4
### Max tokens sent to LLM (less than the model's context size)
MAX_TOKENS=32768
ENABLE_LLM_CACHE=true
ENABLE_LLM_CACHE_FOR_EXTRACT=true

### Ollama example (for local services installed with Docker, you can use host.docker.internal as the host)
LLM_BINDING=ollama
LLM_MODEL=mistral-nemo:latest
LLM_BINDING_API_KEY=your_api_key
LLM_BINDING_HOST=http://localhost:11434

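Before pointing LLM_BINDING_HOST at Ollama it can help to confirm the host is reachable and the model tag is pulled; /api/tags is Ollama's model-listing endpoint (from inside another container, swap localhost for host.docker.internal as noted above):

    # List local Ollama models and check that mistral-nemo:latest is available.
    import requests

    tags = requests.get("http://localhost:11434/api/tags", timeout=10).json()
    models = [m["name"] for m in tags.get("models", [])]
    print("mistral-nemo:latest" in models, models)
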
### OpenAI-compatible example
# LLM_BINDING=openai
# LLM_MODEL=gpt-4o
# LLM_BINDING_HOST=https://api.openai.com/v1
# LLM_BINDING_API_KEY=your_api_key
### lollms example
# LLM_BINDING=lollms
# LLM_MODEL=mistral-nemo:latest
# LLM_BINDING_HOST=http://localhost:9600
# LLM_BINDING_API_KEY=your_api_key

### Embedding Configuration (use a valid host; for local services installed with Docker, you can use host.docker.internal)
EMBEDDING_MODEL=bge-m3:latest
EMBEDDING_DIM=1024
# EMBEDDING_BINDING_API_KEY=your_api_key
### Ollama example
EMBEDDING_BINDING=ollama
EMBEDDING_BINDING_HOST=http://localhost:11434
### OpenAI-compatible example
# EMBEDDING_BINDING=openai
# EMBEDDING_BINDING_HOST=https://api.openai.com/v1
### lollms example
# EMBEDDING_BINDING=lollms
# EMBEDDING_BINDING_HOST=http://localhost:9600

### Optional for Azure (LLM_BINDING_HOST, LLM_BINDING_API_KEY take priority)
# AZURE_OPENAI_API_VERSION=2024-08-01-preview
# AZURE_OPENAI_DEPLOYMENT=gpt-4o
# AZURE_OPENAI_API_KEY=your_api_key
# AZURE_OPENAI_ENDPOINT=https://myendpoint.openai.azure.com

# AZURE_EMBEDDING_DEPLOYMENT=text-embedding-3-large
# AZURE_EMBEDDING_API_VERSION=2023-05-15

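If the Azure variables are used, they map onto the openai package's AzureOpenAI client roughly as below; note that Azure routes on the deployment name, and this sketch is not LightRAG's internal wiring:

    # Sketch of how the Azure settings above feed the AzureOpenAI client.
    from openai import AzureOpenAI

    client = AzureOpenAI(
        api_key="your_api_key",
        api_version="2024-08-01-preview",
        azure_endpoint="https://myendpoint.openai.azure.com",
    )
    reply = client.chat.completions.create(
        model="gpt-4o",  # the deployment name from AZURE_OPENAI_DEPLOYMENT
        messages=[{"role": "user", "content": "Reply with OK"}],
    )
    print(reply.choices[0].message.content)
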
### Data storage selection
LIGHTRAG_KV_STORAGE=JsonKVStorage
LIGHTRAG_VECTOR_STORAGE=NanoVectorDBStorage
LIGHTRAG_GRAPH_STORAGE=NetworkXStorage
LIGHTRAG_DOC_STATUS_STORAGE=JsonDocStatusStorage
# LIGHTRAG_KV_STORAGE=PGKVStorage
# LIGHTRAG_VECTOR_STORAGE=PGVectorStorage
# LIGHTRAG_DOC_STATUS_STORAGE=PGDocStatusStorage
# LIGHTRAG_GRAPH_STORAGE=Neo4JStorage

### TiDB Configuration (Deprecated)
# TIDB_HOST=localhost

@@ -135,22 +122,22 @@ POSTGRES_MAX_CONNECTIONS=12
### Separates all data from different LightRAG instances (being deprecated)
# POSTGRES_WORKSPACE=default

### Neo4j Configuration
NEO4J_URI=neo4j+s://xxxxxxxx.databases.neo4j.io
NEO4J_USERNAME=neo4j
NEO4J_PASSWORD='your_password'

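A small connectivity check for the Neo4j settings above with the official neo4j Python driver; the URI and credentials are the placeholders from this file:

    # Verify the Neo4j credentials before selecting Neo4JStorage.
    from neo4j import GraphDatabase  # pip install neo4j

    driver = GraphDatabase.driver(
        "neo4j+s://xxxxxxxx.databases.neo4j.io",
        auth=("neo4j", "your_password"),
    )
    driver.verify_connectivity()
    print("Neo4j connection OK")
    driver.close()
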
### Independent AGE Configuration (not for AGE embedded in PostgreSQL)
AGE_POSTGRES_DB=
AGE_POSTGRES_USER=
AGE_POSTGRES_PASSWORD=
AGE_POSTGRES_HOST=
# AGE_POSTGRES_DB=
# AGE_POSTGRES_USER=
# AGE_POSTGRES_PASSWORD=
# AGE_POSTGRES_HOST=
# AGE_POSTGRES_PORT=8529

# AGE Graph Name (applies to PostgreSQL and independent AGE)
### AGE_GRAPH_NAME is deprecated
# AGE_GRAPH_NAME=lightrag

### Neo4j Configuration
NEO4J_URI=neo4j+s://xxxxxxxx.databases.neo4j.io
NEO4J_USERNAME=neo4j
NEO4J_PASSWORD='your_password'

### MongoDB Configuration
MONGO_URI=mongodb://root:root@localhost:27017/
MONGO_DATABASE=LightRAG

@@ -170,14 +157,3 @@ QDRANT_URL=http://localhost:16333
### Redis
REDIS_URI=redis://localhost:6379

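A one-line sanity check for REDIS_URI with the redis-py package; PING round-trips to the server and returns True when it is reachable:

    # Ping the Redis instance referenced by REDIS_URI.
    import redis  # pip install redis

    print(redis.Redis.from_url("redis://localhost:6379").ping())
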
### For JWT Auth
# AUTH_ACCOUNTS='admin:admin123,user1:pass456'
# TOKEN_SECRET=Your-Key-For-LightRAG-API-Server
# TOKEN_EXPIRE_HOURS=48
# GUEST_TOKEN_EXPIRE_HOURS=24
# JWT_ALGORITHM=HS256

### API-Key to access LightRAG Server API
# LIGHTRAG_API_KEY=your-secure-api-key-here
# WHITELIST_PATHS=/health,/api/*
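When LIGHTRAG_API_KEY is set, clients must present the key on paths that are not whitelisted; the X-API-Key header name and the /query path below are assumptions for illustration, so check the LightRAG Server API docs for the exact names:

    # Hypothetical authenticated request; the header name and path are assumptions.
    import requests

    print(requests.get("http://localhost:9621/health", timeout=10).status_code)  # whitelisted, no key
    resp = requests.post(
        "http://localhost:9621/query",
        headers={"X-API-Key": "your-secure-api-key-here"},
        json={"query": "What is LightRAG?"},
        timeout=60,
    )
    print(resp.status_code)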