update README.md and .env.example

yangdx
2025-02-23 13:26:38 +08:00
parent 637d6756b8
commit 460bc3a6aa
2 changed files with 188 additions and 202 deletions

.env.example

@@ -16,70 +16,65 @@
 # WORKING_DIR=<absolute_path_for_working_dir>
 # INPUT_DIR=<absolute_path_for_doc_input_dir>
-### Logging level
-LOG_LEVEL=INFO
-VERBOSE=False
-### Optional Timeout
-TIMEOUT=300
-# Ollama Emulating Model Tag
+### Ollama Emulating Model Tag
 # OLLAMA_EMULATING_MODEL_TAG=latest
-### RAG Configuration
-MAX_ASYNC=4
-EMBEDDING_DIM=1024
-MAX_EMBED_TOKENS=8192
-### Settings relative to query
-HISTORY_TURNS=3
-COSINE_THRESHOLD=0.2
-TOP_K=60
-MAX_TOKEN_TEXT_CHUNK=4000
-MAX_TOKEN_RELATION_DESC=4000
-MAX_TOKEN_ENTITY_DESC=4000
-### Settings relative to indexing
-CHUNK_SIZE=1200
-CHUNK_OVERLAP_SIZE=100
-MAX_TOKENS=32768
-MAX_TOKEN_SUMMARY=500
-SUMMARY_LANGUAGE=English
-### LLM Configuration (Use valid host. For local services, you can use host.docker.internal)
+### Logging level
+# LOG_LEVEL=INFO
+# VERBOSE=False
+### Max async calls for LLM
+# MAX_ASYNC=4
+### Optional Timeout for LLM
+# TIMEOUT=None # Time out in seconds, None for infinite timeout
+### Settings for RAG query
+# HISTORY_TURNS=3
+# COSINE_THRESHOLD=0.2
+# TOP_K=60
+# MAX_TOKEN_TEXT_CHUNK=4000
+# MAX_TOKEN_RELATION_DESC=4000
+# MAX_TOKEN_ENTITY_DESC=4000
+### Settings for document indexing
+# CHUNK_SIZE=1200
+# CHUNK_OVERLAP_SIZE=100
+# MAX_TOKENS=32768 # Max tokens send to LLM for summarization
+# MAX_TOKEN_SUMMARY=500 # Max tokens for entity or relations summary
+# SUMMARY_LANGUAGE=English
+# MAX_EMBED_TOKENS=8192
+### LLM Configuration (Use valid host. For local services installed with docker, you can use host.docker.internal)
+LLM_MODEL=mistral-nemo:latest
+LLM_BINDING_API_KEY=your_api_key
+### Ollama example
 LLM_BINDING=ollama
-LLM_BINDING_HOST=http://host.docker.internal:11434
-LLM_MODEL=mistral-nemo:latest
+LLM_BINDING_HOST=http://localhost:11434
 ### OpenAI alike example
 # LLM_BINDING=openai
-# LLM_MODEL=deepseek-chat
-# LLM_BINDING_HOST=https://api.deepseek.com
-# LLM_BINDING_API_KEY=your_api_key
-### for OpenAI LLM (LLM_BINDING_API_KEY take priority)
-# OPENAI_API_KEY=your_api_key
-### Lollms example
+# LLM_BINDING_HOST=https://api.openai.com/v1
+### lollms example
 # LLM_BINDING=lollms
-# LLM_BINDING_HOST=http://host.docker.internal:9600
-# LLM_MODEL=mistral-nemo:latest
-### Embedding Configuration (Use valid host. For local services, you can use host.docker.internal)
-# Ollama example
-EMBEDDING_BINDING=ollama
-EMBEDDING_BINDING_HOST=http://host.docker.internal:11434
+# LLM_BINDING_HOST=http://localhost:9600
+### Embedding Configuration (Use valid host. For local services installed with docker, you can use host.docker.internal)
 EMBEDDING_MODEL=bge-m3:latest
+EMBEDDING_DIM=1024
 # EMBEDDING_BINDING_API_KEY=your_api_key
+### ollama example
+EMBEDDING_BINDING=ollama
+EMBEDDING_BINDING_HOST=http://localhost:11434
 ### OpenAI alike example
 # EMBEDDING_BINDING=openai
 # LLM_BINDING_HOST=https://api.openai.com/v1
 ### Lollms example
 # EMBEDDING_BINDING=lollms
-# EMBEDDING_BINDING_HOST=http://host.docker.internal:9600
-# EMBEDDING_MODEL=bge-m3:latest
+# EMBEDDING_BINDING_HOST=http://localhost:9600
 ### Optional for Azure (LLM_BINDING_HOST, LLM_BINDING_API_KEY take priority)
 # AZURE_OPENAI_API_VERSION=2024-08-01-preview
 # AZURE_OPENAI_DEPLOYMENT=gpt-4o
-# AZURE_OPENAI_API_KEY=myapikey
+# AZURE_OPENAI_API_KEY=your_api_key
 # AZURE_OPENAI_ENDPOINT=https://myendpoint.openai.azure.com
 # AZURE_EMBEDDING_DEPLOYMENT=text-embedding-3-large
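
The hunk above moves most tunables to commented-out defaults and switches the local example hosts from host.docker.internal to localhost. A minimal working .env for the Ollama setup, assembled only from values that appear in the template (the model tags, dimension, and ports are the template's own examples, not recommendations):

LLM_BINDING=ollama
LLM_MODEL=mistral-nemo:latest
LLM_BINDING_HOST=http://localhost:11434
EMBEDDING_BINDING=ollama
EMBEDDING_MODEL=bge-m3:latest
EMBEDDING_BINDING_HOST=http://localhost:11434
EMBEDDING_DIM=1024

Everything else (TOP_K, CHUNK_SIZE, TIMEOUT, and so on) can stay commented out, in which case the server falls back to the defaults shown in the comments.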
@@ -138,4 +133,4 @@ MONGODB_GRAPH=false # deprecated (keep for backward compatibility)
 ### Qdrant
 QDRANT_URL=http://localhost:16333
-QDRANT_API_KEY=your-api-key # optional
+# QDRANT_API_KEY=your-api-key
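
The Qdrant key is likewise now commented out; uncomment QDRANT_API_KEY only if your Qdrant instance requires authentication. A typical way to use the updated template, assuming the lightrag-server entry point from the repository's API install (an assumption; it is not shown in this diff):

cp .env.example .env
# uncomment and fill in only the variables that differ from the defaults
lightrag-server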