Add ENABLE_LLM_CACHE env support

yangdx
2025-04-09 12:42:17 +08:00
parent 567c3448a7
commit 37007244c2
4 changed files with 14 additions and 5 deletions


@@ -40,7 +40,6 @@ WEBUI_DESCRIPTION="Simple and Fast Graph Based RAG System"
 # MAX_TOKEN_ENTITY_DESC=4000
 ### Settings for document indexing
-ENABLE_LLM_CACHE_FOR_EXTRACT=true
 SUMMARY_LANGUAGE=English
 # CHUNK_SIZE=1200
 # CHUNK_OVERLAP_SIZE=100
@@ -64,6 +63,8 @@ TEMPERATURE=0.5
 MAX_ASYNC=4
 ### Max tokens send to LLM (less than context size of the model)
 MAX_TOKENS=32768
+ENABLE_LLM_CACHE=true
+ENABLE_LLM_CACHE_FOR_EXTRACT=true
 ### Ollama example (For local services installed with docker, you can use host.docker.internal as host)
 LLM_BINDING=ollama
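
The two flags added above are plain dotenv booleans; as the names suggest, ENABLE_LLM_CACHE appears to govern LLM response caching in general and ENABLE_LLM_CACHE_FOR_EXTRACT caching during extraction. A minimal sketch of how such flags could be parsed on startup is shown below; the helper name env_flag and the defaults are assumptions for illustration, not the project's actual code.

import os

def env_flag(name: str, default: bool) -> bool:
    # Treat common truthy spellings as True, anything else as False;
    # a missing variable falls back to the supplied default.
    value = os.getenv(name)
    if value is None:
        return default
    return value.strip().lower() in ("true", "1", "yes", "on")

# Hypothetical usage: both cache flags default to enabled, matching the
# values added to the example env file in this commit.
enable_llm_cache = env_flag("ENABLE_LLM_CACHE", True)
enable_llm_cache_for_extract = env_flag("ENABLE_LLM_CACHE_FOR_EXTRACT", True)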