Setting LLM cache option for entity extraction from env

This commit is contained in:
yangdx
2025-03-07 15:03:33 +08:00
parent 3286a0d57f
commit c2f861fba9
3 changed files with 9 additions and 2 deletions

View File

@@ -50,6 +50,7 @@
# MAX_TOKEN_SUMMARY=500 # Max tokens for entity or relations summary
# SUMMARY_LANGUAGE=English
# MAX_EMBED_TOKENS=8192
# ENABLE_LLM_CACHE_FOR_EXTRACT=false # Enable LLM cache for entity extraction, defaults to false
### LLM Configuration (Use valid host. For local services installed with docker, you can use host.docker.internal)
LLM_BINDING=ollama