Setting LLM cache option for entity extraction from env

yangdx
2025-03-07 15:03:33 +08:00
parent 3286a0d57f
commit c2f861fba9
3 changed files with 9 additions and 2 deletions

View File

@@ -50,6 +50,7 @@
# MAX_TOKEN_SUMMARY=500 # Max tokens for entity or relation summaries
# SUMMARY_LANGUAGE=English
# MAX_EMBED_TOKENS=8192
+# ENABLE_LLM_CACHE_FOR_EXTRACT=false # Enable LLM cache for entity extraction, defaults to false
### LLM Configuration (Use a valid host. For local services installed with Docker, you can use host.docker.internal)
LLM_BINDING=ollama

View File

@@ -223,6 +223,9 @@ LightRAG supports binding to various LLM/Embedding backends:
Use the environment variable `LLM_BINDING` or the CLI argument `--llm-binding` to select the LLM backend type. Use the environment variable `EMBEDDING_BINDING` or the CLI argument `--embedding-binding` to select the embedding backend type.
+### Entity Extraction Configuration
+- `ENABLE_LLM_CACHE_FOR_EXTRACT`: Enable LLM cache for entity extraction (default: false)
### Storage Types Supported
LightRAG uses 4 types of storage for different purposes:
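For illustration, one way to opt in per process without editing `.env` — the variable name comes from this commit, while the `lightrag-server` entry point is an assumption here:

```python
import os
import subprocess

# Opt in to LLM caching for entity extraction for this run only.
# With the variable unset, the documented default of "false" applies.
env = {**os.environ, "ENABLE_LLM_CACHE_FOR_EXTRACT": "true"}

# Entry-point name assumed for this sketch.
subprocess.run(["lightrag-server"], env=env, check=True)
```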

View File

@@ -50,6 +50,9 @@ from .auth import auth_handler
# This update allows the user to put a different .env file in each lightrag folder
load_dotenv(".env", override=True)
+# Read entity extraction cache config
+enable_llm_cache = os.getenv("ENABLE_LLM_CACHE_FOR_EXTRACT", "false").lower() == "true"
# Initialize config parser
config = configparser.ConfigParser()
config.read("config.ini")
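Note that the parsing added above is deliberately strict: only the string "true" (any casing) enables the cache. A small self-contained sketch of those semantics (the helper name is illustrative):

```python
import os

def extract_cache_enabled() -> bool:
    # Same parsing as in the commit: only the case-insensitive string
    # "true" enables the cache; anything else, including an unset
    # variable, falls back to the default of False.
    return os.getenv("ENABLE_LLM_CACHE_FOR_EXTRACT", "false").lower() == "true"

os.environ["ENABLE_LLM_CACHE_FOR_EXTRACT"] = "TRUE"
assert extract_cache_enabled()      # matching is case-insensitive
os.environ["ENABLE_LLM_CACHE_FOR_EXTRACT"] = "1"
assert not extract_cache_enabled()  # "1" and "yes" are not recognized
os.environ.pop("ENABLE_LLM_CACHE_FOR_EXTRACT")
assert not extract_cache_enabled()  # unset -> default "false"
```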
@@ -323,7 +326,7 @@ def create_app(args):
vector_db_storage_cls_kwargs={
"cosine_better_than_threshold": args.cosine_threshold
},
-enable_llm_cache_for_entity_extract=False, # set to True for debugging to reduce LLM fees
+enable_llm_cache_for_entity_extract=enable_llm_cache, # Read from environment variable
embedding_cache_config={
"enabled": True,
"similarity_threshold": 0.95,
@@ -352,7 +355,7 @@ def create_app(args):
vector_db_storage_cls_kwargs={
"cosine_better_than_threshold": args.cosine_threshold
},
-enable_llm_cache_for_entity_extract=False, # set to True for debugging to reduce LLM fees
+enable_llm_cache_for_entity_extract=enable_llm_cache, # Read from environment variable
embedding_cache_config={
"enabled": True,
"similarity_threshold": 0.95,