diff --git a/env.example b/env.example index cd92abc8..955741ef 100644 --- a/env.example +++ b/env.example @@ -50,6 +50,7 @@ # MAX_TOKEN_SUMMARY=500 # Max tokens for entity or relations summary # SUMMARY_LANGUAGE=English # MAX_EMBED_TOKENS=8192 +# ENABLE_LLM_CACHE_FOR_EXTRACT=false # Enable LLM cache for entity extraction, defaults to false ### LLM Configuration (Use valid host. For local services installed with docker, you can use host.docker.internal) LLM_BINDING=ollama diff --git a/lightrag/api/README.md b/lightrag/api/README.md index f9a2a197..7a07ddb8 100644 --- a/lightrag/api/README.md +++ b/lightrag/api/README.md @@ -223,6 +223,11 @@ LightRAG supports binding to various LLM/Embedding backends: Use environment variables `LLM_BINDING` or CLI argument `--llm-binding` to select LLM backend type. Use environment variables `EMBEDDING_BINDING` or CLI argument `--embedding-binding` to select LLM backend type. +### Entity Extraction Configuration +* ENABLE_LLM_CACHE_FOR_EXTRACT: Enable LLM cache for entity extraction (default: false) + +It's very common to set `ENABLE_LLM_CACHE_FOR_EXTRACT` to true in test environments to reduce the cost of LLM calls.
+ ### Storage Types Supported LightRAG uses 4 types of storage for difference purposes: diff --git a/lightrag/api/lightrag_server.py b/lightrag/api/lightrag_server.py index 2891b542..5df4f765 100644 --- a/lightrag/api/lightrag_server.py +++ b/lightrag/api/lightrag_server.py @@ -50,6 +50,9 @@ from .auth import auth_handler # This update allows the user to put a different.env file for each lightrag folder load_dotenv(".env", override=True) +# Read entity extraction cache config +enable_llm_cache = os.getenv("ENABLE_LLM_CACHE_FOR_EXTRACT", "false").lower() == "true" + # Initialize config parser config = configparser.ConfigParser() config.read("config.ini") @@ -323,7 +326,7 @@ def create_app(args): vector_db_storage_cls_kwargs={ "cosine_better_than_threshold": args.cosine_threshold }, - enable_llm_cache_for_entity_extract=False, # set to True for debuging to reduce llm fee + enable_llm_cache_for_entity_extract=enable_llm_cache, # Read from environment variable embedding_cache_config={ "enabled": True, "similarity_threshold": 0.95, @@ -352,7 +355,7 @@ def create_app(args): vector_db_storage_cls_kwargs={ "cosine_better_than_threshold": args.cosine_threshold }, - enable_llm_cache_for_entity_extract=False, # set to True for debuging to reduce llm fee + enable_llm_cache_for_entity_extract=enable_llm_cache, # Read from environment variable embedding_cache_config={ "enabled": True, "similarity_threshold": 0.95,