Merge branch 'HKUDS:main' into main

Saifeddine ALOUI
2025-03-08 23:06:53 +01:00
committed by GitHub
7 changed files with 97 additions and 52 deletions

@@ -223,6 +223,11 @@ LightRAG supports binding to various LLM/Embedding backends:
Use the environment variable `LLM_BINDING` or CLI argument `--llm-binding` to select the LLM backend type. Use the environment variable `EMBEDDING_BINDING` or CLI argument `--embedding-binding` to select the Embedding backend type.
### Entity Extraction Configuration
* ENABLE_LLM_CACHE_FOR_EXTRACT: Enable LLM cache for entity extraction (default: false)
It's very common to set `ENABLE_LLM_CACHE_FOR_EXTRACT` to true in a test environment to reduce the cost of LLM calls.
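A minimal sketch of how the flag is interpreted (mirroring the server-side parsing introduced in this change); setting the variable in `.env` or the shell before start-up has the same effect:

```python
import os

# In practice this is set in .env or the shell; it is set here only for illustration.
os.environ["ENABLE_LLM_CACHE_FOR_EXTRACT"] = "true"

# Parsing rule used by the server: only the case-insensitive string "true" enables
# the cache; any other value (or an unset variable) leaves it disabled.
enable_llm_cache = os.getenv("ENABLE_LLM_CACHE_FOR_EXTRACT", "false").lower() == "true"
assert enable_llm_cache is True
```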
### Storage Types Supported
LightRAG uses 4 types of storage for different purposes:

@@ -50,6 +50,9 @@ from .auth import auth_handler
# This update allows the user to put a different .env file for each lightrag folder
load_dotenv(".env", override=True)
# Read entity extraction cache config
enable_llm_cache = os.getenv("ENABLE_LLM_CACHE_FOR_EXTRACT", "false").lower() == "true"
# Initialize config parser
config = configparser.ConfigParser()
config.read("config.ini")
@@ -323,7 +326,7 @@ def create_app(args):
vector_db_storage_cls_kwargs={
"cosine_better_than_threshold": args.cosine_threshold
},
- enable_llm_cache_for_entity_extract=False, # set to True for debuging to reduce llm fee
+ enable_llm_cache_for_entity_extract=enable_llm_cache, # Read from environment variable
embedding_cache_config={
"enabled": True,
"similarity_threshold": 0.95,
@@ -352,7 +355,7 @@ def create_app(args):
vector_db_storage_cls_kwargs={
"cosine_better_than_threshold": args.cosine_threshold
},
- enable_llm_cache_for_entity_extract=False, # set to True for debuging to reduce llm fee
+ enable_llm_cache_for_entity_extract=enable_llm_cache, # Read from environment variable
embedding_cache_config={
"enabled": True,
"similarity_threshold": 0.95,