Refactor LLM cache handling and entity extraction

- Removed custom LLM function in entity extraction
- Simplified cache handling logic
- Added `force_llm_cache` parameter
- Updated cache handling conditions
Author: yangdx
Date:   2025-02-02 01:28:46 +08:00
parent 6c7d7c25d3
commit b45ae1567c
2 changed files with 4 additions and 32 deletions


@@ -484,10 +484,10 @@ def dequantize_embedding(
 async def handle_cache(
-    hashing_kv, args_hash, prompt, mode="default", cache_type=None, llm=None
+    hashing_kv, args_hash, prompt, mode="default", cache_type=None, force_llm_cache=False
 ):
     """Generic cache handling function"""
-    if hashing_kv is None or not hashing_kv.global_config.get("enable_llm_cache"):
+    if hashing_kv is None or not (force_llm_cache or hashing_kv.global_config.get("enable_llm_cache")):
         return None, None, None, None
     if mode != "default":
@@ -513,9 +513,7 @@ async def handle_cache(
             similarity_threshold=embedding_cache_config["similarity_threshold"],
             mode=mode,
             use_llm_check=use_llm_check,
-            llm_func=llm
-            if (use_llm_check and llm is not None)
-            else (llm_model_func if use_llm_check else None),
+            llm_func=llm_model_func if use_llm_check else None,
             original_prompt=prompt if use_llm_check else None,
             cache_type=cache_type,
         )
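For context, the new parameter lets a caller force the cache lookup even when `enable_llm_cache` is disabled in `global_config`. Below is a minimal usage sketch; only `handle_cache`, its four-value return, and `force_llm_cache` come from the diff above, while the caller function, the hashing helper, and the `cache_type` value are illustrative assumptions.

import hashlib

# handle_cache is the function shown in the diff above; its import path is
# omitted here because the file name is not part of this excerpt.

async def query_with_cache(prompt, hashing_kv, llm_model_func):
    # Identify the cache entry by a hash of the prompt (the real project has
    # its own argument-hashing utility; hashlib stands in for it here).
    args_hash = hashlib.md5(prompt.encode("utf-8")).hexdigest()

    # force_llm_cache=True makes handle_cache consult the cache even when
    # global_config["enable_llm_cache"] is False.
    cached_response, quantized, min_val, max_val = await handle_cache(
        hashing_kv,
        args_hash,
        prompt,
        mode="default",
        cache_type="query",
        force_llm_cache=True,
    )
    if cached_response is not None:
        return cached_response

    # Cache miss: fall back to the globally configured LLM.
    return await llm_model_func(prompt)

Note that callers no longer pass a custom `llm` function: the similarity check inside `get_best_cached_response` now always uses the global `llm_model_func` when `use_llm_check` is enabled, which is what the simplified `llm_func=` expression in the second hunk reflects.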