Unify llm_response_cache and hashing_kv, and prevent the creation of an independent hashing_kv.

This commit is contained in:
yangdx
2025-03-09 22:15:26 +08:00
parent e47883d872
commit bc42afe7b6
5 changed files with 30 additions and 96 deletions

View File

@@ -410,7 +410,6 @@ async def extract_entities(
_prompt,
"default",
cache_type="extract",
force_llm_cache=True,
)
if cached_return:
logger.debug(f"Found cache for {arg_hash}")
@@ -432,6 +431,7 @@ async def extract_entities(
cache_type="extract",
),
)
logger.info(f"Extract: saved cache for {arg_hash}")
return res
if history_messages: