Fix cache persistence bugs

Author: yangdx
Date:   2025-04-16 01:24:59 +08:00
Parent: ca955cee5d
Commit: 051e632ab3

2 changed files with 89 additions and 83 deletions


```diff
@@ -859,6 +859,7 @@ async def kg_query(
         .strip()
     )
+    if hashing_kv.global_config.get("enable_llm_cache"):
         # Save to cache
         await save_to_cache(
             hashing_kv,
@@ -873,6 +874,7 @@ async def kg_query(
                 cache_type="query",
             ),
         )
     return response
@@ -989,6 +991,7 @@ async def extract_keywords_only(
         "high_level_keywords": hl_keywords,
         "low_level_keywords": ll_keywords,
     }
+    if hashing_kv.global_config.get("enable_llm_cache"):
         await save_to_cache(
             hashing_kv,
             CacheData(
@@ -1002,6 +1005,7 @@ async def extract_keywords_only(
                 cache_type="keywords",
             ),
         )
     return hl_keywords, ll_keywords
@@ -1205,6 +1209,7 @@ async def mix_kg_vector_query(
         .strip()
     )
+    if hashing_kv.global_config.get("enable_llm_cache"):
         # 7. Save cache - Only cache after collecting complete response
         await save_to_cache(
             hashing_kv,
@@ -1973,6 +1978,7 @@ async def naive_query(
         .strip()
     )
+    if hashing_kv.global_config.get("enable_llm_cache"):
         # Save to cache
         await save_to_cache(
             hashing_kv,
@@ -2121,6 +2127,7 @@ async def kg_query_with_keywords(
         .strip()
     )
+    if hashing_kv.global_config.get("enable_llm_cache"):
         # 7. Save cache - Only cache after collecting complete response
         await save_to_cache(
             hashing_kv,
```
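Every hunk in this first file makes the same move: the `enable_llm_cache` check now lives at the call site, so each query path (`kg_query`, `extract_keywords_only`, `mix_kg_vector_query`, `naive_query`, `kg_query_with_keywords`) decides for itself whether to persist the response, instead of relying on `save_to_cache` to silently bail out. A minimal sketch of the pattern, with stub `CacheData` and `save_to_cache` stand-ins (the field set and storage shape here are illustrative, not the project's actual definitions):

```python
from dataclasses import dataclass
from typing import Any

@dataclass
class CacheData:
    # Illustrative subset of the fields visible in the diff.
    args_hash: str
    content: str
    prompt: str
    mode: str = "default"
    cache_type: str = "query"

async def save_to_cache(hashing_kv: Any, cache_data: CacheData) -> None:
    # Stand-in for the real helper; after this commit it no longer
    # checks enable_llm_cache itself (see the second file below).
    if hashing_kv is None or not cache_data.content:
        return
    await hashing_kv.upsert({cache_data.mode: {cache_data.args_hash: cache_data.content}})

async def run_query(query: str, args_hash: str, response: str, hashing_kv: Any) -> str:
    # The pattern added at every call site in this file: the caller,
    # not save_to_cache, gates on the config flag before persisting.
    if hashing_kv.global_config.get("enable_llm_cache"):
        await save_to_cache(
            hashing_kv,
            CacheData(args_hash=args_hash, content=response,
                      prompt=query, cache_type="query"),
        )
    return response
```

Gating at the call site keeps `save_to_cache` to a single responsibility (validate and persist), and lets each path be controlled by its own flag, which is exactly what the entity-extraction path in the second file does.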


```diff
@@ -731,9 +731,6 @@ async def save_to_cache(hashing_kv, cache_data: CacheData):
         hashing_kv: The key-value storage for caching
         cache_data: The cache data to save
     """
-    if not hashing_kv.global_config.get("enable_llm_cache"):
-        return
     # Skip if storage is None or content is a streaming response
     if hashing_kv is None or not cache_data.content:
         return
@@ -776,6 +773,8 @@ async def save_to_cache(hashing_kv, cache_data: CacheData):
         "original_prompt": cache_data.prompt,
     }
+    logger.info(f" == LLM cache == saving {cache_data.mode}: {cache_data.args_hash}")
     # Only upsert if there's actual new content
     await hashing_kv.upsert({cache_data.mode: mode_cache})
```
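With the guard removed, `save_to_cache` no longer consults `enable_llm_cache` at all; its remaining checks are purely about validity (missing storage, empty or streaming content), and the per-save log line now lives here rather than at each call site. A sketch of the helper's contract after the change, reusing the `CacheData` stub above; the `"return"` and `"cache_type"` record keys and the logger name are assumptions, since the diff only shows `"original_prompt"`:

```python
import logging

logger = logging.getLogger("llm_cache")  # assumed logger name

async def save_to_cache(hashing_kv, cache_data) -> None:
    # No enable_llm_cache check here anymore: callers own that decision,
    # so a direct call always persists (subject to the validity checks).
    # Skip if storage is None or content is a streaming response
    if hashing_kv is None or not cache_data.content:
        return
    mode_cache = {
        cache_data.args_hash: {
            "return": cache_data.content,          # assumed key names
            "cache_type": cache_data.cache_type,
            "original_prompt": cache_data.prompt,  # shown in the diff
        }
    }
    logger.info(f" == LLM cache == saving {cache_data.mode}: {cache_data.args_hash}")
    # Only upsert if there's actual new content
    await hashing_kv.upsert({cache_data.mode: mode_cache})
```

Centralizing the log line here also explains why the duplicate `logger.info` disappears from `use_llm_func_with_cache` in the next hunk.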
```diff
@@ -1314,8 +1313,7 @@ async def use_llm_func_with_cache(
         res: str = await use_llm_func(input_text, **kwargs)
-        # Save to cache
-        logger.info(f" == LLM cache == saving {arg_hash}")
+        if llm_response_cache.global_config.get("enable_llm_cache_for_entity_extract"):
             await save_to_cache(
                 llm_response_cache,
                 CacheData(
@@ -1325,6 +1323,7 @@ async def use_llm_func_with_cache(
                     cache_type=cache_type,
                 ),
             )
         return res
     # When cache is disabled, directly call LLM
```
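These last two hunks give entity extraction its own switch: caching of extraction-time LLM calls is gated by `enable_llm_cache_for_entity_extract` rather than the query-path `enable_llm_cache`, and the local log line goes away now that `save_to_cache` logs. A sketch of the caller, reusing the `CacheData` and `save_to_cache` stubs from the first sketch (`use_llm_func`, `arg_hash`, and `cache_type` mirror the diff's names; everything else is illustrative):

```python
async def use_llm_func_with_cache_sketch(
    input_text: str,
    use_llm_func,          # async LLM callable, as in the diff
    llm_response_cache,    # KV storage object carrying global_config
    arg_hash: str,
    cache_type: str,
) -> str:
    res: str = await use_llm_func(input_text)
    # Extraction caching is gated by its own flag, independent of the
    # query-path enable_llm_cache switch.
    if llm_response_cache.global_config.get("enable_llm_cache_for_entity_extract"):
        await save_to_cache(
            llm_response_cache,
            CacheData(args_hash=arg_hash, content=res,
                      prompt=input_text, cache_type=cache_type),
        )
    return res
```

Two independent flags mean a deployment can cache query responses while re-running extraction-time LLM calls fresh (or vice versa) without touching the other path.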