fix(llm): fix hashing_kv initialization

- hybrid mode's dependency on hashing_kv goes beyond global_config, so simply reuse the llm_response_cache initialization structure as the fallback
Author: GG
Date:   2024-12-17 16:44:42 +08:00
parent a3bf3a58f0
commit 2d048b5eb0


@@ -252,7 +252,14 @@ class LightRAG:
         self.llm_model_func = limit_async_func_call(self.llm_model_max_async)(
             partial(
                 self.llm_model_func,
-                hashing_kv=self.llm_response_cache,
+                hashing_kv=self.llm_response_cache
+                if self.llm_response_cache
+                and hasattr(self.llm_response_cache, "global_config")
+                else self.key_string_value_json_storage_cls(
+                    namespace="llm_response_cache",
+                    global_config=asdict(self),
+                    embedding_func=None,
+                ),
                 **self.llm_model_kwargs,
             )
         )
@@ -515,7 +522,14 @@ class LightRAG:
                 self.text_chunks,
                 param,
                 asdict(self),
-                hashing_kv=self.llm_response_cache,
+                hashing_kv=self.llm_response_cache
+                if self.llm_response_cache
+                and hasattr(self.llm_response_cache, "global_config")
+                else self.key_string_value_json_storage_cls(
+                    namespace="llm_response_cache",
+                    global_config=asdict(self),
+                    embedding_func=None,
+                ),
             )
         elif param.mode == "naive":
             response = await naive_query(
@@ -524,7 +538,14 @@ class LightRAG:
                 self.text_chunks,
                 param,
                 asdict(self),
-                hashing_kv=self.llm_response_cache,
+                hashing_kv=self.llm_response_cache
+                if self.llm_response_cache
+                and hasattr(self.llm_response_cache, "global_config")
+                else self.key_string_value_json_storage_cls(
+                    namespace="llm_response_cache",
+                    global_config=asdict(self),
+                    embedding_func=None,
+                ),
             )
         else:
             raise ValueError(f"Unknown mode {param.mode}")
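
All three hunks repeat the same guarded fallback. The sketch below isolates that pattern outside the diff; the stand-in RagStub dataclass, the helper name resolve_hashing_kv, and the use of a plain dict in place of key_string_value_json_storage_cls are assumptions for illustration only, not code from this commit.

from dataclasses import asdict, dataclass
from typing import Any, Callable, Optional


@dataclass
class RagStub:
    """Hypothetical stand-in for the LightRAG fields this diff touches."""

    llm_response_cache: Optional[Any] = None
    # In LightRAG this is a KV storage class; a plain dict stands in here.
    key_string_value_json_storage_cls: Callable[..., Any] = dict

    def resolve_hashing_kv(self) -> Any:
        """Mirror of the inlined conditional added by the commit: reuse
        llm_response_cache only when it exists and already carries a
        global_config; otherwise build a fresh llm_response_cache-style
        store so hybrid/naive queries always receive a usable hashing_kv."""
        if self.llm_response_cache is not None and hasattr(
            self.llm_response_cache, "global_config"
        ):
            return self.llm_response_cache
        return self.key_string_value_json_storage_cls(
            namespace="llm_response_cache",
            global_config=asdict(self),
            embedding_func=None,
        )


if __name__ == "__main__":
    # No cache configured: the fallback path builds a fresh store.
    print(RagStub().resolve_hashing_kv()["namespace"])  # -> llm_response_cache

The diff keeps this logic inline as a conditional expression at each call site rather than factoring it into a helper, which preserves the existing call structure at the cost of repeating the fallback three times.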