Fix bugs: pass openai_embed as the demo's embedding_func and use query_param.mode (instead of the hard-coded "default") for the naive_query cache lookup
@@ -1,7 +1,7 @@
 import os
 
 from lightrag import LightRAG, QueryParam
-from lightrag.llm.openai import gpt_4o_mini_complete
+from lightrag.llm.openai import gpt_4o_mini_complete, openai_embed
 
 WORKING_DIR = "./dickens"
 
@@ -10,6 +10,7 @@ if not os.path.exists(WORKING_DIR):
 
 rag = LightRAG(
     working_dir=WORKING_DIR,
+    embedding_func=openai_embed,
     llm_model_func=gpt_4o_mini_complete,
     # llm_model_func=gpt_4o_complete
 )
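For context, a minimal way to drive the patched demo end to end (not part of this commit; it assumes the synchronous insert/query wrappers used in the LightRAG example scripts, and "./book.txt" is a placeholder input file):

    import os

    from lightrag import LightRAG, QueryParam
    from lightrag.llm.openai import gpt_4o_mini_complete, openai_embed

    WORKING_DIR = "./dickens"
    if not os.path.exists(WORKING_DIR):
        os.mkdir(WORKING_DIR)

    rag = LightRAG(
        working_dir=WORKING_DIR,
        embedding_func=openai_embed,          # embedding function wired in by this commit
        llm_model_func=gpt_4o_mini_complete,
    )

    with open("./book.txt", "r", encoding="utf-8") as f:
        rag.insert(f.read())                  # index the document once

    # Naive mode goes through the cache path fixed in the next hunk.
    print(rag.query("What are the top themes in this story?",
                    param=QueryParam(mode="naive")))

The commit's second hunk, inside naive_query, corrects the mode passed to the cache lookup: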
@@ -1504,7 +1504,7 @@ async def naive_query(
     use_model_func = global_config["llm_model_func"]
     args_hash = compute_args_hash(query_param.mode, query, cache_type="query")
     cached_response, quantized, min_val, max_val = await handle_cache(
-        hashing_kv, args_hash, query, "default", cache_type="query"
+        hashing_kv, args_hash, query, query_param.mode, cache_type="query"
     )
     if cached_response is not None:
         return cached_response
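The effect of that one-argument change, sketched with the rag instance from the demo above (this assumes responses are written back to the cache under the same mode key, which is not visible in this diff):

    from lightrag import QueryParam

    question = "What are the top themes in this story?"
    naive = QueryParam(mode="naive")

    first = rag.query(question, param=naive)   # answered by the model, then cached
    second = rag.query(question, param=naive)  # looked up with mode="naive" instead of
                                               # the old hard-coded "default" key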