From 0c3b7541081f98b254699c5b7da313663119b6d0 Mon Sep 17 00:00:00 2001
From: zrguo
Date: Tue, 11 Feb 2025 11:42:46 +0800
Subject: [PATCH] Fix bugs

Two fixes:
- examples/lightrag_openai_demo.py: pass embedding_func=openai_embed when
  constructing LightRAG, so the demo has an embedding function.
- lightrag/operate.py: key the naive_query cache lookup on query_param.mode
  instead of the hard-coded "default".

---
 examples/lightrag_openai_demo.py | 3 ++-
 lightrag/operate.py              | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/examples/lightrag_openai_demo.py b/examples/lightrag_openai_demo.py
index 7a43a710..c5393fc8 100644
--- a/examples/lightrag_openai_demo.py
+++ b/examples/lightrag_openai_demo.py
@@ -1,7 +1,7 @@
 import os
 
 from lightrag import LightRAG, QueryParam
-from lightrag.llm.openai import gpt_4o_mini_complete
+from lightrag.llm.openai import gpt_4o_mini_complete, openai_embed
 
 WORKING_DIR = "./dickens"
 
@@ -10,6 +10,7 @@ if not os.path.exists(WORKING_DIR):
 
 rag = LightRAG(
     working_dir=WORKING_DIR,
+    embedding_func=openai_embed,
     llm_model_func=gpt_4o_mini_complete,
     # llm_model_func=gpt_4o_complete
 )
diff --git a/lightrag/operate.py b/lightrag/operate.py
index 811b4194..db7f59a5 100644
--- a/lightrag/operate.py
+++ b/lightrag/operate.py
@@ -1504,7 +1504,7 @@ async def naive_query(
     use_model_func = global_config["llm_model_func"]
     args_hash = compute_args_hash(query_param.mode, query, cache_type="query")
     cached_response, quantized, min_val, max_val = await handle_cache(
-        hashing_kv, args_hash, query, "default", cache_type="query"
+        hashing_kv, args_hash, query, query_param.mode, cache_type="query"
    )
     if cached_response is not None:
         return cached_response
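
For reference, a minimal end-to-end sketch of the fixed demo, assuming the
usual insert/query flow from the LightRAG examples. The input file
"./book.txt" and the query string are placeholders, and mode="naive" is
chosen because it exercises the cache-key fix in naive_query:

    import os

    from lightrag import LightRAG, QueryParam
    from lightrag.llm.openai import gpt_4o_mini_complete, openai_embed

    WORKING_DIR = "./dickens"

    if not os.path.exists(WORKING_DIR):
        os.mkdir(WORKING_DIR)

    rag = LightRAG(
        working_dir=WORKING_DIR,
        embedding_func=openai_embed,          # the argument this patch adds
        llm_model_func=gpt_4o_mini_complete,
    )

    # Hypothetical input file; any plain-text document works.
    with open("./book.txt", encoding="utf-8") as f:
        rag.insert(f.read())

    # mode="naive" routes through naive_query(), whose cache entries are now
    # keyed by query_param.mode rather than the hard-coded "default".
    print(rag.query("What are the top themes in this story?",
                    param=QueryParam(mode="naive")))

Keying the cache on query_param.mode keeps a response cached under one mode
(e.g. "naive") from being served for the same query text issued in another
mode.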