diff --git a/lightrag/llm.py b/lightrag/llm.py
index 53626b76..72af880e 100644
--- a/lightrag/llm.py
+++ b/lightrag/llm.py
@@ -616,6 +616,7 @@ async def jina_embedding(
     data_list = await fetch_data(url, headers, data)
     return np.array([dp["embedding"] for dp in data_list])
 
+
 @wrap_embedding_func_with_attrs(embedding_dim=2048, max_token_size=512)
 @retry(
     stop=stop_after_attempt(3),
diff --git a/lightrag/operate.py b/lightrag/operate.py
index 72734867..468f4b2f 100644
--- a/lightrag/operate.py
+++ b/lightrag/operate.py
@@ -1103,7 +1103,6 @@ async def naive_query(
     response = await use_model_func(
         query,
         system_prompt=sys_prompt,
-        mode=query_param.mode,
     )
 
     if len(response) > len(sys_prompt):
diff --git a/lightrag/utils.py b/lightrag/utils.py
index 49a7b498..32d5c87f 100644
--- a/lightrag/utils.py
+++ b/lightrag/utils.py
@@ -383,7 +383,7 @@ async def get_best_cached_response(
         except Exception as e:  # Catch all possible exceptions
             logger.warning(f"LLM similarity check failed: {e}")
             return None  # Return None directly when LLM check fails
-    
+
     prompt_display = (
         best_prompt[:50] + "..." if len(best_prompt) > 50 else best_prompt
     )