diff --git a/lightrag/api/lightrag_server.py b/lightrag/api/lightrag_server.py
index 11c333ec..533be818 100644
--- a/lightrag/api/lightrag_server.py
+++ b/lightrag/api/lightrag_server.py
@@ -725,10 +725,7 @@ def create_app(args):
         from lightrag.llm.ollama import ollama_model_complete, ollama_embed
     if args.llm_binding == "openai" or args.embedding_binding == "openai":
         from lightrag.llm.openai import openai_complete_if_cache, openai_embed
-    if (
-        args.llm_binding == "azure_openai"
-        or args.embedding_binding == "azure_openai"
-    ):
+    if args.llm_binding == "azure_openai" or args.embedding_binding == "azure_openai":
         from lightrag.llm.azure_openai import (
             azure_openai_complete_if_cache,
             azure_openai_embed,
diff --git a/lightrag/operate.py b/lightrag/operate.py
index 74ffa462..154b810e 100644
--- a/lightrag/operate.py
+++ b/lightrag/operate.py
@@ -1543,7 +1543,9 @@ async def naive_query(
 
     sys_prompt_temp = PROMPTS["naive_rag_response"]
     sys_prompt = sys_prompt_temp.format(
-        content_data=section, response_type=query_param.response_type, history=history_context
+        content_data=section,
+        response_type=query_param.response_type,
+        history=history_context,
     )
 
     if query_param.only_need_prompt: