diff --git a/README.md b/README.md
index 3ccdef08..92a32703 100644
--- a/README.md
+++ b/README.md
@@ -171,7 +171,7 @@ rag = LightRAG(working_dir=WORKING_DIR)
 
 # Create query parameters
 query_param = QueryParam(
-    mode="hybrid",  # or other mode: "local", "global", "hybrid"
+    mode="hybrid",  # or other mode: "local", "global", "hybrid", "mix" and "naive"
 )
 
 # Example 1: Using the default system prompt
@@ -184,11 +184,20 @@ print(response_default)
 # Example 2: Using a custom prompt
 custom_prompt = """
 You are an expert assistant in environmental science. Provide detailed and structured answers with examples.
+---Conversation History---
+{history}
+
+---Knowledge Base---
+{context_data}
+
+---Response Rules---
+
+- Target format and length: {response_type}
 """
 response_custom = rag.query(
     "What are the primary benefits of renewable energy?",
     param=query_param,
-    prompt=custom_prompt  # Pass the custom prompt
+    system_prompt=custom_prompt  # Pass the custom prompt
 )
 print(response_custom)
 ```
diff --git a/lightrag/lightrag.py b/lightrag/lightrag.py
index 6220406b..bf1c02d2 100644
--- a/lightrag/lightrag.py
+++ b/lightrag/lightrag.py
@@ -984,7 +984,10 @@ class LightRAG:
         await self._insert_done()
 
     def query(
-        self, query: str, param: QueryParam = QueryParam(), prompt: str | None = None
+        self,
+        query: str,
+        param: QueryParam = QueryParam(),
+        system_prompt: str | None = None,
     ) -> str | Iterator[str]:
         """
         Perform a sync query.
@@ -999,13 +1002,13 @@ class LightRAG:
         """
         loop = always_get_an_event_loop()
 
-        return loop.run_until_complete(self.aquery(query, param, prompt))  # type: ignore
+        return loop.run_until_complete(self.aquery(query, param, system_prompt))  # type: ignore
 
     async def aquery(
         self,
         query: str,
         param: QueryParam = QueryParam(),
-        prompt: str | None = None,
+        system_prompt: str | None = None,
     ) -> str | AsyncIterator[str]:
         """
         Perform a async query.
@@ -1037,7 +1040,7 @@ class LightRAG:
                     global_config=asdict(self),
                     embedding_func=self.embedding_func,
                 ),
-                prompt=prompt,
+                system_prompt=system_prompt,
             )
         elif param.mode == "naive":
             response = await naive_query(
@@ -1056,6 +1059,7 @@ class LightRAG:
                     global_config=asdict(self),
                     embedding_func=self.embedding_func,
                 ),
+                system_prompt=system_prompt,
             )
         elif param.mode == "mix":
             response = await mix_kg_vector_query(
@@ -1077,6 +1081,7 @@ class LightRAG:
                     global_config=asdict(self),
                     embedding_func=self.embedding_func,
                 ),
+                system_prompt=system_prompt,
             )
         else:
             raise ValueError(f"Unknown mode {param.mode}")
diff --git a/lightrag/operate.py b/lightrag/operate.py
index 678194bd..727c65f7 100644
--- a/lightrag/operate.py
+++ b/lightrag/operate.py
@@ -613,7 +613,7 @@ async def kg_query(
     query_param: QueryParam,
     global_config: dict[str, str],
     hashing_kv: BaseKVStorage | None = None,
-    prompt: str | None = None,
+    system_prompt: str | None = None,
 ) -> str:
     # Handle cache
     use_model_func = global_config["llm_model_func"]
@@ -677,7 +677,7 @@ async def kg_query(
             query_param.conversation_history, query_param.history_turns
         )
 
-    sys_prompt_temp = prompt if prompt else PROMPTS["rag_response"]
+    sys_prompt_temp = system_prompt if system_prompt else PROMPTS["rag_response"]
     sys_prompt = sys_prompt_temp.format(
         context_data=context,
         response_type=query_param.response_type,
@@ -828,6 +828,7 @@ async def mix_kg_vector_query(
     query_param: QueryParam,
     global_config: dict[str, str],
     hashing_kv: BaseKVStorage | None = None,
+    system_prompt: str | None = None,
 ) -> str | AsyncIterator[str]:
     """
     Hybrid retrieval implementation combining knowledge graph and vector search.
@@ -962,15 +963,19 @@ async def mix_kg_vector_query(
         return {"kg_context": kg_context, "vector_context": vector_context}
 
     # 5. Construct hybrid prompt
-    sys_prompt = PROMPTS["mix_rag_response"].format(
-        kg_context=kg_context
-        if kg_context
-        else "No relevant knowledge graph information found",
-        vector_context=vector_context
-        if vector_context
-        else "No relevant text information found",
-        response_type=query_param.response_type,
-        history=history_context,
+    sys_prompt = (
+        system_prompt
+        if system_prompt
+        else PROMPTS["mix_rag_response"].format(
+            kg_context=kg_context
+            if kg_context
+            else "No relevant knowledge graph information found",
+            vector_context=vector_context
+            if vector_context
+            else "No relevant text information found",
+            response_type=query_param.response_type,
+            history=history_context,
+        )
     )
 
     if query_param.only_need_prompt:
@@ -1599,6 +1604,7 @@ async def naive_query(
     query_param: QueryParam,
     global_config: dict[str, str],
     hashing_kv: BaseKVStorage | None = None,
+    system_prompt: str | None = None,
 ) -> str | AsyncIterator[str]:
     # Handle cache
     use_model_func = global_config["llm_model_func"]
@@ -1651,7 +1657,7 @@ async def naive_query(
             query_param.conversation_history, query_param.history_turns
        )
 
-    sys_prompt_temp = PROMPTS["naive_rag_response"]
+    sys_prompt_temp = system_prompt if system_prompt else PROMPTS["naive_rag_response"]
     sys_prompt = sys_prompt_temp.format(
         content_data=section,
         response_type=query_param.response_type,
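
Taken together, these hunks let callers pass their own system prompt in every query mode. Below is a minimal usage sketch (not part of the patch) assuming an already-configured `LightRAG` instance named `rag`; the placeholder names follow the `.format()` calls visible above: `{context_data}`, `{response_type}`, `{history}` for local/global/hybrid, `{content_data}` for naive, while mix mode uses the custom prompt verbatim without formatting.

```python
from lightrag import QueryParam

# local / global / hybrid: the custom template is rendered with str.format(),
# so it may reference {context_data}, {response_type} and {history}.
kg_prompt = """You are an expert assistant in environmental science.

---Conversation History---
{history}

---Knowledge Base---
{context_data}

---Response Rules---

- Target format and length: {response_type}
"""

answer = rag.query(  # `rag` is assumed to be an initialized LightRAG instance
    "What are the primary benefits of renewable energy?",
    param=QueryParam(mode="hybrid"),
    system_prompt=kg_prompt,
)

# naive mode formats the template with {content_data} instead of {context_data}.
naive_prompt = "Answer only from the retrieved chunks:\n{content_data}\n\nFormat: {response_type}"
answer = rag.query(
    "Summarize the indexed documents.",
    param=QueryParam(mode="naive"),
    system_prompt=naive_prompt,
)

# mix mode uses a custom system_prompt verbatim in this change (no .format() call),
# so placeholders such as {kg_context} or {vector_context} are not substituted.
```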