Fix linting errors
@@ -756,10 +756,7 @@ class LightRAG:
         return response
 
     def query_with_separate_keyword_extraction(
-        self,
-        query: str,
-        prompt: str,
-        param: QueryParam = QueryParam()
+        self, query: str, prompt: str, param: QueryParam = QueryParam()
    ):
         """
         1. Extract keywords from the 'query' using new function in operate.py.
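
Note: the hunk above only collapses the method signature onto one line, so existing callers are unaffected. A hedged usage sketch (`rag` stands in for a LightRAG instance configured elsewhere with an LLM and embedding function; the query strings are invented):

from lightrag import QueryParam

# `rag` is assumed to be an already-constructed LightRAG instance; it is not
# built in this sketch.
answer = rag.query_with_separate_keyword_extraction(
    query="What are the main themes of the story?",
    prompt="Answer in three short bullet points.",
    param=QueryParam(mode="hybrid"),
)
print(answer)
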
@@ -767,13 +764,12 @@ class LightRAG:
         """
 
         loop = always_get_an_event_loop()
-        return loop.run_until_complete(self.aquery_with_separate_keyword_extraction(query, prompt, param))
+        return loop.run_until_complete(
+            self.aquery_with_separate_keyword_extraction(query, prompt, param)
+        )
 
     async def aquery_with_separate_keyword_extraction(
-        self,
-        query: str,
-        prompt: str,
-        param: QueryParam = QueryParam()
+        self, query: str, prompt: str, param: QueryParam = QueryParam()
    ):
         """
         1. Calls extract_keywords_only to get HL/LL keywords from 'query'.
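
The wrapped `return` above is the usual sync-over-async bridge in this class: the synchronous method grabs an event loop and delegates to its `async` twin. A simplified, self-contained sketch of that pattern (the real helper is LightRAG's `always_get_an_event_loop` from its utils module; the query body here is a stand-in):

import asyncio


def always_get_an_event_loop() -> asyncio.AbstractEventLoop:
    # Simplified: reuse the current loop if one exists, otherwise create one.
    try:
        return asyncio.get_event_loop()
    except RuntimeError:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        return loop


async def aquery_with_separate_keyword_extraction(query: str, prompt: str) -> str:
    await asyncio.sleep(0)  # stand-in for keyword extraction + retrieval
    return f"[{prompt}] answer for {query!r}"


def query_with_separate_keyword_extraction(query: str, prompt: str) -> str:
    loop = always_get_an_event_loop()
    return loop.run_until_complete(
        aquery_with_separate_keyword_extraction(query, prompt)
    )


print(query_with_separate_keyword_extraction("example query", "be brief"))
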
@@ -788,15 +784,16 @@ class LightRAG:
             text=query,
             param=param,
             global_config=asdict(self),
-            hashing_kv=self.llm_response_cache or self.key_string_value_json_storage_cls(
+            hashing_kv=self.llm_response_cache
+            or self.key_string_value_json_storage_cls(
                 namespace="llm_response_cache",
                 global_config=asdict(self),
                 embedding_func=None,
-            )
+            ),
         )
 
-        param.hl_keywords=hl_keywords,
-        param.ll_keywords=ll_keywords,
+        param.hl_keywords = (hl_keywords,)
+        param.ll_keywords = (ll_keywords,)
 
         # ---------------------
         # STEP 2: Final Query Logic
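
Two things happen in this hunk: the `hashing_kv` fallback is split at the `or` operator, and the keyword assignments are reformatted. Note that the trailing comma in the old `param.hl_keywords=hl_keywords,` already made the value a one-element tuple; the formatter only makes that explicit as `(hl_keywords,)`. A standalone demonstration of that Python behaviour, independent of LightRAG:

# A trailing comma on the right-hand side of an assignment builds a one-element
# tuple, so the old and the reformatted spellings assign exactly the same value.
hl_keywords = ["character arcs", "themes"]

old_style = hl_keywords,      # spelling removed by the formatter
new_style = (hl_keywords,)    # spelling produced by the formatter

assert old_style == new_style == (["character arcs", "themes"],)
assert isinstance(old_style, tuple) and old_style[0] is hl_keywords
print(new_style)
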
@@ -817,7 +814,8 @@ class LightRAG:
                 param,
                 asdict(self),
                 hashing_kv=self.llm_response_cache
-                if self.llm_response_cache and hasattr(self.llm_response_cache, "global_config")
+                if self.llm_response_cache
+                and hasattr(self.llm_response_cache, "global_config")
                 else self.key_string_value_json_storage_cls(
                     namespace="llm_response_cache",
                     global_config=asdict(self),
@@ -832,7 +830,8 @@ class LightRAG:
                 param,
                 asdict(self),
                 hashing_kv=self.llm_response_cache
-                if self.llm_response_cache and hasattr(self.llm_response_cache, "global_config")
+                if self.llm_response_cache
+                and hasattr(self.llm_response_cache, "global_config")
                 else self.key_string_value_json_storage_cls(
                     namespace="llm_response_cache",
                     global_config=asdict(self),
@@ -850,7 +849,8 @@ class LightRAG:
                 param,
                 asdict(self),
                 hashing_kv=self.llm_response_cache
-                if self.llm_response_cache and hasattr(self.llm_response_cache, "global_config")
+                if self.llm_response_cache
+                and hasattr(self.llm_response_cache, "global_config")
                 else self.key_string_value_json_storage_cls(
                     namespace="llm_response_cache",
                     global_config=asdict(self),
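
The three hunks above are the same reflow applied to three call sites: a conditional expression used as the `hashing_kv` keyword argument is split so each clause gets its own line. A hedged, self-contained sketch of that pattern, with hypothetical stand-ins for the real storage objects:

# Hypothetical stand-ins: `ExistingCache` plays the role of
# self.llm_response_cache, `new_cache` that of
# self.key_string_value_json_storage_cls.
class ExistingCache:
    global_config = {"namespace": "llm_response_cache"}


def new_cache(namespace: str) -> dict:
    return {"namespace": namespace}


def run_query(text: str, hashing_kv) -> str:
    return f"{text!r} answered with cache {hashing_kv!r}"


llm_response_cache = None  # set to ExistingCache() to take the first branch

print(
    run_query(
        "example query",
        hashing_kv=llm_response_cache
        if llm_response_cache
        and hasattr(llm_response_cache, "global_config")
        else new_cache("llm_response_cache"),
    )
)
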
@@ -680,6 +680,7 @@ async def kg_query(
     )
     return response
 
+
 async def kg_query_with_keywords(
     query: str,
     knowledge_graph_inst: BaseGraphStorage,
@@ -717,7 +718,9 @@ async def kg_query_with_keywords(
 
     # If neither has any keywords, you could handle that logic here.
     if not hl_keywords and not ll_keywords:
-        logger.warning("No keywords found in query_param. Could default to global mode or fail.")
+        logger.warning(
+            "No keywords found in query_param. Could default to global mode or fail."
+        )
         return PROMPTS["fail_response"]
     if not ll_keywords and query_param.mode in ["local", "hybrid"]:
         logger.warning("low_level_keywords is empty, switching to global mode.")
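
The surrounding context shows how `kg_query_with_keywords` degrades when a keyword set is missing: no keywords at all yields the fail response, and an empty low-level set pushes the mode to global (the mirrored high-level case is implied by the `query_param.mode = "local"` context line at the top of the next hunk). A hedged sketch of that decision flow, with the module-level objects of operate.py stubbed out:

import logging
from typing import List, Optional

logger = logging.getLogger(__name__)
PROMPTS = {"fail_response": "fail response"}  # stand-in for the real prompt table


def resolve_mode(hl_keywords: List[str], ll_keywords: List[str], mode: str) -> Optional[str]:
    # Returning None signals "give up and answer with PROMPTS['fail_response']".
    if not hl_keywords and not ll_keywords:
        logger.warning(
            "No keywords found in query_param. Could default to global mode or fail."
        )
        return None
    if not ll_keywords and mode in ["local", "hybrid"]:
        logger.warning("low_level_keywords is empty, switching to global mode.")
        return "global"
    if not hl_keywords and mode in ["global", "hybrid"]:
        return "local"
    return mode


print(resolve_mode([], [], "hybrid"))               # None -> fail response
print(resolve_mode(["theme"], [], "hybrid"))        # "global"
print(resolve_mode([], ["protagonist"], "hybrid"))  # "local"
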
@@ -727,8 +730,16 @@ async def kg_query_with_keywords(
         query_param.mode = "local"
 
     # Flatten low-level and high-level keywords if needed
-    ll_keywords_flat = [item for sublist in ll_keywords for item in sublist] if any(isinstance(i, list) for i in ll_keywords) else ll_keywords
-    hl_keywords_flat = [item for sublist in hl_keywords for item in sublist] if any(isinstance(i, list) for i in hl_keywords) else hl_keywords
+    ll_keywords_flat = (
+        [item for sublist in ll_keywords for item in sublist]
+        if any(isinstance(i, list) for i in ll_keywords)
+        else ll_keywords
+    )
+    hl_keywords_flat = (
+        [item for sublist in hl_keywords for item in sublist]
+        if any(isinstance(i, list) for i in hl_keywords)
+        else hl_keywords
+    )
 
     # Join the flattened lists
     ll_keywords_str = ", ".join(ll_keywords_flat) if ll_keywords_flat else ""
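
The flattening expressions above only gain parentheses and line breaks; behaviour is unchanged. A self-contained demonstration of what they compute, on sample data:

# Nested keyword lists are flattened one level; already-flat lists pass through.
ll_keywords = [["pricing", "licensing"], ["support"]]
hl_keywords = ["business model", "strategy"]  # already flat

ll_keywords_flat = (
    [item for sublist in ll_keywords for item in sublist]
    if any(isinstance(i, list) for i in ll_keywords)
    else ll_keywords
)
hl_keywords_flat = (
    [item for sublist in hl_keywords for item in sublist]
    if any(isinstance(i, list) for i in hl_keywords)
    else hl_keywords
)

assert ll_keywords_flat == ["pricing", "licensing", "support"]
assert hl_keywords_flat == ["business model", "strategy"]

# Join the flattened lists, as the following context lines do.
ll_keywords_str = ", ".join(ll_keywords_flat) if ll_keywords_flat else ""
print(ll_keywords_str)  # -> pricing, licensing, support
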
@@ -803,6 +814,7 @@ async def kg_query_with_keywords(
     )
     return response
 
+
 async def extract_keywords_only(
     text: str,
     param: QueryParam,
@@ -881,6 +893,7 @@ async def extract_keywords_only(
     )
     return hl_keywords, ll_keywords
 
+
 async def _build_query_context(
     query: list,
     knowledge_graph_inst: BaseGraphStorage,