Fix linting
@@ -148,7 +148,7 @@ async def _handle_entity_relation_summary(
 
     # Use LLM function with cache
     summary = await use_llm_func_with_cache(
-        use_prompt,
+        use_prompt,
         use_llm_func,
         llm_response_cache=llm_response_cache,
         max_tokens=summary_max_tokens,
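For context, the wrapper called here takes the prompt text, the LLM callable, and optional cache and generation arguments (its signature appears in the third hunk below). The snippet that follows is a minimal standalone usage sketch, not part of the commit: the import path, the dummy LLM function, and the prompt text are assumptions for illustration only.

import asyncio

from lightrag.utils import use_llm_func_with_cache  # import path assumed


async def fake_llm(prompt: str, **kwargs) -> str:
    # Illustrative stand-in for a real completion function (e.g. an OpenAI wrapper).
    return f"summary of: {prompt[:40]}..."


async def main() -> None:
    summary = await use_llm_func_with_cache(
        "Summarize the entity 'Acme Corp' from the extracted descriptions.",  # input_text
        fake_llm,                  # use_llm_func
        llm_response_cache=None,   # no cache configured, so the LLM is always called
        max_tokens=500,
        cache_type="extract",
    )
    print(summary)


asyncio.run(main())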
@@ -446,7 +446,6 @@ async def extract_entities(
     pipeline_status_lock=None,
     llm_response_cache: BaseKVStorage | None = None,
 ) -> None:
-
     use_llm_func: callable = global_config["llm_model_func"]
     entity_extract_max_gleaning = global_config["entity_extract_max_gleaning"]
 
@@ -913,18 +913,18 @@ def lazy_external_import(module_name: str, class_name: str) -> Callable[..., Any
 
 
 async def use_llm_func_with_cache(
-    input_text: str,
+    input_text: str,
     use_llm_func: callable,
-    llm_response_cache: 'BaseKVStorage | None' = None,
+    llm_response_cache: "BaseKVStorage | None" = None,
     max_tokens: int = None,
     history_messages: list[dict[str, str]] = None,
-    cache_type: str = "extract"
+    cache_type: str = "extract",
 ) -> str:
     """Call LLM function with cache support
-
+
     If cache is available and enabled (determined by handle_cache based on mode),
     retrieve result from cache; otherwise call LLM function and save result to cache.
-
+
     Args:
         input_text: Input text to send to LLM
         use_llm_func: LLM function to call
@@ -932,7 +932,7 @@ async def use_llm_func_with_cache(
         max_tokens: Maximum tokens for generation
         history_messages: History messages list
         cache_type: Type of cache
-
+
     Returns:
         LLM response text
     """
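The docstring above describes a check-then-call pattern: look the prompt up in the key-value cache first, invoke the model only on a miss, and write the fresh response back. The sketch below illustrates that flow under stated assumptions: the call_with_cache name, the key derivation, and the get_by_id/upsert storage interface are illustrative stand-ins, and the real LightRAG code routes the lookup and mode check through its handle_cache helper instead.

import hashlib


async def call_with_cache(
    input_text: str,
    use_llm_func,
    kv_store=None,               # stand-in for a BaseKVStorage-like object
    cache_type: str = "extract",
    **llm_kwargs,
) -> str:
    # Derive a deterministic cache key from the cache type and the prompt text.
    key = f"{cache_type}:{hashlib.md5(input_text.encode('utf-8')).hexdigest()}"

    # Cache hit: return the stored response without calling the model.
    if kv_store is not None:
        cached = await kv_store.get_by_id(key)   # assumed async get-by-key method
        if cached is not None:
            return cached["return"]

    # Cache miss (or no cache configured): call the LLM function directly.
    result = await use_llm_func(input_text, **llm_kwargs)

    # Save the fresh response so the next identical prompt is served from cache.
    if kv_store is not None:
        await kv_store.upsert({key: {"return": result, "cache_type": cache_type}})

    return result

In the actual function, llm_response_cache is the BaseKVStorage instance threaded down from extract_entities in the second hunk.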