Fix linting

This commit is contained in:
yangdx
2025-04-10 03:58:04 +08:00
parent 8d858da4d0
commit 496f87a1e6
2 changed files with 7 additions and 8 deletions

View File

@@ -446,7 +446,6 @@ async def extract_entities(
pipeline_status_lock=None,
llm_response_cache: BaseKVStorage | None = None,
) -> None:
use_llm_func: callable = global_config["llm_model_func"]
entity_extract_max_gleaning = global_config["entity_extract_max_gleaning"]

View File

@@ -915,10 +915,10 @@ def lazy_external_import(module_name: str, class_name: str) -> Callable[..., Any
async def use_llm_func_with_cache(
input_text: str,
use_llm_func: callable,
llm_response_cache: 'BaseKVStorage | None' = None, llm_response_cache: "BaseKVStorage | None" = None,
max_tokens: int = None,
history_messages: list[dict[str, str]] = None,
cache_type: str = "extract" cache_type: str = "extract",
) -> str:
"""Call LLM function with cache support