Add verbose debug option to control detailed debug output level

• Added VERBOSE env var & CLI flag
• Implemented verbose_debug() function (see the sketch before the diff below)
• Added verbose option to splash screen
• Reduced default debug output length
• Modified LLM debug logging behavior (illustrated after the diff below)
yangdx
2025-02-17 01:38:18 +08:00
parent 9ec920661d
commit 806eadf5dc
6 changed files with 43 additions and 9 deletions
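
For context, a minimal sketch of the kind of helper the bullets above describe. This is a sketch only: the names VERBOSE and VERBOSE_DEBUG and the exact gating logic are assumptions, not taken from this diff; the real implementation lives in .utils and may differ.

import os
import logging

logger = logging.getLogger("lightrag")

# Assumed switch: read once from a VERBOSE env var; the CLI flag mentioned in
# the commit message would presumably set the same value at startup.
VERBOSE_DEBUG = os.getenv("VERBOSE", "false").lower() == "true"


def verbose_debug(msg: str, *args, **kwargs) -> None:
    """Emit a debug message only when verbose debugging is enabled.

    Callers route bulky per-query details through this helper so that the
    default DEBUG output stays short.
    """
    if VERBOSE_DEBUG:
        logger.debug(msg, *args, **kwargs)

With a helper like that in place, the call sites in the diff below simply swap logger.debug(...) for verbose_debug(...) wherever the message is per-query detail rather than a routine summary.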


@@ -24,6 +24,7 @@ from .utils import (
     CacheData,
     statistic_data,
     get_conversation_turns,
+    verbose_debug,
 )
 from .base import (
     BaseGraphStorage,
@@ -688,7 +689,7 @@ async def kg_query(
         return sys_prompt
 
     len_of_prompts = len(encode_string_by_tiktoken(query + sys_prompt))
-    logger.debug(f"[kg_query]Prompt Tokens: {len_of_prompts}")
+    verbose_debug(f"[kg_query]Prompt Tokens: {len_of_prompts}")
 
     response = await use_model_func(
         query,
@@ -977,7 +978,7 @@ async def mix_kg_vector_query(
         return sys_prompt
 
     len_of_prompts = len(encode_string_by_tiktoken(query + sys_prompt))
-    logger.debug(f"[mix_kg_vector_query]Prompt Tokens: {len_of_prompts}")
+    verbose_debug(f"[mix_kg_vector_query]Prompt Tokens: {len_of_prompts}")
 
     # 6. Generate response
     response = await use_model_func(
@@ -1807,7 +1808,7 @@ async def kg_query_with_keywords(
         return sys_prompt
 
     len_of_prompts = len(encode_string_by_tiktoken(query + sys_prompt))
-    logger.debug(f"[kg_query_with_keywords]Prompt Tokens: {len_of_prompts}")
+    verbose_debug(f"[kg_query_with_keywords]Prompt Tokens: {len_of_prompts}")
 
     response = await use_model_func(
         query,
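
The "Modified LLM debug logging behavior" and "Reduced default debug output length" bullets suggest the same switch is applied around LLM calls. A hedged caller-side sketch of that pattern, reusing the assumed helper from the sketch above (complete_with_logging and the 200-character preview are purely illustrative and not part of this diff):

async def complete_with_logging(model_func, prompt: str, **kwargs):
    # Default DEBUG output stays short: only a truncated preview of the prompt.
    logger.debug("LLM prompt preview: %s", prompt[:200])
    # The full prompt and response are logged only when verbose debug is enabled.
    verbose_debug("LLM full prompt: %s", prompt)
    response = await model_func(prompt, **kwargs)
    verbose_debug("LLM full response: %s", response)
    return response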