diff --git a/.env.example b/.env.example
index c4c5847b..f70244e5 100644
--- a/.env.example
+++ b/.env.example
@@ -18,6 +18,7 @@
 ### Logging level
 LOG_LEVEL=INFO
+VERBOSE=False
 
 ### Optional Timeout
 TIMEOUT=300
 
diff --git a/lightrag/api/lightrag_server.py b/lightrag/api/lightrag_server.py
index 97f1156f..4fe0b8ae 100644
--- a/lightrag/api/lightrag_server.py
+++ b/lightrag/api/lightrag_server.py
@@ -130,8 +130,8 @@ def get_env_value(env_key: str, default: Any, value_type: type = str) -> Any:
     if value is None:
         return default
 
-    if isinstance(value_type, bool):
-        return value.lower() in ("true", "1", "yes")
+    if value_type is bool:
+        return value.lower() in ("true", "1", "yes", "t", "on")
     try:
         return value_type(value)
     except ValueError:
@@ -233,6 +233,8 @@ def display_splash_screen(args: argparse.Namespace) -> None:
     ASCIIColors.yellow(f"{ollama_server_infos.LIGHTRAG_MODEL}")
     ASCIIColors.white("    ├─ Log Level: ", end="")
     ASCIIColors.yellow(f"{args.log_level}")
+    ASCIIColors.white("    ├─ Verbose Debug: ", end="")
+    ASCIIColors.yellow(f"{args.verbose}")
     ASCIIColors.white("    └─ Timeout: ", end="")
     ASCIIColors.yellow(f"{args.timeout if args.timeout else 'None (infinite)'}")
 
@@ -564,6 +566,13 @@ def parse_args() -> argparse.Namespace:
         help="Prefix of the namespace",
     )
 
+    parser.add_argument(
+        "--verbose",
+        action="store_true",
+        default=get_env_value("VERBOSE", False, bool),
+        help="Enable verbose debug output (default: from env or false)",
+    )
+
     args = parser.parse_args()
 
     # conver relative path to absolute path
@@ -685,6 +694,11 @@
 global_top_k = 60  # default value
 
 
 def create_app(args):
+    # Initialize verbose debug setting
+    from lightrag.utils import set_verbose_debug
+
+    set_verbose_debug(args.verbose)
+
     global global_top_k
     global_top_k = args.top_k  # save top_k from args
diff --git a/lightrag/llm/openai.py b/lightrag/llm/openai.py
index e6d00377..399e29df 100644
--- a/lightrag/llm/openai.py
+++ b/lightrag/llm/openai.py
@@ -40,7 +40,7 @@ __version__ = "1.0.0"
 __author__ = "lightrag Team"
 __status__ = "Production"
 
-
+from ..utils import verbose_debug
 import sys
 import os
 
@@ -129,8 +129,8 @@ async def openai_complete_if_cache(
     logger.debug("===== Query Input to LLM =====")
     logger.debug(f"Model: {model} Base URL: {base_url}")
     logger.debug(f"Additional kwargs: {kwargs}")
-    logger.debug(f"Query: {prompt}")
-    logger.debug(f"System prompt: {system_prompt}")
+    verbose_debug(f"Query: {prompt}")
+    verbose_debug(f"System prompt: {system_prompt}")
     # logger.debug(f"Messages: {messages}")
 
     try:
diff --git a/lightrag/llm/zhipu.py b/lightrag/llm/zhipu.py
index 9f5d9ca5..5a73f41d 100644
--- a/lightrag/llm/zhipu.py
+++ b/lightrag/llm/zhipu.py
@@ -43,6 +43,7 @@ __status__ = "Production"
 import sys
 import re
 import json
+from ..utils import verbose_debug
 
 if sys.version_info < (3, 9):
     pass
@@ -119,7 +120,7 @@ async def zhipu_complete_if_cache(
     # Add debug logging
     logger.debug("===== Query Input to LLM =====")
     logger.debug(f"Query: {prompt}")
-    logger.debug(f"System prompt: {system_prompt}")
+    verbose_debug(f"System prompt: {system_prompt}")
 
     # Remove unsupported kwargs
     kwargs = {
diff --git a/lightrag/operate.py b/lightrag/operate.py
index cc5dffe7..23764957 100644
--- a/lightrag/operate.py
+++ b/lightrag/operate.py
@@ -24,6 +24,7 @@ from .utils import (
     CacheData,
     statistic_data,
     get_conversation_turns,
+    verbose_debug,
 )
 from .base import (
     BaseGraphStorage,
@@ -688,7 +689,7 @@ async def kg_query(
        return sys_prompt
 
    len_of_prompts = len(encode_string_by_tiktoken(query + sys_prompt))
-    logger.debug(f"[kg_query]Prompt Tokens: {len_of_prompts}")
+    verbose_debug(f"[kg_query]Prompt Tokens: {len_of_prompts}")
 
    response = await use_model_func(
        query,
@@ -977,7 +978,7 @@ async def mix_kg_vector_query(
        return sys_prompt
 
    len_of_prompts = len(encode_string_by_tiktoken(query + sys_prompt))
-    logger.debug(f"[mix_kg_vector_query]Prompt Tokens: {len_of_prompts}")
+    verbose_debug(f"[mix_kg_vector_query]Prompt Tokens: {len_of_prompts}")
 
    # 6. Generate response
    response = await use_model_func(
@@ -1807,7 +1808,7 @@ async def kg_query_with_keywords(
        return sys_prompt
 
    len_of_prompts = len(encode_string_by_tiktoken(query + sys_prompt))
-    logger.debug(f"[kg_query_with_keywords]Prompt Tokens: {len_of_prompts}")
+    verbose_debug(f"[kg_query_with_keywords]Prompt Tokens: {len_of_prompts}")
 
    response = await use_model_func(
        query,
diff --git a/lightrag/utils.py b/lightrag/utils.py
index c8786e7b..5eb82f66 100644
--- a/lightrag/utils.py
+++ b/lightrag/utils.py
@@ -20,6 +20,26 @@
 import tiktoken
 
 from lightrag.prompt import PROMPTS
 
+VERBOSE_DEBUG = False
+
+
+def verbose_debug(msg: str, *args, **kwargs):
+    """Function for outputting detailed debug information.
+    When VERBOSE_DEBUG=True, outputs the complete message.
+    When VERBOSE_DEBUG=False, outputs only the first 30 characters.
+    """
+    if VERBOSE_DEBUG:
+        logger.debug(msg, *args, **kwargs)
+    else:
+        # Log only a short preview so routine DEBUG output stays compact
+        logger.debug(msg[:30], *args, **kwargs)
+
+
+def set_verbose_debug(enabled: bool):
+    """Enable or disable verbose debug output"""
+    global VERBOSE_DEBUG
+    VERBOSE_DEBUG = enabled
+
 
 class UnlimitedSemaphore:
     """A context manager that allows unlimited access."""