Add verbose debug option to control detailed debug output level
• Added VERBOSE env var & CLI flag
• Implemented verbose_debug() function
• Added verbose option to splash screen
• Reduced default debug output length
• Modified LLM debug logging behavior
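Example of enabling the new option (the console entry point name is an assumption, not part of this diff):

    # In .env
    VERBOSE=True

    # or when launching the API server
    lightrag-server --verbose True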
@@ -18,6 +18,7 @@
 
 ### Logging level
 LOG_LEVEL=INFO
+VERBOSE=False
 
 ### Optional Timeout
 TIMEOUT=300
@@ -130,8 +130,8 @@ def get_env_value(env_key: str, default: Any, value_type: type = str) -> Any:
     if value is None:
         return default
 
-    if isinstance(value_type, bool):
-        return value.lower() in ("true", "1", "yes")
+    if value_type is bool:
+        return value.lower() in ("true", "1", "yes", "t", "on")
     try:
         return value_type(value)
     except ValueError:
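The old check was a no-op: isinstance(value_type, bool) asks whether the type object itself is an instance of bool, which is never true, so boolean env vars fell through to bool(value) and any non-empty string (including "False") parsed as True. A minimal sketch of the difference, plain Python with no project imports:

    value_type = bool
    print(isinstance(value_type, bool))  # False: the class `bool` is not a bool instance
    print(value_type is bool)            # True: identity check against the type itself

    # With the fix, boolean env values are parsed explicitly and case-insensitively:
    for raw in ("True", "1", "on", "False", "off"):
        print(raw, "->", raw.lower() in ("true", "1", "yes", "t", "on"))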
@@ -233,6 +233,8 @@ def display_splash_screen(args: argparse.Namespace) -> None:
     ASCIIColors.yellow(f"{ollama_server_infos.LIGHTRAG_MODEL}")
     ASCIIColors.white(" ├─ Log Level: ", end="")
     ASCIIColors.yellow(f"{args.log_level}")
+    ASCIIColors.white(" ├─ Verbose Debug: ", end="")
+    ASCIIColors.yellow(f"{args.verbose}")
     ASCIIColors.white(" └─ Timeout: ", end="")
     ASCIIColors.yellow(f"{args.timeout if args.timeout else 'None (infinite)'}")
 
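With the two added lines, the settings block of the splash screen renders roughly as follows (values illustrative):

     ├─ Log Level: INFO
     ├─ Verbose Debug: False
     └─ Timeout: 300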
@@ -564,6 +566,13 @@ def parse_args() -> argparse.Namespace:
         help="Prefix of the namespace",
     )
 
+    parser.add_argument(
+        "--verbose",
+        type=bool,
+        default=get_env_value("VERBOSE", False, bool),
+        help="Verbose debug output (default: from env or false)",
+    )
+
     args = parser.parse_args()
 
     # convert relative path to absolute path
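One caveat with the flag as committed: argparse applies type=bool to the raw argument string, and bool() of any non-empty string is True, so "--verbose false" on the command line still yields True; only the env-derived default goes through get_env_value's explicit string parsing. A hypothetical converter (not part of this commit) that would make the CLI value behave like the env var:

    import argparse

    def str2bool(value: str) -> bool:
        # Mirror the truthy spellings accepted by get_env_value
        return value.lower() in ("true", "1", "yes", "t", "on")

    parser = argparse.ArgumentParser()
    parser.add_argument("--verbose", type=str2bool, default=False)
    print(parser.parse_args(["--verbose", "false"]).verbose)  # False, as intended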
@@ -685,6 +694,11 @@ global_top_k = 60 # default value
 
 
 def create_app(args):
+    # Initialize verbose debug setting
+    from lightrag.utils import set_verbose_debug
+
+    set_verbose_debug(args.verbose)
+
     global global_top_k
     global_top_k = args.top_k  # save top_k from args
 
@@ -40,7 +40,7 @@ __version__ = "1.0.0"
 __author__ = "lightrag Team"
 __status__ = "Production"
 
+from ..utils import verbose_debug
 import sys
 import os
 
@@ -129,8 +129,8 @@ async def openai_complete_if_cache(
     logger.debug("===== Query Input to LLM =====")
     logger.debug(f"Model: {model} Base URL: {base_url}")
     logger.debug(f"Additional kwargs: {kwargs}")
-    logger.debug(f"Query: {prompt}")
-    logger.debug(f"System prompt: {system_prompt}")
+    verbose_debug(f"Query: {prompt}")
+    verbose_debug(f"System prompt: {system_prompt}")
     # logger.debug(f"Messages: {messages}")
 
     try:
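Net effect in this binding: the model, base URL, and kwargs lines stay in the regular DEBUG output, while the potentially multi-kilobyte prompt and system prompt are emitted only when verbose debug is enabled (see the verbose_debug definition in the utils hunk at the end of this diff). The zhipu binding below receives the same treatment.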
@@ -43,6 +43,7 @@ __status__ = "Production"
 import sys
 import re
 import json
+from ..utils import verbose_debug
 
 if sys.version_info < (3, 9):
     pass
@@ -119,7 +120,7 @@ async def zhipu_complete_if_cache(
     # Add debug logging
     logger.debug("===== Query Input to LLM =====")
     logger.debug(f"Query: {prompt}")
-    logger.debug(f"System prompt: {system_prompt}")
+    verbose_debug(f"System prompt: {system_prompt}")
 
     # Remove unsupported kwargs
     kwargs = {
@@ -24,6 +24,7 @@ from .utils import (
     CacheData,
     statistic_data,
     get_conversation_turns,
+    verbose_debug,
 )
 from .base import (
     BaseGraphStorage,
@@ -688,7 +689,7 @@ async def kg_query(
         return sys_prompt
 
     len_of_prompts = len(encode_string_by_tiktoken(query + sys_prompt))
-    logger.debug(f"[kg_query]Prompt Tokens: {len_of_prompts}")
+    verbose_debug(f"[kg_query]Prompt Tokens: {len_of_prompts}")
 
     response = await use_model_func(
         query,
@@ -977,7 +978,7 @@ async def mix_kg_vector_query(
         return sys_prompt
 
     len_of_prompts = len(encode_string_by_tiktoken(query + sys_prompt))
-    logger.debug(f"[mix_kg_vector_query]Prompt Tokens: {len_of_prompts}")
+    verbose_debug(f"[mix_kg_vector_query]Prompt Tokens: {len_of_prompts}")
 
     # 6. Generate response
     response = await use_model_func(
@@ -1807,7 +1808,7 @@ async def kg_query_with_keywords(
         return sys_prompt
 
     len_of_prompts = len(encode_string_by_tiktoken(query + sys_prompt))
-    logger.debug(f"[kg_query_with_keywords]Prompt Tokens: {len_of_prompts}")
+    verbose_debug(f"[kg_query_with_keywords]Prompt Tokens: {len_of_prompts}")
 
     response = await use_model_func(
         query,
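The same one-line change appears in all three query paths (kg_query, mix_kg_vector_query, kg_query_with_keywords): prompt token counts are now reported only in verbose mode. Roughly what the gated line computes, as a standalone sketch (the cl100k_base encoding is an assumption about encode_string_by_tiktoken's default):

    import tiktoken

    query = "example query"
    sys_prompt = "example system prompt"
    enc = tiktoken.get_encoding("cl100k_base")
    len_of_prompts = len(enc.encode(query + sys_prompt))
    print(f"[kg_query]Prompt Tokens: {len_of_prompts}")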
@@ -20,6 +20,23 @@ import tiktoken
 
 from lightrag.prompt import PROMPTS
 
+VERBOSE_DEBUG = False
+
+
+def verbose_debug(msg: str, *args, **kwargs):
+    """Function for outputting detailed debug information.
+
+    When VERBOSE_DEBUG is True, the complete message is logged;
+    when False, the message is suppressed.
+    """
+    if VERBOSE_DEBUG:
+        logger.debug(msg, *args, **kwargs)
+
+
+def set_verbose_debug(enabled: bool):
+    """Enable or disable verbose debug output"""
+    global VERBOSE_DEBUG
+    VERBOSE_DEBUG = enabled
+
+
 class UnlimitedSemaphore:
     """A context manager that allows unlimited access."""
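Putting the hunks together, a minimal end-to-end sketch of the new switch (assumes a logger configured at DEBUG level; the import path matches the hunks above):

    import logging

    from lightrag.utils import set_verbose_debug, verbose_debug

    logging.basicConfig(level=logging.DEBUG)

    set_verbose_debug(False)                   # default: VERBOSE=False
    verbose_debug("Query: <multi-KB prompt>")  # suppressed
    set_verbose_debug(True)                    # VERBOSE=True or --verbose
    verbose_debug("Query: <multi-KB prompt>")  # emitted via logger.debug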