From 491c78dac1d5cb7b4a5158c815ee0abaff240934 Mon Sep 17 00:00:00 2001
From: yangdx
Date: Fri, 28 Mar 2025 21:33:59 +0800
Subject: [PATCH] Improve OpenAI LLM logging with more detailed debug
 information

---
 lightrag/llm/openai.py | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/lightrag/llm/openai.py b/lightrag/llm/openai.py
index 772e77d5..1fbe3d64 100644
--- a/lightrag/llm/openai.py
+++ b/lightrag/llm/openai.py
@@ -90,11 +90,13 @@ async def openai_complete_if_cache(
     messages.extend(history_messages)
     messages.append({"role": "user", "content": prompt})
 
-    logger.debug("===== Sending Query to LLM =====")
+    logger.debug("===== Entering func of LLM =====")
     logger.debug(f"Model: {model} Base URL: {base_url}")
     logger.debug(f"Additional kwargs: {kwargs}")
-    verbose_debug(f"Query: {prompt}")
+    logger.debug(f"Num of history messages: {len(history_messages)}")
     verbose_debug(f"System prompt: {system_prompt}")
+    verbose_debug(f"Query: {prompt}")
+    logger.debug("===== Sending Query to LLM =====")
 
     try:
         if "response_format" in kwargs:
@@ -163,6 +165,9 @@ async def openai_complete_if_cache(
                 "total_tokens": getattr(response.usage, "total_tokens", 0),
             }
             token_tracker.add_usage(token_counts)
+
+        logger.debug(f"Response content len: {len(content)}")
+        verbose_debug(f"Response: {response}")
 
         return content
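
Note: the split between logger.debug and verbose_debug in this patch is what keeps routine DEBUG output readable. Cheap metadata (model, base URL, kwargs, history length, response length) always goes through logger.debug, while potentially large payloads (system prompt, query, raw response) go through verbose_debug. Below is a minimal sketch of how such a gate can be implemented; the VERBOSE_DEBUG environment toggle and the truncation length are illustrative assumptions, not the exact helper shipped in lightrag.utils.

    import logging
    import os

    logger = logging.getLogger("lightrag")

    # Illustrative assumption: a single env var toggles full-payload logging.
    VERBOSE_DEBUG = os.getenv("VERBOSE_DEBUG", "false").lower() == "true"


    def verbose_debug(msg: str) -> None:
        """Log the full message only in verbose mode; otherwise log a short preview."""
        if VERBOSE_DEBUG:
            logger.debug(msg)
        else:
            # Truncate large payloads (prompts, raw responses) so DEBUG logs stay readable.
            preview_len = 100  # illustrative limit
            logger.debug(msg if len(msg) <= preview_len else msg[:preview_len] + "...")

With logging configured at DEBUG level (e.g. logging.basicConfig(level=logging.DEBUG)), the patched function emits the metadata lines unconditionally and the prompt/response bodies in full only when verbose debugging is switched on.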