Enhance OpenAI API error handling and logging for better reliability

• Add InvalidResponseError custom exception
• Improve error logging for API failures
• Add empty response content validation
• Add more detailed debug logging
• Add retry for invalid response cases
yangdx · 2025-02-06 19:42:57 +08:00
parent 290a4d5ec0
commit 506e39e14e


@@ -77,12 +77,15 @@ from lightrag.types import GPTKeywordExtractionFormat
 import numpy as np
 from typing import Union

+class InvalidResponseError(Exception):
+    """Custom exception class for triggering retry mechanism"""
+    pass

 @retry(
     stop=stop_after_attempt(3),
     wait=wait_exponential(multiplier=1, min=4, max=10),
     retry=retry_if_exception_type(
-        (RateLimitError, APIConnectionError, APITimeoutError)
+        (RateLimitError, APIConnectionError, APITimeoutError, InvalidResponseError)
     ),
 )
 async def openai_complete_if_cache(
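
For context on the retry wiring: tenacity re-invokes the decorated coroutine whenever it raises one of the listed exception types, so raising InvalidResponseError from inside openai_complete_if_cache is enough to schedule another attempt. Below is a minimal, self-contained sketch of that mechanism using the same decorator arguments; the flaky_call coroutine and its attempt counter are illustrative only, not part of this commit.

import asyncio

from tenacity import (
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)


class InvalidResponseError(Exception):
    """Custom exception class for triggering retry mechanism"""
    pass


attempts = 0


@retry(
    stop=stop_after_attempt(3),
    wait=wait_exponential(multiplier=1, min=4, max=10),
    retry=retry_if_exception_type(InvalidResponseError),
)
async def flaky_call() -> str:
    """Fail with InvalidResponseError twice, then succeed on the third try."""
    global attempts
    attempts += 1
    if attempts < 3:
        # Raising a listed exception type makes tenacity wait (4-10 s here)
        # and call the coroutine again, up to stop_after_attempt(3) attempts.
        raise InvalidResponseError("simulated empty response")
    return "ok"


print(asyncio.run(flaky_call()))  # prints "ok" after two retried failures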
@@ -112,17 +115,35 @@ async def openai_complete_if_cache(
     # Add log output
     logger.debug("===== Query Input to LLM =====")
+    logger.debug(f"Model: {model} Base URL: {base_url}")
+    logger.debug(f"Additional kwargs: {kwargs}")
     logger.debug(f"Query: {prompt}")
     logger.debug(f"System prompt: {system_prompt}")
-    logger.debug("Full context:")
+    # logger.debug(f"Messages: {messages}")

-    if "response_format" in kwargs:
-        response = await openai_async_client.beta.chat.completions.parse(
-            model=model, messages=messages, **kwargs
-        )
-    else:
-        response = await openai_async_client.chat.completions.create(
-            model=model, messages=messages, **kwargs
-        )
+    try:
+        if "response_format" in kwargs:
+            response = await openai_async_client.beta.chat.completions.parse(
+                model=model, messages=messages, **kwargs
+            )
+        else:
+            response = await openai_async_client.chat.completions.create(
+                model=model, messages=messages, **kwargs
+            )
+    except APIConnectionError as e:
+        logger.error(f"OpenAI API Connection Error: {str(e)}")
+        raise
+    except RateLimitError as e:
+        logger.error(f"OpenAI API Rate Limit Error: {str(e)}")
+        raise
+    except APITimeoutError as e:
+        logger.error(f"OpenAI API Timeout Error: {str(e)}")
+        raise
+    except Exception as e:
+        logger.error(f"OpenAI API Call Failed: {str(e)}")
+        logger.error(f"Model: {model}")
+        logger.error(f"Request parameters: {kwargs}")
+        raise

     if hasattr(response, "__aiter__"):
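
The shape of the try/except added above (catch the specific OpenAI SDK errors, log them, and re-raise so the outer @retry decorator can still act on them) can be reduced to a standalone sketch. This assumes the openai>=1.x Python SDK; create_completion is a wrapper name chosen here for illustration, not something the commit introduces.

import logging

from openai import APIConnectionError, APITimeoutError, AsyncOpenAI, RateLimitError

logger = logging.getLogger(__name__)


async def create_completion(client: AsyncOpenAI, model: str, messages: list, **kwargs):
    """Call the chat completions endpoint, logging failures before re-raising."""
    try:
        return await client.chat.completions.create(
            model=model, messages=messages, **kwargs
        )
    except (APIConnectionError, RateLimitError, APITimeoutError) as e:
        # Transient errors: log, then re-raise (not swallow) so a surrounding
        # tenacity @retry decorator still sees the exception and can retry.
        logger.error(f"OpenAI API transient error ({type(e).__name__}): {e}")
        raise
    except Exception as e:
        # Anything else: log the request context to aid debugging, then propagate.
        logger.error(f"OpenAI API call failed: {e}; model={model}, kwargs={kwargs}")
        raise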
@@ -140,8 +161,23 @@ async def openai_complete_if_cache(
                 raise

         return inner()
     else:
+        if (
+            not response
+            or not response.choices
+            or not hasattr(response.choices[0], "message")
+            or not hasattr(response.choices[0].message, "content")
+        ):
+            logger.error("Invalid response from OpenAI API")
+            raise InvalidResponseError("Invalid response from OpenAI API")
+
         content = response.choices[0].message.content
+
+        if not content or content.strip() == "":
+            logger.error("Received empty content from OpenAI API")
+            raise InvalidResponseError("Received empty content from OpenAI API")
+
         if r"\u" in content:
             content = safe_unicode_decode(content.encode("utf-8"))
+
         return content
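
The two validation checks added in this last hunk amount to: reject a response whose structure is missing choices[0].message.content, and reject one whose content is empty or whitespace. A hedged sketch of the same logic factored into a helper (validate_chat_completion is an illustrative name; the commit keeps these checks inline in openai_complete_if_cache):

import logging

logger = logging.getLogger(__name__)


class InvalidResponseError(Exception):
    """Same custom exception as added in the first hunk."""


def validate_chat_completion(response) -> str:
    """Return the message content, or raise InvalidResponseError so that a
    retry policy listing this exception type re-issues the request."""
    if (
        not response
        or not response.choices
        or not hasattr(response.choices[0], "message")
        or not hasattr(response.choices[0].message, "content")
    ):
        logger.error("Invalid response from OpenAI API")
        raise InvalidResponseError("Invalid response from OpenAI API")

    content = response.choices[0].message.content
    if not content or not content.strip():
        logger.error("Received empty content from OpenAI API")
        raise InvalidResponseError("Received empty content from OpenAI API")

    return content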