Use the context manager for the OpenAI client

This avoids resource-cleanup issues ("too many open files") when making massively parallel calls to the OpenAI API: Python has no reliable RAII, so depending on garbage collection to close each client is unreliable in such contexts.
Arjun Rao
2025-05-08 11:42:53 +10:00
parent f2c522ce7a
commit b7eae4d7c0

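For context, the pattern this commit adopts looks roughly like the sketch below. AsyncOpenAI can be used as an async context manager, which closes its underlying HTTP connection pool deterministically when the block exits instead of whenever the garbage collector finalizes the client. The model name and prompt are illustrative, not taken from this repository.

import asyncio

from openai import AsyncOpenAI


async def complete(prompt: str) -> str:
    # Reads OPENAI_API_KEY from the environment.
    client = AsyncOpenAI()
    # Entering the context manager ties the client's lifetime to this block;
    # on exit, the connection pool is closed immediately rather than whenever
    # the client object happens to be finalized.
    async with client:
        response = await client.chat.completions.create(
            model="gpt-4o-mini",  # illustrative model name
            messages=[{"role": "user", "content": prompt}],
        )
    return response.choices[0].message.content


async def main() -> None:
    # Massively parallel calls: without deterministic cleanup, each call can
    # leave sockets open until GC runs, eventually exhausting the process's
    # file-descriptor limit ("too many open files").
    results = await asyncio.gather(*(complete(f"Say {i}") for i in range(200)))
    print(len(results))


if __name__ == "__main__":
    asyncio.run(main())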

@@ -177,6 +177,7 @@ async def openai_complete_if_cache(
     logger.debug("===== Sending Query to LLM =====")
     try:
+        async with openai_async_client:
             if "response_format" in kwargs:
                 response = await openai_async_client.beta.chat.completions.parse(
                     model=model, messages=messages, **kwargs
@@ -421,6 +422,7 @@ async def openai_embed(
         api_key=api_key, base_url=base_url, client_configs=client_configs
     )
+    async with openai_async_client:
         response = await openai_async_client.embeddings.create(
             model=model, input=texts, encoding_format="float"
         )
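The embeddings hunk follows the same shape. A minimal standalone sketch of that pattern, assuming the same openai client library; the embedding model name is illustrative, not necessarily the repository's default:

import asyncio

from openai import AsyncOpenAI


async def embed(texts: list[str]) -> list[list[float]]:
    # Scoping the client to the call releases its sockets on exit, mirroring
    # the async-with wrapper added in the diff above.
    async with AsyncOpenAI() as client:
        response = await client.embeddings.create(
            model="text-embedding-3-small",  # illustrative model name
            input=texts,
            encoding_format="float",
        )
    return [item.embedding for item in response.data]


if __name__ == "__main__":
    vectors = asyncio.run(embed(["hello", "world"]))
    print(len(vectors), len(vectors[0]))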