From df69d386c58c290fc7eacd9bec1886bc64f63473 Mon Sep 17 00:00:00 2001
From: Nick French <64120654+nickjfrench@users.noreply.github.com>
Date: Fri, 17 Jan 2025 12:10:26 -0500
Subject: [PATCH] Fixes #596 - Hardcoded model deployment name in
 azure_openai_complete

Fixes #596

Update `azure_openai_complete` function to accept a model parameter with a
default value of 'gpt-4o-mini'.

* Modify the function signature of `azure_openai_complete` to include a
  `model` parameter with a default value of 'gpt-4o-mini'. The new parameter
  is appended after the existing parameters (before `**kwargs`) so that it
  carries a default legally and existing positional call sites keep working.
* Pass the `model` parameter to the `azure_openai_complete_if_cache` function
  instead of the hardcoded model name 'conversation-4o-mini'.

---
For more details, open the [Copilot Workspace session](https://copilot-workspace.githubnext.com/HKUDS/LightRAG/issues/596?shareId=XXXX-XXXX-XXXX-XXXX).
---
 lightrag/llm.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lightrag/llm.py b/lightrag/llm.py
index 89fb89e0..1f52d4ae 100644
--- a/lightrag/llm.py
+++ b/lightrag/llm.py
@@ -622,11 +622,11 @@ async def nvidia_openai_complete(


 async def azure_openai_complete(
-    prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
+    prompt, system_prompt=None, history_messages=[], keyword_extraction=False, model: str = "gpt-4o-mini", **kwargs
 ) -> str:
     keyword_extraction = kwargs.pop("keyword_extraction", None)
     result = await azure_openai_complete_if_cache(
-        "conversation-4o-mini",
+        model,
         prompt,
         system_prompt=system_prompt,
         history_messages=history_messages,