From 347843d54507c0a3524e64d80b3ff6a5aa6515e0 Mon Sep 17 00:00:00 2001
From: yangdx
Date: Sun, 19 Jan 2025 14:04:03 +0800
Subject: [PATCH] Use LLM_MODEL env var in Azure OpenAI function

- Remove model parameter from azure_openai_complete
  (all LLM complete functions must have the same parameter structure)
- Use LLM_MODEL env var in Azure OpenAI function
- Comment out Lollms example in .env.example
  (duplication with Ollama example)
---
 .env.example    | 6 +++---
 lightrag/llm.py | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/.env.example b/.env.example
index 68cb9d13..21c1030a 100644
--- a/.env.example
+++ b/.env.example
@@ -13,9 +13,9 @@ LLM_BINDING_HOST=http://host.docker.internal:11434
 LLM_MODEL=mistral-nemo:latest
 
 # Lollms example
-LLM_BINDING=lollms
-LLM_BINDING_HOST=http://host.docker.internal:9600
-LLM_MODEL=mistral-nemo:latest
+# LLM_BINDING=lollms
+# LLM_BINDING_HOST=http://host.docker.internal:9600
+# LLM_MODEL=mistral-nemo:latest
 
 
 # Embedding Configuration (Use valid host. For local services, you can use host.docker.internal)

diff --git a/lightrag/llm.py b/lightrag/llm.py
index 1f52d4ae..c7f51aea 100644
--- a/lightrag/llm.py
+++ b/lightrag/llm.py
@@ -622,11 +622,11 @@ async def nvidia_openai_complete(
 
 
 async def azure_openai_complete(
-    model: str = "gpt-4o-mini", prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
+    prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
 ) -> str:
     keyword_extraction = kwargs.pop("keyword_extraction", None)
     result = await azure_openai_complete_if_cache(
-        model,
+        os.getenv("LLM_MODEL", "gpt-4o-mini"),
         prompt,
         system_prompt=system_prompt,
         history_messages=history_messages,
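
A minimal usage sketch of the patched call site, assuming LightRAG is
installed and the Azure OpenAI credentials expected by
azure_openai_complete_if_cache are already exported; the LLM_MODEL value
"gpt-4o" below is an illustrative choice, not part of the diff:

    import asyncio
    import os

    from lightrag.llm import azure_openai_complete

    # After this patch the model name is read from the LLM_MODEL env var;
    # "gpt-4o-mini" is the fallback when the variable is unset.
    os.environ.setdefault("LLM_MODEL", "gpt-4o")  # illustrative model name

    async def main() -> None:
        # No model argument anymore: the signature now matches the other
        # *_complete functions (prompt, system_prompt, history_messages, ...).
        answer = await azure_openai_complete("Summarize what LightRAG does.")
        print(answer)

    asyncio.run(main())

Reading the model from the environment at call time keeps all *_complete
functions interchangeable behind one parameter structure, with the model
selected by server configuration rather than by each caller.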