Use LLM_MODEL env var in Azure OpenAI function

- Remove the model parameter from azure_openai_complete (all LLM complete functions must share the same parameter structure; see the sketch below)
- Read the model from the LLM_MODEL env var in the Azure OpenAI function
- Comment out the Lollms example in .env.example (it duplicates the Ollama example)
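For context, a hedged sketch of why a uniform parameter structure matters: the complete functions are used interchangeably, so every backend must accept the same call shape. Only the signature below mirrors this commit's diff; the alias and helper are hypothetical.

    from typing import Awaitable, Callable

    # Hypothetical alias: every LLM complete function is expected to be
    # awaitable as fn(prompt, system_prompt=None, history_messages=[],
    # **kwargs) -> str, with no per-backend model argument.
    LLMCompleteFunc = Callable[..., Awaitable[str]]

    async def ask(llm_complete: LLMCompleteFunc, question: str) -> str:
        # Any backend (OpenAI, Azure, Ollama, ...) can be swapped in here
        # precisely because the signatures match.
        return await llm_complete(
            question,
            system_prompt="You are a concise assistant.",
            history_messages=[],
        )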
Author: yangdx
Date: 2025-01-19 14:04:03 +08:00
parent b50fdeccaa
commit 347843d545
2 changed files with 5 additions and 5 deletions

.env.example

@@ -13,9 +13,9 @@ LLM_BINDING_HOST=http://host.docker.internal:11434
 LLM_MODEL=mistral-nemo:latest
 # Lollms example
-LLM_BINDING=lollms
-LLM_BINDING_HOST=http://host.docker.internal:9600
-LLM_MODEL=mistral-nemo:latest
+# LLM_BINDING=lollms
+# LLM_BINDING_HOST=http://host.docker.internal:9600
+# LLM_MODEL=mistral-nemo:latest
 # Embedding Configuration (Use valid host. For local services, you can use host.docker.internal)
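For context, a hedged sketch (not part of this commit) of how .env values like these are typically consumed: os.getenv with a fallback, the same pattern the code change below applies to LLM_MODEL. The python-dotenv loader is an assumption.

    import os

    from dotenv import load_dotenv  # assumed helper; pip install python-dotenv

    load_dotenv()  # populate os.environ from a .env file in the working directory

    # Commented-out .env lines (like the Lollms block above) simply leave
    # these variables unset, so the defaults win.
    llm_binding = os.getenv("LLM_BINDING", "ollama")
    llm_binding_host = os.getenv("LLM_BINDING_HOST", "http://host.docker.internal:11434")
    llm_model = os.getenv("LLM_MODEL", "mistral-nemo:latest")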


@@ -622,11 +622,11 @@ async def nvidia_openai_complete(
 async def azure_openai_complete(
-    model: str = "gpt-4o-mini", prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
+    prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
 ) -> str:
     keyword_extraction = kwargs.pop("keyword_extraction", None)
     result = await azure_openai_complete_if_cache(
-        model,
+        os.getenv("LLM_MODEL", "gpt-4o-mini"),
         prompt,
         system_prompt=system_prompt,
         history_messages=history_messages,
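A usage sketch under assumptions: the import path is assumed, and Azure OpenAI credentials are expected to already be configured in the environment. The only behavior taken from this commit is that azure_openai_complete now resolves its model from LLM_MODEL, defaulting to "gpt-4o-mini".

    import asyncio
    import os

    # Assumed: choose the deployment via the env var this commit introduces.
    os.environ.setdefault("LLM_MODEL", "gpt-4o-mini")

    from lightrag.llm import azure_openai_complete  # import path assumed

    async def main() -> None:
        # No model argument anymore: the signature matches the other
        # *_complete functions, and the model comes from LLM_MODEL.
        answer = await azure_openai_complete("Say hello in one sentence.")
        print(answer)

    if __name__ == "__main__":
        asyncio.run(main())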