Merge pull request #602 from danielaskdd/fix-azure-model-name-problem
Use LLM_MODEL env var in Azure OpenAI function
The first hunk updates the sample environment file so only one LLM binding is active at a time: the Lollms example lines are commented out, leaving the Ollama settings above them as the live defaults.

```diff
@@ -13,9 +13,9 @@ LLM_BINDING_HOST=http://host.docker.internal:11434
 LLM_MODEL=mistral-nemo:latest

 # Lollms example
-LLM_BINDING=lollms
-LLM_BINDING_HOST=http://host.docker.internal:9600
-LLM_MODEL=mistral-nemo:latest
+# LLM_BINDING=lollms
+# LLM_BINDING_HOST=http://host.docker.internal:9600
+# LLM_MODEL=mistral-nemo:latest


 # Embedding Configuration (Use valid host. For local services, you can use host.docker.internal)
```
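For context, the server resolves these settings at runtime with environment lookups. A minimal sketch of that pattern, assuming Ollama-style defaults matching the sample file above (the helper name is illustrative, not from the PR):

```python
import os


# Illustrative helper (hypothetical): collect the LLM binding settings from
# the environment, falling back to the Ollama defaults in the sample file.
def resolve_llm_binding() -> dict:
    return {
        "binding": os.getenv("LLM_BINDING", "ollama"),
        "host": os.getenv("LLM_BINDING_HOST", "http://host.docker.internal:11434"),
        "model": os.getenv("LLM_MODEL", "mistral-nemo:latest"),
    }


if __name__ == "__main__":
    print(resolve_llm_binding())  # e.g. {'binding': 'ollama', ...}
```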
The second hunk fixes the `ExecStart` path in the example systemd unit so it points at the start script in the repository root rather than under `lightrag/api/`:

```diff
@@ -9,7 +9,7 @@ User=netman
 MemoryHigh=8G
 MemoryMax=12G
 WorkingDirectory=/home/netman/lightrag-xyj
-ExecStart=/home/netman/lightrag-xyj/lightrag/api/start_lightrag_server.sh
+ExecStart=/home/netman/lightrag-xyj/start_lightrag_server.sh
 Restart=always
 RestartSec=10

```
The third hunk updates the README so the referenced service file name matches the renamed example unit:

````diff
@@ -427,7 +427,7 @@ This intelligent caching mechanism:

 ## Install Lightrag as a Linux Service

-Create your service file: `lightrag.sevice`. Modified the following lines from `lightrag.sevice.example`
+Create your service file: `lightrag-server.sevice`. Modified the following lines from `lightrag-server.sevice.example`

 ```text
 Description=LightRAG Ollama Service
````
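As a usage note (standard systemctl workflow, not part of this PR): systemd requires the `.service` suffix, so the unit should be installed under that name despite the `sevice` spelling in the README. Installation would look roughly like:

```text
# Install the unit under the name systemd expects, then enable and start it
sudo cp lightrag-server.sevice /etc/systemd/system/lightrag-server.service
sudo systemctl daemon-reload
sudo systemctl enable --now lightrag-server
systemctl status lightrag-server
```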
The final hunk is the core change: `azure_openai_complete` drops its hard-coded `model` parameter and reads the model name from the `LLM_MODEL` environment variable instead:

```diff
@@ -622,11 +622,11 @@ async def nvidia_openai_complete(


 async def azure_openai_complete(
-    model: str = "gpt-4o-mini", prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
+    prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
 ) -> str:
     keyword_extraction = kwargs.pop("keyword_extraction", None)
     result = await azure_openai_complete_if_cache(
-        model,
+        os.getenv("LLM_MODEL", "gpt-4o-mini"),
         prompt,
         system_prompt=system_prompt,
         history_messages=history_messages,
```
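Two details are worth spelling out. The removed signature placed a defaulted `model` parameter before the required `prompt`, which Python rejects outright ("SyntaxError: non-default argument follows default argument"), and it also pinned the model name regardless of configuration. Below is a minimal, self-contained sketch of the fixed lookup pattern; the `azure_openai_complete_if_cache` stub is a stand-in for LightRAG's real function, which actually calls the Azure OpenAI API:

```python
import asyncio
import os


# Stand-in stub: the real azure_openai_complete_if_cache performs the Azure
# OpenAI request (with caching); here it just reports the model it was given.
async def azure_openai_complete_if_cache(model, prompt, **kwargs) -> str:
    return f"[{model}] would answer: {prompt!r}"


async def azure_openai_complete(
    prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
) -> str:
    # The model name now comes from the environment, mirroring the .env
    # settings above, with "gpt-4o-mini" kept as the fallback default.
    return await azure_openai_complete_if_cache(
        os.getenv("LLM_MODEL", "gpt-4o-mini"),
        prompt,
        system_prompt=system_prompt,
        history_messages=history_messages,
        **kwargs,
    )


if __name__ == "__main__":
    os.environ["LLM_MODEL"] = "gpt-4o"
    print(asyncio.run(azure_openai_complete("Hello, Azure?")))
```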