From b008c586ea03e6ceebdb2174d02b80199e850ca9 Mon Sep 17 00:00:00 2001
From: yangdx
Date: Sun, 19 Jan 2025 13:28:29 +0800
Subject: [PATCH 1/2] Refactor service and script paths for LightRAG.

- Renamed service file to `lightrag-server.service.example`
- Updated `ExecStart` path in service file
- Corrected README.md service file reference
- Moved `start_lightrag.sh.example` to root
- No functionality is changed
---
 .../lightrag.service.example => lightrag-server.service.example | 2 +-
 lightrag/api/README.md                                          | 2 +-
 .../api/start_lightrag.sh.example => start_lightrag.sh.example  | 0
 3 files changed, 2 insertions(+), 2 deletions(-)
 rename lightrag/api/lightrag.service.example => lightrag-server.service.example (77%)
 rename lightrag/api/start_lightrag.sh.example => start_lightrag.sh.example (100%)

diff --git a/lightrag/api/lightrag.service.example b/lightrag-server.service.example
similarity index 77%
rename from lightrag/api/lightrag.service.example
rename to lightrag-server.service.example
index 5113ddde..3e02935e 100644
--- a/lightrag/api/lightrag.service.example
+++ b/lightrag-server.service.example
@@ -9,7 +9,7 @@ User=netman
 MemoryHigh=8G
 MemoryMax=12G
 WorkingDirectory=/home/netman/lightrag-xyj
-ExecStart=/home/netman/lightrag-xyj/lightrag/api/start_lightrag_server.sh
+ExecStart=/home/netman/lightrag-xyj/start_lightrag_server.sh
 Restart=always
 RestartSec=10

diff --git a/lightrag/api/README.md b/lightrag/api/README.md
index e0c835c6..545811e4 100644
--- a/lightrag/api/README.md
+++ b/lightrag/api/README.md
@@ -427,7 +427,7 @@ This intelligent caching mechanism:

 ## Install Lightrag as a Linux Service

-Create your service file: `lightrag.sevice`. Modified the following lines from `lightrag.sevice.example`
+Create your service file: `lightrag-server.service`. Modify the following lines from `lightrag-server.service.example`

 ```text
 Description=LightRAG Ollama Service
diff --git a/lightrag/api/start_lightrag.sh.example b/start_lightrag.sh.example
similarity index 100%
rename from lightrag/api/start_lightrag.sh.example
rename to start_lightrag.sh.example

From 347843d54507c0a3524e64d80b3ff6a5aa6515e0 Mon Sep 17 00:00:00 2001
From: yangdx
Date: Sun, 19 Jan 2025 14:04:03 +0800
Subject: [PATCH 2/2] Use LLM_MODEL env var in Azure OpenAI function

- Remove model parameter from azure_openai_complete (all LLM complete
  functions must have the same parameter structure)
- Use LLM_MODEL env var in Azure OpenAI function
- Comment out Lollms example in .env.example (duplication with Ollama example)
---
 .env.example    | 6 +++---
 lightrag/llm.py | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/.env.example b/.env.example
index 68cb9d13..21c1030a 100644
--- a/.env.example
+++ b/.env.example
@@ -13,9 +13,9 @@ LLM_BINDING_HOST=http://host.docker.internal:11434
 LLM_MODEL=mistral-nemo:latest

 # Lollms example
-LLM_BINDING=lollms
-LLM_BINDING_HOST=http://host.docker.internal:9600
-LLM_MODEL=mistral-nemo:latest
+# LLM_BINDING=lollms
+# LLM_BINDING_HOST=http://host.docker.internal:9600
+# LLM_MODEL=mistral-nemo:latest

 # Embedding Configuration (Use valid host. For local services, you can use host.docker.internal)
diff --git a/lightrag/llm.py b/lightrag/llm.py
index 1f52d4ae..c7f51aea 100644
--- a/lightrag/llm.py
+++ b/lightrag/llm.py
@@ -622,11 +622,11 @@ async def nvidia_openai_complete(


 async def azure_openai_complete(
-    model: str = "gpt-4o-mini", prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
+    prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
 ) -> str:
     keyword_extraction = kwargs.pop("keyword_extraction", None)
     result = await azure_openai_complete_if_cache(
-        model,
+        os.getenv("LLM_MODEL", "gpt-4o-mini"),
         prompt,
         system_prompt=system_prompt,
         history_messages=history_messages,
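
The point of patch 2 is that every `*_complete` helper now shares the signature `(prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs)`, so callers can swap LLM bindings without touching call sites, and the model name becomes deployment configuration rather than a call-site argument. Below is a minimal calling sketch, not part of the patches: the import path and signature come from the diff above, while the Azure endpoint/key setup that `azure_openai_complete_if_cache` relies on is assumed to already be configured and is not shown here.

```python
import asyncio
import os

from lightrag.llm import azure_openai_complete

# Patched behavior: the deployment/model name is read from LLM_MODEL at call
# time, falling back to "gpt-4o-mini" when the variable is unset.
os.environ["LLM_MODEL"] = "gpt-4o-mini"

async def main() -> None:
    # Same parameter structure as the other *_complete helpers: prompt first,
    # everything model-specific resolved inside the function from the env.
    answer = await azure_openai_complete(
        "Summarize what LightRAG does in one sentence.",
        system_prompt="You are a concise assistant.",
        history_messages=[],
    )
    print(answer)

if __name__ == "__main__":
    asyncio.run(main())
```

Because the model comes from the environment, switching deployments is a config change (e.g. editing `LLM_MODEL` in `.env`) instead of an edit to every caller.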