From 57884f2fb8c259e6c93d417b621c84741f907dea Mon Sep 17 00:00:00 2001
From: yangdx
Date: Sun, 23 Feb 2025 16:52:41 +0800
Subject: [PATCH] Refine LLM settings in env sample file

---
 .env.example           | 6 +++++-
 lightrag/api/README.md | 3 +--
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/.env.example b/.env.example
index b15f5758..2b5c284c 100644
--- a/.env.example
+++ b/.env.example
@@ -45,17 +45,21 @@
 # MAX_EMBED_TOKENS=8192
 
 ### LLM Configuration (Use valid host. For local services installed with docker, you can use host.docker.internal)
+LLM_BINDING=ollama
 LLM_MODEL=mistral-nemo:latest
 LLM_BINDING_API_KEY=your_api_key
 ### Ollama example
-LLM_BINDING=ollama
 LLM_BINDING_HOST=http://localhost:11434
 ### OpenAI alike example
 # LLM_BINDING=openai
+# LLM_MODEL=gpt-4o
 # LLM_BINDING_HOST=https://api.openai.com/v1
+# LLM_BINDING_API_KEY=your_api_key
 ### lollms example
 # LLM_BINDING=lollms
+# LLM_MODEL=mistral-nemo:latest
 # LLM_BINDING_HOST=http://localhost:9600
+# LLM_BINDING_API_KEY=your_api_key
 
 ### Embedding Configuration (Use valid host. For local services installed with docker, you can use host.docker.internal)
 EMBEDDING_MODEL=bge-m3:latest
diff --git a/lightrag/api/README.md b/lightrag/api/README.md
index e9dd817b..d06a8d9e 100644
--- a/lightrag/api/README.md
+++ b/lightrag/api/README.md
@@ -45,7 +45,7 @@ EMBEDDING_BINDING_HOST=http://localhost:11434
 LLM_BINDING_HOST=http://localhost:9600
 EMBEDDING_BINDING_HOST=http://localhost:9600
 
-# for openai, openai compatible or azure openai backend 
+# for openai, openai compatible or azure openai backend
 LLM_BINDING_HOST=https://api.openai.com/v1
 EMBEDDING_BINDING_HOST=http://localhost:9600
 ```
@@ -502,4 +502,3 @@ A query prefix in the query string can determines which LightRAG query mode is u
 For example, chat message "/mix 唐僧有几个徒弟" will trigger a mix mode query for LighRAG. A chat message without query prefix will trigger a hybrid mode query by default。
 
 "/bypass" is not a LightRAG query mode, it will tell API Server to pass the query directly to the underlying LLM with chat history. So user can use LLM to answer question base on the chat history. If you are using Open WebUI as front end, you can just switch the model to a normal LLM instead of using /bypass prefix.
-