From a3ff0534d694e89f421eb29c864c73d045b27a7c Mon Sep 17 00:00:00 2001
From: yangdx
Date: Sat, 29 Mar 2025 13:52:29 +0800
Subject: [PATCH] Remove the comments at the end of the environment variable
 lines in .env file

---
 env.example            | 10 +++++++---
 lightrag/api/README.md |  1 -
 2 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/env.example b/env.example
index ca780fc2..20d80d43 100644
--- a/env.example
+++ b/env.example
@@ -55,10 +55,14 @@ SUMMARY_LANGUAGE=English
 # MAX_EMBED_TOKENS=8192
 
 ### LLM Configuration
-TIMEOUT=150 # Time out in seconds for LLM, None for infinite timeout
+### Timeout in seconds for LLM, None for infinite timeout
+TIMEOUT=150
+### Some models like o1-mini require temperature to be set to 1
 TEMPERATURE=0.5
-MAX_ASYNC=4 # Max concurrency requests of LLM
-MAX_TOKENS=32768 # Max tokens send to LLM (less than context size of the model)
+### Max concurrent requests to the LLM
+MAX_ASYNC=4
+### Max tokens sent to the LLM (less than the model's context size)
+MAX_TOKENS=32768
 
 ### Ollama example (For local services installed with docker, you can use host.docker.internal as host)
 LLM_BINDING=ollama
diff --git a/lightrag/api/README.md b/lightrag/api/README.md
index 8e8a1980..8b2e8177 100644
--- a/lightrag/api/README.md
+++ b/lightrag/api/README.md
@@ -422,7 +422,6 @@
 EMBEDDING_BINDING_HOST=http://localhost:11434
 ```
 
-
 ## API Endpoints
 
 All servers (LoLLMs, Ollama, OpenAI and Azure OpenAI) provide the same REST API endpoints for RAG functionality. When API Server is running, visit:
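For context, here is a minimal sketch of how the LLM variables touched by this patch might be read in Python. It is illustrative only — the `get_timeout` helper and the fallback defaults are assumptions, not LightRAG's actual loader — but it shows the semantics the comments describe, including `TIMEOUT=None` meaning an infinite timeout:

```python
# Illustrative sketch (not LightRAG's real loader): reading the LLM settings
# from the environment with the same defaults as env.example above.
import os
from typing import Optional


def get_timeout() -> Optional[float]:
    """Timeout in seconds for LLM calls; None means infinite timeout."""
    raw = os.environ.get("TIMEOUT", "150")
    if raw.strip().lower() == "none":
        return None  # no timeout, per the comment added in this patch
    return float(raw)


TIMEOUT = get_timeout()
TEMPERATURE = float(os.environ.get("TEMPERATURE", "0.5"))  # o1-mini-style models need 1
MAX_ASYNC = int(os.environ.get("MAX_ASYNC", "4"))          # max concurrent LLM requests
MAX_TOKENS = int(os.environ.get("MAX_TOKENS", "32768"))    # keep below the model's context size

if __name__ == "__main__":
    print(TIMEOUT, TEMPERATURE, MAX_ASYNC, MAX_TOKENS)
```

Running it with, e.g., `TIMEOUT=None MAX_ASYNC=8 python settings.py` confirms the parsing, and shows why moving the comments onto their own lines matters: naive `.env` parsers would otherwise include the trailing `# ...` text in the variable's value.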