From 5cf875755a042c93164a5e5d240ce866f5fb37e9 Mon Sep 17 00:00:00 2001 From: yangdx Date: Mon, 3 Feb 2025 13:07:08 +0800 Subject: [PATCH] Update API endpoint documentation to clarify Ollama server compatibility MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit • Add Ollama server doc for /api/tags • Update /api/generate endpoint docs • Update /api/chat endpoint docs --- lightrag/api/lightrag_server.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lightrag/api/lightrag_server.py b/lightrag/api/lightrag_server.py index a72e1b4c..ec58f552 100644 --- a/lightrag/api/lightrag_server.py +++ b/lightrag/api/lightrag_server.py @@ -1477,7 +1477,7 @@ def create_app(args): @app.get("/api/tags") async def get_tags(): - """Get available models""" + """Return available models acting as an Ollama server""" return OllamaTagResponse( models=[ { @@ -1521,7 +1521,7 @@ def create_app(args): @app.post("/api/generate") async def generate(raw_request: Request, request: OllamaGenerateRequest): - """Handle generate completion requests + """Handle generate completion requests acting as an Ollama model For compatiblity purpuse, the request is not processed by LightRAG, and will be handled by underlying LLM model. """ @@ -1663,7 +1663,7 @@ def create_app(args): @app.post("/api/chat") async def chat(raw_request: Request, request: OllamaChatRequest): - """Process chat completion requests. + """Process chat completion requests acting as an Ollama model Routes user queries through LightRAG by selecting query mode based on prefix indicators. Detects and forwards OpenWebUI session-related requests (for meta data generation task) directly to LLM. """