Update API endpoint documentation

Author: yangdx
Date:   2025-01-26 11:36:24 +08:00
Parent: f8d26cb193
Commit: f045fc3d59


@@ -1446,7 +1446,10 @@ def create_app(args):
@app.post("/api/generate") @app.post("/api/generate")
async def generate(raw_request: Request, request: OllamaGenerateRequest): async def generate(raw_request: Request, request: OllamaGenerateRequest):
"""Handle generate completion requests""" """Handle generate completion requests
For compatiblity purpuse, the request is not processed by LightRAG,
and will be handled by underlying LLM model.
"""
try: try:
query = request.prompt query = request.prompt
start_time = time.time_ns() start_time = time.time_ns()
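The new docstring states that /api/generate bypasses LightRAG and is answered by the underlying LLM. As a rough client-side sketch of calling this Ollama-compatible endpoint (the host, port, and model name below are assumptions, not values taken from this commit):

import requests

# Sketch: call the Ollama-compatible /api/generate endpoint exposed by the
# LightRAG server. Host, port, and model name are assumed, not from this diff.
resp = requests.post(
    "http://localhost:9621/api/generate",
    json={
        "model": "qwen2.5:latest",   # assumed model name
        "prompt": "Summarize the indexed documents in one sentence.",
        "stream": False,             # ask for a single, non-streamed reply
    },
    timeout=120,
)
resp.raise_for_status()
print(resp.json().get("response"))   # Ollama-style generate payload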
@@ -1585,7 +1588,10 @@ def create_app(args):
@app.post("/api/chat") @app.post("/api/chat")
async def chat(raw_request: Request, request: OllamaChatRequest): async def chat(raw_request: Request, request: OllamaChatRequest):
"""Handle chat completion requests""" """Process chat completion requests.
Routes user queries through LightRAG by selecting query mode based on prefix indicators.
Detects and forwards OpenWebUI session-related requests (for meta data generation task) directly to LLM.
"""
try: try:
# Get all messages # Get all messages
messages = request.messages messages = request.messages
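The updated chat docstring says the query mode is chosen from a prefix indicator on the user message. A minimal sketch of such a request follows; the "/local " prefix, host, port, and model name are assumptions based on LightRAG's prefix convention rather than text from this diff.

import requests

# Sketch: chat request whose user message carries a query-mode prefix.
# The "/local " prefix and connection details are assumptions.
resp = requests.post(
    "http://localhost:9621/api/chat",
    json={
        "model": "lightrag:latest",   # assumed model name
        "messages": [
            {"role": "user", "content": "/local What does create_app configure?"}
        ],
        "stream": False,
    },
    timeout=120,
)
resp.raise_for_status()
print(resp.json()["message"]["content"])   # Ollama-style chat payload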
@@ -1605,7 +1611,6 @@ def create_app(args):
             start_time = time.time_ns()
             prompt_tokens = estimate_tokens(cleaned_query)
-            # 构建 query_param
             param_dict = {
                 "mode": mode,
                 "stream": request.stream,
@@ -1613,7 +1618,6 @@ def create_app(args):
"conversation_history": conversation_history, "conversation_history": conversation_history,
} }
# 如果设置了 history_turns添加到参数中
if args.history_turns is not None: if args.history_turns is not None:
param_dict["history_turns"] = args.history_turns param_dict["history_turns"] = args.history_turns