From f81b1cdf0a65cc157e9d2402f54ae05aac432e24 Mon Sep 17 00:00:00 2001
From: yangdx
Date: Wed, 15 Jan 2025 20:46:45 +0800
Subject: [PATCH] Add images field and performance statistics to Ollama API responses
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- Add an images field to OllamaMessage
- Include the images field in response messages
- Add performance statistics to the completion marker
- Update the test case to handle the performance statistics
- Remove the /naive prefix from the test query
---
 lightrag/api/lightrag_ollama.py | 24 +++++++++++++++---------
 test_lightrag_ollama_chat.py    | 18 +++++++++++-------
 2 files changed, 26 insertions(+), 16 deletions(-)

diff --git a/lightrag/api/lightrag_ollama.py b/lightrag/api/lightrag_ollama.py
index 004c2739..3b92902f 100644
--- a/lightrag/api/lightrag_ollama.py
+++ b/lightrag/api/lightrag_ollama.py
@@ -231,6 +231,7 @@ class SearchMode(str, Enum):
 class OllamaMessage(BaseModel):
     role: str
     content: str
+    images: Optional[List[str]] = None
 
 class OllamaChatRequest(BaseModel):
     model: str = LIGHTRAG_MODEL
@@ -712,7 +713,8 @@ def create_app(args):
                 "created_at": LIGHTRAG_CREATED_AT,
                 "message": {
                     "role": "assistant",
-                    "content": response
+                    "content": response,
+                    "images": None
                 },
                 "done": True
             }
@@ -726,21 +728,24 @@ def create_app(args):
                         "created_at": LIGHTRAG_CREATED_AT,
                         "message": {
                             "role": "assistant",
-                            "content": chunk
+                            "content": chunk,
+                            "images": None
                         },
                         "done": False
                     }
                     yield f"data: {json.dumps(data, ensure_ascii=False)}\n\n"
 
-                # Send the completion marker
+                # Send the completion marker, including performance statistics
                 data = {
                     "model": LIGHTRAG_MODEL,
                     "created_at": LIGHTRAG_CREATED_AT,
-                    "message": {
-                        "role": "assistant",
-                        "content": ""
-                    },
-                    "done": True
+                    "done": True,
+                    "total_duration": 0,  # These metrics are not actually tracked yet, so default values are used for now
+                    "load_duration": 0,
+                    "prompt_eval_count": 0,
+                    "prompt_eval_duration": 0,
+                    "eval_count": 0,
+                    "eval_duration": 0
                 }
                 yield f"data: {json.dumps(data, ensure_ascii=False)}\n\n"
                 return  # Make sure the generator ends right after the completion marker is sent
@@ -777,7 +782,8 @@ def create_app(args):
             created_at=LIGHTRAG_CREATED_AT,
             message=OllamaMessage(
                 role="assistant",
-                content=str(response_text)  # Make sure the value is converted to a string
+                content=str(response_text),  # Make sure the value is converted to a string
+                images=None
             ),
             done=True
         )
diff --git a/test_lightrag_ollama_chat.py b/test_lightrag_ollama_chat.py
index 067b8877..b941ee27 100644
--- a/test_lightrag_ollama_chat.py
+++ b/test_lightrag_ollama_chat.py
@@ -35,7 +35,7 @@ def test_stream_chat():
         "messages": [
             {
                 "role": "user",
-                "content": "/naive 孙悟空有什么法力,性格特征是什么"
+                "content": "孙悟空有什么法力,性格特征是什么"
             }
         ],
         "stream": True
@@ -51,12 +51,16 @@
         for event in client.events():
             try:
                 data = json.loads(event.data)
-                message = data.get("message", {})
-                content = message.get("content", "")
-                if content:  # Only collect non-empty content
-                    output_buffer.append(content)
-                if data.get("done", False):  # Exit the loop once the completion marker arrives
-                    break
+                if data.get("done", False):  # Completion marker
+                    if "total_duration" in data:  # Final message carrying the performance statistics
+                        print("\n=== 性能统计 ===")
+                        print(json.dumps(data, ensure_ascii=False, indent=2))
+                    break
+                else:  # Regular content message
+                    message = data.get("message", {})
+                    content = message.get("content", "")
+                    if content:  # Only collect non-empty content
+                        output_buffer.append(content)
             except json.JSONDecodeError:
                 print("Error decoding JSON from SSE event")
     finally:
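
Usage note: below is a minimal client sketch showing how the streaming contract introduced by this patch can be consumed, mirroring the updated test_stream_chat. The endpoint URL and model name are assumptions (they depend on how lightrag_ollama is launched); the rest follows the patched behavior: content chunks arrive in message.content, and the final "done" message carries the timing counters (total_duration, eval_count, ...) instead of a message body.

    import json

    import requests
    import sseclient  # sseclient-py, the same SSE client the test suite uses

    # Assumed endpoint and model name; adjust them to the actual deployment.
    URL = "http://localhost:9621/api/chat"

    payload = {
        "model": "lightrag:latest",
        "messages": [{"role": "user", "content": "孙悟空有什么法力,性格特征是什么"}],
        "stream": True,
    }

    with requests.post(URL, json=payload, stream=True) as response:
        client = sseclient.SSEClient(response)
        chunks = []
        for event in client.events():
            data = json.loads(event.data)
            if data.get("done", False):
                # Final marker: no message body, only the performance counters.
                print(json.dumps(data, ensure_ascii=False, indent=2))
                break
            chunks.append(data["message"]["content"])

    print("".join(chunks))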