Fix end-of-stream detection in the test case

yangdx
2025-01-15 21:26:20 +08:00
parent af9ac188f0
commit 6d44178f63
2 changed files with 13 additions and 13 deletions

View File

@@ -743,12 +743,12 @@ def create_app(args):
                 "model": LIGHTRAG_MODEL,
                 "created_at": LIGHTRAG_CREATED_AT,
                 "done": True,
-                "total_duration": 0,  # Since we don't actually track these metrics, use default values for now
-                "load_duration": 0,
-                "prompt_eval_count": 0,
-                "prompt_eval_duration": 0,
-                "eval_count": 0,
-                "eval_duration": 0
+                "total_duration": 1,  # Since we don't actually track these metrics, use default values for now
+                "load_duration": 1,
+                "prompt_eval_count": 999,
+                "prompt_eval_duration": 1,
+                "eval_count": 999,
+                "eval_duration": 1
             }
             yield f"data: {json.dumps(data, ensure_ascii=False)}\n\n"
             return  # Ensure the generator ends immediately after sending the completion marker
@@ -789,12 +789,12 @@ def create_app(args):
                     "images": None
                 },
                 "done": True,
-                "total_duration": 0,  # Since we don't actually track these metrics, use default values for now
-                "load_duration": 0,
-                "prompt_eval_count": 0,
-                "prompt_eval_duration": 0,
-                "eval_count": 0,
-                "eval_duration": 0
+                "total_duration": 1,  # Since we don't actually track these metrics, use default values for now
+                "load_duration": 1,
+                "prompt_eval_count": 999,
+                "prompt_eval_duration": 1,
+                "eval_count": 999,
+                "eval_duration": 1
             }
         except Exception as e:
             raise HTTPException(status_code=500, detail=str(e))
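
Note: a minimal sketch of the stream-end pattern these hunks implement, assuming a FastAPI-style async SSE generator. LIGHTRAG_MODEL and LIGHTRAG_CREATED_AT appear in the diff, but their values below are placeholders, and stream_generator/chunks are hypothetical names.

import json

LIGHTRAG_MODEL = "lightrag:latest"            # assumed value
LIGHTRAG_CREATED_AT = "2025-01-15T00:00:00Z"  # assumed value

async def stream_generator(chunks):
    # Stream the content chunks first, each tagged done=False.
    for chunk in chunks:
        data = {
            "model": LIGHTRAG_MODEL,
            "created_at": LIGHTRAG_CREATED_AT,
            "response": chunk,
            "done": False,
        }
        yield f"data: {json.dumps(data, ensure_ascii=False)}\n\n"
    # Final message: done=True plus the non-zero placeholder statistics
    # the commit switches to above.
    data = {
        "model": LIGHTRAG_MODEL,
        "created_at": LIGHTRAG_CREATED_AT,
        "done": True,
        "total_duration": 1,
        "load_duration": 1,
        "prompt_eval_count": 999,
        "prompt_eval_duration": 1,
        "eval_count": 999,
        "eval_duration": 1,
    }
    yield f"data: {json.dumps(data, ensure_ascii=False)}\n\n"
    return  # end the generator right after the completion marker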

View File

@@ -70,7 +70,7 @@ def test_stream_chat():
     for event in client.events():
         try:
             data = json.loads(event.data)
-            if data.get("done", False):  # if this is the completion marker
+            if data.get("done", True):  # if this is the completion marker
                 if "total_duration" in data:  # the final performance statistics message
                     print("\n=== 性能统计 ===")
                     print(json.dumps(data, ensure_ascii=False, indent=2))
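
Note: a minimal sketch of the consuming side of this test, assuming requests plus sseclient-py (whose SSEClient.events() matches the client.events() call in the hunk). The endpoint URL, port, and request payload are hypothetical.

import json
import requests
import sseclient  # sseclient-py

response = requests.post(
    "http://localhost:9621/api/chat",  # hypothetical endpoint
    json={
        "model": "lightrag:latest",    # hypothetical model name
        "messages": [{"role": "user", "content": "hello"}],
        "stream": True,
    },
    stream=True,
)
client = sseclient.SSEClient(response)

for event in client.events():
    try:
        data = json.loads(event.data)
    except json.JSONDecodeError:
        continue
    # With the default set to True, an event that lacks the "done" flag
    # ends the loop instead of hanging the test, which is the behavior
    # this commit fixes.
    if data.get("done", True):
        if "total_duration" in data:  # the final statistics message
            print(json.dumps(data, ensure_ascii=False, indent=2))
        break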