From 725d5af215111e052797b1853856bf3ba591f98f Mon Sep 17 00:00:00 2001
From: Luca Congiu
Date: Tue, 24 Dec 2024 09:56:33 +0100
Subject: [PATCH] Refactor code formatting and update requirements for
 improved clarity and consistency

---
 .gitignore                          |  2 +-
 api/.env.aoi.example                |  2 +-
 api/azure_openai_lightrag_server.py | 24 +++++++++++++++---------
 api/requirements.txt                | 18 +++++++++---------
 examples/.env.oai.example           |  2 +-
 lightrag/llm.py                     |  2 +-
 6 files changed, 28 insertions(+), 22 deletions(-)

diff --git a/.gitignore b/.gitignore
index 7b7aaad5..929881ef 100644
--- a/.gitignore
+++ b/.gitignore
@@ -18,4 +18,4 @@ gui/
 .env
 venv/
 examples/input/
-examples/output/
\ No newline at end of file
+examples/output/
diff --git a/api/.env.aoi.example b/api/.env.aoi.example
index 288d7ff3..cea86da2 100644
--- a/api/.env.aoi.example
+++ b/api/.env.aoi.example
@@ -4,4 +4,4 @@ AZURE_OPENAI_API_KEY=myapikey
 AZURE_OPENAI_ENDPOINT=https://myendpoint.openai.azure.com
 
 AZURE_EMBEDDING_DEPLOYMENT=text-embedding-3-large
-AZURE_EMBEDDING_API_VERSION=2023-05-15
\ No newline at end of file
+AZURE_EMBEDDING_API_VERSION=2023-05-15
diff --git a/api/azure_openai_lightrag_server.py b/api/azure_openai_lightrag_server.py
index 7ad95a77..0bf9c654 100644
--- a/api/azure_openai_lightrag_server.py
+++ b/api/azure_openai_lightrag_server.py
@@ -4,7 +4,10 @@ import asyncio
 import logging
 import argparse
 from lightrag import LightRAG, QueryParam
-from lightrag.llm import azure_openai_complete_if_cache, azure_openai_complete, azure_openai_embedding
+from lightrag.llm import (
+    azure_openai_complete_if_cache,
+    azure_openai_embedding,
+)
 from lightrag.utils import EmbeddingFunc
 from typing import Optional, List
 from enum import Enum
@@ -28,6 +31,7 @@ AZURE_OPENAI_ENDPOINT = os.getenv("AZURE_OPENAI_ENDPOINT")
 AZURE_EMBEDDING_DEPLOYMENT = os.getenv("AZURE_EMBEDDING_DEPLOYMENT")
 AZURE_EMBEDDING_API_VERSION = os.getenv("AZURE_EMBEDDING_API_VERSION")
+
 def parse_args():
     parser = argparse.ArgumentParser(
         description="LightRAG FastAPI Server with OpenAI integration"
     )
@@ -132,7 +136,7 @@ class SearchMode(str, Enum):
 class QueryRequest(BaseModel):
     query: str
     mode: SearchMode = SearchMode.hybrid
-    #stream: bool = False
+    # stream: bool = False
 
 
 class QueryResponse(BaseModel):
@@ -205,10 +209,11 @@ def create_app(args):
         embedding_func=EmbeddingFunc(
             embedding_dim=embedding_dim,
             max_token_size=args.max_embed_tokens,
-            func=lambda texts: azure_openai_embedding(texts, model=args.embedding_model),
+            func=lambda texts: azure_openai_embedding(
+                texts, model=args.embedding_model
+            ),
         ),
     )
-
 
     @app.on_event("startup")
     async def startup_event():
@@ -266,9 +271,7 @@ def create_app(args):
            if os.path.exists(cachefile):
                 with open(cachefile, "w") as f:
                     f.write("{}")
-            return {
-                "status": "success"
-            }
+            return {"status": "success"}
         except Exception as e:
             raise HTTPException(status_code=500, detail=str(e))
 
@@ -319,15 +322,17 @@ def create_app(args):
                 param=QueryParam(mode=request.mode, stream=True),
             )
             if inspect.isasyncgen(response):
+
                 async def stream_generator():
                     async for chunk in response:
                         yield json.dumps({"data": chunk}) + "\n"
 
-                return StreamingResponse(stream_generator(), media_type="application/json")
+                return StreamingResponse(
+                    stream_generator(), media_type="application/json"
+                )
             else:
                 return QueryResponse(response=response)
-
         except Exception as e:
             raise HTTPException(status_code=500, detail=str(e))
 
@@ -433,5 +438,6 @@ def create_app(args):
 if __name__ == "__main__":
     args = parse_args()
     import uvicorn
+
     app = create_app(args)
     uvicorn.run(app, host=args.host, port=args.port)
diff --git a/api/requirements.txt b/api/requirements.txt
index c83c3382..221d7f40 100644
--- a/api/requirements.txt
+++ b/api/requirements.txt
@@ -1,17 +1,17 @@
+aioboto3
 ascii_colors
 fastapi
-python-multipart
-uvicorn
-nest_asyncio
 lightrag-hku
-tqdm
-aioboto3
+nano_vectordb
+nest_asyncio
 numpy
 ollama
-torch
 openai
+python-dotenv
+python-multipart
 tenacity
-transformers
 tiktoken
-nano_vectordb
-python-dotenv
\ No newline at end of file
+torch
+tqdm
+transformers
+uvicorn
diff --git a/examples/.env.oai.example b/examples/.env.oai.example
index 288d7ff3..cea86da2 100644
--- a/examples/.env.oai.example
+++ b/examples/.env.oai.example
@@ -4,4 +4,4 @@ AZURE_OPENAI_API_KEY=myapikey
 AZURE_OPENAI_ENDPOINT=https://myendpoint.openai.azure.com
 
 AZURE_EMBEDDING_DEPLOYMENT=text-embedding-3-large
-AZURE_EMBEDDING_API_VERSION=2023-05-15
\ No newline at end of file
+AZURE_EMBEDDING_API_VERSION=2023-05-15
diff --git a/lightrag/llm.py b/lightrag/llm.py
index 0b844204..25792d53 100644
--- a/lightrag/llm.py
+++ b/lightrag/llm.py
@@ -148,7 +148,7 @@ async def azure_openai_complete_if_cache(
     response = await openai_async_client.chat.completions.create(
         model=model, messages=messages, **kwargs
     )
-    
+
     if hasattr(response, "__aiter__"):
 
         async def inner():