Refactor code formatting and update requirements for improved clarity and consistency

.gitignore (vendored, 2 lines changed):

@@ -18,4 +18,4 @@ gui/
 .env
 venv/
 examples/input/
 examples/output/

@@ -4,4 +4,4 @@ AZURE_OPENAI_API_KEY=myapikey
 AZURE_OPENAI_ENDPOINT=https://myendpoint.openai.azure.com
 
 AZURE_EMBEDDING_DEPLOYMENT=text-embedding-3-large
 AZURE_EMBEDDING_API_VERSION=2023-05-15

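For orientation, a minimal sketch of how a .env file like this is typically consumed. The variable names come from the hunk above and from the os.getenv calls later in this diff; calling load_dotenv() at import time is an assumption (python-dotenv does appear in the updated requirements below).

# Hedged sketch: loading the .env values shown above. Variable names
# match the diff; the load_dotenv() call site is an assumption.
import os

from dotenv import load_dotenv

load_dotenv()  # reads .env from the current working directory

AZURE_OPENAI_API_KEY = os.getenv("AZURE_OPENAI_API_KEY")
AZURE_OPENAI_ENDPOINT = os.getenv("AZURE_OPENAI_ENDPOINT")
AZURE_EMBEDDING_DEPLOYMENT = os.getenv("AZURE_EMBEDDING_DEPLOYMENT")
AZURE_EMBEDDING_API_VERSION = os.getenv("AZURE_EMBEDDING_API_VERSION")
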
@@ -4,7 +4,10 @@ import asyncio
 import logging
 import argparse
 from lightrag import LightRAG, QueryParam
-from lightrag.llm import azure_openai_complete_if_cache, azure_openai_complete, azure_openai_embedding
+from lightrag.llm import (
+    azure_openai_complete_if_cache,
+    azure_openai_embedding,
+)
 from lightrag.utils import EmbeddingFunc
 from typing import Optional, List
 from enum import Enum

@@ -28,6 +31,7 @@ AZURE_OPENAI_ENDPOINT = os.getenv("AZURE_OPENAI_ENDPOINT")
 AZURE_EMBEDDING_DEPLOYMENT = os.getenv("AZURE_EMBEDDING_DEPLOYMENT")
 AZURE_EMBEDDING_API_VERSION = os.getenv("AZURE_EMBEDDING_API_VERSION")
 
+
 def parse_args():
     parser = argparse.ArgumentParser(
         description="LightRAG FastAPI Server with OpenAI integration"

@@ -132,7 +136,7 @@ class SearchMode(str, Enum):
 class QueryRequest(BaseModel):
     query: str
     mode: SearchMode = SearchMode.hybrid
-    #stream: bool = False
+    # stream: bool = False
 
 
 class QueryResponse(BaseModel):

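As context for this request model, a minimal client sketch that exercises it. The /query route path and the port are assumptions (neither is visible in this hunk), and the requests library is assumed to be available; the "response" field matches the QueryResponse usage later in this diff.

# Hedged sketch: posting a QueryRequest to the server. Route path and
# port are assumptions not shown in this diff.
import requests

resp = requests.post(
    "http://localhost:8020/query",
    json={"query": "What are the top themes?", "mode": "hybrid"},
)
print(resp.json()["response"])
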
@@ -205,10 +209,11 @@ def create_app(args):
         embedding_func=EmbeddingFunc(
             embedding_dim=embedding_dim,
             max_token_size=args.max_embed_tokens,
-            func=lambda texts: azure_openai_embedding(texts, model=args.embedding_model),
+            func=lambda texts: azure_openai_embedding(
+                texts, model=args.embedding_model
+            ),
         ),
     )
 
 
     @app.on_event("startup")
     async def startup_event():

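For reference, a standalone sketch of the EmbeddingFunc wiring this hunk reformats. The field names (embedding_dim, max_token_size, func) come from the diff; the concrete numbers are illustrative assumptions, as the real values come from embedding_dim and args.max_embed_tokens at runtime.

# Hedged sketch of the EmbeddingFunc wrapper as configured above.
import asyncio

from lightrag.llm import azure_openai_embedding
from lightrag.utils import EmbeddingFunc

embedding_func = EmbeddingFunc(
    embedding_dim=3072,   # assumed: text-embedding-3-large output size
    max_token_size=8192,  # assumed: per-request token cap
    func=lambda texts: azure_openai_embedding(
        texts, model="text-embedding-3-large"
    ),
)


async def main():
    # EmbeddingFunc is assumed to be awaitable and to delegate to func
    vectors = await embedding_func(["hello", "world"])
    print(vectors.shape)


asyncio.run(main())
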
@@ -266,9 +271,7 @@ def create_app(args):
             if os.path.exists(cachefile):
                 with open(cachefile, "w") as f:
                     f.write("{}")
-            return {
-                "status": "success"
-            }
+            return {"status": "success"}
         except Exception as e:
             raise HTTPException(status_code=500, detail=str(e))
 

@@ -319,15 +322,17 @@ def create_app(args):
                 param=QueryParam(mode=request.mode, stream=True),
             )
             if inspect.isasyncgen(response):
 
                 async def stream_generator():
                     async for chunk in response:
                         yield json.dumps({"data": chunk}) + "\n"
 
-                return StreamingResponse(stream_generator(), media_type="application/json")
+                return StreamingResponse(
+                    stream_generator(), media_type="application/json"
+                )
             else:
                 return QueryResponse(response=response)
 
 
         except Exception as e:
             raise HTTPException(status_code=500, detail=str(e))

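The streaming branch above emits newline-delimited JSON, one {"data": ...} object per line. A hedged consumer sketch follows; the route path, host, and port are assumptions, and httpx is not among this commit's requirements.

# Hedged sketch: reading the newline-delimited JSON stream produced by
# stream_generator() above. URL and route are assumptions.
import json

import httpx

with httpx.stream(
    "POST",
    "http://localhost:8020/query/stream",
    json={"query": "Summarize the corpus.", "mode": "hybrid"},
    timeout=None,
) as resp:
    for line in resp.iter_lines():
        if line:
            print(json.loads(line)["data"], end="", flush=True)
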
@@ -433,5 +438,6 @@ def create_app(args):
 if __name__ == "__main__":
     args = parse_args()
     import uvicorn
+
     app = create_app(args)
     uvicorn.run(app, host=args.host, port=args.port)

@@ -1,17 +1,17 @@
|
|||||||
|
aioboto3
|
||||||
ascii_colors
|
ascii_colors
|
||||||
fastapi
|
fastapi
|
||||||
python-multipart
|
|
||||||
uvicorn
|
|
||||||
nest_asyncio
|
|
||||||
lightrag-hku
|
lightrag-hku
|
||||||
tqdm
|
nano_vectordb
|
||||||
aioboto3
|
nest_asyncio
|
||||||
numpy
|
numpy
|
||||||
ollama
|
ollama
|
||||||
torch
|
|
||||||
openai
|
openai
|
||||||
|
python-dotenv
|
||||||
|
python-multipart
|
||||||
tenacity
|
tenacity
|
||||||
transformers
|
|
||||||
tiktoken
|
tiktoken
|
||||||
nano_vectordb
|
torch
|
||||||
python-dotenv
|
tqdm
|
||||||
|
transformers
|
||||||
|
uvicorn
|
||||||
|
@@ -4,4 +4,4 @@ AZURE_OPENAI_API_KEY=myapikey
 AZURE_OPENAI_ENDPOINT=https://myendpoint.openai.azure.com
 
 AZURE_EMBEDDING_DEPLOYMENT=text-embedding-3-large
 AZURE_EMBEDDING_API_VERSION=2023-05-15

@@ -148,7 +148,7 @@ async def azure_openai_complete_if_cache(
     response = await openai_async_client.chat.completions.create(
         model=model, messages=messages, **kwargs
     )
 
     if hasattr(response, "__aiter__"):
 
         async def inner():

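For context, the pattern around this hunk: when stream=True is passed through **kwargs, the async OpenAI client returns an async iterator of chunks instead of a single completion, and the __aiter__ check routes the two cases. A hedged sketch follows; only the create(...) call and the __aiter__ check appear in the diff itself, so the body of inner() is an assumption.

# Hedged sketch of the streaming/non-streaming dispatch in
# azure_openai_complete_if_cache.
async def complete(openai_async_client, model, messages, **kwargs):
    response = await openai_async_client.chat.completions.create(
        model=model, messages=messages, **kwargs
    )

    if hasattr(response, "__aiter__"):  # stream=True: async iterator of chunks

        async def inner():
            async for chunk in response:
                content = chunk.choices[0].delta.content
                if content:
                    yield content

        return inner()

    return response.choices[0].message.content  # single completion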