Separated LLMs from the main llm.py file and fixed some deprecation bugs

This commit is contained in:
Saifeddine ALOUI
2025-01-25 00:11:00 +01:00
parent 7e1638525c
commit 34018cb1e0
55 changed files with 2144 additions and 1301 deletions
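
The diffs below update the example scripts for the new module layout: the OpenAI helpers now live in a dedicated lightrag.llm.openai module, and the deprecated openai_embedding name is replaced by openai_embed. A minimal sketch of the migrated embedding call, based only on what these hunks show (arguments beyond api_key are not visible in the truncated hunks and are omitted here):

import os

import numpy as np

# New import path after this refactor; these helpers previously lived in lightrag.llm
from lightrag.llm.openai import openai_complete_if_cache, openai_embed


async def embedding_func(texts: list[str]) -> np.ndarray:
    # openai_embedding(...) is now openai_embed(...); the call shape is otherwise unchanged.
    return await openai_embed(
        texts,
        model="solar-embedding-1-large-query",
        api_key=os.getenv("UPSTAGE_API_KEY"),
    )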

View File

@@ -5,7 +5,7 @@ import numpy as np
 from lightrag import LightRAG
 from lightrag.utils import EmbeddingFunc
-from lightrag.llm import openai_complete_if_cache, openai_embedding
+from lightrag.llm.openai import openai_complete_if_cache, openai_embed
 ## For Upstage API
@@ -25,7 +25,7 @@ async def llm_model_func(
 async def embedding_func(texts: list[str]) -> np.ndarray:
-    return await openai_embedding(
+    return await openai_embed(
         texts,
         model="solar-embedding-1-large-query",
         api_key=os.getenv("UPSTAGE_API_KEY"),

View File

@@ -4,7 +4,7 @@ import json
 import asyncio
 from lightrag import LightRAG, QueryParam
 from tqdm import tqdm
-from lightrag.llm import openai_complete_if_cache, openai_embedding
+from lightrag.llm.openai import openai_complete_if_cache, openai_embed
 from lightrag.utils import EmbeddingFunc
 import numpy as np
@@ -26,7 +26,7 @@ async def llm_model_func(
 async def embedding_func(texts: list[str]) -> np.ndarray:
-    return await openai_embedding(
+    return await openai_embed(
         texts,
         model="solar-embedding-1-large-query",
         api_key=os.getenv("UPSTAGE_API_KEY"),