Separated LLM bindings from the main llm.py file and fixed some deprecation bugs

Saifeddine ALOUI
2025-01-25 00:11:00 +01:00
parent 7e1638525c
commit 34018cb1e0
55 changed files with 2144 additions and 1301 deletions


@@ -10,7 +10,7 @@ import os
 # logging.basicConfig(format="%(levelname)s:%(message)s", level=logging.WARN)
 from lightrag import LightRAG, QueryParam
-from lightrag.llm import ollama_embedding, ollama_model_complete
+from lightrag.llm.ollama import ollama_embed, ollama_model_complete
 from lightrag.utils import EmbeddingFunc
 WORKING_DIR = "./dickens_gremlin"
@@ -41,7 +41,7 @@ rag = LightRAG(
     embedding_func=EmbeddingFunc(
         embedding_dim=768,
         max_token_size=8192,
-        func=lambda texts: ollama_embedding(
+        func=lambda texts: ollama_embed(
             texts, embed_model="nomic-embed-text", host="http://localhost:11434"
         ),
     ),
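
For anyone updating their own scripts against this rename, below is a minimal sketch of what the example looks like after the change. Only the import line and the ollama_embed call come from the hunks above; the remaining LightRAG constructor arguments (working_dir, llm_model_func, llm_model_name), the directory setup, and the trailing query call are assumed from the usual layout of the example scripts, and the model name is a placeholder.

import os

from lightrag import LightRAG, QueryParam
from lightrag.llm.ollama import ollama_embed, ollama_model_complete
from lightrag.utils import EmbeddingFunc

WORKING_DIR = "./dickens_gremlin"
if not os.path.exists(WORKING_DIR):
    os.mkdir(WORKING_DIR)

rag = LightRAG(
    working_dir=WORKING_DIR,
    # Completion binding now lives in lightrag.llm.ollama instead of lightrag.llm
    llm_model_func=ollama_model_complete,
    llm_model_name="qwen2",  # placeholder model name, not taken from this diff
    embedding_func=EmbeddingFunc(
        embedding_dim=768,
        max_token_size=8192,
        # ollama_embedding was renamed to ollama_embed in this commit
        func=lambda texts: ollama_embed(
            texts, embed_model="nomic-embed-text", host="http://localhost:11434"
        ),
    ),
)

print(rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid")))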