Add huggingface model support
@@ -1,18 +0,0 @@
-import os
-import sys
-
-from lightrag import LightRAG
-
-# os.environ["OPENAI_API_KEY"] = ""
-
-WORKING_DIR = ""
-
-if not os.path.exists(WORKING_DIR):
-    os.mkdir(WORKING_DIR)
-
-rag = LightRAG(working_dir=WORKING_DIR)
-
-with open('./text.txt', 'r') as f:
-    text = f.read()
-
-rag.insert(text)
examples/lightrag_hf_demo.py (new file)
@@ -0,0 +1,36 @@
+import os
+import sys
+
+from lightrag import LightRAG, QueryParam
+from lightrag.llm import hf_model_complete, hf_embedding
+from transformers import AutoModel, AutoTokenizer
+
+WORKING_DIR = "./dickens"
+
+if not os.path.exists(WORKING_DIR):
+    os.mkdir(WORKING_DIR)
+
+rag = LightRAG(
+    working_dir=WORKING_DIR,
+    llm_model_func=hf_model_complete,
+    llm_model_name="meta-llama/Llama-3.1-8B-Instruct",
+    embedding_func=hf_embedding,
+    tokenizer=AutoTokenizer.from_pretrained("sentence-transformers/all-MiniLM-L6-v2"),
+    embed_model=AutoModel.from_pretrained("sentence-transformers/all-MiniLM-L6-v2")
+)
+
+
+with open("./book.txt") as f:
+    rag.insert(f.read())
+
+# Perform naive search
+print(rag.query("What are the top themes in this story?", param=QueryParam(mode="naive")))
+
+# Perform local search
+print(rag.query("What are the top themes in this story?", param=QueryParam(mode="local")))
+
+# Perform global search
+print(rag.query("What are the top themes in this story?", param=QueryParam(mode="global")))
+
+# Perform hybrid search
+print(rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid")))
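For context on the tokenizer/embed_model pair passed above: this diff does not show hf_embedding's implementation, so the snippet below is only a minimal sketch of the usual Hugging Face pattern such a helper would follow: tokenize a batch, run the encoder, and mean-pool the token states into one vector per text. The function name embed_texts and the pooling choice are assumptions, not LightRAG's actual code.

import torch

def embed_texts(texts, tokenizer, embed_model):
    # Hypothetical stand-in for hf_embedding: batch-tokenize with padding so
    # the tensors align, then run the encoder without tracking gradients.
    inputs = tokenizer(texts, return_tensors="pt", padding=True, truncation=True)
    with torch.no_grad():
        outputs = embed_model(**inputs)
    # Mean-pool only over real tokens (attention_mask == 1), not padding
    # (assumed pooling strategy; CLS pooling is another common choice).
    mask = inputs["attention_mask"].unsqueeze(-1).float()
    summed = (outputs.last_hidden_state * mask).sum(dim=1)
    return (summed / mask.sum(dim=1)).numpy()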
examples/lightrag_openai_demo.py (new file)
@@ -0,0 +1,32 @@
+import os
+import sys
+
+from lightrag import LightRAG, QueryParam
+from lightrag.llm import gpt_4o_mini_complete, gpt_4o_complete
+
+WORKING_DIR = "./dickens"
+
+if not os.path.exists(WORKING_DIR):
+    os.mkdir(WORKING_DIR)
+
+rag = LightRAG(
+    working_dir=WORKING_DIR,
+    llm_model_func=gpt_4o_complete
+    # llm_model_func=gpt_4o_mini_complete
+)
+
+
+with open("./book.txt") as f:
+    rag.insert(f.read())
+
+# Perform naive search
+print(rag.query("What are the top themes in this story?", param=QueryParam(mode="naive")))
+
+# Perform local search
+print(rag.query("What are the top themes in this story?", param=QueryParam(mode="local")))
+
+# Perform global search
+print(rag.query("What are the top themes in this story?", param=QueryParam(mode="global")))
+
+# Perform hybrid search
+print(rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid")))
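The OpenAI demo assumes the API key is already present in the environment; the deleted script below carries the same hint as a comment. One way to supply it before constructing LightRAG, with the placeholder value left for you to fill in:

import os

# Must be set before the first OpenAI call; alternatively run
# `export OPENAI_API_KEY=...` in the shell before starting the demo.
os.environ["OPENAI_API_KEY"] = ""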
@@ -1,16 +0,0 @@
-import os
-import sys
-
-from lightrag import LightRAG, QueryParam
-
-# os.environ["OPENAI_API_KEY"] = ""
-
-WORKING_DIR = ""
-
-rag = LightRAG(working_dir=WORKING_DIR)
-
-mode = 'global'
-query_param = QueryParam(mode=mode)
-
-result = rag.query("", param=query_param)
-print(result)