Remove deprecated demo code
@@ -1,113 +0,0 @@
import asyncio
import nest_asyncio

import inspect
import logging
import os

from lightrag import LightRAG, QueryParam
from lightrag.llm.ollama import ollama_embed, ollama_model_complete
from lightrag.utils import EmbeddingFunc
from lightrag.kg.shared_storage import initialize_pipeline_status

nest_asyncio.apply()

WORKING_DIR = "./dickens_age"

logging.basicConfig(format="%(levelname)s:%(message)s", level=logging.INFO)

if not os.path.exists(WORKING_DIR):
    os.mkdir(WORKING_DIR)

# AGE
os.environ["AGE_POSTGRES_DB"] = "postgresDB"
os.environ["AGE_POSTGRES_USER"] = "postgresUser"
os.environ["AGE_POSTGRES_PASSWORD"] = "postgresPW"
os.environ["AGE_POSTGRES_HOST"] = "localhost"
os.environ["AGE_POSTGRES_PORT"] = "5455"
os.environ["AGE_GRAPH_NAME"] = "dickens"


async def initialize_rag():
    rag = LightRAG(
        working_dir=WORKING_DIR,
        llm_model_func=ollama_model_complete,
        llm_model_name="llama3.1:8b",
        llm_model_max_async=4,
        llm_model_max_token_size=32768,
        llm_model_kwargs={
            "host": "http://localhost:11434",
            "options": {"num_ctx": 32768},
        },
        embedding_func=EmbeddingFunc(
            embedding_dim=768,
            max_token_size=8192,
            func=lambda texts: ollama_embed(
                texts, embed_model="nomic-embed-text", host="http://localhost:11434"
            ),
        ),
        graph_storage="AGEStorage",
    )

    await rag.initialize_storages()
    await initialize_pipeline_status()

    return rag


async def print_stream(stream):
    async for chunk in stream:
        print(chunk, end="", flush=True)


def main():
    # Initialize RAG instance
    rag = asyncio.run(initialize_rag())

    # Insert example text
    with open("./book.txt", "r", encoding="utf-8") as f:
        rag.insert(f.read())

    # Test different query modes
    print("\nNaive Search:")
    print(
        rag.query(
            "What are the top themes in this story?", param=QueryParam(mode="naive")
        )
    )

    print("\nLocal Search:")
    print(
        rag.query(
            "What are the top themes in this story?", param=QueryParam(mode="local")
        )
    )

    print("\nGlobal Search:")
    print(
        rag.query(
            "What are the top themes in this story?", param=QueryParam(mode="global")
        )
    )

    print("\nHybrid Search:")
    print(
        rag.query(
            "What are the top themes in this story?", param=QueryParam(mode="hybrid")
        )
    )

    # stream response
    resp = rag.query(
        "What are the top themes in this story?",
        param=QueryParam(mode="hybrid", stream=True),
    )

    if inspect.isasyncgen(resp):
        asyncio.run(print_stream(resp))
    else:
        print(resp)


if __name__ == "__main__":
    main()
@@ -1,103 +0,0 @@
import os
import asyncio
from lightrag import LightRAG, QueryParam
from lightrag.llm.openai import openai_complete_if_cache
from lightrag.llm.siliconcloud import siliconcloud_embedding
from lightrag.utils import EmbeddingFunc
import numpy as np
from lightrag.kg.shared_storage import initialize_pipeline_status

WORKING_DIR = "./dickens"

if not os.path.exists(WORKING_DIR):
    os.mkdir(WORKING_DIR)


async def llm_model_func(
    prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
) -> str:
    return await openai_complete_if_cache(
        "Qwen/Qwen2.5-7B-Instruct",
        prompt,
        system_prompt=system_prompt,
        history_messages=history_messages,
        api_key=os.getenv("SILICONFLOW_API_KEY"),
        base_url="https://api.siliconflow.cn/v1/",
        **kwargs,
    )


async def embedding_func(texts: list[str]) -> np.ndarray:
    return await siliconcloud_embedding(
        texts,
        model="netease-youdao/bce-embedding-base_v1",
        api_key=os.getenv("SILICONFLOW_API_KEY"),
        max_token_size=512,
    )


# function test
async def test_funcs():
    result = await llm_model_func("How are you?")
    print("llm_model_func: ", result)

    result = await embedding_func(["How are you?"])
    print("embedding_func: ", result)


asyncio.run(test_funcs())


async def initialize_rag():
    rag = LightRAG(
        working_dir=WORKING_DIR,
        llm_model_func=llm_model_func,
        embedding_func=EmbeddingFunc(
            embedding_dim=768, max_token_size=512, func=embedding_func
        ),
    )

    await rag.initialize_storages()
    await initialize_pipeline_status()

    return rag


def main():
    # Initialize RAG instance
    rag = asyncio.run(initialize_rag())

    with open("./book.txt", "r", encoding="utf-8") as f:
        rag.insert(f.read())

    # Perform naive search
    print(
        rag.query(
            "What are the top themes in this story?", param=QueryParam(mode="naive")
        )
    )

    # Perform local search
    print(
        rag.query(
            "What are the top themes in this story?", param=QueryParam(mode="local")
        )
    )

    # Perform global search
    print(
        rag.query(
            "What are the top themes in this story?", param=QueryParam(mode="global")
        )
    )

    # Perform hybrid search
    print(
        rag.query(
            "What are the top themes in this story?", param=QueryParam(mode="hybrid")
        )
    )


if __name__ == "__main__":
    main()
@@ -1,110 +0,0 @@
import os
import asyncio
from lightrag import LightRAG, QueryParam
from lightrag.llm.openai import openai_complete_if_cache
from lightrag.llm.siliconcloud import siliconcloud_embedding
from lightrag.utils import EmbeddingFunc
from lightrag.utils import TokenTracker
import numpy as np
from lightrag.kg.shared_storage import initialize_pipeline_status
from dotenv import load_dotenv

load_dotenv()

token_tracker = TokenTracker()
WORKING_DIR = "./dickens"

if not os.path.exists(WORKING_DIR):
    os.mkdir(WORKING_DIR)


async def llm_model_func(
    prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
) -> str:
    return await openai_complete_if_cache(
        "Qwen/Qwen2.5-7B-Instruct",
        prompt,
        system_prompt=system_prompt,
        history_messages=history_messages,
        api_key=os.getenv("SILICONFLOW_API_KEY"),
        base_url="https://api.siliconflow.cn/v1/",
        token_tracker=token_tracker,
        **kwargs,
    )


async def embedding_func(texts: list[str]) -> np.ndarray:
    return await siliconcloud_embedding(
        texts,
        model="BAAI/bge-m3",
        api_key=os.getenv("SILICONFLOW_API_KEY"),
        max_token_size=512,
    )


# function test
async def test_funcs():
    # Context Manager Method
    with token_tracker:
        result = await llm_model_func("How are you?")
        print("llm_model_func: ", result)


asyncio.run(test_funcs())


async def initialize_rag():
    rag = LightRAG(
        working_dir=WORKING_DIR,
        llm_model_func=llm_model_func,
        embedding_func=EmbeddingFunc(
            embedding_dim=1024, max_token_size=512, func=embedding_func
        ),
    )

    await rag.initialize_storages()
    await initialize_pipeline_status()

    return rag


def main():
    # Initialize RAG instance
    rag = asyncio.run(initialize_rag())

    # Reset tracker before processing queries
    token_tracker.reset()

    with open("./book.txt", "r", encoding="utf-8") as f:
        rag.insert(f.read())

    print(
        rag.query(
            "What are the top themes in this story?", param=QueryParam(mode="naive")
        )
    )

    print(
        rag.query(
            "What are the top themes in this story?", param=QueryParam(mode="local")
        )
    )

    print(
        rag.query(
            "What are the top themes in this story?", param=QueryParam(mode="global")
        )
    )

    print(
        rag.query(
            "What are the top themes in this story?", param=QueryParam(mode="hybrid")
        )
    )

    # Display final token usage after main query
    print("Token usage:", token_tracker.get_usage())


if __name__ == "__main__":
    main()
@@ -1,116 +0,0 @@
###########################################
# TiDB storage implementation is deprecated
###########################################

import asyncio
import os

import numpy as np

from lightrag import LightRAG, QueryParam
from lightrag.llm import siliconcloud_embedding, openai_complete_if_cache
from lightrag.utils import EmbeddingFunc
from lightrag.kg.shared_storage import initialize_pipeline_status

WORKING_DIR = "./dickens"

# We use SiliconCloud API to call LLM on Oracle Cloud
# More docs here https://docs.siliconflow.cn/introduction
BASE_URL = "https://api.siliconflow.cn/v1/"
APIKEY = ""
CHATMODEL = ""
EMBEDMODEL = ""

os.environ["TIDB_HOST"] = ""
os.environ["TIDB_PORT"] = ""
os.environ["TIDB_USER"] = ""
os.environ["TIDB_PASSWORD"] = ""
os.environ["TIDB_DATABASE"] = "lightrag"

if not os.path.exists(WORKING_DIR):
    os.mkdir(WORKING_DIR)


async def llm_model_func(
    prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
) -> str:
    return await openai_complete_if_cache(
        CHATMODEL,
        prompt,
        system_prompt=system_prompt,
        history_messages=history_messages,
        api_key=APIKEY,
        base_url=BASE_URL,
        **kwargs,
    )


async def embedding_func(texts: list[str]) -> np.ndarray:
    return await siliconcloud_embedding(
        texts,
        # model=EMBEDMODEL,
        api_key=APIKEY,
    )


async def get_embedding_dim():
    test_text = ["This is a test sentence."]
    embedding = await embedding_func(test_text)
    embedding_dim = embedding.shape[1]
    return embedding_dim


async def initialize_rag():
    # Detect embedding dimension
    embedding_dimension = await get_embedding_dim()
    print(f"Detected embedding dimension: {embedding_dimension}")

    # Initialize LightRAG
    # We use TiDB DB as the KV/vector
    rag = LightRAG(
        enable_llm_cache=False,
        working_dir=WORKING_DIR,
        chunk_token_size=512,
        llm_model_func=llm_model_func,
        embedding_func=EmbeddingFunc(
            embedding_dim=embedding_dimension,
            max_token_size=512,
            func=embedding_func,
        ),
        kv_storage="TiDBKVStorage",
        vector_storage="TiDBVectorDBStorage",
        graph_storage="TiDBGraphStorage",
    )

    await rag.initialize_storages()
    await initialize_pipeline_status()

    return rag


async def main():
    try:
        # Initialize RAG instance
        rag = await initialize_rag()

        with open("./book.txt", "r", encoding="utf-8") as f:
            rag.insert(f.read())

        # Perform search in different modes
        modes = ["naive", "local", "global", "hybrid"]
        for mode in modes:
            print("=" * 20, mode, "=" * 20)
            print(
                await rag.aquery(
                    "What are the top themes in this story?",
                    param=QueryParam(mode=mode),
                )
            )
            print("-" * 100, "\n")

    except Exception as e:
        print(f"An error occurred: {e}")


if __name__ == "__main__":
    asyncio.run(main())
@@ -1,136 +0,0 @@
import os
import asyncio
from lightrag import LightRAG, QueryParam
from lightrag.utils import EmbeddingFunc
import numpy as np
from dotenv import load_dotenv
import logging
from openai import OpenAI
from lightrag.kg.shared_storage import initialize_pipeline_status

logging.basicConfig(level=logging.INFO)

load_dotenv()

LLM_MODEL = os.environ.get("LLM_MODEL", "qwen-turbo-latest")
LLM_BINDING_HOST = "https://dashscope.aliyuncs.com/compatible-mode/v1"
LLM_BINDING_API_KEY = os.getenv("LLM_BINDING_API_KEY")

EMBEDDING_MODEL = os.environ.get("EMBEDDING_MODEL", "text-embedding-v3")
EMBEDDING_BINDING_HOST = os.getenv("EMBEDDING_BINDING_HOST", LLM_BINDING_HOST)
EMBEDDING_BINDING_API_KEY = os.getenv("EMBEDDING_BINDING_API_KEY", LLM_BINDING_API_KEY)
EMBEDDING_DIM = int(os.environ.get("EMBEDDING_DIM", 1024))
EMBEDDING_MAX_TOKEN_SIZE = int(os.environ.get("EMBEDDING_MAX_TOKEN_SIZE", 8192))
EMBEDDING_MAX_BATCH_SIZE = int(os.environ.get("EMBEDDING_MAX_BATCH_SIZE", 10))

print(f"LLM_MODEL: {LLM_MODEL}")
print(f"EMBEDDING_MODEL: {EMBEDDING_MODEL}")

WORKING_DIR = "./dickens"

if os.path.exists(WORKING_DIR):
    import shutil

    shutil.rmtree(WORKING_DIR)

os.mkdir(WORKING_DIR)


async def llm_model_func(
    prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
) -> str:
    client = OpenAI(
        api_key=LLM_BINDING_API_KEY,
        base_url=LLM_BINDING_HOST,
    )

    messages = []
    if system_prompt:
        messages.append({"role": "system", "content": system_prompt})
    if history_messages:
        messages.extend(history_messages)
    messages.append({"role": "user", "content": prompt})

    chat_completion = client.chat.completions.create(
        model=LLM_MODEL,
        messages=messages,
        temperature=kwargs.get("temperature", 0),
        top_p=kwargs.get("top_p", 1),
        n=kwargs.get("n", 1),
        extra_body={"enable_thinking": False},
    )
    return chat_completion.choices[0].message.content


async def embedding_func(texts: list[str]) -> np.ndarray:
    client = OpenAI(
        api_key=EMBEDDING_BINDING_API_KEY,
        base_url=EMBEDDING_BINDING_HOST,
    )

    print("##### embedding: texts: %d #####" % len(texts))
    max_batch_size = EMBEDDING_MAX_BATCH_SIZE
    embeddings = []
    for i in range(0, len(texts), max_batch_size):
        batch = texts[i : i + max_batch_size]
        embedding = client.embeddings.create(model=EMBEDDING_MODEL, input=batch)
        embeddings += [item.embedding for item in embedding.data]

    return np.array(embeddings)


async def test_funcs():
    result = await llm_model_func("How are you?")
    print("llm_model_func response: ", result)

    result = await embedding_func(["How are you?"])
    print("embedding_func result shape: ", result.shape)
    print("Embedding dimension: ", result.shape[1])


asyncio.run(test_funcs())


async def initialize_rag():
    rag = LightRAG(
        working_dir=WORKING_DIR,
        llm_model_func=llm_model_func,
        embedding_func=EmbeddingFunc(
            embedding_dim=EMBEDDING_DIM,
            max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
            func=embedding_func,
        ),
    )

    await rag.initialize_storages()
    await initialize_pipeline_status()

    return rag


def main():
    rag = asyncio.run(initialize_rag())

    with open("./book.txt", "r", encoding="utf-8") as f:
        rag.insert(f.read())

    query_text = "What are the main themes?"

    print("Result (Naive):")
    print(rag.query(query_text, param=QueryParam(mode="naive")))

    print("\nResult (Local):")
    print(rag.query(query_text, param=QueryParam(mode="local")))

    print("\nResult (Global):")
    print(rag.query(query_text, param=QueryParam(mode="global")))

    print("\nResult (Hybrid):")
    print(rag.query(query_text, param=QueryParam(mode="hybrid")))

    print("\nResult (mix):")
    print(rag.query(query_text, param=QueryParam(mode="mix")))


if __name__ == "__main__":
    main()
@@ -1,80 +0,0 @@
import os
import logging
import asyncio

from lightrag import LightRAG, QueryParam
from lightrag.llm.zhipu import zhipu_complete, zhipu_embedding
from lightrag.utils import EmbeddingFunc
from lightrag.kg.shared_storage import initialize_pipeline_status

WORKING_DIR = "./dickens"

logging.basicConfig(format="%(levelname)s:%(message)s", level=logging.INFO)

if not os.path.exists(WORKING_DIR):
    os.mkdir(WORKING_DIR)

api_key = os.environ.get("ZHIPUAI_API_KEY")
if api_key is None:
    raise Exception("Please set ZHIPUAI_API_KEY in your environment")


async def initialize_rag():
    rag = LightRAG(
        working_dir=WORKING_DIR,
        llm_model_func=zhipu_complete,
        llm_model_name="glm-4-flashx",  # Model with the best cost/performance balance; change it here if needed.
        llm_model_max_async=4,
        llm_model_max_token_size=32768,
        embedding_func=EmbeddingFunc(
            embedding_dim=2048,  # Zhipu embedding-3 dimension
            max_token_size=8192,
            func=lambda texts: zhipu_embedding(texts),
        ),
    )

    await rag.initialize_storages()
    await initialize_pipeline_status()

    return rag


def main():
    # Initialize RAG instance
    rag = asyncio.run(initialize_rag())

    with open("./book.txt", "r", encoding="utf-8") as f:
        rag.insert(f.read())

    # Perform naive search
    print(
        rag.query(
            "What are the top themes in this story?", param=QueryParam(mode="naive")
        )
    )

    # Perform local search
    print(
        rag.query(
            "What are the top themes in this story?", param=QueryParam(mode="local")
        )
    )

    # Perform global search
    print(
        rag.query(
            "What are the top themes in this story?", param=QueryParam(mode="global")
        )
    )

    # Perform hybrid search
    print(
        rag.query(
            "What are the top themes in this story?", param=QueryParam(mode="hybrid")
        )
    )


if __name__ == "__main__":
    main()
@@ -1,109 +0,0 @@
import asyncio
import logging
import os
import time
from dotenv import load_dotenv

from lightrag import LightRAG, QueryParam
from lightrag.llm.zhipu import zhipu_complete
from lightrag.llm.ollama import ollama_embedding
from lightrag.utils import EmbeddingFunc
from lightrag.kg.shared_storage import initialize_pipeline_status

load_dotenv()
ROOT_DIR = os.environ.get("ROOT_DIR")
WORKING_DIR = f"{ROOT_DIR}/dickens-pg"

logging.basicConfig(format="%(levelname)s:%(message)s", level=logging.INFO)

if not os.path.exists(WORKING_DIR):
    os.mkdir(WORKING_DIR)

# AGE
os.environ["AGE_GRAPH_NAME"] = "dickens"

os.environ["POSTGRES_HOST"] = "localhost"
os.environ["POSTGRES_PORT"] = "15432"
os.environ["POSTGRES_USER"] = "rag"
os.environ["POSTGRES_PASSWORD"] = "rag"
os.environ["POSTGRES_DATABASE"] = "rag"


async def initialize_rag():
    rag = LightRAG(
        working_dir=WORKING_DIR,
        llm_model_func=zhipu_complete,
        llm_model_name="glm-4-flashx",
        llm_model_max_async=4,
        llm_model_max_token_size=32768,
        enable_llm_cache_for_entity_extract=True,
        embedding_func=EmbeddingFunc(
            embedding_dim=1024,
            max_token_size=8192,
            func=lambda texts: ollama_embedding(
                texts, embed_model="bge-m3", host="http://localhost:11434"
            ),
        ),
        kv_storage="PGKVStorage",
        doc_status_storage="PGDocStatusStorage",
        graph_storage="PGGraphStorage",
        vector_storage="PGVectorStorage",
        auto_manage_storages_states=False,
    )

    await rag.initialize_storages()
    await initialize_pipeline_status()

    return rag


async def main():
    # Initialize RAG instance
    rag = await initialize_rag()

    # Re-attach embedding_func to the graph storage; it was removed in commit 5661d76860436f7bf5aef2e50d9ee4a59660146c
    rag.chunk_entity_relation_graph.embedding_func = rag.embedding_func

    with open(f"{ROOT_DIR}/book.txt", "r", encoding="utf-8") as f:
        await rag.ainsert(f.read())

    print("==== Trying to test the rag queries ====")

    # Perform naive search
    print("**** Start Naive Query ****")
    start_time = time.time()
    print(
        await rag.aquery(
            "What are the top themes in this story?", param=QueryParam(mode="naive")
        )
    )
    print(f"Naive Query Time: {time.time() - start_time} seconds")

    # Perform local search
    print("**** Start Local Query ****")
    start_time = time.time()
    print(
        await rag.aquery(
            "What are the top themes in this story?", param=QueryParam(mode="local")
        )
    )
    print(f"Local Query Time: {time.time() - start_time} seconds")

    # Perform global search
    print("**** Start Global Query ****")
    start_time = time.time()
    print(
        await rag.aquery(
            "What are the top themes in this story?", param=QueryParam(mode="global")
        )
    )
    print(f"Global Query Time: {time.time() - start_time} seconds")

    # Perform hybrid search
    print("**** Start Hybrid Query ****")
    start_time = time.time()
    print(
        await rag.aquery(
            "What are the top themes in this story?", param=QueryParam(mode="hybrid")
        )
    )
    print(f"Hybrid Query Time: {time.time() - start_time} seconds")


if __name__ == "__main__":
    asyncio.run(main())