Remove deprecated demo code
@@ -1,122 +0,0 @@
##############################################
# Gremlin storage implementation is deprecated
##############################################

import asyncio
import inspect
import os

# Uncomment these lines below to filter out somewhat verbose INFO level
# logging prints (the default loglevel is INFO).
# This has to go before the lightrag imports to work,
# which triggers linting errors, so we keep it commented out:
# import logging
# logging.basicConfig(format="%(levelname)s:%(message)s", level=logging.WARN)

from lightrag import LightRAG, QueryParam
from lightrag.llm.ollama import ollama_embed, ollama_model_complete
from lightrag.utils import EmbeddingFunc
from lightrag.kg.shared_storage import initialize_pipeline_status

WORKING_DIR = "./dickens_gremlin"

if not os.path.exists(WORKING_DIR):
    os.mkdir(WORKING_DIR)

# Gremlin
os.environ["GREMLIN_HOST"] = "localhost"
os.environ["GREMLIN_PORT"] = "8182"
os.environ["GREMLIN_GRAPH"] = "dickens"

# Creating a non-default source requires manual
# configuration and a restart on the server: use the default "g"
os.environ["GREMLIN_TRAVERSE_SOURCE"] = "g"

# No authorization by default on docker tinkerpop/gremlin-server
os.environ["GREMLIN_USER"] = ""
os.environ["GREMLIN_PASSWORD"] = ""


async def initialize_rag():
    rag = LightRAG(
        working_dir=WORKING_DIR,
        llm_model_func=ollama_model_complete,
        llm_model_name="llama3.1:8b",
        llm_model_max_async=4,
        llm_model_max_token_size=32768,
        llm_model_kwargs={
            "host": "http://localhost:11434",
            "options": {"num_ctx": 32768},
        },
        embedding_func=EmbeddingFunc(
            embedding_dim=768,
            max_token_size=8192,
            func=lambda texts: ollama_embed(
                texts, embed_model="nomic-embed-text", host="http://localhost:11434"
            ),
        ),
        graph_storage="GremlinStorage",
    )

    await rag.initialize_storages()
    await initialize_pipeline_status()

    return rag


async def print_stream(stream):
    async for chunk in stream:
        print(chunk, end="", flush=True)


def main():
    # Initialize RAG instance
    rag = asyncio.run(initialize_rag())

    # Insert example text
    with open("./book.txt", "r", encoding="utf-8") as f:
        rag.insert(f.read())

    # Test different query modes
    print("\nNaive Search:")
    print(
        rag.query(
            "What are the top themes in this story?", param=QueryParam(mode="naive")
        )
    )

    print("\nLocal Search:")
    print(
        rag.query(
            "What are the top themes in this story?", param=QueryParam(mode="local")
        )
    )

    print("\nGlobal Search:")
    print(
        rag.query(
            "What are the top themes in this story?", param=QueryParam(mode="global")
        )
    )

    print("\nHybrid Search:")
    print(
        rag.query(
            "What are the top themes in this story?", param=QueryParam(mode="hybrid")
        )
    )

    # stream response
    resp = rag.query(
        "What are the top themes in this story?",
        param=QueryParam(mode="hybrid", stream=True),
    )

    if inspect.isasyncgen(resp):
        asyncio.run(print_stream(resp))
    else:
        print(resp)


if __name__ == "__main__":
    main()
@@ -1,104 +0,0 @@
import os
from lightrag import LightRAG, QueryParam
from lightrag.llm.ollama import ollama_model_complete, ollama_embed
from lightrag.utils import EmbeddingFunc
import asyncio
import nest_asyncio

nest_asyncio.apply()
from lightrag.kg.shared_storage import initialize_pipeline_status

# WorkingDir
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
WORKING_DIR = os.path.join(ROOT_DIR, "myKG")
if not os.path.exists(WORKING_DIR):
    os.mkdir(WORKING_DIR)
print(f"WorkingDir: {WORKING_DIR}")

# mongo
os.environ["MONGO_URI"] = "mongodb://root:root@localhost:27017/"
os.environ["MONGO_DATABASE"] = "LightRAG"

# neo4j
BATCH_SIZE_NODES = 500
BATCH_SIZE_EDGES = 100
os.environ["NEO4J_URI"] = "bolt://localhost:7687"
os.environ["NEO4J_USERNAME"] = "neo4j"
os.environ["NEO4J_PASSWORD"] = "neo4j"

# milvus
os.environ["MILVUS_URI"] = "http://localhost:19530"
os.environ["MILVUS_USER"] = "root"
os.environ["MILVUS_PASSWORD"] = "root"
os.environ["MILVUS_DB_NAME"] = "lightrag"


async def initialize_rag():
    rag = LightRAG(
        working_dir=WORKING_DIR,
        llm_model_func=ollama_model_complete,
        llm_model_name="qwen2.5:14b",
        llm_model_max_async=4,
        llm_model_max_token_size=32768,
        llm_model_kwargs={
            "host": "http://127.0.0.1:11434",
            "options": {"num_ctx": 32768},
        },
        embedding_func=EmbeddingFunc(
            embedding_dim=1024,
            max_token_size=8192,
            func=lambda texts: ollama_embed(
                texts=texts, embed_model="bge-m3:latest", host="http://127.0.0.1:11434"
            ),
        ),
        kv_storage="MongoKVStorage",
        graph_storage="Neo4JStorage",
        vector_storage="MilvusVectorDBStorage",
    )

    await rag.initialize_storages()
    await initialize_pipeline_status()

    return rag


def main():
    # Initialize RAG instance
    rag = asyncio.run(initialize_rag())

    # Insert example text
    with open("./book.txt", "r", encoding="utf-8") as f:
        rag.insert(f.read())

    # Test different query modes
    print("\nNaive Search:")
    print(
        rag.query(
            "What are the top themes in this story?", param=QueryParam(mode="naive")
        )
    )

    print("\nLocal Search:")
    print(
        rag.query(
            "What are the top themes in this story?", param=QueryParam(mode="local")
        )
    )

    print("\nGlobal Search:")
    print(
        rag.query(
            "What are the top themes in this story?", param=QueryParam(mode="global")
        )
    )

    print("\nHybrid Search:")
    print(
        rag.query(
            "What are the top themes in this story?", param=QueryParam(mode="hybrid")
        )
    )


if __name__ == "__main__":
    main()
@@ -1,123 +0,0 @@
import os
import asyncio
from lightrag import LightRAG, QueryParam
from lightrag.llm.openai import openai_complete_if_cache, openai_embed
from lightrag.utils import EmbeddingFunc
import numpy as np
from lightrag.kg.shared_storage import initialize_pipeline_status

WORKING_DIR = "./dickens"

if not os.path.exists(WORKING_DIR):
    os.mkdir(WORKING_DIR)


async def llm_model_func(
    prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
) -> str:
    return await openai_complete_if_cache(
        "solar-mini",
        prompt,
        system_prompt=system_prompt,
        history_messages=history_messages,
        api_key=os.getenv("UPSTAGE_API_KEY"),
        base_url="https://api.upstage.ai/v1/solar",
        **kwargs,
    )


async def embedding_func(texts: list[str]) -> np.ndarray:
    return await openai_embed(
        texts,
        model="solar-embedding-1-large-query",
        api_key=os.getenv("UPSTAGE_API_KEY"),
        base_url="https://api.upstage.ai/v1/solar",
    )


async def get_embedding_dim():
    test_text = ["This is a test sentence."]
    embedding = await embedding_func(test_text)
    embedding_dim = embedding.shape[1]
    return embedding_dim


# function test
async def test_funcs():
    result = await llm_model_func("How are you?")
    print("llm_model_func: ", result)

    result = await embedding_func(["How are you?"])
    print("embedding_func: ", result)


# asyncio.run(test_funcs())


async def initialize_rag():
    embedding_dimension = await get_embedding_dim()
    print(f"Detected embedding dimension: {embedding_dimension}")

    rag = LightRAG(
        working_dir=WORKING_DIR,
        embedding_cache_config={
            "enabled": True,
            "similarity_threshold": 0.90,
        },
        llm_model_func=llm_model_func,
        embedding_func=EmbeddingFunc(
            embedding_dim=embedding_dimension,
            max_token_size=8192,
            func=embedding_func,
        ),
    )

    await rag.initialize_storages()
    await initialize_pipeline_status()

    return rag


async def main():
    try:
        # Initialize RAG instance
        rag = await initialize_rag()

        with open("./book.txt", "r", encoding="utf-8") as f:
            await rag.ainsert(f.read())

        # Perform naive search
        print(
            await rag.aquery(
                "What are the top themes in this story?", param=QueryParam(mode="naive")
            )
        )

        # Perform local search
        print(
            await rag.aquery(
                "What are the top themes in this story?", param=QueryParam(mode="local")
            )
        )

        # Perform global search
        print(
            await rag.aquery(
                "What are the top themes in this story?",
                param=QueryParam(mode="global"),
            )
        )

        # Perform hybrid search
        print(
            await rag.aquery(
                "What are the top themes in this story?",
                param=QueryParam(mode="hybrid"),
            )
        )
    except Exception as e:
        print(f"An error occurred: {e}")


if __name__ == "__main__":
    asyncio.run(main())