zrguo
2025-03-03 18:33:42 +08:00
parent 887388c317
commit 1611400854
41 changed files with 1390 additions and 1301 deletions


@@ -12,6 +12,7 @@ import os
 from lightrag import LightRAG, QueryParam
 from lightrag.llm.ollama import ollama_embed, ollama_model_complete
 from lightrag.utils import EmbeddingFunc
+from lightrag.kg.shared_storage import initialize_pipeline_status

 WORKING_DIR = "./dickens_gremlin"
@@ -31,59 +32,72 @@ os.environ["GREMLIN_TRAVERSE_SOURCE"] = "g"
 os.environ["GREMLIN_USER"] = ""
 os.environ["GREMLIN_PASSWORD"] = ""

-rag = LightRAG(
-    working_dir=WORKING_DIR,
-    llm_model_func=ollama_model_complete,
-    llm_model_name="llama3.1:8b",
-    llm_model_max_async=4,
-    llm_model_max_token_size=32768,
-    llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 32768}},
-    embedding_func=EmbeddingFunc(
-        embedding_dim=768,
-        max_token_size=8192,
-        func=lambda texts: ollama_embed(
-            texts, embed_model="nomic-embed-text", host="http://localhost:11434"
-        ),
-    ),
-    graph_storage="GremlinStorage",
-)
-
-with open("./book.txt", "r", encoding="utf-8") as f:
-    rag.insert(f.read())
-
-# Perform naive search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
-
-# Perform local search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
-
-# Perform global search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
-
-# Perform hybrid search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
-
-# stream response
-resp = rag.query(
-    "What are the top themes in this story?",
-    param=QueryParam(mode="hybrid", stream=True),
-)
+async def initialize_rag():
+    rag = LightRAG(
+        working_dir=WORKING_DIR,
+        llm_model_func=ollama_model_complete,
+        llm_model_name="llama3.1:8b",
+        llm_model_max_async=4,
+        llm_model_max_token_size=32768,
+        llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 32768}},
+        embedding_func=EmbeddingFunc(
+            embedding_dim=768,
+            max_token_size=8192,
+            func=lambda texts: ollama_embed(
+                texts, embed_model="nomic-embed-text", host="http://localhost:11434"
+            ),
+        ),
+        graph_storage="GremlinStorage",
+    )
+
+    await rag.initialize_storages()
+    await initialize_pipeline_status()
+
+    return rag


 async def print_stream(stream):
     async for chunk in stream:
         print(chunk, end="", flush=True)


-if inspect.isasyncgen(resp):
-    asyncio.run(print_stream(resp))
-else:
-    print(resp)
+def main():
+    # Initialize RAG instance
+    rag = asyncio.run(initialize_rag())
+
+    # Insert example text
+    with open("./book.txt", "r", encoding="utf-8") as f:
+        rag.insert(f.read())
+
+    # Test different query modes
+    print("\nNaive Search:")
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+    )
+
+    print("\nLocal Search:")
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+    )
+
+    print("\nGlobal Search:")
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+    )
+
+    print("\nHybrid Search:")
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+    )
+
+    # stream response
+    resp = rag.query(
+        "What are the top themes in this story?",
+        param=QueryParam(mode="hybrid", stream=True),
+    )
+
+    if inspect.isasyncgen(resp):
+        asyncio.run(print_stream(resp))
+    else:
+        print(resp)
+
+
+if __name__ == "__main__":
+    main()
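
For reference, the refactored example still hops between the sync wrappers and the event loop (asyncio.run inside main, then again for streaming). A minimal sketch of a single-event-loop variant is below; it assumes LightRAG also exposes async counterparts ainsert and aquery to the sync insert/query wrappers used in this diff, and reuses the initialize_rag helper added by this commit. Treat it as an illustration of the pattern, not code from this repository.

import asyncio
import inspect

from lightrag import QueryParam


async def amain():
    # initialize_rag() is the helper introduced in this commit.
    rag = await initialize_rag()

    # ainsert/aquery are assumed async counterparts of the sync
    # insert/query wrappers used in the example above.
    with open("./book.txt", "r", encoding="utf-8") as f:
        await rag.ainsert(f.read())

    for mode in ("naive", "local", "global", "hybrid"):
        print(f"\n{mode.title()} Search:")
        print(
            await rag.aquery(
                "What are the top themes in this story?",
                param=QueryParam(mode=mode),
            )
        )

    # Streaming can then be consumed directly with async-for,
    # keeping the same isasyncgen guard the example uses.
    resp = await rag.aquery(
        "What are the top themes in this story?",
        param=QueryParam(mode="hybrid", stream=True),
    )
    if inspect.isasyncgen(resp):
        async for chunk in resp:
            print(chunk, end="", flush=True)
    else:
        print(resp)


if __name__ == "__main__":
    asyncio.run(amain())

With everything awaited inside one coroutine, the nested asyncio.run calls disappear, which avoids creating a fresh event loop per streaming response.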