commit 1611400854
parent 887388c317
Author: zrguo
Date: 2025-03-03 18:33:42 +08:00
41 changed files with 1390 additions and 1301 deletions

View File

@@ -10,7 +10,7 @@ import os
from dotenv import load_dotenv
from lightrag.kg.postgres_impl import PostgreSQLDB, PGKVStorage
from lightrag.storage import JsonKVStorage
from lightrag.kg.json_kv_impl import JsonKVStorage
from lightrag.namespace import NameSpace
load_dotenv()
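For code still importing from the old location, the migration is a one-line change; a minimal sketch:
# Before this commit:
# from lightrag.storage import JsonKVStorage
# After: the JSON KV implementation lives alongside the other storage backends:
from lightrag.kg.json_kv_impl import JsonKVStorage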

View File

@@ -1,4 +1,5 @@
from fastapi import FastAPI, HTTPException, File, UploadFile
from contextlib import asynccontextmanager
from pydantic import BaseModel
import os
from lightrag import LightRAG, QueryParam
@@ -8,12 +9,12 @@ from typing import Optional
import asyncio
import nest_asyncio
import aiofiles
from lightrag.kg.shared_storage import initialize_pipeline_status
# Apply nest_asyncio to solve event loop issues
nest_asyncio.apply()
DEFAULT_RAG_DIR = "index_default"
app = FastAPI(title="LightRAG API", description="API for RAG operations")
DEFAULT_INPUT_FILE = "book.txt"
INPUT_FILE = os.environ.get("INPUT_FILE", f"{DEFAULT_INPUT_FILE}")
@@ -28,23 +29,41 @@ if not os.path.exists(WORKING_DIR):
os.mkdir(WORKING_DIR)
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=ollama_model_complete,
llm_model_name="gemma2:9b",
llm_model_max_async=4,
llm_model_max_token_size=8192,
llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 8192}},
embedding_func=EmbeddingFunc(
embedding_dim=768,
max_token_size=8192,
func=lambda texts: ollama_embed(
texts, embed_model="nomic-embed-text", host="http://localhost:11434"
async def init():
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=ollama_model_complete,
llm_model_name="gemma2:9b",
llm_model_max_async=4,
llm_model_max_token_size=8192,
llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 8192}},
embedding_func=EmbeddingFunc(
embedding_dim=768,
max_token_size=8192,
func=lambda texts: ollama_embed(
texts, embed_model="nomic-embed-text", host="http://localhost:11434"
),
),
),
)
# Add initialization code
await rag.initialize_storages()
await initialize_pipeline_status()
return rag
@asynccontextmanager
async def lifespan(app: FastAPI):
global rag
rag = await init()
print("done!")
yield
app = FastAPI(
title="LightRAG API", description="API for RAG operations", lifespan=lifespan
)
# Data models
class QueryRequest(BaseModel):
query: str
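The move into a lifespan handler is needed because module-level code cannot await anything, and asyncio.run() would collide with the event loop uvicorn already runs. A self-contained sketch of the pattern, with slow_init as a hypothetical stand-in for the LightRAG setup:
from contextlib import asynccontextmanager
from fastapi import FastAPI
import asyncio

resource = None  # stand-in for the global `rag` instance

async def slow_init() -> str:
    await asyncio.sleep(0)  # placeholder for LightRAG construction and storage init
    return "ready"

@asynccontextmanager
async def lifespan(app: FastAPI):
    global resource
    resource = await slow_init()  # runs on the server's own event loop
    yield  # the application serves requests here
    resource = None  # teardown after shutdown

app = FastAPI(title="demo", lifespan=lifespan)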

View File

@@ -1,4 +1,5 @@
from fastapi import FastAPI, HTTPException, File, UploadFile
from contextlib import asynccontextmanager
from pydantic import BaseModel
import os
from lightrag import LightRAG, QueryParam
@@ -8,6 +9,7 @@ import numpy as np
from typing import Optional
import asyncio
import nest_asyncio
from lightrag.kg.shared_storage import initialize_pipeline_status
# Apply nest_asyncio to solve event loop issues
nest_asyncio.apply()
@@ -71,16 +73,35 @@ async def get_embedding_dim():
# Initialize RAG instance
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=llm_model_func,
embedding_func=EmbeddingFunc(
embedding_dim=asyncio.run(get_embedding_dim()),
max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
func=embedding_func,
),
)
async def init():
embedding_dimension = await get_embedding_dim()
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=llm_model_func,
embedding_func=EmbeddingFunc(
embedding_dim=embedding_dimension,
max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
func=embedding_func,
),
)
await rag.initialize_storages()
await initialize_pipeline_status()
return rag
@asynccontextmanager
async def lifespan(app: FastAPI):
global rag
rag = await init()
print("done!")
yield
app = FastAPI(
title="LightRAG API", description="API for RAG operations", lifespan=lifespan
)
# Data models

View File

@@ -1,101 +0,0 @@
import os
from lightrag import LightRAG, QueryParam
from lightrag.llm.openai import openai_complete_if_cache, openai_embed
from lightrag.utils import EmbeddingFunc
import numpy as np
import asyncio
import nest_asyncio
# Apply nest_asyncio to solve event loop issues
nest_asyncio.apply()
DEFAULT_RAG_DIR = "index_default"
# Configure working directory
WORKING_DIR = os.environ.get("RAG_DIR", f"{DEFAULT_RAG_DIR}")
print(f"WORKING_DIR: {WORKING_DIR}")
LLM_MODEL = os.environ.get("LLM_MODEL", "gpt-4o-mini")
print(f"LLM_MODEL: {LLM_MODEL}")
EMBEDDING_MODEL = os.environ.get("EMBEDDING_MODEL", "text-embedding-3-small")
print(f"EMBEDDING_MODEL: {EMBEDDING_MODEL}")
EMBEDDING_MAX_TOKEN_SIZE = int(os.environ.get("EMBEDDING_MAX_TOKEN_SIZE", 8192))
print(f"EMBEDDING_MAX_TOKEN_SIZE: {EMBEDDING_MAX_TOKEN_SIZE}")
BASE_URL = os.environ.get("BASE_URL", "https://api.openai.com/v1")
print(f"BASE_URL: {BASE_URL}")
API_KEY = os.environ.get("API_KEY", "xxxxxxxx")
print(f"API_KEY: {API_KEY}")
if not os.path.exists(WORKING_DIR):
os.mkdir(WORKING_DIR)
# LLM model function
async def llm_model_func(
prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
) -> str:
return await openai_complete_if_cache(
model=LLM_MODEL,
prompt=prompt,
system_prompt=system_prompt,
history_messages=history_messages,
base_url=BASE_URL,
api_key=API_KEY,
**kwargs,
)
# Embedding function
async def embedding_func(texts: list[str]) -> np.ndarray:
return await openai_embed(
texts=texts,
model=EMBEDDING_MODEL,
base_url=BASE_URL,
api_key=API_KEY,
)
async def get_embedding_dim():
test_text = ["This is a test sentence."]
embedding = await embedding_func(test_text)
embedding_dim = embedding.shape[1]
print(f"{embedding_dim=}")
return embedding_dim
# Initialize RAG instance
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=llm_model_func,
embedding_func=EmbeddingFunc(
embedding_dim=asyncio.run(get_embedding_dim()),
max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
func=embedding_func,
),
)
with open("./book.txt", "r", encoding="utf-8") as f:
rag.insert(f.read())
# Perform naive search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
)
# Perform local search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
)
# Perform global search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
)
# Perform hybrid search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
)
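The deleted demo repeated one query block per mode; a later file in this commit (the Bedrock example) collapses the four blocks into a loop, which reads as:
for mode in ["naive", "local", "global", "hybrid"]:
    print(rag.query("What are the top themes in this story?", param=QueryParam(mode=mode)))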

View File

@@ -16,6 +16,7 @@ from lightrag import LightRAG, QueryParam
from lightrag.llm.openai import openai_complete_if_cache, openai_embed
from lightrag.utils import EmbeddingFunc
import numpy as np
from lightrag.kg.shared_storage import initialize_pipeline_status
print(os.getcwd())
@@ -113,6 +114,9 @@ async def init():
vector_storage="OracleVectorDBStorage",
)
await rag.initialize_storages()
await initialize_pipeline_status()
return rag
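Every converted example now performs the same two-step bring-up before first use. A consolidated minimal sketch, built from the OpenAI demo defaults that appear later in this commit:
import asyncio
from lightrag import LightRAG
from lightrag.llm.openai import gpt_4o_mini_complete, openai_embed
from lightrag.kg.shared_storage import initialize_pipeline_status

async def initialize_rag() -> LightRAG:
    rag = LightRAG(
        working_dir="./rag_index",
        embedding_func=openai_embed,
        llm_model_func=gpt_4o_mini_complete,
    )
    await rag.initialize_storages()  # connect/create the storage backends first
    await initialize_pipeline_status()  # then register the shared pipeline state
    return rag

rag = asyncio.run(initialize_rag())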

View File

@@ -6,6 +6,7 @@ import numpy as np
from dotenv import load_dotenv
import logging
from openai import AzureOpenAI
from lightrag.kg.shared_storage import initialize_pipeline_status
logging.basicConfig(level=logging.INFO)
@@ -90,6 +91,9 @@ rag = LightRAG(
),
)
asyncio.run(rag.initialize_storages())  # coroutines must be driven by an event loop
asyncio.run(initialize_pipeline_status())
book1 = open("./book_1.txt", encoding="utf-8")
book2 = open("./book_2.txt", encoding="utf-8")
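A bare call to an async function only creates a coroutine object and never runs it, hence the asyncio.run wrappers above; a stand-in sketch of the failure mode:
import asyncio

async def initialize_storages() -> None:  # stand-in coroutine
    print("storages ready")

initialize_storages()  # RuntimeWarning: coroutine 'initialize_storages' was never awaited
asyncio.run(initialize_storages())  # actually executes and prints "storages ready"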

View File

@@ -8,6 +8,12 @@ import logging
from lightrag import LightRAG, QueryParam
from lightrag.llm.bedrock import bedrock_complete, bedrock_embed
from lightrag.utils import EmbeddingFunc
from lightrag.kg.shared_storage import initialize_pipeline_status
import asyncio
import nest_asyncio
nest_asyncio.apply()
logging.getLogger("aiobotocore").setLevel(logging.WARNING)
@@ -15,22 +21,31 @@ WORKING_DIR = "./dickens"
if not os.path.exists(WORKING_DIR):
os.mkdir(WORKING_DIR)
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=bedrock_complete,
llm_model_name="Anthropic Claude 3 Haiku // Amazon Bedrock",
embedding_func=EmbeddingFunc(
embedding_dim=1024, max_token_size=8192, func=bedrock_embed
),
)
with open("./book.txt", "r", encoding="utf-8") as f:
rag.insert(f.read())
for mode in ["naive", "local", "global", "hybrid"]:
print("\n+-" + "-" * len(mode) + "-+")
print(f"| {mode.capitalize()} |")
print("+-" + "-" * len(mode) + "-+\n")
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode=mode))
async def initialize_rag():
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=bedrock_complete,
llm_model_name="Anthropic Claude 3 Haiku // Amazon Bedrock",
embedding_func=EmbeddingFunc(
embedding_dim=1024, max_token_size=8192, func=bedrock_embed
),
)
await rag.initialize_storages()
await initialize_pipeline_status()
return rag
def main():
rag = asyncio.run(initialize_rag())
with open("./book.txt", "r", encoding="utf-8") as f:
rag.insert(f.read())
for mode in ["naive", "local", "global", "hybrid"]:
print("\n+-" + "-" * len(mode) + "-+")
print(f"| {mode.capitalize()} |")
print("+-" + "-" * len(mode) + "-+\n")
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode=mode))
)
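The hunk ends before the entry point; as in the other converted examples, main() presumably sits behind the standard guard:
if __name__ == "__main__":
    main()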

View File

@@ -8,6 +8,12 @@ from dotenv import load_dotenv
from lightrag.utils import EmbeddingFunc
from lightrag import LightRAG, QueryParam
from sentence_transformers import SentenceTransformer
from lightrag.kg.shared_storage import initialize_pipeline_status
import asyncio
import nest_asyncio
# Apply nest_asyncio to solve event loop issues
nest_asyncio.apply()
load_dotenv()
gemini_api_key = os.getenv("GEMINI_API_KEY")
@@ -60,25 +66,37 @@ async def embedding_func(texts: list[str]) -> np.ndarray:
return embeddings
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=llm_model_func,
embedding_func=EmbeddingFunc(
embedding_dim=384,
max_token_size=8192,
func=embedding_func,
),
)
async def initialize_rag():
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=llm_model_func,
embedding_func=EmbeddingFunc(
embedding_dim=384,
max_token_size=8192,
func=embedding_func,
),
)
file_path = "story.txt"
with open(file_path, "r") as file:
text = file.read()
await rag.initialize_storages()
await initialize_pipeline_status()
return rag
rag.insert(text)
def main():
# Initialize RAG instance
rag = asyncio.run(initialize_rag())
file_path = "story.txt"
with open(file_path, "r") as file:
text = file.read()
response = rag.query(
query="What is the main theme of the story?",
param=QueryParam(mode="hybrid", top_k=5, response_type="single line"),
)
rag.insert(text)
print(response)
response = rag.query(
query="What is the main theme of the story?",
param=QueryParam(mode="hybrid", top_k=5, response_type="single line"),
)
print(response)
if __name__ == "__main__":
main()
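nest_asyncio.apply() patches the event loop so asyncio.run() can be invoked even while a loop is already running (e.g. inside Jupyter); a runnable sketch:
import asyncio
import nest_asyncio

nest_asyncio.apply()  # allow nested calls into the running loop

async def inner() -> int:
    return 42

async def outer() -> int:
    # without nest_asyncio this raises
    # "asyncio.run() cannot be called from a running event loop"
    return asyncio.run(inner())

print(asyncio.run(outer()))  # -> 42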

View File

@@ -4,51 +4,68 @@ from lightrag import LightRAG, QueryParam
from lightrag.llm.hf import hf_model_complete, hf_embed
from lightrag.utils import EmbeddingFunc
from transformers import AutoModel, AutoTokenizer
from lightrag.kg.shared_storage import initialize_pipeline_status
import asyncio
import nest_asyncio
nest_asyncio.apply()
WORKING_DIR = "./dickens"
if not os.path.exists(WORKING_DIR):
os.mkdir(WORKING_DIR)
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=hf_model_complete,
llm_model_name="meta-llama/Llama-3.1-8B-Instruct",
embedding_func=EmbeddingFunc(
embedding_dim=384,
max_token_size=5000,
func=lambda texts: hf_embed(
texts,
tokenizer=AutoTokenizer.from_pretrained(
"sentence-transformers/all-MiniLM-L6-v2"
),
embed_model=AutoModel.from_pretrained(
"sentence-transformers/all-MiniLM-L6-v2"
async def initialize_rag():
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=hf_model_complete,
llm_model_name="meta-llama/Llama-3.1-8B-Instruct",
embedding_func=EmbeddingFunc(
embedding_dim=384,
max_token_size=5000,
func=lambda texts: hf_embed(
texts,
tokenizer=AutoTokenizer.from_pretrained(
"sentence-transformers/all-MiniLM-L6-v2"
),
embed_model=AutoModel.from_pretrained(
"sentence-transformers/all-MiniLM-L6-v2"
),
),
),
),
)
)
await rag.initialize_storages()
await initialize_pipeline_status()
with open("./book.txt", "r", encoding="utf-8") as f:
rag.insert(f.read())
return rag
# Perform naive search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
)
def main():
rag = asyncio.run(initialize_rag())
# Perform local search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
)
with open("./book.txt", "r", encoding="utf-8") as f:
rag.insert(f.read())
# Perform global search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
)
# Perform naive search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
)
# Perform hybrid search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
)
# Perform local search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
)
# Perform global search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
)
# Perform hybrid search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
)
if __name__ == "__main__":
main()
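One design note on the HF example: the lambda re-runs from_pretrained on every embedding call. Loading the checkpoint once is cheaper; a sketch using the same APIs the file already imports:
from transformers import AutoModel, AutoTokenizer
from lightrag.llm.hf import hf_embed
from lightrag.utils import EmbeddingFunc

# instantiate once instead of on every call
tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/all-MiniLM-L6-v2")
embed_model = AutoModel.from_pretrained("sentence-transformers/all-MiniLM-L6-v2")

embedding_func = EmbeddingFunc(
    embedding_dim=384,
    max_token_size=5000,
    func=lambda texts: hf_embed(texts, tokenizer=tokenizer, embed_model=embed_model),
)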

View File

@@ -1,115 +0,0 @@
import numpy as np
from lightrag import LightRAG, QueryParam
from lightrag.utils import EmbeddingFunc
from lightrag.llm.jina import jina_embed
from lightrag.llm.openai import openai_complete_if_cache
import os
import asyncio
async def embedding_func(texts: list[str]) -> np.ndarray:
return await jina_embed(texts, api_key="YourJinaAPIKey")
WORKING_DIR = "./dickens"
if not os.path.exists(WORKING_DIR):
os.mkdir(WORKING_DIR)
async def llm_model_func(
prompt, system_prompt=None, history_messages=[], **kwargs
) -> str:
return await openai_complete_if_cache(
"solar-mini",
prompt,
system_prompt=system_prompt,
history_messages=history_messages,
api_key=os.getenv("UPSTAGE_API_KEY"),
base_url="https://api.upstage.ai/v1/solar",
**kwargs,
)
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=llm_model_func,
embedding_func=EmbeddingFunc(
embedding_dim=1024, max_token_size=8192, func=embedding_func
),
)
async def lightraginsert(file_path, semaphore):
async with semaphore:
try:
with open(file_path, "r", encoding="utf-8") as f:
content = f.read()
except UnicodeDecodeError:
# If UTF-8 decoding fails, try other encodings
with open(file_path, "r", encoding="gbk") as f:
content = f.read()
await rag.ainsert(content)
async def process_files(directory, concurrency_limit):
semaphore = asyncio.Semaphore(concurrency_limit)
tasks = []
for root, dirs, files in os.walk(directory):
for f in files:
file_path = os.path.join(root, f)
if f.startswith("."):
continue
tasks.append(lightraginsert(file_path, semaphore))
await asyncio.gather(*tasks)
async def main():
try:
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=llm_model_func,
embedding_func=EmbeddingFunc(
embedding_dim=1024,
max_token_size=8192,
func=embedding_func,
),
)
asyncio.run(process_files(WORKING_DIR, concurrency_limit=4))
# Perform naive search
print(
await rag.aquery(
"What are the top themes in this story?", param=QueryParam(mode="naive")
)
)
# Perform local search
print(
await rag.aquery(
"What are the top themes in this story?", param=QueryParam(mode="local")
)
)
# Perform global search
print(
await rag.aquery(
"What are the top themes in this story?",
param=QueryParam(mode="global"),
)
)
# Perform hybrid search
print(
await rag.aquery(
"What are the top themes in this story?",
param=QueryParam(mode="hybrid"),
)
)
except Exception as e:
print(f"An error occurred: {e}")
if __name__ == "__main__":
asyncio.run(main())
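The deleted Jina demo bounds concurrent inserts with a semaphore, a pattern worth keeping; a self-contained sketch with asyncio.sleep standing in for rag.ainsert:
import asyncio

async def bounded_insert(i: int, sem: asyncio.Semaphore) -> None:
    async with sem:  # at most `limit` tasks run this block at once
        await asyncio.sleep(0.1)  # stand-in for rag.ainsert(content)
        print(f"inserted document {i}")

async def run_all(limit: int = 4) -> None:
    sem = asyncio.Semaphore(limit)
    await asyncio.gather(*(bounded_insert(i, sem) for i in range(10)))

asyncio.run(run_all())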

View File

@@ -8,6 +8,11 @@ from lightrag.utils import EmbeddingFunc
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
import asyncio
import nest_asyncio
nest_asyncio.apply()
from lightrag.kg.shared_storage import initialize_pipeline_status
# Configure working directory
WORKING_DIR = "./index_default"
@@ -76,38 +81,53 @@ async def get_embedding_dim():
return embedding_dim
# Initialize RAG instance
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=llm_model_func,
embedding_func=EmbeddingFunc(
embedding_dim=asyncio.run(get_embedding_dim()),
max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
func=embedding_func,
),
)
async def initialize_rag():
embedding_dimension = await get_embedding_dim()
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=llm_model_func,
embedding_func=EmbeddingFunc(
embedding_dim=embedding_dimension,
max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
func=embedding_func,
),
)
# Insert example text
with open("./book.txt", "r", encoding="utf-8") as f:
rag.insert(f.read())
await rag.initialize_storages()
await initialize_pipeline_status()
return rag
# Test different query modes
print("\nNaive Search:")
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
)
print("\nLocal Search:")
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
)
def main():
# Initialize RAG instance
rag = asyncio.run(initialize_rag())
print("\nGlobal Search:")
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
)
# Insert example text
with open("./book.txt", "r", encoding="utf-8") as f:
rag.insert(f.read())
print("\nHybrid Search:")
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
)
# Test different query modes
print("\nNaive Search:")
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
)
print("\nLocal Search:")
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
)
print("\nGlobal Search:")
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
)
print("\nHybrid Search:")
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
)
if __name__ == "__main__":
main()

View File

@@ -8,6 +8,11 @@ from lightrag.utils import EmbeddingFunc
from llama_index.llms.litellm import LiteLLM
from llama_index.embeddings.litellm import LiteLLMEmbedding
import asyncio
import nest_asyncio
nest_asyncio.apply()
from lightrag.kg.shared_storage import initialize_pipeline_status
# Configure working directory
WORKING_DIR = "./index_default"
@@ -79,38 +84,53 @@ async def get_embedding_dim():
return embedding_dim
# Initialize RAG instance
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=llm_model_func,
embedding_func=EmbeddingFunc(
embedding_dim=asyncio.run(get_embedding_dim()),
max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
func=embedding_func,
),
)
async def initialize_rag():
embedding_dimension = await get_embedding_dim()
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=llm_model_func,
embedding_func=EmbeddingFunc(
embedding_dim=embedding_dimension,
max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
func=embedding_func,
),
)
# Insert example text
with open("./book.txt", "r", encoding="utf-8") as f:
rag.insert(f.read())
await rag.initialize_storages()
await initialize_pipeline_status()
return rag
# Test different query modes
print("\nNaive Search:")
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
)
print("\nLocal Search:")
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
)
def main():
# Initialize RAG instance
rag = asyncio.run(initialize_rag())
print("\nGlobal Search:")
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
)
# Insert example text
with open("./book.txt", "r", encoding="utf-8") as f:
rag.insert(f.read())
print("\nHybrid Search:")
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
)
# Test different query modes
print("\nNaive Search:")
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
)
print("\nLocal Search:")
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
)
print("\nGlobal Search:")
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
)
print("\nHybrid Search:")
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
)
if __name__ == "__main__":
main()

View File

@@ -5,6 +5,12 @@ from lightrag.llm.lmdeploy import lmdeploy_model_if_cache
from lightrag.llm.hf import hf_embed
from lightrag.utils import EmbeddingFunc
from transformers import AutoModel, AutoTokenizer
from lightrag.kg.shared_storage import initialize_pipeline_status
import asyncio
import nest_asyncio
nest_asyncio.apply()
WORKING_DIR = "./dickens"
@@ -35,46 +41,59 @@ async def lmdeploy_model_complete(
**kwargs,
)
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=lmdeploy_model_complete,
llm_model_name="meta-llama/Llama-3.1-8B-Instruct", # please use definite path for local model
embedding_func=EmbeddingFunc(
embedding_dim=384,
max_token_size=5000,
func=lambda texts: hf_embed(
texts,
tokenizer=AutoTokenizer.from_pretrained(
"sentence-transformers/all-MiniLM-L6-v2"
),
embed_model=AutoModel.from_pretrained(
"sentence-transformers/all-MiniLM-L6-v2"
async def initialize_rag():
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=lmdeploy_model_complete,
llm_model_name="meta-llama/Llama-3.1-8B-Instruct", # please use definite path for local model
embedding_func=EmbeddingFunc(
embedding_dim=384,
max_token_size=5000,
func=lambda texts: hf_embed(
texts,
tokenizer=AutoTokenizer.from_pretrained(
"sentence-transformers/all-MiniLM-L6-v2"
),
embed_model=AutoModel.from_pretrained(
"sentence-transformers/all-MiniLM-L6-v2"
),
),
),
),
)
)
await rag.initialize_storages()
await initialize_pipeline_status()
return rag
with open("./book.txt", "r", encoding="utf-8") as f:
rag.insert(f.read())
def main():
# Initialize RAG instance
rag = asyncio.run(initialize_rag())
# Perform naive search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
)
# Insert example text
with open("./book.txt", "r", encoding="utf-8") as f:
rag.insert(f.read())
# Perform local search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
)
# Test different query modes
print("\nNaive Search:")
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
)
# Perform global search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
)
print("\nLocal Search:")
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
)
# Perform hybrid search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
)
print("\nGlobal Search:")
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
)
print("\nHybrid Search:")
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
)
if __name__ == "__main__":
main()

View File

@@ -1,5 +1,9 @@
import os
import asyncio
import nest_asyncio
nest_asyncio.apply()
from lightrag import LightRAG, QueryParam
from lightrag.llm import (
openai_complete_if_cache,
@@ -7,10 +11,12 @@ from lightrag.llm import (
)
from lightrag.utils import EmbeddingFunc
import numpy as np
from lightrag.kg.shared_storage import initialize_pipeline_status
# for custom llm_model_func
from lightrag.utils import locate_json_string_body_from_string
WORKING_DIR = "./dickens"
if not os.path.exists(WORKING_DIR):
@@ -91,42 +97,37 @@ async def test_funcs():
# asyncio.run(test_funcs())
async def initialize_rag():
embedding_dimension = await get_embedding_dim()
print(f"Detected embedding dimension: {embedding_dimension}")
# lightRAG class during indexing
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=llm_model_func,
# llm_model_name="meta/llama3-70b-instruct",  # uncomment if needed
embedding_func=EmbeddingFunc(
embedding_dim=embedding_dimension,
max_token_size=512,  # maximum token size; inputs can still exceed the model limit,
# so the truncate (trunc) parameter on embedding_func handles the overflow; examine
# the tokenizer LightRAG uses and adjust this to fit the NVIDIA model (future work)
func=indexing_embedding_func,
),
)
await rag.initialize_storages()
await initialize_pipeline_status()
return rag
async def main():
try:
embedding_dimension = await get_embedding_dim()
print(f"Detected embedding dimension: {embedding_dimension}")
# lightRAG class during indexing
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=llm_model_func,
# llm_model_name="meta/llama3-70b-instruct",  # uncomment if needed
embedding_func=EmbeddingFunc(
embedding_dim=embedding_dimension,
max_token_size=512,  # maximum token size; inputs can still exceed the model limit,
# so the truncate (trunc) parameter on embedding_func handles the overflow; examine
# the tokenizer LightRAG uses and adjust this to fit the NVIDIA model (future work)
func=indexing_embedding_func,
),
)
# Initialize RAG instance
rag = await initialize_rag()  # already inside a running loop; asyncio.run() would fail here
# reading file
with open("./book.txt", "r", encoding="utf-8") as f:
await rag.ainsert(f.read())
# redefine rag to change embedding into query type
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=llm_model_func,
# llm_model_name="meta/llama3-70b-instruct", #un comment if
embedding_func=EmbeddingFunc(
embedding_dim=embedding_dimension,
max_token_size=512,
func=query_embedding_func,
),
)
# Perform naive search
print("==============Naive===============")
print(

View File

@@ -1,4 +1,8 @@
import asyncio
import nest_asyncio
nest_asyncio.apply()
import inspect
import logging
import os
@@ -6,6 +10,7 @@ import os
from lightrag import LightRAG, QueryParam
from lightrag.llm.ollama import ollama_embed, ollama_model_complete
from lightrag.utils import EmbeddingFunc
from lightrag.kg.shared_storage import initialize_pipeline_status
WORKING_DIR = "./dickens_age"
@@ -22,59 +27,72 @@ os.environ["AGE_POSTGRES_HOST"] = "localhost"
os.environ["AGE_POSTGRES_PORT"] = "5455"
os.environ["AGE_GRAPH_NAME"] = "dickens"
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=ollama_model_complete,
llm_model_name="llama3.1:8b",
llm_model_max_async=4,
llm_model_max_token_size=32768,
llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 32768}},
embedding_func=EmbeddingFunc(
embedding_dim=768,
max_token_size=8192,
func=lambda texts: ollama_embed(
texts, embed_model="nomic-embed-text", host="http://localhost:11434"
async def initialize_rag():
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=ollama_model_complete,
llm_model_name="llama3.1:8b",
llm_model_max_async=4,
llm_model_max_token_size=32768,
llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 32768}},
embedding_func=EmbeddingFunc(
embedding_dim=768,
max_token_size=8192,
func=lambda texts: ollama_embed(
texts, embed_model="nomic-embed-text", host="http://localhost:11434"
),
),
),
graph_storage="AGEStorage",
)
with open("./book.txt", "r", encoding="utf-8") as f:
rag.insert(f.read())
# Perform naive search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
)
# Perform local search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
)
# Perform global search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
)
# Perform hybrid search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
)
# stream response
resp = rag.query(
"What are the top themes in this story?",
param=QueryParam(mode="hybrid", stream=True),
)
graph_storage="AGEStorage",
)
await rag.initialize_storages()
await initialize_pipeline_status()
return rag
async def print_stream(stream):
async for chunk in stream:
print(chunk, end="", flush=True)
def main():
# Initialize RAG instance
rag = asyncio.run(initialize_rag())
if inspect.isasyncgen(resp):
asyncio.run(print_stream(resp))
else:
print(resp)
# Insert example text
with open("./book.txt", "r", encoding="utf-8") as f:
rag.insert(f.read())
# Test different query modes
print("\nNaive Search:")
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
)
print("\nLocal Search:")
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
)
print("\nGlobal Search:")
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
)
print("\nHybrid Search:")
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
)
# stream response
resp = rag.query(
"What are the top themes in this story?",
param=QueryParam(mode="hybrid", stream=True),
)
if inspect.isasyncgen(resp):
asyncio.run(print_stream(resp))
else:
print(resp)
if __name__ == "__main__":
main()
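Streaming queries return an async generator, which is why the new main() branches on inspect.isasyncgen; a runnable sketch with a stubbed query:
import asyncio
import inspect

async def print_stream(stream) -> None:
    async for chunk in stream:
        print(chunk, end="", flush=True)

def fake_query(stream: bool = False):
    # stand-in for rag.query(..., param=QueryParam(mode="hybrid", stream=stream))
    if stream:
        async def gen():
            for token in ("top ", "themes", "\n"):
                yield token
        return gen()
    return "top themes\n"

resp = fake_query(stream=True)
if inspect.isasyncgen(resp):
    asyncio.run(print_stream(resp))
else:
    print(resp)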

View File

@@ -1,10 +1,14 @@
import asyncio
import nest_asyncio
nest_asyncio.apply()
import os
import inspect
import logging
from lightrag import LightRAG, QueryParam
from lightrag.llm.ollama import ollama_model_complete, ollama_embed
from lightrag.utils import EmbeddingFunc
from lightrag.kg.shared_storage import initialize_pipeline_status
WORKING_DIR = "./dickens"
@@ -13,58 +17,71 @@ logging.basicConfig(format="%(levelname)s:%(message)s", level=logging.INFO)
if not os.path.exists(WORKING_DIR):
os.mkdir(WORKING_DIR)
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=ollama_model_complete,
llm_model_name="gemma2:2b",
llm_model_max_async=4,
llm_model_max_token_size=32768,
llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 32768}},
embedding_func=EmbeddingFunc(
embedding_dim=768,
max_token_size=8192,
func=lambda texts: ollama_embed(
texts, embed_model="nomic-embed-text", host="http://localhost:11434"
async def initialize_rag():
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=ollama_model_complete,
llm_model_name="gemma2:2b",
llm_model_max_async=4,
llm_model_max_token_size=32768,
llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 32768}},
embedding_func=EmbeddingFunc(
embedding_dim=768,
max_token_size=8192,
func=lambda texts: ollama_embed(
texts, embed_model="nomic-embed-text", host="http://localhost:11434"
),
),
),
)
with open("./book.txt", "r", encoding="utf-8") as f:
rag.insert(f.read())
# Perform naive search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
)
# Perform local search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
)
# Perform global search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
)
# Perform hybrid search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
)
# stream response
resp = rag.query(
"What are the top themes in this story?",
param=QueryParam(mode="hybrid", stream=True),
)
)
await rag.initialize_storages()
await initialize_pipeline_status()
return rag
async def print_stream(stream):
async for chunk in stream:
print(chunk, end="", flush=True)
def main():
# Initialize RAG instance
rag = asyncio.run(initialize_rag())
if inspect.isasyncgen(resp):
asyncio.run(print_stream(resp))
else:
print(resp)
# Insert example text
with open("./book.txt", "r", encoding="utf-8") as f:
rag.insert(f.read())
# Test different query modes
print("\nNaive Search:")
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
)
print("\nLocal Search:")
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
)
print("\nGlobal Search:")
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
)
print("\nHybrid Search:")
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
)
# stream response
resp = rag.query(
"What are the top themes in this story?",
param=QueryParam(mode="hybrid", stream=True),
)
if inspect.isasyncgen(resp):
asyncio.run(print_stream(resp))
else:
print(resp)
if __name__ == "__main__":
main()

View File

@@ -12,6 +12,7 @@ import os
from lightrag import LightRAG, QueryParam
from lightrag.llm.ollama import ollama_embed, ollama_model_complete
from lightrag.utils import EmbeddingFunc
from lightrag.kg.shared_storage import initialize_pipeline_status
WORKING_DIR = "./dickens_gremlin"
@@ -31,59 +32,72 @@ os.environ["GREMLIN_TRAVERSE_SOURCE"] = "g"
os.environ["GREMLIN_USER"] = ""
os.environ["GREMLIN_PASSWORD"] = ""
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=ollama_model_complete,
llm_model_name="llama3.1:8b",
llm_model_max_async=4,
llm_model_max_token_size=32768,
llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 32768}},
embedding_func=EmbeddingFunc(
embedding_dim=768,
max_token_size=8192,
func=lambda texts: ollama_embed(
texts, embed_model="nomic-embed-text", host="http://localhost:11434"
async def initialize_rag():
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=ollama_model_complete,
llm_model_name="llama3.1:8b",
llm_model_max_async=4,
llm_model_max_token_size=32768,
llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 32768}},
embedding_func=EmbeddingFunc(
embedding_dim=768,
max_token_size=8192,
func=lambda texts: ollama_embed(
texts, embed_model="nomic-embed-text", host="http://localhost:11434"
),
),
),
graph_storage="GremlinStorage",
)
with open("./book.txt", "r", encoding="utf-8") as f:
rag.insert(f.read())
# Perform naive search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
)
# Perform local search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
)
# Perform global search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
)
# Perform hybrid search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
)
# stream response
resp = rag.query(
"What are the top themes in this story?",
param=QueryParam(mode="hybrid", stream=True),
)
graph_storage="GremlinStorage",
)
await rag.initialize_storages()
await initialize_pipeline_status()
return rag
async def print_stream(stream):
async for chunk in stream:
print(chunk, end="", flush=True)
def main():
# Initialize RAG instance
rag = asyncio.run(initialize_rag())
if inspect.isasyncgen(resp):
asyncio.run(print_stream(resp))
else:
print(resp)
# Insert example text
with open("./book.txt", "r", encoding="utf-8") as f:
rag.insert(f.read())
# Test different query modes
print("\nNaive Search:")
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
)
print("\nLocal Search:")
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
)
print("\nGlobal Search:")
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
)
print("\nHybrid Search:")
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
)
# stream response
resp = rag.query(
"What are the top themes in this story?",
param=QueryParam(mode="hybrid", stream=True),
)
if inspect.isasyncgen(resp):
asyncio.run(print_stream(resp))
else:
print(resp)
if __name__ == "__main__":
main()

View File

@@ -2,6 +2,11 @@ import os
from lightrag import LightRAG, QueryParam
from lightrag.llm.ollama import ollama_model_complete, ollama_embed
from lightrag.utils import EmbeddingFunc
import asyncio
import nest_asyncio
nest_asyncio.apply()
from lightrag.kg.shared_storage import initialize_pipeline_status
# WorkingDir
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
@@ -27,30 +32,59 @@ os.environ["MILVUS_USER"] = "root"
os.environ["MILVUS_PASSWORD"] = "root"
os.environ["MILVUS_DB_NAME"] = "lightrag"
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=ollama_model_complete,
llm_model_name="qwen2.5:14b",
llm_model_max_async=4,
llm_model_max_token_size=32768,
llm_model_kwargs={"host": "http://127.0.0.1:11434", "options": {"num_ctx": 32768}},
embedding_func=EmbeddingFunc(
embedding_dim=1024,
max_token_size=8192,
func=lambda texts: ollama_embed(
texts=texts, embed_model="bge-m3:latest", host="http://127.0.0.1:11434"
async def initialize_rag():
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=ollama_model_complete,
llm_model_name="qwen2.5:14b",
llm_model_max_async=4,
llm_model_max_token_size=32768,
llm_model_kwargs={"host": "http://127.0.0.1:11434", "options": {"num_ctx": 32768}},
embedding_func=EmbeddingFunc(
embedding_dim=1024,
max_token_size=8192,
func=lambda texts: ollama_embed(
texts=texts, embed_model="bge-m3:latest", host="http://127.0.0.1:11434"
),
),
),
kv_storage="MongoKVStorage",
graph_storage="Neo4JStorage",
vector_storage="MilvusVectorDBStorage",
)
kv_storage="MongoKVStorage",
graph_storage="Neo4JStorage",
vector_storage="MilvusVectorDBStorage",
)
file = "./book.txt"
with open(file, "r") as f:
rag.insert(f.read())
await rag.initialize_storages()
await initialize_pipeline_status()
return rag
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
)
def main():
# Initialize RAG instance
rag = asyncio.run(initialize_rag())
# Insert example text
with open("./book.txt", "r", encoding="utf-8") as f:
rag.insert(f.read())
# Test different query modes
print("\nNaive Search:")
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
)
print("\nLocal Search:")
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
)
print("\nGlobal Search:")
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
)
print("\nHybrid Search:")
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
)
if __name__ == "__main__":
main()

View File

@@ -4,6 +4,7 @@ from lightrag import LightRAG, QueryParam
from lightrag.llm.openai import openai_complete_if_cache, openai_embed
from lightrag.utils import EmbeddingFunc
import numpy as np
from lightrag.kg.shared_storage import initialize_pipeline_status
WORKING_DIR = "./dickens"
@@ -52,21 +53,28 @@ async def test_funcs():
# asyncio.run(test_funcs())
async def initialize_rag():
embedding_dimension = await get_embedding_dim()
print(f"Detected embedding dimension: {embedding_dimension}")
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=llm_model_func,
embedding_func=EmbeddingFunc(
embedding_dim=embedding_dimension,
max_token_size=8192,
func=embedding_func,
),
)
await rag.initialize_storages()
await initialize_pipeline_status()
return rag
async def main():
try:
embedding_dimension = await get_embedding_dim()
print(f"Detected embedding dimension: {embedding_dimension}")
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=llm_model_func,
embedding_func=EmbeddingFunc(
embedding_dim=embedding_dimension,
max_token_size=8192,
func=embedding_func,
),
)
# Initialize RAG instance
rag = await initialize_rag()
with open("./book.txt", "r", encoding="utf-8") as f:
await rag.ainsert(f.read())

View File

@@ -4,6 +4,7 @@ from lightrag import LightRAG, QueryParam
from lightrag.llm.openai import openai_complete_if_cache, openai_embed
from lightrag.utils import EmbeddingFunc
import numpy as np
from lightrag.kg.shared_storage import initialize_pipeline_status
WORKING_DIR = "./dickens"
@@ -52,25 +53,33 @@ async def test_funcs():
# asyncio.run(test_funcs())
async def initialize_rag():
embedding_dimension = await get_embedding_dim()
print(f"Detected embedding dimension: {embedding_dimension}")
rag = LightRAG(
working_dir=WORKING_DIR,
embedding_cache_config={
"enabled": True,
"similarity_threshold": 0.90,
},
llm_model_func=llm_model_func,
embedding_func=EmbeddingFunc(
embedding_dim=embedding_dimension,
max_token_size=8192,
func=embedding_func,
),
)
await rag.initialize_storages()
await initialize_pipeline_status()
return rag
async def main():
try:
embedding_dimension = await get_embedding_dim()
print(f"Detected embedding dimension: {embedding_dimension}")
rag = LightRAG(
working_dir=WORKING_DIR,
embedding_cache_config={
"enabled": True,
"similarity_threshold": 0.90,
},
llm_model_func=llm_model_func,
embedding_func=EmbeddingFunc(
embedding_dim=embedding_dimension,
max_token_size=8192,
func=embedding_func,
),
)
# Initialize RAG instance
rag = await initialize_rag()
with open("./book.txt", "r", encoding="utf-8") as f:
await rag.ainsert(f.read())

View File

@@ -1,9 +1,11 @@
import inspect
import os
import asyncio
from lightrag import LightRAG
from lightrag.llm import openai_complete, openai_embed
from lightrag.utils import EmbeddingFunc, always_get_an_event_loop
from lightrag import QueryParam
from lightrag.kg.shared_storage import initialize_pipeline_status
# WorkingDir
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
@@ -13,42 +15,54 @@ if not os.path.exists(WORKING_DIR):
print(f"WorkingDir: {WORKING_DIR}")
api_key = "empty"
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=openai_complete,
llm_model_name="qwen2.5-14b-instruct@4bit",
llm_model_max_async=4,
llm_model_max_token_size=32768,
llm_model_kwargs={"base_url": "http://127.0.0.1:1234/v1", "api_key": api_key},
embedding_func=EmbeddingFunc(
embedding_dim=1024,
max_token_size=8192,
func=lambda texts: openai_embed(
texts=texts,
model="text-embedding-bge-m3",
base_url="http://127.0.0.1:1234/v1",
api_key=api_key,
async def initialize_rag():
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=openai_complete,
llm_model_name="qwen2.5-14b-instruct@4bit",
llm_model_max_async=4,
llm_model_max_token_size=32768,
llm_model_kwargs={"base_url": "http://127.0.0.1:1234/v1", "api_key": api_key},
embedding_func=EmbeddingFunc(
embedding_dim=1024,
max_token_size=8192,
func=lambda texts: openai_embed(
texts=texts,
model="text-embedding-bge-m3",
base_url="http://127.0.0.1:1234/v1",
api_key=api_key,
),
),
),
)
)
with open("./book.txt", "r", encoding="utf-8") as f:
rag.insert(f.read())
resp = rag.query(
"What are the top themes in this story?",
param=QueryParam(mode="hybrid", stream=True),
)
await rag.initialize_storages()
await initialize_pipeline_status()
return rag
async def print_stream(stream):
async for chunk in stream:
if chunk:
print(chunk, end="", flush=True)
def main():
# Initialize RAG instance
rag = asyncio.run(initialize_rag())
with open("./book.txt", "r", encoding="utf-8") as f:
rag.insert(f.read())
resp = rag.query(
"What are the top themes in this story?",
param=QueryParam(mode="hybrid", stream=True),
)
loop = always_get_an_event_loop()
if inspect.isasyncgen(resp):
loop.run_until_complete(print_stream(resp))
else:
print(resp)
if __name__ == "__main__":
main()
loop = always_get_an_event_loop()
if inspect.isasyncgen(resp):
loop.run_until_complete(print_stream(resp))
else:
print(resp)

View File

@@ -1,40 +1,54 @@
import os
import asyncio
from lightrag import LightRAG, QueryParam
from lightrag.llm.openai import gpt_4o_mini_complete, openai_embed
from lightrag.kg.shared_storage import initialize_pipeline_status
WORKING_DIR = "./dickens"
if not os.path.exists(WORKING_DIR):
os.mkdir(WORKING_DIR)
rag = LightRAG(
working_dir=WORKING_DIR,
embedding_func=openai_embed,
llm_model_func=gpt_4o_mini_complete,
# llm_model_func=gpt_4o_complete
)
async def initialize_rag():
rag = LightRAG(
working_dir=WORKING_DIR,
embedding_func=openai_embed,
llm_model_func=gpt_4o_mini_complete,
# llm_model_func=gpt_4o_complete
)
await rag.initialize_storages()
await initialize_pipeline_status()
with open("./book.txt", "r", encoding="utf-8") as f:
rag.insert(f.read())
return rag
# Perform naive search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
)
def main():
# Initialize RAG instance
rag = asyncio.run(initialize_rag())
# Perform local search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
)
with open("./book.txt", "r", encoding="utf-8") as f:
rag.insert(f.read())
# Perform global search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
)
# Perform naive search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
)
# Perform local search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
)
# Perform global search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
)
# Perform hybrid search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
)
if __name__ == "__main__":
main()
# Perform hybrid search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
)

View File

@@ -4,6 +4,7 @@ from lightrag import LightRAG, QueryParam
from lightrag.llm.openai import gpt_4o_mini_complete, openai_embed
from lightrag.utils import EmbeddingFunc
import numpy as np
from lightrag.kg.shared_storage import initialize_pipeline_status
#########
# Uncomment the below two lines if running in a jupyter notebook to handle the async nature of rag.insert()
@@ -52,7 +53,7 @@ async def create_embedding_function_instance():
async def initialize_rag():
embedding_func_instance = await create_embedding_function_instance()
return LightRAG(
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=gpt_4o_mini_complete,
embedding_func=embedding_func_instance,
@@ -60,14 +61,38 @@ async def initialize_rag():
log_level="DEBUG",
)
await rag.initialize_storages()
await initialize_pipeline_status()
# Run the initialization
rag = asyncio.run(initialize_rag())
return rag
with open("book.txt", "r", encoding="utf-8") as f:
rag.insert(f.read())
# Perform naive search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
)
def main():
# Initialize RAG instance
rag = asyncio.run(initialize_rag())
with open("./book.txt", "r", encoding="utf-8") as f:
rag.insert(f.read())
# Perform naive search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
)
# Perform local search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
)
# Perform global search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
)
# Perform hybrid search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
)
if __name__ == "__main__":
main()

View File

@@ -1,7 +1,9 @@
import os
import asyncio
from lightrag import LightRAG, QueryParam
from lightrag.llm.ollama import ollama_embed, openai_complete_if_cache
from lightrag.utils import EmbeddingFunc
from lightrag.kg.shared_storage import initialize_pipeline_status
# WorkingDir
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
@@ -48,23 +50,52 @@ embedding_func = EmbeddingFunc(
texts, embed_model="shaw/dmeta-embedding-zh", host="http://117.50.173.35:11434"
),
)
async def initialize_rag():
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=llm_model_func,
llm_model_max_token_size=32768,
embedding_func=embedding_func,
chunk_token_size=512,
chunk_overlap_token_size=256,
kv_storage="RedisKVStorage",
graph_storage="Neo4JStorage",
vector_storage="MilvusVectorDBStorage",
doc_status_storage="RedisKVStorage",
)
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=llm_model_func,
llm_model_max_token_size=32768,
embedding_func=embedding_func,
chunk_token_size=512,
chunk_overlap_token_size=256,
kv_storage="RedisKVStorage",
graph_storage="Neo4JStorage",
vector_storage="MilvusVectorDBStorage",
doc_status_storage="RedisKVStorage",
)
await rag.initialize_storages()
await initialize_pipeline_status()
file = "../book.txt"
with open(file, "r", encoding="utf-8") as f:
rag.insert(f.read())
return rag
print(rag.query("谁会3D建模 ", param=QueryParam(mode="mix")))
def main():
# Initialize RAG instance
rag = asyncio.run(initialize_rag())
with open("./book.txt", "r", encoding="utf-8") as f:
rag.insert(f.read())
# Perform naive search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
)
# Perform local search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
)
# Perform global search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
)
# Perform hybrid search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
)
if __name__ == "__main__":
main()

View File

@@ -6,6 +6,7 @@ from lightrag import LightRAG, QueryParam
from lightrag.llm.openai import openai_complete_if_cache, openai_embed
from lightrag.utils import EmbeddingFunc
import numpy as np
from lightrag.kg.shared_storage import initialize_pipeline_status
print(os.getcwd())
script_directory = Path(__file__).resolve().parent.parent
@@ -63,41 +64,48 @@ async def get_embedding_dim():
embedding_dim = embedding.shape[1]
return embedding_dim
async def initialize_rag():
# Detect embedding dimension
embedding_dimension = await get_embedding_dim()
print(f"Detected embedding dimension: {embedding_dimension}")
# Initialize LightRAG
# We use Oracle DB as the KV/vector/graph storage
# You can add `addon_params={"example_number": 1, "language": "Simplified Chinese"}` to control the prompt
rag = LightRAG(
# log_level="DEBUG",
working_dir=WORKING_DIR,
entity_extract_max_gleaning=1,
enable_llm_cache=True,
enable_llm_cache_for_entity_extract=True,
embedding_cache_config=None, # {"enabled": True,"similarity_threshold": 0.90},
chunk_token_size=CHUNK_TOKEN_SIZE,
llm_model_max_token_size=MAX_TOKENS,
llm_model_func=llm_model_func,
embedding_func=EmbeddingFunc(
embedding_dim=embedding_dimension,
max_token_size=500,
func=embedding_func,
),
graph_storage="OracleGraphStorage",
kv_storage="OracleKVStorage",
vector_storage="OracleVectorDBStorage",
addon_params={
"example_number": 1,
"language": "Simplfied Chinese",
"entity_types": ["organization", "person", "geo", "event"],
"insert_batch_size": 2,
},
)
await rag.initialize_storages()
await initialize_pipeline_status()
return rag
async def main():
try:
# Detect embedding dimension
embedding_dimension = await get_embedding_dim()
print(f"Detected embedding dimension: {embedding_dimension}")
# Initialize LightRAG
# We use Oracle DB as the KV/vector/graph storage
# You can add `addon_params={"example_number": 1, "language": "Simplified Chinese"}` to control the prompt
rag = LightRAG(
# log_level="DEBUG",
working_dir=WORKING_DIR,
entity_extract_max_gleaning=1,
enable_llm_cache=True,
enable_llm_cache_for_entity_extract=True,
embedding_cache_config=None, # {"enabled": True,"similarity_threshold": 0.90},
chunk_token_size=CHUNK_TOKEN_SIZE,
llm_model_max_token_size=MAX_TOKENS,
llm_model_func=llm_model_func,
embedding_func=EmbeddingFunc(
embedding_dim=embedding_dimension,
max_token_size=500,
func=embedding_func,
),
graph_storage="OracleGraphStorage",
kv_storage="OracleKVStorage",
vector_storage="OracleVectorDBStorage",
addon_params={
"example_number": 1,
"language": "Simplfied Chinese",
"entity_types": ["organization", "person", "geo", "event"],
"insert_batch_size": 2,
},
)
# Initialize RAG instance
rag = await initialize_rag()
# Extract and Insert into LightRAG storage
with open(WORKING_DIR + "/docs.txt", "r", encoding="utf-8") as f:

View File

@@ -5,6 +5,7 @@ from lightrag.llm.openai import openai_complete_if_cache
from lightrag.llm.siliconcloud import siliconcloud_embedding
from lightrag.utils import EmbeddingFunc
import numpy as np
from lightrag.kg.shared_storage import initialize_pipeline_status
WORKING_DIR = "./dickens"
@@ -46,35 +47,48 @@ async def test_funcs():
asyncio.run(test_funcs())
async def initialize_rag():
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=llm_model_func,
embedding_func=EmbeddingFunc(
embedding_dim=768, max_token_size=512, func=embedding_func
),
)
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=llm_model_func,
embedding_func=EmbeddingFunc(
embedding_dim=768, max_token_size=512, func=embedding_func
),
)
await rag.initialize_storages()
await initialize_pipeline_status()
return rag
with open("./book.txt") as f:
rag.insert(f.read())
def main():
# Initialize RAG instance
rag = asyncio.run(initialize_rag())
# Perform naive search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
)
with open("./book.txt", "r", encoding="utf-8") as f:
rag.insert(f.read())
# Perform local search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
)
# Perform naive search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
)
# Perform global search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
)
# Perform local search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
)
# Perform global search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
)
# Perform hybrid search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
)
if __name__ == "__main__":
main()
# Perform hybrid search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
)

View File

@@ -6,6 +6,7 @@ import numpy as np
from lightrag import LightRAG, QueryParam
from lightrag.llm import siliconcloud_embedding, openai_complete_if_cache
from lightrag.utils import EmbeddingFunc
from lightrag.kg.shared_storage import initialize_pipeline_status
WORKING_DIR = "./dickens"
@@ -54,33 +55,40 @@ async def get_embedding_dim():
embedding_dim = embedding.shape[1]
return embedding_dim
async def initialize_rag():
# Detect embedding dimension
embedding_dimension = await get_embedding_dim()
print(f"Detected embedding dimension: {embedding_dimension}")
# Initialize LightRAG
# We use TiDB as the KV/vector storage
rag = LightRAG(
enable_llm_cache=False,
working_dir=WORKING_DIR,
chunk_token_size=512,
llm_model_func=llm_model_func,
embedding_func=EmbeddingFunc(
embedding_dim=embedding_dimension,
max_token_size=512,
func=embedding_func,
),
kv_storage="TiDBKVStorage",
vector_storage="TiDBVectorDBStorage",
graph_storage="TiDBGraphStorage",
)
await rag.initialize_storages()
await initialize_pipeline_status()
return rag
async def main():
try:
# Detect embedding dimension
embedding_dimension = await get_embedding_dim()
print(f"Detected embedding dimension: {embedding_dimension}")
# Initialize RAG instance
rag = await initialize_rag()
# Initialize LightRAG
# We use TiDB as the KV/vector storage
rag = LightRAG(
enable_llm_cache=False,
working_dir=WORKING_DIR,
chunk_token_size=512,
llm_model_func=llm_model_func,
embedding_func=EmbeddingFunc(
embedding_dim=embedding_dimension,
max_token_size=512,
func=embedding_func,
),
kv_storage="TiDBKVStorage",
vector_storage="TiDBVectorDBStorage",
graph_storage="TiDBGraphStorage",
)
# Extract and Insert into LightRAG storage
with open("./dickens/demo.txt", "r", encoding="utf-8") as f:
await rag.ainsert(f.read())
with open("./book.txt", "r", encoding="utf-8") as f:
rag.insert(f.read())
# Perform search in different modes
modes = ["naive", "local", "global", "hybrid"]

View File

@@ -1,10 +1,12 @@
import os
import logging
import asyncio
from lightrag import LightRAG, QueryParam
from lightrag.llm.zhipu import zhipu_complete, zhipu_embedding
from lightrag.utils import EmbeddingFunc
from lightrag.kg.shared_storage import initialize_pipeline_status
WORKING_DIR = "./dickens"
@@ -17,39 +19,51 @@ api_key = os.environ.get("ZHIPUAI_API_KEY")
if api_key is None:
raise Exception("Please set ZHIPU_API_KEY in your environment")
async def initialize_rag():
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=zhipu_complete,
llm_model_name="glm-4-flashx", # Using the most cost/performance balance model, but you can change it here.
llm_model_max_async=4,
llm_model_max_token_size=32768,
embedding_func=EmbeddingFunc(
embedding_dim=2048, # Zhipu embedding-3 dimension
max_token_size=8192,
func=lambda texts: zhipu_embedding(texts),
),
)
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=zhipu_complete,
llm_model_name="glm-4-flashx", # Using the most cost/performance balance model, but you can change it here.
llm_model_max_async=4,
llm_model_max_token_size=32768,
embedding_func=EmbeddingFunc(
embedding_dim=2048, # Zhipu embedding-3 dimension
max_token_size=8192,
func=lambda texts: zhipu_embedding(texts),
),
)
await rag.initialize_storages()
await initialize_pipeline_status()
with open("./book.txt", "r", encoding="utf-8") as f:
rag.insert(f.read())
return rag
# Perform naive search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
)
def main():
# Initialize RAG instance
rag = asyncio.run(initialize_rag())
# Perform local search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
)
with open("./book.txt", "r", encoding="utf-8") as f:
rag.insert(f.read())
# Perform global search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
)
# Perform naive search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
)
# Perform hybrid search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
)
# Perform local search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
)
# Perform global search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
)
# Perform hybrid search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
)
if __name__ == "__main__":
main()
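
The four near-identical query blocks above are a natural candidate for a loop; a minimal sketch under the same imports (the helper name query_all_modes is ours):

def query_all_modes(rag, question="What are the top themes in this story?"):
    # Ask the same question in every retrieval mode and print each answer.
    for mode in ("naive", "local", "global", "hybrid"):
        print(rag.query(question, param=QueryParam(mode=mode)))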

View File

@@ -8,6 +8,7 @@ from lightrag import LightRAG, QueryParam
from lightrag.llm.zhipu import zhipu_complete
from lightrag.llm.ollama import ollama_embedding
from lightrag.utils import EmbeddingFunc
from lightrag.kg.shared_storage import initialize_pipeline_status
load_dotenv()
ROOT_DIR = os.environ.get("ROOT_DIR")
@@ -27,8 +28,7 @@ os.environ["POSTGRES_USER"] = "rag"
os.environ["POSTGRES_PASSWORD"] = "rag"
os.environ["POSTGRES_DATABASE"] = "rag"
async def main():
async def initialize_rag():
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=zhipu_complete,
@@ -50,9 +50,17 @@ async def main():
auto_manage_storages_states=False,
)
await rag.initialize_storages()
await initialize_pipeline_status()
return rag
async def main():
# Initialize RAG instance
    rag = await initialize_rag()
    # Add embedding_func for the graph database; it was removed in commit 5661d76860436f7bf5aef2e50d9ee4a59660146c
rag.chunk_entity_relation_graph.embedding_func = rag.embedding_func
await rag.initialize_storages()
with open(f"{ROOT_DIR}/book.txt", "r", encoding="utf-8") as f:
await rag.ainsert(f.read())
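
Since this demo passes auto_manage_storages_states=False, the caller owns the storage lifecycle; a hedged sketch of a main() that also tears the storages down on exit (assuming finalize_storages() is the counterpart of initialize_storages()):

async def main():
    rag = await initialize_rag()
    rag.chunk_entity_relation_graph.embedding_func = rag.embedding_func
    try:
        with open(f"{ROOT_DIR}/book.txt", "r", encoding="utf-8") as f:
            await rag.ainsert(f.read())
    finally:
        # Caller-managed teardown, mirroring the explicit initialization.
        await rag.finalize_storages()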

View File

@@ -6,6 +6,7 @@ import numpy as np
from dotenv import load_dotenv
import logging
from openai import AzureOpenAI
from lightrag.kg.shared_storage import initialize_pipeline_status
logging.basicConfig(level=logging.INFO)
@@ -79,25 +80,32 @@ async def test_funcs():
asyncio.run(test_funcs())
embedding_dimension = 3072
async def initialize_rag():
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=llm_model_func,
embedding_func=EmbeddingFunc(
embedding_dim=embedding_dimension,
max_token_size=8192,
func=embedding_func,
),
)
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=llm_model_func,
embedding_func=EmbeddingFunc(
embedding_dim=embedding_dimension,
max_token_size=8192,
func=embedding_func,
),
)
await rag.initialize_storages()
await initialize_pipeline_status()
book1 = open("./book_1.txt", encoding="utf-8")
book2 = open("./book_2.txt", encoding="utf-8")
rag.insert([book1.read(), book2.read()])
return rag
# Example function demonstrating the new query_with_separate_keyword_extraction usage
async def run_example():
# Initialize RAG instance
    rag = await initialize_rag()
book1 = open("./book_1.txt", encoding="utf-8")
book2 = open("./book_2.txt", encoding="utf-8")
rag.insert([book1.read(), book2.read()])
query = "What are the top themes in this story?"
prompt = "Please simplify the response for a young audience."
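
The hunk stops right after defining query and prompt; a sketch of how the separate keyword-extraction call might proceed (we are assuming the query_with_separate_keyword_extraction signature here; verify it against your LightRAG version):

    response = rag.query_with_separate_keyword_extraction(
        query=query,
        prompt=prompt,
        param=QueryParam(mode="hybrid"),
    )
    print(response)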

View File

@@ -1,6 +1,7 @@
import os
import asyncio
from lightrag import LightRAG, QueryParam
from lightrag.llm.openai import gpt_4o_mini_complete
from lightrag.kg.shared_storage import initialize_pipeline_status
#########
# Uncomment the two lines below when running in a Jupyter notebook to handle the async nature of rag.insert()
# import nest_asyncio
@@ -12,31 +13,45 @@ WORKING_DIR = "./dickens"
if not os.path.exists(WORKING_DIR):
os.mkdir(WORKING_DIR)
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=gpt_4o_mini_complete, # Use gpt_4o_mini_complete LLM model
# llm_model_func=gpt_4o_complete # Optionally, use a stronger model
)
async def initialize_rag():
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=gpt_4o_mini_complete, # Use gpt_4o_mini_complete LLM model
# llm_model_func=gpt_4o_complete # Optionally, use a stronger model
)
with open("./dickens/book.txt", "r", encoding="utf-8") as f:
rag.insert(f.read())
await rag.initialize_storages()
await initialize_pipeline_status()
# Perform naive search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
)
return rag
# Perform local search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
)
# Perform global search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
)
def main():
# Initialize RAG instance
rag = asyncio.run(initialize_rag())
# Perform hybrid search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
)
with open("./book.txt", "r", encoding="utf-8") as f:
rag.insert(f.read())
# Perform naive search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
)
# Perform local search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
)
# Perform global search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
)
# Perform hybrid search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
)
if __name__ == "__main__":
main()

View File

@@ -4,6 +4,7 @@ from lightrag import LightRAG, QueryParam
from lightrag.llm.openai import gpt_4o_mini_complete, openai_embed
from lightrag.utils import EmbeddingFunc
import numpy as np
from lightrag.kg.shared_storage import initialize_pipeline_status
#########
# Uncomment the two lines below when running in a Jupyter notebook to handle the async nature of rag.insert()
@@ -67,7 +68,7 @@ async def create_embedding_function_instance():
async def initialize_rag():
embedding_func_instance = await create_embedding_function_instance()
if CHROMADB_USE_LOCAL_PERSISTENT:
return LightRAG(
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=gpt_4o_mini_complete,
embedding_func=embedding_func_instance,
@@ -87,7 +88,7 @@ async def initialize_rag():
},
)
else:
return LightRAG(
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=gpt_4o_mini_complete,
embedding_func=embedding_func_instance,
@@ -112,28 +113,36 @@ async def initialize_rag():
)
# Run the initialization
rag = asyncio.run(initialize_rag())
await rag.initialize_storages()
await initialize_pipeline_status()
# with open("./dickens/book.txt", "r", encoding="utf-8") as f:
# rag.insert(f.read())
return rag
# Perform naive search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
)
# Initialize RAG instance
rag = asyncio.run(initialize_rag())
# Perform local search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
)
with open("./book.txt", "r", encoding="utf-8") as f:
rag.insert(f.read())
# Perform global search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
)
# Perform naive search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
)
# Perform hybrid search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
)
# Perform local search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
)
# Perform global search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
)
# Perform hybrid search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
)
if __name__ == "__main__":
main()
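
The point of changing return LightRAG(...) to rag = LightRAG(...) in both branches is that the shared tail (initialize_storages and the pipeline status) now runs for either configuration; the same control-flow shape in miniature:

def build(use_local: bool) -> dict:
    # Toy stand-in for the two LightRAG constructor calls above.
    client = {"kind": "local-persistent"} if use_local else {"kind": "http"}
    # Shared tail, reached from both branches; the early returns used to skip it.
    client["initialized"] = True
    return client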

View File

@@ -1,5 +1,6 @@
import os
import logging
import asyncio
import numpy as np
from dotenv import load_dotenv
@@ -8,7 +9,9 @@ from sentence_transformers import SentenceTransformer
from openai import AzureOpenAI
from lightrag import LightRAG, QueryParam
from lightrag.utils import EmbeddingFunc
from lightrag.kg.shared_storage import initialize_pipeline_status
WORKING_DIR = "./dickens"
# Configure Logging
logging.basicConfig(level=logging.INFO)
@@ -55,11 +58,7 @@ async def embedding_func(texts: list[str]) -> np.ndarray:
embeddings = model.encode(texts, convert_to_numpy=True)
return embeddings
def main():
WORKING_DIR = "./dickens"
# Initialize LightRAG with the LLM model function and embedding function
async def initialize_rag():
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=llm_model_func,
@@ -74,6 +73,15 @@ def main():
},
)
await rag.initialize_storages()
await initialize_pipeline_status()
return rag
def main():
# Initialize RAG instance
rag = asyncio.run(initialize_rag())
# Insert the custom chunks into LightRAG
book1 = open("./book_1.txt", encoding="utf-8")
book2 = open("./book_2.txt", encoding="utf-8")
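
The two bare open() calls above leak file handles if insertion fails; a safer variant using context managers and the list form of insert() seen in the other examples:

    with open("./book_1.txt", encoding="utf-8") as f1:
        with open("./book_2.txt", encoding="utf-8") as f2:
            rag.insert([f1.read(), f2.read()])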

View File

@@ -1,7 +1,8 @@
import os
import asyncio
from lightrag import LightRAG, QueryParam
from lightrag.llm.openai import gpt_4o_mini_complete
from lightrag.kg.shared_storage import initialize_pipeline_status
#########
# Uncomment the two lines below when running in a Jupyter notebook to handle the async nature of rag.insert()
@@ -14,33 +15,46 @@ WORKING_DIR = "./local_neo4jWorkDir"
if not os.path.exists(WORKING_DIR):
os.mkdir(WORKING_DIR)
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=gpt_4o_mini_complete, # Use gpt_4o_mini_complete LLM model
graph_storage="Neo4JStorage",
log_level="INFO",
# llm_model_func=gpt_4o_complete # Optionally, use a stronger model
)
async def initialize_rag():
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=gpt_4o_mini_complete, # Use gpt_4o_mini_complete LLM model
graph_storage="Neo4JStorage",
log_level="INFO",
# llm_model_func=gpt_4o_complete # Optionally, use a stronger model
)
with open("./book.txt") as f:
rag.insert(f.read())
await rag.initialize_storages()
await initialize_pipeline_status()
# Perform naive search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
)
return rag
# Perform local search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
)
def main():
# Initialize RAG instance
rag = asyncio.run(initialize_rag())
# Perform global search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
)
with open("./book.txt", "r", encoding="utf-8") as f:
rag.insert(f.read())
# Perform hybrid search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
)
# Perform naive search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
)
# Perform local search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
)
# Perform global search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
)
# Perform hybrid search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
)
if __name__ == "__main__":
main()
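
Neo4JStorage also needs connection settings that this example never shows; a hedged sketch (the variable names are our assumption, not part of this commit):

import os

# Assumed Neo4j connection settings; adjust to your deployment.
os.environ.setdefault("NEO4J_URI", "bolt://localhost:7687")
os.environ.setdefault("NEO4J_USERNAME", "neo4j")
os.environ.setdefault("NEO4J_PASSWORD", "your-password")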

View File

@@ -18,6 +18,7 @@
"from lightrag import LightRAG, QueryParam\n",
"from lightrag.llm.openai import openai_complete_if_cache, openai_embed\n",
"from lightrag.utils import EmbeddingFunc\n",
"from lightrag.kg.shared_storage import initialize_pipeline_status\n",
"import nest_asyncio"
]
},
@@ -25,7 +26,9 @@
"cell_type": "markdown",
"id": "dd17956ec322b361",
"metadata": {},
"source": "#### split by character"
"source": [
"#### split by character"
]
},
{
"cell_type": "code",
@@ -109,14 +112,26 @@
}
],
"source": [
"rag = LightRAG(\n",
" working_dir=WORKING_DIR,\n",
" llm_model_func=llm_model_func,\n",
" embedding_func=EmbeddingFunc(\n",
" embedding_dim=4096, max_token_size=8192, func=embedding_func\n",
" ),\n",
" chunk_token_size=512,\n",
")"
"import asyncio\n",
"import nest_asyncio\n",
"\n",
"nest_asyncio.apply()\n",
"\n",
"async def initialize_rag():\n",
" rag = LightRAG(\n",
" working_dir=WORKING_DIR,\n",
" llm_model_func=llm_model_func,\n",
" embedding_func=EmbeddingFunc(\n",
" embedding_dim=4096, max_token_size=8192, func=embedding_func\n",
" ),\n",
" chunk_token_size=512,\n",
" )\n",
" await rag.initialize_storages()\n",
" await initialize_pipeline_status()\n",
"\n",
" return rag\n",
"\n",
"rag = asyncio.run(initialize_rag())"
]
},
{
@@ -908,7 +923,9 @@
"cell_type": "markdown",
"id": "4e5bfad24cb721a8",
"metadata": {},
"source": "#### split by character only"
"source": [
"#### split by character only"
]
},
{
"cell_type": "code",

View File

@@ -1,8 +1,10 @@
import os
import time
import asyncio
from lightrag import LightRAG, QueryParam
from lightrag.llm.ollama import ollama_model_complete, ollama_embed
from lightrag.utils import EmbeddingFunc
from lightrag.kg.shared_storage import initialize_pipeline_status
# Working directory and the directory path for text files
WORKING_DIR = "./dickens"
@@ -12,17 +14,22 @@ TEXT_FILES_DIR = "/llm/mt"
if not os.path.exists(WORKING_DIR):
os.mkdir(WORKING_DIR)
# Initialize LightRAG
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=ollama_model_complete,
llm_model_name="qwen2.5:3b-instruct-max-context",
embedding_func=EmbeddingFunc(
embedding_dim=768,
max_token_size=8192,
func=lambda texts: ollama_embed(texts, embed_model="nomic-embed-text"),
),
)
async def initialize_rag():
# Initialize LightRAG
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=ollama_model_complete,
llm_model_name="qwen2.5:3b-instruct-max-context",
embedding_func=EmbeddingFunc(
embedding_dim=768,
max_token_size=8192,
func=lambda texts: ollama_embed(texts, embed_model="nomic-embed-text"),
),
)
await rag.initialize_storages()
await initialize_pipeline_status()
return rag
# Read all .txt files from the TEXT_FILES_DIR directory
texts = []
@@ -47,58 +54,65 @@ def insert_texts_with_retry(rag, texts, retries=3, delay=5):
raise RuntimeError("Failed to insert texts after multiple retries.")
insert_texts_with_retry(rag, texts)
def main():
# Initialize RAG instance
rag = asyncio.run(initialize_rag())
# Perform different types of queries and handle potential errors
try:
print(
rag.query(
"What are the top themes in this story?", param=QueryParam(mode="naive")
insert_texts_with_retry(rag, texts)
# Perform different types of queries and handle potential errors
try:
print(
rag.query(
"What are the top themes in this story?", param=QueryParam(mode="naive")
)
)
)
except Exception as e:
print(f"Error performing naive search: {e}")
except Exception as e:
print(f"Error performing naive search: {e}")
try:
print(
rag.query(
"What are the top themes in this story?", param=QueryParam(mode="local")
try:
print(
rag.query(
"What are the top themes in this story?", param=QueryParam(mode="local")
)
)
)
except Exception as e:
print(f"Error performing local search: {e}")
except Exception as e:
print(f"Error performing local search: {e}")
try:
print(
rag.query(
"What are the top themes in this story?", param=QueryParam(mode="global")
try:
print(
rag.query(
"What are the top themes in this story?", param=QueryParam(mode="global")
)
)
)
except Exception as e:
print(f"Error performing global search: {e}")
except Exception as e:
print(f"Error performing global search: {e}")
try:
print(
rag.query(
"What are the top themes in this story?", param=QueryParam(mode="hybrid")
try:
print(
rag.query(
"What are the top themes in this story?", param=QueryParam(mode="hybrid")
)
)
)
except Exception as e:
print(f"Error performing hybrid search: {e}")
except Exception as e:
print(f"Error performing hybrid search: {e}")
# Function to clear VRAM resources
def clear_vram():
os.system("sudo nvidia-smi --gpu-reset")
# Function to clear VRAM resources
def clear_vram():
os.system("sudo nvidia-smi --gpu-reset")
# Regularly clear VRAM to prevent overflow
clear_vram_interval = 3600 # Clear once every hour
start_time = time.time()
# Regularly clear VRAM to prevent overflow
clear_vram_interval = 3600 # Clear once every hour
start_time = time.time()
while True:
current_time = time.time()
if current_time - start_time > clear_vram_interval:
clear_vram()
start_time = current_time
time.sleep(60) # Check the time every minute
while True:
current_time = time.time()
if current_time - start_time > clear_vram_interval:
clear_vram()
start_time = current_time
time.sleep(60) # Check the time every minute
if __name__ == "__main__":
main()
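
Only the tail of insert_texts_with_retry survives in the hunk above; a reconstruction consistent with its signature and the final RuntimeError (our sketch, not the committed body; time is already imported in this example):

def insert_texts_with_retry(rag, texts, retries=3, delay=5):
    for attempt in range(retries):
        try:
            rag.insert(texts)
            return
        except Exception as e:
            # Back off briefly, then retry the whole batch.
            print(f"Insert failed (attempt {attempt + 1}/{retries}): {e}")
            time.sleep(delay)
    raise RuntimeError("Failed to insert texts after multiple retries.")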