diff --git a/.gitignore b/.gitignore
index 6deb14d5..a4afe4ea 100644
--- a/.gitignore
+++ b/.gitignore
@@ -57,7 +57,7 @@ ignore_this.txt
*.ignore.*
# Project-specific files
-dickens/
+dickens*/
book.txt
lightrag-dev/
gui/
diff --git a/README.md b/README.md
index 86211b35..2b0d9f4b 100644
--- a/README.md
+++ b/README.md
@@ -102,33 +102,47 @@ Use the below Python snippet (in a script) to initialize LightRAG and perform qu
```python
import os
+import asyncio
from lightrag import LightRAG, QueryParam
from lightrag.llm.openai import gpt_4o_mini_complete, gpt_4o_complete, openai_embed
+from lightrag.kg.shared_storage import initialize_pipeline_status
-rag = LightRAG(
- working_dir="your/path",
- embedding_func=openai_embed,
- llm_model_func=gpt_4o_mini_complete
-)
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir="your/path",
+ embedding_func=openai_embed,
+ llm_model_func=gpt_4o_mini_complete
+ )
-# Insert text
-rag.insert("Your text")
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
-# Perform naive search
-mode="naive"
-# Perform local search
-mode="local"
-# Perform global search
-mode="global"
-# Perform hybrid search
-mode="hybrid"
-# Mix mode Integrates knowledge graph and vector retrieval.
-mode="mix"
+ return rag
-rag.query(
- "What are the top themes in this story?",
- param=QueryParam(mode=mode)
-)
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
+ # Insert text
+ rag.insert("Your text")
+
+ # Perform naive search
+ mode="naive"
+ # Perform local search
+ mode="local"
+ # Perform global search
+ mode="global"
+ # Perform hybrid search
+ mode="hybrid"
+ # Mix mode: integrates knowledge graph and vector retrieval.
+ mode="mix"
+
+ rag.query(
+ "What are the top themes in this story?",
+ param=QueryParam(mode=mode)
+ )
+
+if __name__ == "__main__":
+ main()
```
### Query Param
@@ -190,15 +204,21 @@ async def embedding_func(texts: list[str]) -> np.ndarray:
base_url="https://api.upstage.ai/v1/solar"
)
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=4096,
- max_token_size=8192,
- func=embedding_func
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=llm_model_func,
+ embedding_func=EmbeddingFunc(
+ embedding_dim=4096,
+ max_token_size=8192,
+ func=embedding_func
+ )
)
-)
+
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
```
@@ -210,10 +230,6 @@ rag = LightRAG(
See `lightrag_hf_demo.py`
```python
-from lightrag.llm import hf_model_complete, hf_embed
-from transformers import AutoModel, AutoTokenizer
-from lightrag.utils import EmbeddingFunc
-
# Initialize LightRAG with Hugging Face model
rag = LightRAG(
working_dir=WORKING_DIR,
@@ -242,9 +258,6 @@ If you want to use Ollama models, you need to pull model you plan to use and emb
Then you only need to set LightRAG as follows:
```python
-from lightrag.llm.ollama import ollama_model_complete, ollama_embed
-from lightrag.utils import EmbeddingFunc
-
# Initialize LightRAG with Ollama model
rag = LightRAG(
working_dir=WORKING_DIR,
@@ -325,20 +338,58 @@ LightRAG supports integration with LlamaIndex.
```python
# Using LlamaIndex with direct OpenAI access
+import asyncio
from lightrag import LightRAG
from lightrag.llm.llama_index_impl import llama_index_complete_if_cache, llama_index_embed
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
+from lightrag.kg.shared_storage import initialize_pipeline_status
-rag = LightRAG(
- working_dir="your/path",
- llm_model_func=llama_index_complete_if_cache, # LlamaIndex-compatible completion function
- embedding_func=EmbeddingFunc( # LlamaIndex-compatible embedding function
- embedding_dim=1536,
- max_token_size=8192,
- func=lambda texts: llama_index_embed(texts, embed_model=embed_model)
- ),
-)
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir="your/path",
+ llm_model_func=llama_index_complete_if_cache, # LlamaIndex-compatible completion function
+ embedding_func=EmbeddingFunc( # LlamaIndex-compatible embedding function
+ embedding_dim=1536,
+ max_token_size=8192,
+ func=lambda texts: llama_index_embed(texts, embed_model=embed_model)
+ ),
+ )
+
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
+
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
+
+ with open("./book.txt", "r", encoding="utf-8") as f:
+ rag.insert(f.read())
+
+ # Perform naive search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+ )
+
+ # Perform local search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+ )
+
+ # Perform global search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+ )
+
+ # Perform hybrid search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+ )
+
+if __name__ == "__main__":
+ main()
```
#### For detailed documentation and examples, see:
@@ -353,11 +404,6 @@ rag = LightRAG(
LightRAG now supports multi-turn dialogue through the conversation history feature. Here's how to use it:
```python
-from lightrag import LightRAG, QueryParam
-
-# Initialize LightRAG
-rag = LightRAG(working_dir=WORKING_DIR)
-
# Create conversation history
conversation_history = [
{"role": "user", "content": "What is the main character's attitude towards Christmas?"},
@@ -387,11 +433,6 @@ response = rag.query(
LightRAG now supports custom prompts for fine-tuned control over the system's behavior. Here's how to use it:
```python
-from lightrag import LightRAG, QueryParam
-
-# Initialize LightRAG
-rag = LightRAG(working_dir=WORKING_DIR)
-
# Create query parameters
query_param = QueryParam(
mode="hybrid", # or other mode: "local", "global", "hybrid", "mix" and "naive"
@@ -456,16 +497,6 @@ rag.query_with_separate_keyword_extraction(
Insert Custom KG
```python
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=embedding_dimension,
- max_token_size=8192,
- func=embedding_func,
- ),
-)
-
custom_kg = {
"entities": [
{
@@ -534,6 +565,7 @@ rag = LightRAG(
"insert_batch_size": 20 # Process 20 documents per batch
}
)
+
rag.insert(["TEXT1", "TEXT2", "TEXT3", ...]) # Documents will be processed in batches of 20
```
@@ -560,27 +592,6 @@ rag.insert(["TEXT1", "TEXT2",...], ids=["ID_FOR_TEXT1", "ID_FOR_TEXT2"])
-
- Incremental Insert
-
-```python
-# Incremental Insert: Insert new documents into an existing LightRAG instance
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=embedding_dimension,
- max_token_size=8192,
- func=embedding_func,
- ),
-)
-
-with open("./newText.txt") as f:
- rag.insert(f.read())
-```
-
-
-
Insert using Pipeline
@@ -592,6 +603,7 @@ And using a routine to process news documents.
```python
rag = LightRAG(..)
+
await rag.apipeline_enqueue_documents(input)
# Your routine in loop
await rag.apipeline_process_enqueue_documents(input)
@@ -633,8 +645,6 @@ export NEO4J_PASSWORD="password"
# Note: Default settings use NetworkX
# Initialize LightRAG with Neo4J implementation.
-WORKING_DIR = "./local_neo4jWorkDir"
-
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=gpt_4o_mini_complete, # Use gpt_4o_mini_complete LLM model
@@ -706,26 +716,26 @@ You can also install `faiss-gpu` if you have GPU support.
- Here we are using `sentence-transformers` but you can also use `OpenAIEmbedding` model with `3072` dimensions.
-```
+```python
async def embedding_func(texts: list[str]) -> np.ndarray:
model = SentenceTransformer('all-MiniLM-L6-v2')
embeddings = model.encode(texts, convert_to_numpy=True)
return embeddings
# Initialize LightRAG with the LLM model function and embedding function
- rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=384,
- max_token_size=8192,
- func=embedding_func,
- ),
- vector_storage="FaissVectorDBStorage",
- vector_db_storage_cls_kwargs={
- "cosine_better_than_threshold": 0.3 # Your desired threshold
- }
- )
+rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=llm_model_func,
+ embedding_func=EmbeddingFunc(
+ embedding_dim=384,
+ max_token_size=8192,
+ func=embedding_func,
+ ),
+ vector_storage="FaissVectorDBStorage",
+ vector_db_storage_cls_kwargs={
+ "cosine_better_than_threshold": 0.3 # Your desired threshold
+ }
+)
```
@@ -733,17 +743,6 @@ async def embedding_func(texts: list[str]) -> np.ndarray:
## Delete
```python
-
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=embedding_dimension,
- max_token_size=8192,
- func=embedding_func,
- ),
-)
-
# Delete Entity: Deleting entities by their names
rag.delete_by_entity("Project Gutenberg")
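
LightRAG needs its storages and the shared pipeline status initialized before any insert or query, and that initialization is async. Below is a minimal, self-contained sketch of the two-phase setup, assuming the OpenAI helpers from the quick-start snippet above; the working directory and inserted text are placeholders.

```python
# Minimal sketch of the setup pattern: construct LightRAG, then run the async
# initialization before any insert/query. Paths and text are placeholders.
import asyncio

from lightrag import LightRAG, QueryParam
from lightrag.llm.openai import gpt_4o_mini_complete, openai_embed
from lightrag.kg.shared_storage import initialize_pipeline_status


async def initialize_rag() -> LightRAG:
    rag = LightRAG(
        working_dir="./rag_storage",
        embedding_func=openai_embed,
        llm_model_func=gpt_4o_mini_complete,
    )
    # Both calls are coroutines and must complete before any insert or query.
    await rag.initialize_storages()
    await initialize_pipeline_status()
    return rag


def main():
    rag = asyncio.run(initialize_rag())
    rag.insert("Your text")
    print(rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid")))


if __name__ == "__main__":
    main()
```

The example scripts below wrap this same `initialize_rag()` shape in their own `main()` functions.
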
diff --git a/examples/copy_llm_cache_to_another_storage.py b/examples/copy_llm_cache_to_another_storage.py
index 5d07ad13..60fa6192 100644
--- a/examples/copy_llm_cache_to_another_storage.py
+++ b/examples/copy_llm_cache_to_another_storage.py
@@ -10,7 +10,7 @@ import os
from dotenv import load_dotenv
from lightrag.kg.postgres_impl import PostgreSQLDB, PGKVStorage
-from lightrag.storage import JsonKVStorage
+from lightrag.kg.json_kv_impl import JsonKVStorage
from lightrag.namespace import NameSpace
load_dotenv()
diff --git a/examples/lightrag_api_ollama_demo.py b/examples/lightrag_api_ollama_demo.py
index 079e9935..f1b68795 100644
--- a/examples/lightrag_api_ollama_demo.py
+++ b/examples/lightrag_api_ollama_demo.py
@@ -1,4 +1,5 @@
from fastapi import FastAPI, HTTPException, File, UploadFile
+from contextlib import asynccontextmanager
from pydantic import BaseModel
import os
from lightrag import LightRAG, QueryParam
@@ -8,12 +9,12 @@ from typing import Optional
import asyncio
import nest_asyncio
import aiofiles
+from lightrag.kg.shared_storage import initialize_pipeline_status
# Apply nest_asyncio to solve event loop issues
nest_asyncio.apply()
DEFAULT_RAG_DIR = "index_default"
-app = FastAPI(title="LightRAG API", description="API for RAG operations")
DEFAULT_INPUT_FILE = "book.txt"
INPUT_FILE = os.environ.get("INPUT_FILE", f"{DEFAULT_INPUT_FILE}")
@@ -28,23 +29,41 @@ if not os.path.exists(WORKING_DIR):
os.mkdir(WORKING_DIR)
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=ollama_model_complete,
- llm_model_name="gemma2:9b",
- llm_model_max_async=4,
- llm_model_max_token_size=8192,
- llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 8192}},
- embedding_func=EmbeddingFunc(
- embedding_dim=768,
- max_token_size=8192,
- func=lambda texts: ollama_embed(
- texts, embed_model="nomic-embed-text", host="http://localhost:11434"
+async def init():
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=ollama_model_complete,
+ llm_model_name="gemma2:9b",
+ llm_model_max_async=4,
+ llm_model_max_token_size=8192,
+ llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 8192}},
+ embedding_func=EmbeddingFunc(
+ embedding_dim=768,
+ max_token_size=8192,
+ func=lambda texts: ollama_embed(
+ texts, embed_model="nomic-embed-text", host="http://localhost:11434"
+ ),
),
- ),
+ )
+
+ # Add initialization code
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
+
+
+@asynccontextmanager
+async def lifespan(app: FastAPI):
+ global rag
+ rag = await init()
+ print("done!")
+ yield
+
+
+app = FastAPI(
+ title="LightRAG API", description="API for RAG operations", lifespan=lifespan
)
-
-
# Data models
class QueryRequest(BaseModel):
query: str
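
Because this initialization is async, the API demo builds the RAG instance inside a FastAPI lifespan handler so it runs once before the first request is served. A stripped-down sketch of that pattern follows; `build_rag()` is a hypothetical stand-in for the `init()` coroutine shown above.

```python
# Sketch of the FastAPI lifespan pattern used in the API demos. build_rag() is a
# hypothetical stand-in for the LightRAG construction + initialization shown above.
from contextlib import asynccontextmanager

from fastapi import FastAPI

rag = None  # set once at startup, read by the request handlers


async def build_rag():
    # ... construct LightRAG here, then:
    # await rag.initialize_storages()
    # await initialize_pipeline_status()
    return object()  # placeholder for the initialized LightRAG instance


@asynccontextmanager
async def lifespan(app: FastAPI):
    global rag
    rag = await build_rag()  # runs once, before the first request is served
    yield                    # the application serves requests here
    # optional async cleanup would go after the yield


app = FastAPI(
    title="LightRAG API", description="API for RAG operations", lifespan=lifespan
)
```
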
diff --git a/examples/lightrag_api_openai_compatible_demo.py b/examples/lightrag_api_openai_compatible_demo.py
index 68ccfe95..2206f40d 100644
--- a/examples/lightrag_api_openai_compatible_demo.py
+++ b/examples/lightrag_api_openai_compatible_demo.py
@@ -1,4 +1,5 @@
from fastapi import FastAPI, HTTPException, File, UploadFile
+from contextlib import asynccontextmanager
from pydantic import BaseModel
import os
from lightrag import LightRAG, QueryParam
@@ -8,6 +9,7 @@ import numpy as np
from typing import Optional
import asyncio
import nest_asyncio
+from lightrag.kg.shared_storage import initialize_pipeline_status
# Apply nest_asyncio to solve event loop issues
nest_asyncio.apply()
@@ -71,16 +73,35 @@ async def get_embedding_dim():
# Initialize RAG instance
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=asyncio.run(get_embedding_dim()),
- max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
- func=embedding_func,
- ),
-)
+async def init():
+ embedding_dimension = await get_embedding_dim()
+
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=llm_model_func,
+ embedding_func=EmbeddingFunc(
+ embedding_dim=embedding_dimension,
+ max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
+ func=embedding_func,
+ ),
+ )
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
+
+@asynccontextmanager
+async def lifespan(app: FastAPI):
+ global rag
+ rag = await init()
+ print("done!")
+ yield
+
+
+app = FastAPI(
+ title="LightRAG API", description="API for RAG operations", lifespan=lifespan
+)
# Data models
diff --git a/examples/lightrag_api_openai_compatible_demo_simplified.py b/examples/lightrag_api_openai_compatible_demo_simplified.py
deleted file mode 100644
index fabbb3e2..00000000
--- a/examples/lightrag_api_openai_compatible_demo_simplified.py
+++ /dev/null
@@ -1,101 +0,0 @@
-import os
-from lightrag import LightRAG, QueryParam
-from lightrag.llm.openai import openai_complete_if_cache, openai_embed
-from lightrag.utils import EmbeddingFunc
-import numpy as np
-import asyncio
-import nest_asyncio
-
-# Apply nest_asyncio to solve event loop issues
-nest_asyncio.apply()
-
-DEFAULT_RAG_DIR = "index_default"
-
-# Configure working directory
-WORKING_DIR = os.environ.get("RAG_DIR", f"{DEFAULT_RAG_DIR}")
-print(f"WORKING_DIR: {WORKING_DIR}")
-LLM_MODEL = os.environ.get("LLM_MODEL", "gpt-4o-mini")
-print(f"LLM_MODEL: {LLM_MODEL}")
-EMBEDDING_MODEL = os.environ.get("EMBEDDING_MODEL", "text-embedding-3-small")
-print(f"EMBEDDING_MODEL: {EMBEDDING_MODEL}")
-EMBEDDING_MAX_TOKEN_SIZE = int(os.environ.get("EMBEDDING_MAX_TOKEN_SIZE", 8192))
-print(f"EMBEDDING_MAX_TOKEN_SIZE: {EMBEDDING_MAX_TOKEN_SIZE}")
-BASE_URL = os.environ.get("BASE_URL", "https://api.openai.com/v1")
-print(f"BASE_URL: {BASE_URL}")
-API_KEY = os.environ.get("API_KEY", "xxxxxxxx")
-print(f"API_KEY: {API_KEY}")
-
-if not os.path.exists(WORKING_DIR):
- os.mkdir(WORKING_DIR)
-
-
-# LLM model function
-
-
-async def llm_model_func(
- prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
-) -> str:
- return await openai_complete_if_cache(
- model=LLM_MODEL,
- prompt=prompt,
- system_prompt=system_prompt,
- history_messages=history_messages,
- base_url=BASE_URL,
- api_key=API_KEY,
- **kwargs,
- )
-
-
-# Embedding function
-
-
-async def embedding_func(texts: list[str]) -> np.ndarray:
- return await openai_embed(
- texts=texts,
- model=EMBEDDING_MODEL,
- base_url=BASE_URL,
- api_key=API_KEY,
- )
-
-
-async def get_embedding_dim():
- test_text = ["This is a test sentence."]
- embedding = await embedding_func(test_text)
- embedding_dim = embedding.shape[1]
- print(f"{embedding_dim=}")
- return embedding_dim
-
-
-# Initialize RAG instance
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=asyncio.run(get_embedding_dim()),
- max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
- func=embedding_func,
- ),
-)
-
-with open("./book.txt", "r", encoding="utf-8") as f:
- rag.insert(f.read())
-
-# Perform naive search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
-
-# Perform local search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
-
-# Perform global search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
-
-# Perform hybrid search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
diff --git a/examples/lightrag_api_oracle_demo.py b/examples/lightrag_api_oracle_demo.py
index 3675795e..3a82f479 100644
--- a/examples/lightrag_api_oracle_demo.py
+++ b/examples/lightrag_api_oracle_demo.py
@@ -16,6 +16,7 @@ from lightrag import LightRAG, QueryParam
from lightrag.llm.openai import openai_complete_if_cache, openai_embed
from lightrag.utils import EmbeddingFunc
import numpy as np
+from lightrag.kg.shared_storage import initialize_pipeline_status
print(os.getcwd())
@@ -113,6 +114,9 @@ async def init():
vector_storage="OracleVectorDBStorage",
)
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
return rag
diff --git a/examples/lightrag_azure_openai_demo.py b/examples/lightrag_azure_openai_demo.py
index 1c941f93..e0840366 100644
--- a/examples/lightrag_azure_openai_demo.py
+++ b/examples/lightrag_azure_openai_demo.py
@@ -6,6 +6,8 @@ import numpy as np
from dotenv import load_dotenv
import logging
from openai import AzureOpenAI
+import asyncio
+from lightrag.kg.shared_storage import initialize_pipeline_status
logging.basicConfig(level=logging.INFO)
@@ -90,6 +92,9 @@ rag = LightRAG(
),
)
+asyncio.run(rag.initialize_storages())
+asyncio.run(initialize_pipeline_status())
+
book1 = open("./book_1.txt", encoding="utf-8")
book2 = open("./book_2.txt", encoding="utf-8")
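
In a plain synchronous script like this one, the two initialization coroutines have to be driven explicitly on an event loop. A minimal sketch of one way to do that, assuming a `rag` instance constructed as above (the `_init` helper name is illustrative):

```python
# Sketch: driving the async initialization from a synchronous script.
# Assumes `rag` is a LightRAG instance constructed as in the demo above.
import asyncio

from lightrag.kg.shared_storage import initialize_pipeline_status


async def _init(rag):
    # Run both initialization steps on a single event loop.
    await rag.initialize_storages()
    await initialize_pipeline_status()


# e.g. right after constructing the instance:
# asyncio.run(_init(rag))
```

Bundling both awaits into one coroutine keeps them on a single event loop instead of spinning up a fresh loop per call.
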
diff --git a/examples/lightrag_bedrock_demo.py b/examples/lightrag_bedrock_demo.py
index 6bb6c7d4..700b4391 100644
--- a/examples/lightrag_bedrock_demo.py
+++ b/examples/lightrag_bedrock_demo.py
@@ -8,6 +8,12 @@ import logging
from lightrag import LightRAG, QueryParam
from lightrag.llm.bedrock import bedrock_complete, bedrock_embed
from lightrag.utils import EmbeddingFunc
+from lightrag.kg.shared_storage import initialize_pipeline_status
+
+import asyncio
+import nest_asyncio
+
+nest_asyncio.apply()
logging.getLogger("aiobotocore").setLevel(logging.WARNING)
@@ -15,22 +21,31 @@ WORKING_DIR = "./dickens"
if not os.path.exists(WORKING_DIR):
os.mkdir(WORKING_DIR)
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=bedrock_complete,
- llm_model_name="Anthropic Claude 3 Haiku // Amazon Bedrock",
- embedding_func=EmbeddingFunc(
- embedding_dim=1024, max_token_size=8192, func=bedrock_embed
- ),
-)
-
-with open("./book.txt", "r", encoding="utf-8") as f:
- rag.insert(f.read())
-
-for mode in ["naive", "local", "global", "hybrid"]:
- print("\n+-" + "-" * len(mode) + "-+")
- print(f"| {mode.capitalize()} |")
- print("+-" + "-" * len(mode) + "-+\n")
- print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode=mode))
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=bedrock_complete,
+ llm_model_name="Anthropic Claude 3 Haiku // Amazon Bedrock",
+ embedding_func=EmbeddingFunc(
+ embedding_dim=1024, max_token_size=8192, func=bedrock_embed
+ ),
)
+
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
+
+def main():
+ rag = asyncio.run(initialize_rag())
+
+ with open("./book.txt", "r", encoding="utf-8") as f:
+ rag.insert(f.read())
+
+ for mode in ["naive", "local", "global", "hybrid"]:
+ print("\n+-" + "-" * len(mode) + "-+")
+ print(f"| {mode.capitalize()} |")
+ print("+-" + "-" * len(mode) + "-+\n")
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode=mode))
+ )
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/lightrag_gemini_demo.py b/examples/lightrag_gemini_demo.py
index 32732ba8..d20c0d0d 100644
--- a/examples/lightrag_gemini_demo.py
+++ b/examples/lightrag_gemini_demo.py
@@ -8,6 +8,12 @@ from dotenv import load_dotenv
from lightrag.utils import EmbeddingFunc
from lightrag import LightRAG, QueryParam
from sentence_transformers import SentenceTransformer
+from lightrag.kg.shared_storage import initialize_pipeline_status
+
+import asyncio
+import nest_asyncio
+# Apply nest_asyncio to solve event loop issues
+nest_asyncio.apply()
load_dotenv()
gemini_api_key = os.getenv("GEMINI_API_KEY")
@@ -60,25 +66,37 @@ async def embedding_func(texts: list[str]) -> np.ndarray:
return embeddings
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=384,
- max_token_size=8192,
- func=embedding_func,
- ),
-)
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=llm_model_func,
+ embedding_func=EmbeddingFunc(
+ embedding_dim=384,
+ max_token_size=8192,
+ func=embedding_func,
+ ),
+ )
-file_path = "story.txt"
-with open(file_path, "r") as file:
- text = file.read()
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
-rag.insert(text)
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
+ file_path = "story.txt"
+ with open(file_path, "r") as file:
+ text = file.read()
-response = rag.query(
- query="What is the main theme of the story?",
- param=QueryParam(mode="hybrid", top_k=5, response_type="single line"),
-)
+ rag.insert(text)
-print(response)
+ response = rag.query(
+ query="What is the main theme of the story?",
+ param=QueryParam(mode="hybrid", top_k=5, response_type="single line"),
+ )
+
+ print(response)
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/lightrag_hf_demo.py b/examples/lightrag_hf_demo.py
index a5088e54..5cd214fd 100644
--- a/examples/lightrag_hf_demo.py
+++ b/examples/lightrag_hf_demo.py
@@ -4,51 +4,68 @@ from lightrag import LightRAG, QueryParam
from lightrag.llm.hf import hf_model_complete, hf_embed
from lightrag.utils import EmbeddingFunc
from transformers import AutoModel, AutoTokenizer
+from lightrag.kg.shared_storage import initialize_pipeline_status
+
+import asyncio
+import nest_asyncio
+
+nest_asyncio.apply()
WORKING_DIR = "./dickens"
if not os.path.exists(WORKING_DIR):
os.mkdir(WORKING_DIR)
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=hf_model_complete,
- llm_model_name="meta-llama/Llama-3.1-8B-Instruct",
- embedding_func=EmbeddingFunc(
- embedding_dim=384,
- max_token_size=5000,
- func=lambda texts: hf_embed(
- texts,
- tokenizer=AutoTokenizer.from_pretrained(
- "sentence-transformers/all-MiniLM-L6-v2"
- ),
- embed_model=AutoModel.from_pretrained(
- "sentence-transformers/all-MiniLM-L6-v2"
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=hf_model_complete,
+ llm_model_name="meta-llama/Llama-3.1-8B-Instruct",
+ embedding_func=EmbeddingFunc(
+ embedding_dim=384,
+ max_token_size=5000,
+ func=lambda texts: hf_embed(
+ texts,
+ tokenizer=AutoTokenizer.from_pretrained(
+ "sentence-transformers/all-MiniLM-L6-v2"
+ ),
+ embed_model=AutoModel.from_pretrained(
+ "sentence-transformers/all-MiniLM-L6-v2"
+ ),
),
),
- ),
-)
+ )
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
-with open("./book.txt", "r", encoding="utf-8") as f:
- rag.insert(f.read())
+ return rag
-# Perform naive search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
+def main():
+ rag = asyncio.run(initialize_rag())
-# Perform local search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
+ with open("./book.txt", "r", encoding="utf-8") as f:
+ rag.insert(f.read())
-# Perform global search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
+ # Perform naive search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+ )
-# Perform hybrid search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
+ # Perform local search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+ )
+
+ # Perform global search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+ )
+
+ # Perform hybrid search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+ )
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/lightrag_jinaai_demo.py b/examples/lightrag_jinaai_demo.py
deleted file mode 100644
index 0378b61b..00000000
--- a/examples/lightrag_jinaai_demo.py
+++ /dev/null
@@ -1,115 +0,0 @@
-import numpy as np
-from lightrag import LightRAG, QueryParam
-from lightrag.utils import EmbeddingFunc
-from lightrag.llm.jina import jina_embed
-from lightrag.llm.openai import openai_complete_if_cache
-import os
-import asyncio
-
-
-async def embedding_func(texts: list[str]) -> np.ndarray:
- return await jina_embed(texts, api_key="YourJinaAPIKey")
-
-
-WORKING_DIR = "./dickens"
-
-if not os.path.exists(WORKING_DIR):
- os.mkdir(WORKING_DIR)
-
-
-async def llm_model_func(
- prompt, system_prompt=None, history_messages=[], **kwargs
-) -> str:
- return await openai_complete_if_cache(
- "solar-mini",
- prompt,
- system_prompt=system_prompt,
- history_messages=history_messages,
- api_key=os.getenv("UPSTAGE_API_KEY"),
- base_url="https://api.upstage.ai/v1/solar",
- **kwargs,
- )
-
-
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=1024, max_token_size=8192, func=embedding_func
- ),
-)
-
-
-async def lightraginsert(file_path, semaphore):
- async with semaphore:
- try:
- with open(file_path, "r", encoding="utf-8") as f:
- content = f.read()
- except UnicodeDecodeError:
- # If UTF-8 decoding fails, try other encodings
- with open(file_path, "r", encoding="gbk") as f:
- content = f.read()
- await rag.ainsert(content)
-
-
-async def process_files(directory, concurrency_limit):
- semaphore = asyncio.Semaphore(concurrency_limit)
- tasks = []
- for root, dirs, files in os.walk(directory):
- for f in files:
- file_path = os.path.join(root, f)
- if f.startswith("."):
- continue
- tasks.append(lightraginsert(file_path, semaphore))
- await asyncio.gather(*tasks)
-
-
-async def main():
- try:
- rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=1024,
- max_token_size=8192,
- func=embedding_func,
- ),
- )
-
- asyncio.run(process_files(WORKING_DIR, concurrency_limit=4))
-
- # Perform naive search
- print(
- await rag.aquery(
- "What are the top themes in this story?", param=QueryParam(mode="naive")
- )
- )
-
- # Perform local search
- print(
- await rag.aquery(
- "What are the top themes in this story?", param=QueryParam(mode="local")
- )
- )
-
- # Perform global search
- print(
- await rag.aquery(
- "What are the top themes in this story?",
- param=QueryParam(mode="global"),
- )
- )
-
- # Perform hybrid search
- print(
- await rag.aquery(
- "What are the top themes in this story?",
- param=QueryParam(mode="hybrid"),
- )
- )
- except Exception as e:
- print(f"An error occurred: {e}")
-
-
-if __name__ == "__main__":
- asyncio.run(main())
diff --git a/examples/lightrag_llamaindex_direct_demo.py b/examples/lightrag_llamaindex_direct_demo.py
index 5db158ce..d1e68233 100644
--- a/examples/lightrag_llamaindex_direct_demo.py
+++ b/examples/lightrag_llamaindex_direct_demo.py
@@ -8,6 +8,11 @@ from lightrag.utils import EmbeddingFunc
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
import asyncio
+import nest_asyncio
+
+nest_asyncio.apply()
+
+from lightrag.kg.shared_storage import initialize_pipeline_status
# Configure working directory
WORKING_DIR = "./index_default"
@@ -76,38 +81,53 @@ async def get_embedding_dim():
return embedding_dim
-# Initialize RAG instance
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=asyncio.run(get_embedding_dim()),
- max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
- func=embedding_func,
- ),
-)
+async def initialize_rag():
+ embedding_dimension = await get_embedding_dim()
+
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=llm_model_func,
+ embedding_func=EmbeddingFunc(
+ embedding_dim=embedding_dimension,
+ max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
+ func=embedding_func,
+ ),
+ )
-# Insert example text
-with open("./book.txt", "r", encoding="utf-8") as f:
- rag.insert(f.read())
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
-# Test different query modes
-print("\nNaive Search:")
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
-print("\nLocal Search:")
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
-print("\nGlobal Search:")
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
+ # Insert example text
+ with open("./book.txt", "r", encoding="utf-8") as f:
+ rag.insert(f.read())
-print("\nHybrid Search:")
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
+ # Test different query modes
+ print("\nNaive Search:")
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+ )
+
+ print("\nLocal Search:")
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+ )
+
+ print("\nGlobal Search:")
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+ )
+
+ print("\nHybrid Search:")
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+ )
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/lightrag_llamaindex_litellm_demo.py b/examples/lightrag_llamaindex_litellm_demo.py
index 3511ecf3..d7c609b4 100644
--- a/examples/lightrag_llamaindex_litellm_demo.py
+++ b/examples/lightrag_llamaindex_litellm_demo.py
@@ -8,6 +8,11 @@ from lightrag.utils import EmbeddingFunc
from llama_index.llms.litellm import LiteLLM
from llama_index.embeddings.litellm import LiteLLMEmbedding
import asyncio
+import nest_asyncio
+
+nest_asyncio.apply()
+
+from lightrag.kg.shared_storage import initialize_pipeline_status
# Configure working directory
WORKING_DIR = "./index_default"
@@ -79,38 +84,53 @@ async def get_embedding_dim():
return embedding_dim
-# Initialize RAG instance
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=asyncio.run(get_embedding_dim()),
- max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
- func=embedding_func,
- ),
-)
+async def initialize_rag():
+ embedding_dimension = await get_embedding_dim()
+
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=llm_model_func,
+ embedding_func=EmbeddingFunc(
+ embedding_dim=embedding_dimension,
+ max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
+ func=embedding_func,
+ ),
+ )
-# Insert example text
-with open("./book.txt", "r", encoding="utf-8") as f:
- rag.insert(f.read())
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
-# Test different query modes
-print("\nNaive Search:")
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
-print("\nLocal Search:")
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
-print("\nGlobal Search:")
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
+ # Insert example text
+ with open("./book.txt", "r", encoding="utf-8") as f:
+ rag.insert(f.read())
-print("\nHybrid Search:")
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
+ # Test different query modes
+ print("\nNaive Search:")
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+ )
+
+ print("\nLocal Search:")
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+ )
+
+ print("\nGlobal Search:")
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+ )
+
+ print("\nHybrid Search:")
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+ )
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/lightrag_lmdeploy_demo.py b/examples/lightrag_lmdeploy_demo.py
index d12eb564..e640a613 100644
--- a/examples/lightrag_lmdeploy_demo.py
+++ b/examples/lightrag_lmdeploy_demo.py
@@ -5,6 +5,12 @@ from lightrag.llm.lmdeploy import lmdeploy_model_if_cache
from lightrag.llm.hf import hf_embed
from lightrag.utils import EmbeddingFunc
from transformers import AutoModel, AutoTokenizer
+from lightrag.kg.shared_storage import initialize_pipeline_status
+
+import asyncio
+import nest_asyncio
+
+nest_asyncio.apply()
WORKING_DIR = "./dickens"
@@ -35,46 +41,59 @@ async def lmdeploy_model_complete(
**kwargs,
)
-
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=lmdeploy_model_complete,
- llm_model_name="meta-llama/Llama-3.1-8B-Instruct", # please use definite path for local model
- embedding_func=EmbeddingFunc(
- embedding_dim=384,
- max_token_size=5000,
- func=lambda texts: hf_embed(
- texts,
- tokenizer=AutoTokenizer.from_pretrained(
- "sentence-transformers/all-MiniLM-L6-v2"
- ),
- embed_model=AutoModel.from_pretrained(
- "sentence-transformers/all-MiniLM-L6-v2"
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=lmdeploy_model_complete,
+ llm_model_name="meta-llama/Llama-3.1-8B-Instruct", # use an absolute path when pointing at a local model
+ embedding_func=EmbeddingFunc(
+ embedding_dim=384,
+ max_token_size=5000,
+ func=lambda texts: hf_embed(
+ texts,
+ tokenizer=AutoTokenizer.from_pretrained(
+ "sentence-transformers/all-MiniLM-L6-v2"
+ ),
+ embed_model=AutoModel.from_pretrained(
+ "sentence-transformers/all-MiniLM-L6-v2"
+ ),
),
),
- ),
-)
+ )
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
-with open("./book.txt", "r", encoding="utf-8") as f:
- rag.insert(f.read())
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
-# Perform naive search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
+ # Insert example text
+ with open("./book.txt", "r", encoding="utf-8") as f:
+ rag.insert(f.read())
-# Perform local search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
+ # Test different query modes
+ print("\nNaive Search:")
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+ )
-# Perform global search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
+ print("\nLocal Search:")
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+ )
-# Perform hybrid search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
+ print("\nGlobal Search:")
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+ )
+
+ print("\nHybrid Search:")
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+ )
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/examples/lightrag_nvidia_demo.py b/examples/lightrag_nvidia_demo.py
index da4b46ff..9137c1b6 100644
--- a/examples/lightrag_nvidia_demo.py
+++ b/examples/lightrag_nvidia_demo.py
@@ -1,5 +1,9 @@
import os
import asyncio
+import nest_asyncio
+
+nest_asyncio.apply()
+
from lightrag import LightRAG, QueryParam
from lightrag.llm import (
openai_complete_if_cache,
@@ -7,10 +11,12 @@ from lightrag.llm import (
)
from lightrag.utils import EmbeddingFunc
import numpy as np
+from lightrag.kg.shared_storage import initialize_pipeline_status
# for custom llm_model_func
from lightrag.utils import locate_json_string_body_from_string
+
WORKING_DIR = "./dickens"
if not os.path.exists(WORKING_DIR):
@@ -91,42 +97,37 @@ async def test_funcs():
# asyncio.run(test_funcs())
+async def initialize_rag():
+ embedding_dimension = await get_embedding_dim()
+ print(f"Detected embedding dimension: {embedding_dimension}")
+ # lightRAG class during indexing
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=llm_model_func,
+ # llm_model_name="meta/llama3-70b-instruct", # uncomment if needed
+ embedding_func=EmbeddingFunc(
+ embedding_dim=embedding_dimension,
+ max_token_size=512, # maximum token size; inputs may still exceed the embedding model's token limit,
+ # so the truncate (trunc) parameter on embedding_func handles it; examine the tokenizer used in LightRAG
+ # and adjust it so inputs fit the NVIDIA model (future work)
+ func=indexing_embedding_func,
+ ),
+ )
+
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
async def main():
try:
- embedding_dimension = await get_embedding_dim()
- print(f"Detected embedding dimension: {embedding_dimension}")
-
- # lightRAG class during indexing
- rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=llm_model_func,
- # llm_model_name="meta/llama3-70b-instruct", #un comment if
- embedding_func=EmbeddingFunc(
- embedding_dim=embedding_dimension,
- max_token_size=512, # maximum token size, somehow it's still exceed maximum number of token
- # so truncate (trunc) parameter on embedding_func will handle it and try to examine the tokenizer used in LightRAG
- # so you can adjust to be able to fit the NVIDIA model (future work)
- func=indexing_embedding_func,
- ),
- )
+ # Initialize RAG instance
+ rag = await initialize_rag()
# reading file
with open("./book.txt", "r", encoding="utf-8") as f:
await rag.ainsert(f.read())
- # redefine rag to change embedding into query type
- rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=llm_model_func,
- # llm_model_name="meta/llama3-70b-instruct", #un comment if
- embedding_func=EmbeddingFunc(
- embedding_dim=embedding_dimension,
- max_token_size=512,
- func=query_embedding_func,
- ),
- )
-
# Perform naive search
print("==============Naive===============")
print(
diff --git a/examples/lightrag_ollama_age_demo.py b/examples/lightrag_ollama_age_demo.py
index d394ded4..22e42190 100644
--- a/examples/lightrag_ollama_age_demo.py
+++ b/examples/lightrag_ollama_age_demo.py
@@ -1,4 +1,8 @@
import asyncio
+import nest_asyncio
+
+nest_asyncio.apply()
+
import inspect
import logging
import os
@@ -6,6 +10,7 @@ import os
from lightrag import LightRAG, QueryParam
from lightrag.llm.ollama import ollama_embed, ollama_model_complete
from lightrag.utils import EmbeddingFunc
+from lightrag.kg.shared_storage import initialize_pipeline_status
WORKING_DIR = "./dickens_age"
@@ -22,59 +27,72 @@ os.environ["AGE_POSTGRES_HOST"] = "localhost"
os.environ["AGE_POSTGRES_PORT"] = "5455"
os.environ["AGE_GRAPH_NAME"] = "dickens"
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=ollama_model_complete,
- llm_model_name="llama3.1:8b",
- llm_model_max_async=4,
- llm_model_max_token_size=32768,
- llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 32768}},
- embedding_func=EmbeddingFunc(
- embedding_dim=768,
- max_token_size=8192,
- func=lambda texts: ollama_embed(
- texts, embed_model="nomic-embed-text", host="http://localhost:11434"
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=ollama_model_complete,
+ llm_model_name="llama3.1:8b",
+ llm_model_max_async=4,
+ llm_model_max_token_size=32768,
+ llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 32768}},
+ embedding_func=EmbeddingFunc(
+ embedding_dim=768,
+ max_token_size=8192,
+ func=lambda texts: ollama_embed(
+ texts, embed_model="nomic-embed-text", host="http://localhost:11434"
+ ),
),
- ),
- graph_storage="AGEStorage",
-)
-
-with open("./book.txt", "r", encoding="utf-8") as f:
- rag.insert(f.read())
-
-# Perform naive search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
-
-# Perform local search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
-
-# Perform global search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
-
-# Perform hybrid search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
-
-# stream response
-resp = rag.query(
- "What are the top themes in this story?",
- param=QueryParam(mode="hybrid", stream=True),
-)
+ graph_storage="AGEStorage",
+ )
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
async def print_stream(stream):
async for chunk in stream:
print(chunk, end="", flush=True)
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
-if inspect.isasyncgen(resp):
- asyncio.run(print_stream(resp))
-else:
- print(resp)
+ # Insert example text
+ with open("./book.txt", "r", encoding="utf-8") as f:
+ rag.insert(f.read())
+
+ # Test different query modes
+ print("\nNaive Search:")
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+ )
+
+ print("\nLocal Search:")
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+ )
+
+ print("\nGlobal Search:")
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+ )
+
+ print("\nHybrid Search:")
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+ )
+
+ # stream response
+ resp = rag.query(
+ "What are the top themes in this story?",
+ param=QueryParam(mode="hybrid", stream=True),
+ )
+
+ if inspect.isasyncgen(resp):
+ asyncio.run(print_stream(resp))
+ else:
+ print(resp)
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/lightrag_ollama_demo.py b/examples/lightrag_ollama_demo.py
index 95856fa2..6715ea72 100644
--- a/examples/lightrag_ollama_demo.py
+++ b/examples/lightrag_ollama_demo.py
@@ -1,10 +1,14 @@
import asyncio
+import nest_asyncio
+
+nest_asyncio.apply()
import os
import inspect
import logging
from lightrag import LightRAG, QueryParam
from lightrag.llm.ollama import ollama_model_complete, ollama_embed
from lightrag.utils import EmbeddingFunc
+from lightrag.kg.shared_storage import initialize_pipeline_status
WORKING_DIR = "./dickens"
@@ -13,58 +17,71 @@ logging.basicConfig(format="%(levelname)s:%(message)s", level=logging.INFO)
if not os.path.exists(WORKING_DIR):
os.mkdir(WORKING_DIR)
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=ollama_model_complete,
- llm_model_name="gemma2:2b",
- llm_model_max_async=4,
- llm_model_max_token_size=32768,
- llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 32768}},
- embedding_func=EmbeddingFunc(
- embedding_dim=768,
- max_token_size=8192,
- func=lambda texts: ollama_embed(
- texts, embed_model="nomic-embed-text", host="http://localhost:11434"
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=ollama_model_complete,
+ llm_model_name="gemma2:2b",
+ llm_model_max_async=4,
+ llm_model_max_token_size=32768,
+ llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 32768}},
+ embedding_func=EmbeddingFunc(
+ embedding_dim=768,
+ max_token_size=8192,
+ func=lambda texts: ollama_embed(
+ texts, embed_model="nomic-embed-text", host="http://localhost:11434"
+ ),
),
- ),
-)
-
-with open("./book.txt", "r", encoding="utf-8") as f:
- rag.insert(f.read())
-
-# Perform naive search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
-
-# Perform local search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
-
-# Perform global search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
-
-# Perform hybrid search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
-
-# stream response
-resp = rag.query(
- "What are the top themes in this story?",
- param=QueryParam(mode="hybrid", stream=True),
-)
+ )
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
async def print_stream(stream):
async for chunk in stream:
print(chunk, end="", flush=True)
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
-if inspect.isasyncgen(resp):
- asyncio.run(print_stream(resp))
-else:
- print(resp)
+ # Insert example text
+ with open("./book.txt", "r", encoding="utf-8") as f:
+ rag.insert(f.read())
+
+ # Test different query modes
+ print("\nNaive Search:")
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+ )
+
+ print("\nLocal Search:")
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+ )
+
+ print("\nGlobal Search:")
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+ )
+
+ print("\nHybrid Search:")
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+ )
+
+ # stream response
+ resp = rag.query(
+ "What are the top themes in this story?",
+ param=QueryParam(mode="hybrid", stream=True),
+ )
+
+ if inspect.isasyncgen(resp):
+ asyncio.run(print_stream(resp))
+ else:
+ print(resp)
+
+if __name__ == "__main__":
+ main()
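
With `stream=True`, `rag.query()` returns an async generator instead of a string, so the result has to be drained on an event loop; the demos fall back to a plain print when streaming is not active. A small consumption sketch, assuming an already-initialized `rag` as above:

```python
# Sketch of the streaming-consumption pattern used in the Ollama demos above.
# Assumes `rag` is an initialized LightRAG instance.
import asyncio
import inspect

from lightrag import QueryParam


async def print_stream(stream):
    # Chunks arrive as strings; flush so output appears as it streams.
    async for chunk in stream:
        print(chunk, end="", flush=True)


def run_streaming_query(rag, question: str):
    resp = rag.query(question, param=QueryParam(mode="hybrid", stream=True))
    if inspect.isasyncgen(resp):       # streaming: drain the async generator
        asyncio.run(print_stream(resp))
    else:                              # non-streaming fallback: plain string
        print(resp)
```

Only the dispatch around `inspect.isasyncgen` is the essential part; `print_stream` mirrors the helper defined in the demos.
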
diff --git a/examples/lightrag_ollama_gremlin_demo.py b/examples/lightrag_ollama_gremlin_demo.py
index fa7d4fb5..4d657afa 100644
--- a/examples/lightrag_ollama_gremlin_demo.py
+++ b/examples/lightrag_ollama_gremlin_demo.py
@@ -12,6 +12,7 @@ import os
from lightrag import LightRAG, QueryParam
from lightrag.llm.ollama import ollama_embed, ollama_model_complete
from lightrag.utils import EmbeddingFunc
+from lightrag.kg.shared_storage import initialize_pipeline_status
WORKING_DIR = "./dickens_gremlin"
@@ -31,59 +32,72 @@ os.environ["GREMLIN_TRAVERSE_SOURCE"] = "g"
os.environ["GREMLIN_USER"] = ""
os.environ["GREMLIN_PASSWORD"] = ""
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=ollama_model_complete,
- llm_model_name="llama3.1:8b",
- llm_model_max_async=4,
- llm_model_max_token_size=32768,
- llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 32768}},
- embedding_func=EmbeddingFunc(
- embedding_dim=768,
- max_token_size=8192,
- func=lambda texts: ollama_embed(
- texts, embed_model="nomic-embed-text", host="http://localhost:11434"
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=ollama_model_complete,
+ llm_model_name="llama3.1:8b",
+ llm_model_max_async=4,
+ llm_model_max_token_size=32768,
+ llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 32768}},
+ embedding_func=EmbeddingFunc(
+ embedding_dim=768,
+ max_token_size=8192,
+ func=lambda texts: ollama_embed(
+ texts, embed_model="nomic-embed-text", host="http://localhost:11434"
+ ),
),
- ),
- graph_storage="GremlinStorage",
-)
-
-with open("./book.txt", "r", encoding="utf-8") as f:
- rag.insert(f.read())
-
-# Perform naive search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
-
-# Perform local search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
-
-# Perform global search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
-
-# Perform hybrid search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
-
-# stream response
-resp = rag.query(
- "What are the top themes in this story?",
- param=QueryParam(mode="hybrid", stream=True),
-)
+ graph_storage="GremlinStorage",
+ )
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
async def print_stream(stream):
async for chunk in stream:
print(chunk, end="", flush=True)
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
-if inspect.isasyncgen(resp):
- asyncio.run(print_stream(resp))
-else:
- print(resp)
+ # Insert example text
+ with open("./book.txt", "r", encoding="utf-8") as f:
+ rag.insert(f.read())
+
+ # Test different query modes
+ print("\nNaive Search:")
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+ )
+
+ print("\nLocal Search:")
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+ )
+
+ print("\nGlobal Search:")
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+ )
+
+ print("\nHybrid Search:")
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+ )
+
+ # stream response
+ resp = rag.query(
+ "What are the top themes in this story?",
+ param=QueryParam(mode="hybrid", stream=True),
+ )
+
+ if inspect.isasyncgen(resp):
+ asyncio.run(print_stream(resp))
+ else:
+ print(resp)
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/lightrag_ollama_neo4j_milvus_mongo_demo.py b/examples/lightrag_ollama_neo4j_milvus_mongo_demo.py
index 7e2d1026..e5d4064d 100644
--- a/examples/lightrag_ollama_neo4j_milvus_mongo_demo.py
+++ b/examples/lightrag_ollama_neo4j_milvus_mongo_demo.py
@@ -2,6 +2,11 @@ import os
from lightrag import LightRAG, QueryParam
from lightrag.llm.ollama import ollama_model_complete, ollama_embed
from lightrag.utils import EmbeddingFunc
+import asyncio
+import nest_asyncio
+
+nest_asyncio.apply()
+from lightrag.kg.shared_storage import initialize_pipeline_status
# WorkingDir
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
@@ -27,30 +32,59 @@ os.environ["MILVUS_USER"] = "root"
os.environ["MILVUS_PASSWORD"] = "root"
os.environ["MILVUS_DB_NAME"] = "lightrag"
-
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=ollama_model_complete,
- llm_model_name="qwen2.5:14b",
- llm_model_max_async=4,
- llm_model_max_token_size=32768,
- llm_model_kwargs={"host": "http://127.0.0.1:11434", "options": {"num_ctx": 32768}},
- embedding_func=EmbeddingFunc(
- embedding_dim=1024,
- max_token_size=8192,
- func=lambda texts: ollama_embed(
- texts=texts, embed_model="bge-m3:latest", host="http://127.0.0.1:11434"
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=ollama_model_complete,
+ llm_model_name="qwen2.5:14b",
+ llm_model_max_async=4,
+ llm_model_max_token_size=32768,
+ llm_model_kwargs={"host": "http://127.0.0.1:11434", "options": {"num_ctx": 32768}},
+ embedding_func=EmbeddingFunc(
+ embedding_dim=1024,
+ max_token_size=8192,
+ func=lambda texts: ollama_embed(
+ texts=texts, embed_model="bge-m3:latest", host="http://127.0.0.1:11434"
+ ),
),
- ),
- kv_storage="MongoKVStorage",
- graph_storage="Neo4JStorage",
- vector_storage="MilvusVectorDBStorage",
-)
+ kv_storage="MongoKVStorage",
+ graph_storage="Neo4JStorage",
+ vector_storage="MilvusVectorDBStorage",
+ )
-file = "./book.txt"
-with open(file, "r") as f:
- rag.insert(f.read())
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
+
+ # Insert example text
+ with open("./book.txt", "r", encoding="utf-8") as f:
+ rag.insert(f.read())
+
+ # Test different query modes
+ print("\nNaive Search:")
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+ )
+
+ print("\nLocal Search:")
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+ )
+
+ print("\nGlobal Search:")
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+ )
+
+ print("\nHybrid Search:")
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+ )
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/lightrag_openai_compatible_demo.py b/examples/lightrag_openai_compatible_demo.py
index 09673dd8..f4af7be6 100644
--- a/examples/lightrag_openai_compatible_demo.py
+++ b/examples/lightrag_openai_compatible_demo.py
@@ -4,6 +4,7 @@ from lightrag import LightRAG, QueryParam
from lightrag.llm.openai import openai_complete_if_cache, openai_embed
from lightrag.utils import EmbeddingFunc
import numpy as np
+from lightrag.kg.shared_storage import initialize_pipeline_status
WORKING_DIR = "./dickens"
@@ -52,21 +53,28 @@ async def test_funcs():
# asyncio.run(test_funcs())
+async def initialize_rag():
+ embedding_dimension = await get_embedding_dim()
+ print(f"Detected embedding dimension: {embedding_dimension}")
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=llm_model_func,
+ embedding_func=EmbeddingFunc(
+ embedding_dim=embedding_dimension,
+ max_token_size=8192,
+ func=embedding_func,
+ ),
+ )
+
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
async def main():
try:
- embedding_dimension = await get_embedding_dim()
- print(f"Detected embedding dimension: {embedding_dimension}")
-
- rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=embedding_dimension,
- max_token_size=8192,
- func=embedding_func,
- ),
- )
+ # Initialize RAG instance
+ rag = await initialize_rag()
with open("./book.txt", "r", encoding="utf-8") as f:
await rag.ainsert(f.read())
diff --git a/examples/lightrag_openai_compatible_demo_embedding_cache.py b/examples/lightrag_openai_compatible_demo_embedding_cache.py
index d696ce25..fcdd1ef3 100644
--- a/examples/lightrag_openai_compatible_demo_embedding_cache.py
+++ b/examples/lightrag_openai_compatible_demo_embedding_cache.py
@@ -4,6 +4,7 @@ from lightrag import LightRAG, QueryParam
from lightrag.llm.openai import openai_complete_if_cache, openai_embed
from lightrag.utils import EmbeddingFunc
import numpy as np
+from lightrag.kg.shared_storage import initialize_pipeline_status
WORKING_DIR = "./dickens"
@@ -52,25 +53,33 @@ async def test_funcs():
# asyncio.run(test_funcs())
+async def initialize_rag():
+ embedding_dimension = await get_embedding_dim()
+ print(f"Detected embedding dimension: {embedding_dimension}")
+
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ embedding_cache_config={
+ "enabled": True,
+ "similarity_threshold": 0.90,
+ },
+ llm_model_func=llm_model_func,
+ embedding_func=EmbeddingFunc(
+ embedding_dim=embedding_dimension,
+ max_token_size=8192,
+ func=embedding_func,
+ ),
+ )
+
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
async def main():
try:
- embedding_dimension = await get_embedding_dim()
- print(f"Detected embedding dimension: {embedding_dimension}")
-
- rag = LightRAG(
- working_dir=WORKING_DIR,
- embedding_cache_config={
- "enabled": True,
- "similarity_threshold": 0.90,
- },
- llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=embedding_dimension,
- max_token_size=8192,
- func=embedding_func,
- ),
- )
+ # Initialize RAG instance
+ rag = await initialize_rag()
with open("./book.txt", "r", encoding="utf-8") as f:
await rag.ainsert(f.read())
diff --git a/examples/lightrag_openai_compatible_stream_demo.py b/examples/lightrag_openai_compatible_stream_demo.py
index a974ca14..b3f237e5 100644
--- a/examples/lightrag_openai_compatible_stream_demo.py
+++ b/examples/lightrag_openai_compatible_stream_demo.py
@@ -1,9 +1,11 @@
import inspect
import os
+import asyncio
from lightrag import LightRAG
from lightrag.llm import openai_complete, openai_embed
from lightrag.utils import EmbeddingFunc, always_get_an_event_loop
from lightrag import QueryParam
+from lightrag.kg.shared_storage import initialize_pipeline_status
# WorkingDir
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
@@ -13,42 +15,54 @@ if not os.path.exists(WORKING_DIR):
print(f"WorkingDir: {WORKING_DIR}")
api_key = "empty"
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=openai_complete,
- llm_model_name="qwen2.5-14b-instruct@4bit",
- llm_model_max_async=4,
- llm_model_max_token_size=32768,
- llm_model_kwargs={"base_url": "http://127.0.0.1:1234/v1", "api_key": api_key},
- embedding_func=EmbeddingFunc(
- embedding_dim=1024,
- max_token_size=8192,
- func=lambda texts: openai_embed(
- texts=texts,
- model="text-embedding-bge-m3",
- base_url="http://127.0.0.1:1234/v1",
- api_key=api_key,
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=openai_complete,
+ llm_model_name="qwen2.5-14b-instruct@4bit",
+ llm_model_max_async=4,
+ llm_model_max_token_size=32768,
+ llm_model_kwargs={"base_url": "http://127.0.0.1:1234/v1", "api_key": api_key},
+ embedding_func=EmbeddingFunc(
+ embedding_dim=1024,
+ max_token_size=8192,
+ func=lambda texts: openai_embed(
+ texts=texts,
+ model="text-embedding-bge-m3",
+ base_url="http://127.0.0.1:1234/v1",
+ api_key=api_key,
+ ),
),
- ),
-)
+ )
-with open("./book.txt", "r", encoding="utf-8") as f:
- rag.insert(f.read())
-
-resp = rag.query(
- "What are the top themes in this story?",
- param=QueryParam(mode="hybrid", stream=True),
-)
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+ return rag
async def print_stream(stream):
async for chunk in stream:
if chunk:
print(chunk, end="", flush=True)
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
+
+ with open("./book.txt", "r", encoding="utf-8") as f:
+ rag.insert(f.read())
+
+ resp = rag.query(
+ "What are the top themes in this story?",
+ param=QueryParam(mode="hybrid", stream=True),
+ )
+
+ loop = always_get_an_event_loop()
+ if inspect.isasyncgen(resp):
+ loop.run_until_complete(print_stream(resp))
+ else:
+ print(resp)
+
+if __name__ == "__main__":
+ main()
-loop = always_get_an_event_loop()
-if inspect.isasyncgen(resp):
- loop.run_until_complete(print_stream(resp))
-else:
- print(resp)
diff --git a/examples/lightrag_openai_demo.py b/examples/lightrag_openai_demo.py
index c5393fc8..f5f47ee2 100644
--- a/examples/lightrag_openai_demo.py
+++ b/examples/lightrag_openai_demo.py
@@ -1,40 +1,54 @@
import os
-
+import asyncio
from lightrag import LightRAG, QueryParam
from lightrag.llm.openai import gpt_4o_mini_complete, openai_embed
+from lightrag.kg.shared_storage import initialize_pipeline_status
WORKING_DIR = "./dickens"
if not os.path.exists(WORKING_DIR):
os.mkdir(WORKING_DIR)
-rag = LightRAG(
- working_dir=WORKING_DIR,
- embedding_func=openai_embed,
- llm_model_func=gpt_4o_mini_complete,
- # llm_model_func=gpt_4o_complete
-)
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ embedding_func=openai_embed,
+ llm_model_func=gpt_4o_mini_complete,
+ # llm_model_func=gpt_4o_complete
+ )
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
-with open("./book.txt", "r", encoding="utf-8") as f:
- rag.insert(f.read())
+ return rag
-# Perform naive search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
-# Perform local search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
+ with open("./book.txt", "r", encoding="utf-8") as f:
+ rag.insert(f.read())
-# Perform global search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
+ # Perform naive search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+ )
+
+ # Perform local search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+ )
+
+ # Perform global search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+ )
+
+ # Perform hybrid search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+ )
+
+if __name__ == "__main__":
+ main()
-# Perform hybrid search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
diff --git a/examples/lightrag_openai_mongodb_graph_demo.py b/examples/lightrag_openai_mongodb_graph_demo.py
index 775eb296..ddf5ca63 100644
--- a/examples/lightrag_openai_mongodb_graph_demo.py
+++ b/examples/lightrag_openai_mongodb_graph_demo.py
@@ -4,6 +4,7 @@ from lightrag import LightRAG, QueryParam
from lightrag.llm.openai import gpt_4o_mini_complete, openai_embed
from lightrag.utils import EmbeddingFunc
import numpy as np
+from lightrag.kg.shared_storage import initialize_pipeline_status
#########
# Uncomment the below two lines if running in a jupyter notebook to handle the async nature of rag.insert()
@@ -52,7 +53,7 @@ async def create_embedding_function_instance():
async def initialize_rag():
embedding_func_instance = await create_embedding_function_instance()
- return LightRAG(
+ rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=gpt_4o_mini_complete,
embedding_func=embedding_func_instance,
@@ -60,14 +61,38 @@ async def initialize_rag():
log_level="DEBUG",
)
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
-# Run the initialization
-rag = asyncio.run(initialize_rag())
+ return rag
-with open("book.txt", "r", encoding="utf-8") as f:
- rag.insert(f.read())
-# Perform naive search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
+
+ with open("./book.txt", "r", encoding="utf-8") as f:
+ rag.insert(f.read())
+
+ # Perform naive search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+ )
+
+ # Perform local search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+ )
+
+ # Perform global search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+ )
+
+ # Perform hybrid search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+ )
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/examples/lightrag_openai_neo4j_milvus_redis_demo.py b/examples/lightrag_openai_neo4j_milvus_redis_demo.py
index da5c5a8f..adf87691 100644
--- a/examples/lightrag_openai_neo4j_milvus_redis_demo.py
+++ b/examples/lightrag_openai_neo4j_milvus_redis_demo.py
@@ -1,7 +1,9 @@
import os
+import asyncio
from lightrag import LightRAG, QueryParam
from lightrag.llm.ollama import ollama_embed, openai_complete_if_cache
from lightrag.utils import EmbeddingFunc
+from lightrag.kg.shared_storage import initialize_pipeline_status
# WorkingDir
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
@@ -48,23 +50,52 @@ embedding_func = EmbeddingFunc(
texts, embed_model="shaw/dmeta-embedding-zh", host="http://117.50.173.35:11434"
),
)
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=llm_model_func,
+ llm_model_max_token_size=32768,
+ embedding_func=embedding_func,
+ chunk_token_size=512,
+ chunk_overlap_token_size=256,
+ kv_storage="RedisKVStorage",
+ graph_storage="Neo4JStorage",
+ vector_storage="MilvusVectorDBStorage",
+ doc_status_storage="RedisKVStorage",
+ )
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=llm_model_func,
- llm_model_max_token_size=32768,
- embedding_func=embedding_func,
- chunk_token_size=512,
- chunk_overlap_token_size=256,
- kv_storage="RedisKVStorage",
- graph_storage="Neo4JStorage",
- vector_storage="MilvusVectorDBStorage",
- doc_status_storage="RedisKVStorage",
-)
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
-file = "../book.txt"
-with open(file, "r", encoding="utf-8") as f:
- rag.insert(f.read())
+ return rag
-print(rag.query("谁会3D建模 ?", param=QueryParam(mode="mix")))
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
+
+ with open("./book.txt", "r", encoding="utf-8") as f:
+ rag.insert(f.read())
+
+ # Perform naive search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+ )
+
+ # Perform local search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+ )
+
+ # Perform global search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+ )
+
+ # Perform hybrid search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+ )
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/lightrag_oracle_demo.py b/examples/lightrag_oracle_demo.py
index c6121840..53139220 100644
--- a/examples/lightrag_oracle_demo.py
+++ b/examples/lightrag_oracle_demo.py
@@ -6,6 +6,7 @@ from lightrag import LightRAG, QueryParam
from lightrag.llm.openai import openai_complete_if_cache, openai_embed
from lightrag.utils import EmbeddingFunc
import numpy as np
+from lightrag.kg.shared_storage import initialize_pipeline_status
print(os.getcwd())
script_directory = Path(__file__).resolve().parent.parent
@@ -63,41 +64,48 @@ async def get_embedding_dim():
embedding_dim = embedding.shape[1]
return embedding_dim
+async def initialize_rag():
+ # Detect embedding dimension
+ embedding_dimension = await get_embedding_dim()
+ print(f"Detected embedding dimension: {embedding_dimension}")
+
+ # Initialize LightRAG
+ # We use Oracle DB as the KV/vector/graph storage
+    # You can add `addon_params={"example_number": 1, "language": "Simplified Chinese"}` to control the prompt
+ rag = LightRAG(
+ # log_level="DEBUG",
+ working_dir=WORKING_DIR,
+ entity_extract_max_gleaning=1,
+ enable_llm_cache=True,
+ enable_llm_cache_for_entity_extract=True,
+ embedding_cache_config=None, # {"enabled": True,"similarity_threshold": 0.90},
+ chunk_token_size=CHUNK_TOKEN_SIZE,
+ llm_model_max_token_size=MAX_TOKENS,
+ llm_model_func=llm_model_func,
+ embedding_func=EmbeddingFunc(
+ embedding_dim=embedding_dimension,
+ max_token_size=500,
+ func=embedding_func,
+ ),
+ graph_storage="OracleGraphStorage",
+ kv_storage="OracleKVStorage",
+ vector_storage="OracleVectorDBStorage",
+ addon_params={
+ "example_number": 1,
+ "language": "Simplfied Chinese",
+ "entity_types": ["organization", "person", "geo", "event"],
+ "insert_batch_size": 2,
+ },
+ )
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
async def main():
try:
- # Detect embedding dimension
- embedding_dimension = await get_embedding_dim()
- print(f"Detected embedding dimension: {embedding_dimension}")
-
- # Initialize LightRAG
- # We use Oracle DB as the KV/vector/graph storage
- # You can add `addon_params={"example_number": 1, "language": "Simplfied Chinese"}` to control the prompt
- rag = LightRAG(
- # log_level="DEBUG",
- working_dir=WORKING_DIR,
- entity_extract_max_gleaning=1,
- enable_llm_cache=True,
- enable_llm_cache_for_entity_extract=True,
- embedding_cache_config=None, # {"enabled": True,"similarity_threshold": 0.90},
- chunk_token_size=CHUNK_TOKEN_SIZE,
- llm_model_max_token_size=MAX_TOKENS,
- llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=embedding_dimension,
- max_token_size=500,
- func=embedding_func,
- ),
- graph_storage="OracleGraphStorage",
- kv_storage="OracleKVStorage",
- vector_storage="OracleVectorDBStorage",
- addon_params={
- "example_number": 1,
- "language": "Simplfied Chinese",
- "entity_types": ["organization", "person", "geo", "event"],
- "insert_batch_size": 2,
- },
- )
+ # Initialize RAG instance
+        rag = await initialize_rag()
# Extract and Insert into LightRAG storage
with open(WORKING_DIR + "/docs.txt", "r", encoding="utf-8") as f:
diff --git a/examples/lightrag_siliconcloud_demo.py b/examples/lightrag_siliconcloud_demo.py
index 5f4f86a1..1deb5a66 100644
--- a/examples/lightrag_siliconcloud_demo.py
+++ b/examples/lightrag_siliconcloud_demo.py
@@ -5,6 +5,7 @@ from lightrag.llm.openai import openai_complete_if_cache
from lightrag.llm.siliconcloud import siliconcloud_embedding
from lightrag.utils import EmbeddingFunc
import numpy as np
+from lightrag.kg.shared_storage import initialize_pipeline_status
WORKING_DIR = "./dickens"
@@ -46,35 +47,48 @@ async def test_funcs():
asyncio.run(test_funcs())
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=llm_model_func,
+ embedding_func=EmbeddingFunc(
+ embedding_dim=768, max_token_size=512, func=embedding_func
+ ),
+ )
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=768, max_token_size=512, func=embedding_func
- ),
-)
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
-with open("./book.txt") as f:
- rag.insert(f.read())
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
-# Perform naive search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
+ with open("./book.txt", "r", encoding="utf-8") as f:
+ rag.insert(f.read())
-# Perform local search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
+ # Perform naive search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+ )
-# Perform global search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
+ # Perform local search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+ )
+
+ # Perform global search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+ )
+
+ # Perform hybrid search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+ )
+
+if __name__ == "__main__":
+ main()
-# Perform hybrid search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
diff --git a/examples/lightrag_tidb_demo.py b/examples/lightrag_tidb_demo.py
index f2ee9ad8..c3f8fd19 100644
--- a/examples/lightrag_tidb_demo.py
+++ b/examples/lightrag_tidb_demo.py
@@ -6,6 +6,7 @@ import numpy as np
from lightrag import LightRAG, QueryParam
from lightrag.llm import siliconcloud_embedding, openai_complete_if_cache
from lightrag.utils import EmbeddingFunc
+from lightrag.kg.shared_storage import initialize_pipeline_status
WORKING_DIR = "./dickens"
@@ -54,33 +55,40 @@ async def get_embedding_dim():
embedding_dim = embedding.shape[1]
return embedding_dim
+async def initialize_rag():
+ # Detect embedding dimension
+ embedding_dimension = await get_embedding_dim()
+ print(f"Detected embedding dimension: {embedding_dimension}")
+
+ # Initialize LightRAG
+    # We use TiDB as the KV/vector storage
+ rag = LightRAG(
+ enable_llm_cache=False,
+ working_dir=WORKING_DIR,
+ chunk_token_size=512,
+ llm_model_func=llm_model_func,
+ embedding_func=EmbeddingFunc(
+ embedding_dim=embedding_dimension,
+ max_token_size=512,
+ func=embedding_func,
+ ),
+ kv_storage="TiDBKVStorage",
+ vector_storage="TiDBVectorDBStorage",
+ graph_storage="TiDBGraphStorage",
+ )
+
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
async def main():
try:
- # Detect embedding dimension
- embedding_dimension = await get_embedding_dim()
- print(f"Detected embedding dimension: {embedding_dimension}")
+ # Initialize RAG instance
+        rag = await initialize_rag()
- # Initialize LightRAG
- # We use TiDB DB as the KV/vector
- rag = LightRAG(
- enable_llm_cache=False,
- working_dir=WORKING_DIR,
- chunk_token_size=512,
- llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=embedding_dimension,
- max_token_size=512,
- func=embedding_func,
- ),
- kv_storage="TiDBKVStorage",
- vector_storage="TiDBVectorDBStorage",
- graph_storage="TiDBGraphStorage",
- )
-
- # Extract and Insert into LightRAG storage
- with open("./dickens/demo.txt", "r", encoding="utf-8") as f:
- await rag.ainsert(f.read())
+ with open("./book.txt", "r", encoding="utf-8") as f:
+            await rag.ainsert(f.read())
# Perform search in different modes
modes = ["naive", "local", "global", "hybrid"]
diff --git a/examples/lightrag_zhipu_demo.py b/examples/lightrag_zhipu_demo.py
index 97a5042e..30b7316f 100644
--- a/examples/lightrag_zhipu_demo.py
+++ b/examples/lightrag_zhipu_demo.py
@@ -1,10 +1,12 @@
import os
import logging
+import asyncio
from lightrag import LightRAG, QueryParam
from lightrag.llm.zhipu import zhipu_complete, zhipu_embedding
from lightrag.utils import EmbeddingFunc
+from lightrag.kg.shared_storage import initialize_pipeline_status
WORKING_DIR = "./dickens"
@@ -17,39 +19,51 @@ api_key = os.environ.get("ZHIPUAI_API_KEY")
if api_key is None:
raise Exception("Please set ZHIPU_API_KEY in your environment")
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=zhipu_complete,
+ llm_model_name="glm-4-flashx", # Using the most cost/performance balance model, but you can change it here.
+ llm_model_max_async=4,
+ llm_model_max_token_size=32768,
+ embedding_func=EmbeddingFunc(
+ embedding_dim=2048, # Zhipu embedding-3 dimension
+ max_token_size=8192,
+ func=lambda texts: zhipu_embedding(texts),
+ ),
+ )
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=zhipu_complete,
- llm_model_name="glm-4-flashx", # Using the most cost/performance balance model, but you can change it here.
- llm_model_max_async=4,
- llm_model_max_token_size=32768,
- embedding_func=EmbeddingFunc(
- embedding_dim=2048, # Zhipu embedding-3 dimension
- max_token_size=8192,
- func=lambda texts: zhipu_embedding(texts),
- ),
-)
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
-with open("./book.txt", "r", encoding="utf-8") as f:
- rag.insert(f.read())
+ return rag
-# Perform naive search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
-# Perform local search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
+ with open("./book.txt", "r", encoding="utf-8") as f:
+ rag.insert(f.read())
-# Perform global search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
+ # Perform naive search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+ )
-# Perform hybrid search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
+ # Perform local search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+ )
+
+ # Perform global search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+ )
+
+ # Perform hybrid search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+ )
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/lightrag_zhipu_postgres_demo.py b/examples/lightrag_zhipu_postgres_demo.py
index 8f40690e..310786a5 100644
--- a/examples/lightrag_zhipu_postgres_demo.py
+++ b/examples/lightrag_zhipu_postgres_demo.py
@@ -8,6 +8,7 @@ from lightrag import LightRAG, QueryParam
from lightrag.llm.zhipu import zhipu_complete
from lightrag.llm.ollama import ollama_embedding
from lightrag.utils import EmbeddingFunc
+from lightrag.kg.shared_storage import initialize_pipeline_status
load_dotenv()
ROOT_DIR = os.environ.get("ROOT_DIR")
@@ -27,8 +28,7 @@ os.environ["POSTGRES_USER"] = "rag"
os.environ["POSTGRES_PASSWORD"] = "rag"
os.environ["POSTGRES_DATABASE"] = "rag"
-
-async def main():
+async def initialize_rag():
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=zhipu_complete,
@@ -50,9 +50,17 @@ async def main():
auto_manage_storages_states=False,
)
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
+
+async def main():
+ # Initialize RAG instance
+    rag = await initialize_rag()
+
# add embedding_func for graph database, it's deleted in commit 5661d76860436f7bf5aef2e50d9ee4a59660146c
rag.chunk_entity_relation_graph.embedding_func = rag.embedding_func
- await rag.initialize_storages()
with open(f"{ROOT_DIR}/book.txt", "r", encoding="utf-8") as f:
await rag.ainsert(f.read())
diff --git a/examples/query_keyword_separation_example.py b/examples/query_keyword_separation_example.py
index f11ce8c1..de106de6 100644
--- a/examples/query_keyword_separation_example.py
+++ b/examples/query_keyword_separation_example.py
@@ -6,6 +6,7 @@ import numpy as np
from dotenv import load_dotenv
import logging
from openai import AzureOpenAI
+from lightrag.kg.shared_storage import initialize_pipeline_status
logging.basicConfig(level=logging.INFO)
@@ -79,25 +80,32 @@ async def test_funcs():
asyncio.run(test_funcs())
embedding_dimension = 3072
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=llm_model_func,
+ embedding_func=EmbeddingFunc(
+ embedding_dim=embedding_dimension,
+ max_token_size=8192,
+ func=embedding_func,
+ ),
+ )
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=embedding_dimension,
- max_token_size=8192,
- func=embedding_func,
- ),
-)
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
-book1 = open("./book_1.txt", encoding="utf-8")
-book2 = open("./book_2.txt", encoding="utf-8")
-
-rag.insert([book1.read(), book2.read()])
+ return rag
# Example function demonstrating the new query_with_separate_keyword_extraction usage
async def run_example():
+ # Initialize RAG instance
+    rag = await initialize_rag()
+
+ book1 = open("./book_1.txt", encoding="utf-8")
+ book2 = open("./book_2.txt", encoding="utf-8")
+
+ rag.insert([book1.read(), book2.read()])
query = "What are the top themes in this story?"
prompt = "Please simplify the response for a young audience."
diff --git a/examples/test.py b/examples/test.py
index 67ee22eb..dc186bda 100644
--- a/examples/test.py
+++ b/examples/test.py
@@ -1,6 +1,7 @@
import os
from lightrag import LightRAG, QueryParam
from lightrag.llm.openai import gpt_4o_mini_complete
+import asyncio
+from lightrag.kg.shared_storage import initialize_pipeline_status
#########
# Uncomment the below two lines if running in a jupyter notebook to handle the async nature of rag.insert()
# import nest_asyncio
@@ -12,31 +13,45 @@ WORKING_DIR = "./dickens"
if not os.path.exists(WORKING_DIR):
os.mkdir(WORKING_DIR)
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=gpt_4o_mini_complete, # Use gpt_4o_mini_complete LLM model
- # llm_model_func=gpt_4o_complete # Optionally, use a stronger model
-)
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=gpt_4o_mini_complete, # Use gpt_4o_mini_complete LLM model
+ # llm_model_func=gpt_4o_complete # Optionally, use a stronger model
+ )
-with open("./dickens/book.txt", "r", encoding="utf-8") as f:
- rag.insert(f.read())
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
-# Perform naive search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
+ return rag
-# Perform local search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
-# Perform global search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
-# Perform hybrid search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
+ with open("./book.txt", "r", encoding="utf-8") as f:
+ rag.insert(f.read())
+
+ # Perform naive search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+ )
+
+ # Perform local search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+ )
+
+ # Perform global search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+ )
+
+ # Perform hybrid search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+ )
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/examples/test_chromadb.py b/examples/test_chromadb.py
index 99090a6d..10d69cc1 100644
--- a/examples/test_chromadb.py
+++ b/examples/test_chromadb.py
@@ -4,6 +4,7 @@ from lightrag import LightRAG, QueryParam
from lightrag.llm.openai import gpt_4o_mini_complete, openai_embed
from lightrag.utils import EmbeddingFunc
import numpy as np
+from lightrag.kg.shared_storage import initialize_pipeline_status
#########
# Uncomment the below two lines if running in a jupyter notebook to handle the async nature of rag.insert()
@@ -67,7 +68,7 @@ async def create_embedding_function_instance():
async def initialize_rag():
embedding_func_instance = await create_embedding_function_instance()
if CHROMADB_USE_LOCAL_PERSISTENT:
- return LightRAG(
+ rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=gpt_4o_mini_complete,
embedding_func=embedding_func_instance,
@@ -87,7 +88,7 @@ async def initialize_rag():
},
)
else:
- return LightRAG(
+ rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=gpt_4o_mini_complete,
embedding_func=embedding_func_instance,
@@ -112,28 +113,36 @@ async def initialize_rag():
)
-# Run the initialization
-rag = asyncio.run(initialize_rag())
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
-# with open("./dickens/book.txt", "r", encoding="utf-8") as f:
-# rag.insert(f.read())
+ return rag
-# Perform naive search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
+def main():
+    # Initialize RAG instance
+    rag = asyncio.run(initialize_rag())
-# Perform local search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
+ with open("./book.txt", "r", encoding="utf-8") as f:
+ rag.insert(f.read())
-# Perform global search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
+ # Perform naive search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+ )
-# Perform hybrid search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
+ # Perform local search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+ )
+
+ # Perform global search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+ )
+
+ # Perform hybrid search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+ )
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/test_faiss.py b/examples/test_faiss.py
index c3ac6f47..34991f47 100644
--- a/examples/test_faiss.py
+++ b/examples/test_faiss.py
@@ -1,5 +1,6 @@
import os
import logging
+import asyncio
import numpy as np
from dotenv import load_dotenv
@@ -8,7 +9,9 @@ from sentence_transformers import SentenceTransformer
from openai import AzureOpenAI
from lightrag import LightRAG, QueryParam
from lightrag.utils import EmbeddingFunc
+from lightrag.kg.shared_storage import initialize_pipeline_status
+WORKING_DIR = "./dickens"
# Configure Logging
logging.basicConfig(level=logging.INFO)
@@ -55,11 +58,7 @@ async def embedding_func(texts: list[str]) -> np.ndarray:
embeddings = model.encode(texts, convert_to_numpy=True)
return embeddings
-
-def main():
- WORKING_DIR = "./dickens"
-
- # Initialize LightRAG with the LLM model function and embedding function
+async def initialize_rag():
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=llm_model_func,
@@ -74,6 +73,15 @@ def main():
},
)
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
+
+def main():
+
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
# Insert the custom chunks into LightRAG
book1 = open("./book_1.txt", encoding="utf-8")
book2 = open("./book_2.txt", encoding="utf-8")
diff --git a/examples/test_neo4j.py b/examples/test_neo4j.py
index ac5f7fb7..2d1d527a 100644
--- a/examples/test_neo4j.py
+++ b/examples/test_neo4j.py
@@ -1,7 +1,8 @@
import os
+import asyncio
from lightrag import LightRAG, QueryParam
from lightrag.llm.openai import gpt_4o_mini_complete
-
+from lightrag.kg.shared_storage import initialize_pipeline_status
#########
# Uncomment the below two lines if running in a jupyter notebook to handle the async nature of rag.insert()
@@ -14,33 +15,46 @@ WORKING_DIR = "./local_neo4jWorkDir"
if not os.path.exists(WORKING_DIR):
os.mkdir(WORKING_DIR)
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=gpt_4o_mini_complete, # Use gpt_4o_mini_complete LLM model
- graph_storage="Neo4JStorage",
- log_level="INFO",
- # llm_model_func=gpt_4o_complete # Optionally, use a stronger model
-)
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=gpt_4o_mini_complete, # Use gpt_4o_mini_complete LLM model
+ graph_storage="Neo4JStorage",
+ log_level="INFO",
+ # llm_model_func=gpt_4o_complete # Optionally, use a stronger model
+ )
-with open("./book.txt") as f:
- rag.insert(f.read())
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
-# Perform naive search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
+ return rag
-# Perform local search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
-# Perform global search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
+ with open("./book.txt", "r", encoding="utf-8") as f:
+ rag.insert(f.read())
-# Perform hybrid search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
+ # Perform naive search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+ )
+
+ # Perform local search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+ )
+
+ # Perform global search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+ )
+
+ # Perform hybrid search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+ )
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/test_split_by_character.ipynb b/examples/test_split_by_character.ipynb
index df5d938d..f70f9f34 100644
--- a/examples/test_split_by_character.ipynb
+++ b/examples/test_split_by_character.ipynb
@@ -18,6 +18,7 @@
"from lightrag import LightRAG, QueryParam\n",
"from lightrag.llm.openai import openai_complete_if_cache, openai_embed\n",
"from lightrag.utils import EmbeddingFunc\n",
+ "from lightrag.kg.shared_storage import initialize_pipeline_status\n",
"import nest_asyncio"
]
},
@@ -25,7 +26,9 @@
"cell_type": "markdown",
"id": "dd17956ec322b361",
"metadata": {},
- "source": "#### split by character"
+ "source": [
+ "#### split by character"
+ ]
},
{
"cell_type": "code",
@@ -109,14 +112,26 @@
}
],
"source": [
- "rag = LightRAG(\n",
- " working_dir=WORKING_DIR,\n",
- " llm_model_func=llm_model_func,\n",
- " embedding_func=EmbeddingFunc(\n",
- " embedding_dim=4096, max_token_size=8192, func=embedding_func\n",
- " ),\n",
- " chunk_token_size=512,\n",
- ")"
+ "import asyncio\n",
+ "import nest_asyncio\n",
+ "\n",
+ "nest_asyncio.apply()\n",
+ "\n",
+ "async def initialize_rag():\n",
+ " rag = LightRAG(\n",
+ " working_dir=WORKING_DIR,\n",
+ " llm_model_func=llm_model_func,\n",
+ " embedding_func=EmbeddingFunc(\n",
+ " embedding_dim=4096, max_token_size=8192, func=embedding_func\n",
+ " ),\n",
+ " chunk_token_size=512,\n",
+ " )\n",
+ " await rag.initialize_storages()\n",
+ " await initialize_pipeline_status()\n",
+ "\n",
+ " return rag\n",
+ "\n",
+ "rag = asyncio.run(initialize_rag())"
]
},
{
@@ -908,7 +923,9 @@
"cell_type": "markdown",
"id": "4e5bfad24cb721a8",
"metadata": {},
- "source": "#### split by character only"
+ "source": [
+ "#### split by character only"
+ ]
},
{
"cell_type": "code",
diff --git a/examples/vram_management_demo.py b/examples/vram_management_demo.py
index b8d0872e..f4d46ab4 100644
--- a/examples/vram_management_demo.py
+++ b/examples/vram_management_demo.py
@@ -1,8 +1,10 @@
import os
import time
+import asyncio
from lightrag import LightRAG, QueryParam
from lightrag.llm.ollama import ollama_model_complete, ollama_embed
from lightrag.utils import EmbeddingFunc
+from lightrag.kg.shared_storage import initialize_pipeline_status
# Working directory and the directory path for text files
WORKING_DIR = "./dickens"
@@ -12,17 +14,22 @@ TEXT_FILES_DIR = "/llm/mt"
if not os.path.exists(WORKING_DIR):
os.mkdir(WORKING_DIR)
-# Initialize LightRAG
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=ollama_model_complete,
- llm_model_name="qwen2.5:3b-instruct-max-context",
- embedding_func=EmbeddingFunc(
- embedding_dim=768,
- max_token_size=8192,
- func=lambda texts: ollama_embed(texts, embed_model="nomic-embed-text"),
- ),
-)
+async def initialize_rag():
+ # Initialize LightRAG
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=ollama_model_complete,
+ llm_model_name="qwen2.5:3b-instruct-max-context",
+ embedding_func=EmbeddingFunc(
+ embedding_dim=768,
+ max_token_size=8192,
+ func=lambda texts: ollama_embed(texts, embed_model="nomic-embed-text"),
+ ),
+ )
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
# Read all .txt files from the TEXT_FILES_DIR directory
texts = []
@@ -47,58 +54,65 @@ def insert_texts_with_retry(rag, texts, retries=3, delay=5):
raise RuntimeError("Failed to insert texts after multiple retries.")
-insert_texts_with_retry(rag, texts)
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
-# Perform different types of queries and handle potential errors
-try:
- print(
- rag.query(
- "What are the top themes in this story?", param=QueryParam(mode="naive")
+ insert_texts_with_retry(rag, texts)
+
+ # Perform different types of queries and handle potential errors
+ try:
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="naive")
+ )
)
- )
-except Exception as e:
- print(f"Error performing naive search: {e}")
+ except Exception as e:
+ print(f"Error performing naive search: {e}")
-try:
- print(
- rag.query(
- "What are the top themes in this story?", param=QueryParam(mode="local")
+ try:
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="local")
+ )
)
- )
-except Exception as e:
- print(f"Error performing local search: {e}")
+ except Exception as e:
+ print(f"Error performing local search: {e}")
-try:
- print(
- rag.query(
- "What are the top themes in this story?", param=QueryParam(mode="global")
+ try:
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="global")
+ )
)
- )
-except Exception as e:
- print(f"Error performing global search: {e}")
+ except Exception as e:
+ print(f"Error performing global search: {e}")
-try:
- print(
- rag.query(
- "What are the top themes in this story?", param=QueryParam(mode="hybrid")
+ try:
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="hybrid")
+ )
)
- )
-except Exception as e:
- print(f"Error performing hybrid search: {e}")
+ except Exception as e:
+ print(f"Error performing hybrid search: {e}")
-# Function to clear VRAM resources
-def clear_vram():
- os.system("sudo nvidia-smi --gpu-reset")
+ # Function to clear VRAM resources
+ def clear_vram():
+ os.system("sudo nvidia-smi --gpu-reset")
-# Regularly clear VRAM to prevent overflow
-clear_vram_interval = 3600 # Clear once every hour
-start_time = time.time()
+ # Regularly clear VRAM to prevent overflow
+ clear_vram_interval = 3600 # Clear once every hour
+ start_time = time.time()
-while True:
- current_time = time.time()
- if current_time - start_time > clear_vram_interval:
- clear_vram()
- start_time = current_time
- time.sleep(60) # Check the time every minute
+ while True:
+ current_time = time.time()
+ if current_time - start_time > clear_vram_interval:
+ clear_vram()
+ start_time = current_time
+ time.sleep(60) # Check the time every minute
+
+if __name__ == "__main__":
+ main()
diff --git a/reproduce/Step_1.py b/reproduce/Step_1.py
index e318c145..6df00b8a 100644
--- a/reproduce/Step_1.py
+++ b/reproduce/Step_1.py
@@ -1,8 +1,10 @@
import os
import json
import time
+import asyncio
from lightrag import LightRAG
+from lightrag.kg.shared_storage import initialize_pipeline_status
def insert_text(rag, file_path):
@@ -29,6 +31,19 @@ WORKING_DIR = f"../{cls}"
if not os.path.exists(WORKING_DIR):
os.mkdir(WORKING_DIR)
-rag = LightRAG(working_dir=WORKING_DIR)
+async def initialize_rag():
+ rag = LightRAG(working_dir=WORKING_DIR)
-insert_text(rag, f"../datasets/unique_contexts/{cls}_unique_contexts.json")
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
+
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
+ insert_text(rag, f"../datasets/unique_contexts/{cls}_unique_contexts.json")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/reproduce/Step_1_openai_compatible.py b/reproduce/Step_1_openai_compatible.py
index 09ca78c5..7e4139b8 100644
--- a/reproduce/Step_1_openai_compatible.py
+++ b/reproduce/Step_1_openai_compatible.py
@@ -1,11 +1,13 @@
import os
import json
import time
+import asyncio
import numpy as np
from lightrag import LightRAG
from lightrag.utils import EmbeddingFunc
from lightrag.llm.openai import openai_complete_if_cache, openai_embed
+from lightrag.kg.shared_storage import initialize_pipeline_status
## For Upstage API
@@ -60,12 +62,25 @@ WORKING_DIR = f"../{cls}"
if not os.path.exists(WORKING_DIR):
os.mkdir(WORKING_DIR)
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=4096, max_token_size=8192, func=embedding_func
- ),
-)
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=llm_model_func,
+ embedding_func=EmbeddingFunc(
+ embedding_dim=4096, max_token_size=8192, func=embedding_func
+ ),
+ )
-insert_text(rag, f"../datasets/unique_contexts/{cls}_unique_contexts.json")
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
+
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
+ insert_text(rag, f"../datasets/unique_contexts/{cls}_unique_contexts.json")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/run_with_gunicorn.py b/run_with_gunicorn.py
deleted file mode 100755
index 2e4e3cf7..00000000
--- a/run_with_gunicorn.py
+++ /dev/null
@@ -1,203 +0,0 @@
-#!/usr/bin/env python
-"""
-Start LightRAG server with Gunicorn
-"""
-
-import os
-import sys
-import signal
-import pipmaster as pm
-from lightrag.api.utils_api import parse_args, display_splash_screen
-from lightrag.kg.shared_storage import initialize_share_data, finalize_share_data
-
-
-def check_and_install_dependencies():
- """Check and install required dependencies"""
- required_packages = [
- "gunicorn",
- "tiktoken",
- "psutil",
- # Add other required packages here
- ]
-
- for package in required_packages:
- if not pm.is_installed(package):
- print(f"Installing {package}...")
- pm.install(package)
- print(f"{package} installed successfully")
-
-
-# Signal handler for graceful shutdown
-def signal_handler(sig, frame):
- print("\n\n" + "=" * 80)
- print("RECEIVED TERMINATION SIGNAL")
- print(f"Process ID: {os.getpid()}")
- print("=" * 80 + "\n")
-
- # Release shared resources
- finalize_share_data()
-
- # Exit with success status
- sys.exit(0)
-
-
-def main():
- # Check and install dependencies
- check_and_install_dependencies()
-
- # Register signal handlers for graceful shutdown
- signal.signal(signal.SIGINT, signal_handler) # Ctrl+C
- signal.signal(signal.SIGTERM, signal_handler) # kill command
-
- # Parse all arguments using parse_args
- args = parse_args(is_uvicorn_mode=False)
-
- # Display startup information
- display_splash_screen(args)
-
- print("🚀 Starting LightRAG with Gunicorn")
- print(f"🔄 Worker management: Gunicorn (workers={args.workers})")
- print("🔍 Preloading app: Enabled")
- print("📝 Note: Using Gunicorn's preload feature for shared data initialization")
- print("\n\n" + "=" * 80)
- print("MAIN PROCESS INITIALIZATION")
- print(f"Process ID: {os.getpid()}")
- print(f"Workers setting: {args.workers}")
- print("=" * 80 + "\n")
-
- # Import Gunicorn's StandaloneApplication
- from gunicorn.app.base import BaseApplication
-
- # Define a custom application class that loads our config
- class GunicornApp(BaseApplication):
- def __init__(self, app, options=None):
- self.options = options or {}
- self.application = app
- super().__init__()
-
- def load_config(self):
- # Define valid Gunicorn configuration options
- valid_options = {
- "bind",
- "workers",
- "worker_class",
- "timeout",
- "keepalive",
- "preload_app",
- "errorlog",
- "accesslog",
- "loglevel",
- "certfile",
- "keyfile",
- "limit_request_line",
- "limit_request_fields",
- "limit_request_field_size",
- "graceful_timeout",
- "max_requests",
- "max_requests_jitter",
- }
-
- # Special hooks that need to be set separately
- special_hooks = {
- "on_starting",
- "on_reload",
- "on_exit",
- "pre_fork",
- "post_fork",
- "pre_exec",
- "pre_request",
- "post_request",
- "worker_init",
- "worker_exit",
- "nworkers_changed",
- "child_exit",
- }
-
- # Import and configure the gunicorn_config module
- import gunicorn_config
-
- # Set configuration variables in gunicorn_config, prioritizing command line arguments
- gunicorn_config.workers = (
- args.workers if args.workers else int(os.getenv("WORKERS", 1))
- )
-
- # Bind configuration prioritizes command line arguments
- host = args.host if args.host != "0.0.0.0" else os.getenv("HOST", "0.0.0.0")
- port = args.port if args.port != 9621 else int(os.getenv("PORT", 9621))
- gunicorn_config.bind = f"{host}:{port}"
-
- # Log level configuration prioritizes command line arguments
- gunicorn_config.loglevel = (
- args.log_level.lower()
- if args.log_level
- else os.getenv("LOG_LEVEL", "info")
- )
-
- # Timeout configuration prioritizes command line arguments
- gunicorn_config.timeout = (
- args.timeout if args.timeout else int(os.getenv("TIMEOUT", 150))
- )
-
- # Keepalive configuration
- gunicorn_config.keepalive = int(os.getenv("KEEPALIVE", 5))
-
- # SSL configuration prioritizes command line arguments
- if args.ssl or os.getenv("SSL", "").lower() in (
- "true",
- "1",
- "yes",
- "t",
- "on",
- ):
- gunicorn_config.certfile = (
- args.ssl_certfile
- if args.ssl_certfile
- else os.getenv("SSL_CERTFILE")
- )
- gunicorn_config.keyfile = (
- args.ssl_keyfile if args.ssl_keyfile else os.getenv("SSL_KEYFILE")
- )
-
- # Set configuration options from the module
- for key in dir(gunicorn_config):
- if key in valid_options:
- value = getattr(gunicorn_config, key)
- # Skip functions like on_starting and None values
- if not callable(value) and value is not None:
- self.cfg.set(key, value)
- # Set special hooks
- elif key in special_hooks:
- value = getattr(gunicorn_config, key)
- if callable(value):
- self.cfg.set(key, value)
-
- if hasattr(gunicorn_config, "logconfig_dict"):
- self.cfg.set(
- "logconfig_dict", getattr(gunicorn_config, "logconfig_dict")
- )
-
- def load(self):
- # Import the application
- from lightrag.api.lightrag_server import get_application
-
- return get_application(args)
-
- # Create the application
- app = GunicornApp("")
-
- # Force workers to be an integer and greater than 1 for multi-process mode
- workers_count = int(args.workers)
- if workers_count > 1:
- # Set a flag to indicate we're in the main process
- os.environ["LIGHTRAG_MAIN_PROCESS"] = "1"
- initialize_share_data(workers_count)
- else:
- initialize_share_data(1)
-
- # Run the application
- print("\nStarting Gunicorn with direct Python API...")
- app.run()
-
-
-if __name__ == "__main__":
- main()