diff --git a/.gitignore b/.gitignore
index 6deb14d5..a4afe4ea 100644
--- a/.gitignore
+++ b/.gitignore
@@ -57,7 +57,7 @@ ignore_this.txt
*.ignore.*
# Project-specific files
-dickens/
+dickens*/
book.txt
lightrag-dev/
gui/
diff --git a/README.md b/README.md
index 86211b35..abc2f8b3 100644
--- a/README.md
+++ b/README.md
@@ -102,33 +102,47 @@ Use the below Python snippet (in a script) to initialize LightRAG and perform qu
```python
import os
+import asyncio
from lightrag import LightRAG, QueryParam
from lightrag.llm.openai import gpt_4o_mini_complete, gpt_4o_complete, openai_embed
+from lightrag.kg.shared_storage import initialize_pipeline_status
-rag = LightRAG(
- working_dir="your/path",
- embedding_func=openai_embed,
- llm_model_func=gpt_4o_mini_complete
-)
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir="your/path",
+ embedding_func=openai_embed,
+ llm_model_func=gpt_4o_mini_complete
+ )
-# Insert text
-rag.insert("Your text")
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
-# Perform naive search
-mode="naive"
-# Perform local search
-mode="local"
-# Perform global search
-mode="global"
-# Perform hybrid search
-mode="hybrid"
-# Mix mode Integrates knowledge graph and vector retrieval.
-mode="mix"
+ return rag
-rag.query(
- "What are the top themes in this story?",
- param=QueryParam(mode=mode)
-)
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
+ # Insert text
+ rag.insert("Your text")
+
+ # Perform naive search
+ mode="naive"
+ # Perform local search
+ mode="local"
+ # Perform global search
+ mode="global"
+ # Perform hybrid search
+ mode="hybrid"
+    # Mix mode: integrates knowledge graph and vector retrieval.
+ mode="mix"
+
+ rag.query(
+ "What are the top themes in this story?",
+ param=QueryParam(mode=mode)
+ )
+
+if __name__ == "__main__":
+ main()
```
### Query Param
@@ -190,15 +204,21 @@ async def embedding_func(texts: list[str]) -> np.ndarray:
base_url="https://api.upstage.ai/v1/solar"
)
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=4096,
- max_token_size=8192,
- func=embedding_func
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=llm_model_func,
+ embedding_func=EmbeddingFunc(
+ embedding_dim=4096,
+ max_token_size=8192,
+ func=embedding_func
+ )
)
-)
+
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
```
@@ -210,10 +230,6 @@ rag = LightRAG(
See `lightrag_hf_demo.py`
```python
-from lightrag.llm import hf_model_complete, hf_embed
-from transformers import AutoModel, AutoTokenizer
-from lightrag.utils import EmbeddingFunc
-
# Initialize LightRAG with Hugging Face model
rag = LightRAG(
working_dir=WORKING_DIR,
@@ -242,9 +258,6 @@ If you want to use Ollama models, you need to pull model you plan to use and emb
Then you only need to set LightRAG as follows:
```python
-from lightrag.llm.ollama import ollama_model_complete, ollama_embed
-from lightrag.utils import EmbeddingFunc
-
# Initialize LightRAG with Ollama model
rag = LightRAG(
working_dir=WORKING_DIR,
@@ -325,20 +338,58 @@ LightRAG supports integration with LlamaIndex.
```python
# Using LlamaIndex with direct OpenAI access
+import asyncio
from lightrag import LightRAG
from lightrag.llm.llama_index_impl import llama_index_complete_if_cache, llama_index_embed
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
+from lightrag.kg.shared_storage import initialize_pipeline_status
-rag = LightRAG(
- working_dir="your/path",
- llm_model_func=llama_index_complete_if_cache, # LlamaIndex-compatible completion function
- embedding_func=EmbeddingFunc( # LlamaIndex-compatible embedding function
- embedding_dim=1536,
- max_token_size=8192,
- func=lambda texts: llama_index_embed(texts, embed_model=embed_model)
- ),
-)
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir="your/path",
+ llm_model_func=llama_index_complete_if_cache, # LlamaIndex-compatible completion function
+ embedding_func=EmbeddingFunc( # LlamaIndex-compatible embedding function
+ embedding_dim=1536,
+ max_token_size=8192,
+ func=lambda texts: llama_index_embed(texts, embed_model=embed_model)
+ ),
+ )
+
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
+
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
+
+ with open("./book.txt", "r", encoding="utf-8") as f:
+ rag.insert(f.read())
+
+ # Perform naive search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+ )
+
+ # Perform local search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+ )
+
+ # Perform global search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+ )
+
+ # Perform hybrid search
+ print(
+ rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+ )
+
+if __name__ == "__main__":
+ main()
```
#### For detailed documentation and examples, see:
@@ -353,11 +404,6 @@ rag = LightRAG(
LightRAG now supports multi-turn dialogue through the conversation history feature. Here's how to use it:
```python
-from lightrag import LightRAG, QueryParam
-
-# Initialize LightRAG
-rag = LightRAG(working_dir=WORKING_DIR)
-
# Create conversation history
conversation_history = [
{"role": "user", "content": "What is the main character's attitude towards Christmas?"},
@@ -387,11 +433,6 @@ response = rag.query(
LightRAG now supports custom prompts for fine-tuned control over the system's behavior. Here's how to use it:
```python
-from lightrag import LightRAG, QueryParam
-
-# Initialize LightRAG
-rag = LightRAG(working_dir=WORKING_DIR)
-
# Create query parameters
query_param = QueryParam(
mode="hybrid", # or other mode: "local", "global", "hybrid", "mix" and "naive"
@@ -456,16 +497,6 @@ rag.query_with_separate_keyword_extraction(
Insert Custom KG
```python
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=embedding_dimension,
- max_token_size=8192,
- func=embedding_func,
- ),
-)
-
custom_kg = {
"entities": [
{
@@ -534,6 +565,7 @@ rag = LightRAG(
"insert_batch_size": 20 # Process 20 documents per batch
}
)
+
rag.insert(["TEXT1", "TEXT2", "TEXT3", ...]) # Documents will be processed in batches of 20
```
@@ -560,27 +592,6 @@ rag.insert(["TEXT1", "TEXT2",...], ids=["ID_FOR_TEXT1", "ID_FOR_TEXT2"])
-
- Incremental Insert
-
-```python
-# Incremental Insert: Insert new documents into an existing LightRAG instance
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=embedding_dimension,
- max_token_size=8192,
- func=embedding_func,
- ),
-)
-
-with open("./newText.txt") as f:
- rag.insert(f.read())
-```
-
-
-
Insert using Pipeline
@@ -592,6 +603,7 @@ And using a routine to process news documents.
```python
rag = LightRAG(..)
+
await rag.apipeline_enqueue_documents(input)
# Your routine in loop
await rag.apipeline_process_enqueue_documents(input)
@@ -633,8 +645,6 @@ export NEO4J_PASSWORD="password"
# Note: Default settings use NetworkX
# Initialize LightRAG with Neo4J implementation.
-WORKING_DIR = "./local_neo4jWorkDir"
-
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=gpt_4o_mini_complete, # Use gpt_4o_mini_complete LLM model
@@ -706,26 +716,26 @@ You can also install `faiss-gpu` if you have GPU support.
- Here we are using `sentence-transformers` but you can also use `OpenAIEmbedding` model with `3072` dimensions.
-```
+```python
async def embedding_func(texts: list[str]) -> np.ndarray:
model = SentenceTransformer('all-MiniLM-L6-v2')
embeddings = model.encode(texts, convert_to_numpy=True)
return embeddings
# Initialize LightRAG with the LLM model function and embedding function
- rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=384,
- max_token_size=8192,
- func=embedding_func,
- ),
- vector_storage="FaissVectorDBStorage",
- vector_db_storage_cls_kwargs={
- "cosine_better_than_threshold": 0.3 # Your desired threshold
- }
- )
+rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=llm_model_func,
+ embedding_func=EmbeddingFunc(
+ embedding_dim=384,
+ max_token_size=8192,
+ func=embedding_func,
+ ),
+ vector_storage="FaissVectorDBStorage",
+ vector_db_storage_cls_kwargs={
+ "cosine_better_than_threshold": 0.3 # Your desired threshold
+ }
+)
```
@@ -733,17 +743,6 @@ async def embedding_func(texts: list[str]) -> np.ndarray:
## Delete
```python
-
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=embedding_dimension,
- max_token_size=8192,
- func=embedding_func,
- ),
-)
-
# Delete Entity: Deleting entities by their names
rag.delete_by_entity("Project Gutenberg")
@@ -751,6 +750,70 @@ rag.delete_by_entity("Project Gutenberg")
rag.delete_by_doc_id("doc_id")
```
+## Edit Entities and Relations
+
+LightRAG now supports comprehensive knowledge graph management capabilities, allowing you to create, edit, and delete entities and relationships within your knowledge graph.
+
+### Create Entities and Relations
+
+```python
+# Create new entity
+entity = rag.create_entity("Google", {
+ "description": "Google is a multinational technology company specializing in internet-related services and products.",
+ "entity_type": "company"
+})
+
+# Create another entity
+product = rag.create_entity("Gmail", {
+ "description": "Gmail is an email service developed by Google.",
+ "entity_type": "product"
+})
+
+# Create relation between entities
+relation = rag.create_relation("Google", "Gmail", {
+ "description": "Google develops and operates Gmail.",
+ "keywords": "develops operates service",
+ "weight": 2.0
+})
+```
+
+### Edit Entities and Relations
+
+```python
+# Edit an existing entity
+updated_entity = rag.edit_entity("Google", {
+ "description": "Google is a subsidiary of Alphabet Inc., founded in 1998.",
+ "entity_type": "tech_company"
+})
+
+# Rename an entity (with all its relationships properly migrated)
+renamed_entity = rag.edit_entity("Gmail", {
+ "entity_name": "Google Mail",
+ "description": "Google Mail (formerly Gmail) is an email service."
+})
+
+# Edit a relation between entities
+updated_relation = rag.edit_relation("Google", "Google Mail", {
+ "description": "Google created and maintains Google Mail service.",
+ "keywords": "creates maintains email service",
+ "weight": 3.0
+})
+```
+
+All operations are available in both synchronous and asynchronous versions. The asynchronous versions have the prefix "a" (e.g., `acreate_entity`, `aedit_relation`).
+
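+A minimal sketch of the asynchronous variants, assuming they accept the same arguments as their synchronous counterparts:
+
+```python
+import asyncio
+
+async def update_kg(rag):
+    # Async entity creation (same arguments as create_entity)
+    await rag.acreate_entity("YouTube", {
+        "description": "YouTube is a video-sharing platform owned by Google.",
+        "entity_type": "product"
+    })
+
+    # Async relation edit (same arguments as edit_relation)
+    await rag.aedit_relation("Google", "Google Mail", {
+        "description": "Google develops and maintains Google Mail.",
+        "keywords": "develops maintains email service",
+        "weight": 3.0
+    })
+
+asyncio.run(update_kg(rag))
+```
+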
+#### Entity Operations
+
+- **create_entity**: Creates a new entity with specified attributes
+- **edit_entity**: Updates an existing entity's attributes or renames it
+
+#### Relation Operations
+
+- **create_relation**: Creates a new relation between existing entities
+- **edit_relation**: Updates an existing relation's attributes
+
+These operations maintain data consistency across both the graph database and vector database components, ensuring your knowledge graph remains coherent.
+
## Cache
diff --git a/examples/copy_llm_cache_to_another_storage.py b/examples/copy_llm_cache_to_another_storage.py
index 5d07ad13..60fa6192 100644
--- a/examples/copy_llm_cache_to_another_storage.py
+++ b/examples/copy_llm_cache_to_another_storage.py
@@ -10,7 +10,7 @@ import os
from dotenv import load_dotenv
from lightrag.kg.postgres_impl import PostgreSQLDB, PGKVStorage
-from lightrag.storage import JsonKVStorage
+from lightrag.kg.json_kv_impl import JsonKVStorage
from lightrag.namespace import NameSpace
load_dotenv()
diff --git a/examples/lightrag_api_ollama_demo.py b/examples/lightrag_api_ollama_demo.py
index 079e9935..dad2a2e0 100644
--- a/examples/lightrag_api_ollama_demo.py
+++ b/examples/lightrag_api_ollama_demo.py
@@ -1,4 +1,5 @@
from fastapi import FastAPI, HTTPException, File, UploadFile
+from contextlib import asynccontextmanager
from pydantic import BaseModel
import os
from lightrag import LightRAG, QueryParam
@@ -8,12 +9,12 @@ from typing import Optional
import asyncio
import nest_asyncio
import aiofiles
+from lightrag.kg.shared_storage import initialize_pipeline_status
# Apply nest_asyncio to solve event loop issues
nest_asyncio.apply()
DEFAULT_RAG_DIR = "index_default"
-app = FastAPI(title="LightRAG API", description="API for RAG operations")
DEFAULT_INPUT_FILE = "book.txt"
INPUT_FILE = os.environ.get("INPUT_FILE", f"{DEFAULT_INPUT_FILE}")
@@ -28,20 +29,43 @@ if not os.path.exists(WORKING_DIR):
os.mkdir(WORKING_DIR)
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=ollama_model_complete,
- llm_model_name="gemma2:9b",
- llm_model_max_async=4,
- llm_model_max_token_size=8192,
- llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 8192}},
- embedding_func=EmbeddingFunc(
- embedding_dim=768,
- max_token_size=8192,
- func=lambda texts: ollama_embed(
- texts, embed_model="nomic-embed-text", host="http://localhost:11434"
+async def init():
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=ollama_model_complete,
+ llm_model_name="gemma2:9b",
+ llm_model_max_async=4,
+ llm_model_max_token_size=8192,
+ llm_model_kwargs={
+ "host": "http://localhost:11434",
+ "options": {"num_ctx": 8192},
+ },
+ embedding_func=EmbeddingFunc(
+ embedding_dim=768,
+ max_token_size=8192,
+ func=lambda texts: ollama_embed(
+ texts, embed_model="nomic-embed-text", host="http://localhost:11434"
+ ),
),
- ),
+ )
+
+ # Add initialization code
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
+
+
+@asynccontextmanager
+async def lifespan(app: FastAPI):
+ global rag
+ rag = await init()
+ print("done!")
+ yield
+
+
+app = FastAPI(
+ title="LightRAG API", description="API for RAG operations", lifespan=lifespan
)
diff --git a/examples/lightrag_api_openai_compatible_demo.py b/examples/lightrag_api_openai_compatible_demo.py
index 68ccfe95..312be872 100644
--- a/examples/lightrag_api_openai_compatible_demo.py
+++ b/examples/lightrag_api_openai_compatible_demo.py
@@ -1,4 +1,5 @@
from fastapi import FastAPI, HTTPException, File, UploadFile
+from contextlib import asynccontextmanager
from pydantic import BaseModel
import os
from lightrag import LightRAG, QueryParam
@@ -8,6 +9,7 @@ import numpy as np
from typing import Optional
import asyncio
import nest_asyncio
+from lightrag.kg.shared_storage import initialize_pipeline_status
# Apply nest_asyncio to solve event loop issues
nest_asyncio.apply()
@@ -71,16 +73,36 @@ async def get_embedding_dim():
# Initialize RAG instance
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=asyncio.run(get_embedding_dim()),
- max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
- func=embedding_func,
- ),
-)
+async def init():
+ embedding_dimension = await get_embedding_dim()
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=llm_model_func,
+ embedding_func=EmbeddingFunc(
+ embedding_dim=embedding_dimension,
+ max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
+ func=embedding_func,
+ ),
+ )
+
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
+
+
+@asynccontextmanager
+async def lifespan(app: FastAPI):
+ global rag
+ rag = await init()
+ print("done!")
+ yield
+
+
+app = FastAPI(
+ title="LightRAG API", description="API for RAG operations", lifespan=lifespan
+)
# Data models
diff --git a/examples/lightrag_api_openai_compatible_demo_simplified.py b/examples/lightrag_api_openai_compatible_demo_simplified.py
deleted file mode 100644
index fabbb3e2..00000000
--- a/examples/lightrag_api_openai_compatible_demo_simplified.py
+++ /dev/null
@@ -1,101 +0,0 @@
-import os
-from lightrag import LightRAG, QueryParam
-from lightrag.llm.openai import openai_complete_if_cache, openai_embed
-from lightrag.utils import EmbeddingFunc
-import numpy as np
-import asyncio
-import nest_asyncio
-
-# Apply nest_asyncio to solve event loop issues
-nest_asyncio.apply()
-
-DEFAULT_RAG_DIR = "index_default"
-
-# Configure working directory
-WORKING_DIR = os.environ.get("RAG_DIR", f"{DEFAULT_RAG_DIR}")
-print(f"WORKING_DIR: {WORKING_DIR}")
-LLM_MODEL = os.environ.get("LLM_MODEL", "gpt-4o-mini")
-print(f"LLM_MODEL: {LLM_MODEL}")
-EMBEDDING_MODEL = os.environ.get("EMBEDDING_MODEL", "text-embedding-3-small")
-print(f"EMBEDDING_MODEL: {EMBEDDING_MODEL}")
-EMBEDDING_MAX_TOKEN_SIZE = int(os.environ.get("EMBEDDING_MAX_TOKEN_SIZE", 8192))
-print(f"EMBEDDING_MAX_TOKEN_SIZE: {EMBEDDING_MAX_TOKEN_SIZE}")
-BASE_URL = os.environ.get("BASE_URL", "https://api.openai.com/v1")
-print(f"BASE_URL: {BASE_URL}")
-API_KEY = os.environ.get("API_KEY", "xxxxxxxx")
-print(f"API_KEY: {API_KEY}")
-
-if not os.path.exists(WORKING_DIR):
- os.mkdir(WORKING_DIR)
-
-
-# LLM model function
-
-
-async def llm_model_func(
- prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
-) -> str:
- return await openai_complete_if_cache(
- model=LLM_MODEL,
- prompt=prompt,
- system_prompt=system_prompt,
- history_messages=history_messages,
- base_url=BASE_URL,
- api_key=API_KEY,
- **kwargs,
- )
-
-
-# Embedding function
-
-
-async def embedding_func(texts: list[str]) -> np.ndarray:
- return await openai_embed(
- texts=texts,
- model=EMBEDDING_MODEL,
- base_url=BASE_URL,
- api_key=API_KEY,
- )
-
-
-async def get_embedding_dim():
- test_text = ["This is a test sentence."]
- embedding = await embedding_func(test_text)
- embedding_dim = embedding.shape[1]
- print(f"{embedding_dim=}")
- return embedding_dim
-
-
-# Initialize RAG instance
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=asyncio.run(get_embedding_dim()),
- max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
- func=embedding_func,
- ),
-)
-
-with open("./book.txt", "r", encoding="utf-8") as f:
- rag.insert(f.read())
-
-# Perform naive search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
-
-# Perform local search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
-
-# Perform global search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
-
-# Perform hybrid search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
diff --git a/examples/lightrag_api_oracle_demo.py b/examples/lightrag_api_oracle_demo.py
index 3675795e..3a82f479 100644
--- a/examples/lightrag_api_oracle_demo.py
+++ b/examples/lightrag_api_oracle_demo.py
@@ -16,6 +16,7 @@ from lightrag import LightRAG, QueryParam
from lightrag.llm.openai import openai_complete_if_cache, openai_embed
from lightrag.utils import EmbeddingFunc
import numpy as np
+from lightrag.kg.shared_storage import initialize_pipeline_status
print(os.getcwd())
@@ -113,6 +114,9 @@ async def init():
vector_storage="OracleVectorDBStorage",
)
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
return rag
diff --git a/examples/lightrag_azure_openai_demo.py b/examples/lightrag_azure_openai_demo.py
index 1c941f93..e0840366 100644
--- a/examples/lightrag_azure_openai_demo.py
+++ b/examples/lightrag_azure_openai_demo.py
@@ -6,6 +6,7 @@ import numpy as np
from dotenv import load_dotenv
import logging
from openai import AzureOpenAI
+import asyncio
+from lightrag.kg.shared_storage import initialize_pipeline_status
logging.basicConfig(level=logging.INFO)
@@ -90,6 +91,9 @@ rag = LightRAG(
),
)
+asyncio.run(rag.initialize_storages())
+asyncio.run(initialize_pipeline_status())
+
book1 = open("./book_1.txt", encoding="utf-8")
book2 = open("./book_2.txt", encoding="utf-8")
diff --git a/examples/lightrag_bedrock_demo.py b/examples/lightrag_bedrock_demo.py
index 6bb6c7d4..68e9f962 100644
--- a/examples/lightrag_bedrock_demo.py
+++ b/examples/lightrag_bedrock_demo.py
@@ -8,6 +8,12 @@ import logging
from lightrag import LightRAG, QueryParam
from lightrag.llm.bedrock import bedrock_complete, bedrock_embed
from lightrag.utils import EmbeddingFunc
+from lightrag.kg.shared_storage import initialize_pipeline_status
+
+import asyncio
+import nest_asyncio
+
+nest_asyncio.apply()
logging.getLogger("aiobotocore").setLevel(logging.WARNING)
@@ -15,22 +21,35 @@ WORKING_DIR = "./dickens"
if not os.path.exists(WORKING_DIR):
os.mkdir(WORKING_DIR)
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=bedrock_complete,
- llm_model_name="Anthropic Claude 3 Haiku // Amazon Bedrock",
- embedding_func=EmbeddingFunc(
- embedding_dim=1024, max_token_size=8192, func=bedrock_embed
- ),
-)
-with open("./book.txt", "r", encoding="utf-8") as f:
- rag.insert(f.read())
-
-for mode in ["naive", "local", "global", "hybrid"]:
- print("\n+-" + "-" * len(mode) + "-+")
- print(f"| {mode.capitalize()} |")
- print("+-" + "-" * len(mode) + "-+\n")
- print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode=mode))
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=bedrock_complete,
+ llm_model_name="Anthropic Claude 3 Haiku // Amazon Bedrock",
+ embedding_func=EmbeddingFunc(
+ embedding_dim=1024, max_token_size=8192, func=bedrock_embed
+ ),
)
+
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
+
+
+def main():
+ rag = asyncio.run(initialize_rag())
+
+ with open("./book.txt", "r", encoding="utf-8") as f:
+ rag.insert(f.read())
+
+ for mode in ["naive", "local", "global", "hybrid"]:
+ print("\n+-" + "-" * len(mode) + "-+")
+ print(f"| {mode.capitalize()} |")
+ print("+-" + "-" * len(mode) + "-+\n")
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode=mode)
+ )
+    )
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/lightrag_gemini_demo.py b/examples/lightrag_gemini_demo.py
index 32732ba8..cd2bb579 100644
--- a/examples/lightrag_gemini_demo.py
+++ b/examples/lightrag_gemini_demo.py
@@ -8,6 +8,13 @@ from dotenv import load_dotenv
from lightrag.utils import EmbeddingFunc
from lightrag import LightRAG, QueryParam
from sentence_transformers import SentenceTransformer
+from lightrag.kg.shared_storage import initialize_pipeline_status
+
+import asyncio
+import nest_asyncio
+
+# Apply nest_asyncio to solve event loop issues
+nest_asyncio.apply()
load_dotenv()
gemini_api_key = os.getenv("GEMINI_API_KEY")
@@ -60,25 +67,39 @@ async def embedding_func(texts: list[str]) -> np.ndarray:
return embeddings
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=384,
- max_token_size=8192,
- func=embedding_func,
- ),
-)
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=llm_model_func,
+ embedding_func=EmbeddingFunc(
+ embedding_dim=384,
+ max_token_size=8192,
+ func=embedding_func,
+ ),
+ )
-file_path = "story.txt"
-with open(file_path, "r") as file:
- text = file.read()
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
-rag.insert(text)
+ return rag
-response = rag.query(
- query="What is the main theme of the story?",
- param=QueryParam(mode="hybrid", top_k=5, response_type="single line"),
-)
-print(response)
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
+ file_path = "story.txt"
+ with open(file_path, "r") as file:
+ text = file.read()
+
+ rag.insert(text)
+
+ response = rag.query(
+ query="What is the main theme of the story?",
+ param=QueryParam(mode="hybrid", top_k=5, response_type="single line"),
+ )
+
+ print(response)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/lightrag_hf_demo.py b/examples/lightrag_hf_demo.py
index a5088e54..f2abbb2f 100644
--- a/examples/lightrag_hf_demo.py
+++ b/examples/lightrag_hf_demo.py
@@ -4,51 +4,79 @@ from lightrag import LightRAG, QueryParam
from lightrag.llm.hf import hf_model_complete, hf_embed
from lightrag.utils import EmbeddingFunc
from transformers import AutoModel, AutoTokenizer
+from lightrag.kg.shared_storage import initialize_pipeline_status
+
+import asyncio
+import nest_asyncio
+
+nest_asyncio.apply()
WORKING_DIR = "./dickens"
if not os.path.exists(WORKING_DIR):
os.mkdir(WORKING_DIR)
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=hf_model_complete,
- llm_model_name="meta-llama/Llama-3.1-8B-Instruct",
- embedding_func=EmbeddingFunc(
- embedding_dim=384,
- max_token_size=5000,
- func=lambda texts: hf_embed(
- texts,
- tokenizer=AutoTokenizer.from_pretrained(
- "sentence-transformers/all-MiniLM-L6-v2"
- ),
- embed_model=AutoModel.from_pretrained(
- "sentence-transformers/all-MiniLM-L6-v2"
+
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=hf_model_complete,
+ llm_model_name="meta-llama/Llama-3.1-8B-Instruct",
+ embedding_func=EmbeddingFunc(
+ embedding_dim=384,
+ max_token_size=5000,
+ func=lambda texts: hf_embed(
+ texts,
+ tokenizer=AutoTokenizer.from_pretrained(
+ "sentence-transformers/all-MiniLM-L6-v2"
+ ),
+ embed_model=AutoModel.from_pretrained(
+ "sentence-transformers/all-MiniLM-L6-v2"
+ ),
),
),
- ),
-)
+ )
+
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
-with open("./book.txt", "r", encoding="utf-8") as f:
- rag.insert(f.read())
+def main():
+ rag = asyncio.run(initialize_rag())
-# Perform naive search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
+ with open("./book.txt", "r", encoding="utf-8") as f:
+ rag.insert(f.read())
-# Perform local search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
+ # Perform naive search
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="naive")
+ )
+ )
-# Perform global search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
+ # Perform local search
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="local")
+ )
+ )
-# Perform hybrid search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
+ # Perform global search
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="global")
+ )
+ )
+
+ # Perform hybrid search
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="hybrid")
+ )
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/lightrag_jinaai_demo.py b/examples/lightrag_jinaai_demo.py
deleted file mode 100644
index 0378b61b..00000000
--- a/examples/lightrag_jinaai_demo.py
+++ /dev/null
@@ -1,115 +0,0 @@
-import numpy as np
-from lightrag import LightRAG, QueryParam
-from lightrag.utils import EmbeddingFunc
-from lightrag.llm.jina import jina_embed
-from lightrag.llm.openai import openai_complete_if_cache
-import os
-import asyncio
-
-
-async def embedding_func(texts: list[str]) -> np.ndarray:
- return await jina_embed(texts, api_key="YourJinaAPIKey")
-
-
-WORKING_DIR = "./dickens"
-
-if not os.path.exists(WORKING_DIR):
- os.mkdir(WORKING_DIR)
-
-
-async def llm_model_func(
- prompt, system_prompt=None, history_messages=[], **kwargs
-) -> str:
- return await openai_complete_if_cache(
- "solar-mini",
- prompt,
- system_prompt=system_prompt,
- history_messages=history_messages,
- api_key=os.getenv("UPSTAGE_API_KEY"),
- base_url="https://api.upstage.ai/v1/solar",
- **kwargs,
- )
-
-
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=1024, max_token_size=8192, func=embedding_func
- ),
-)
-
-
-async def lightraginsert(file_path, semaphore):
- async with semaphore:
- try:
- with open(file_path, "r", encoding="utf-8") as f:
- content = f.read()
- except UnicodeDecodeError:
- # If UTF-8 decoding fails, try other encodings
- with open(file_path, "r", encoding="gbk") as f:
- content = f.read()
- await rag.ainsert(content)
-
-
-async def process_files(directory, concurrency_limit):
- semaphore = asyncio.Semaphore(concurrency_limit)
- tasks = []
- for root, dirs, files in os.walk(directory):
- for f in files:
- file_path = os.path.join(root, f)
- if f.startswith("."):
- continue
- tasks.append(lightraginsert(file_path, semaphore))
- await asyncio.gather(*tasks)
-
-
-async def main():
- try:
- rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=1024,
- max_token_size=8192,
- func=embedding_func,
- ),
- )
-
- asyncio.run(process_files(WORKING_DIR, concurrency_limit=4))
-
- # Perform naive search
- print(
- await rag.aquery(
- "What are the top themes in this story?", param=QueryParam(mode="naive")
- )
- )
-
- # Perform local search
- print(
- await rag.aquery(
- "What are the top themes in this story?", param=QueryParam(mode="local")
- )
- )
-
- # Perform global search
- print(
- await rag.aquery(
- "What are the top themes in this story?",
- param=QueryParam(mode="global"),
- )
- )
-
- # Perform hybrid search
- print(
- await rag.aquery(
- "What are the top themes in this story?",
- param=QueryParam(mode="hybrid"),
- )
- )
- except Exception as e:
- print(f"An error occurred: {e}")
-
-
-if __name__ == "__main__":
- asyncio.run(main())
diff --git a/examples/lightrag_llamaindex_direct_demo.py b/examples/lightrag_llamaindex_direct_demo.py
index 5db158ce..d5e3f617 100644
--- a/examples/lightrag_llamaindex_direct_demo.py
+++ b/examples/lightrag_llamaindex_direct_demo.py
@@ -8,6 +8,11 @@ from lightrag.utils import EmbeddingFunc
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding
import asyncio
+import nest_asyncio
+
+nest_asyncio.apply()
+
+from lightrag.kg.shared_storage import initialize_pipeline_status
# Configure working directory
WORKING_DIR = "./index_default"
@@ -76,38 +81,62 @@ async def get_embedding_dim():
return embedding_dim
-# Initialize RAG instance
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=asyncio.run(get_embedding_dim()),
- max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
- func=embedding_func,
- ),
-)
+async def initialize_rag():
+ embedding_dimension = await get_embedding_dim()
-# Insert example text
-with open("./book.txt", "r", encoding="utf-8") as f:
- rag.insert(f.read())
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=llm_model_func,
+ embedding_func=EmbeddingFunc(
+ embedding_dim=embedding_dimension,
+ max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
+ func=embedding_func,
+ ),
+ )
-# Test different query modes
-print("\nNaive Search:")
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
-print("\nLocal Search:")
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
+ return rag
-print("\nGlobal Search:")
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
-print("\nHybrid Search:")
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
+
+ # Insert example text
+ with open("./book.txt", "r", encoding="utf-8") as f:
+ rag.insert(f.read())
+
+ # Test different query modes
+ print("\nNaive Search:")
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="naive")
+ )
+ )
+
+ print("\nLocal Search:")
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="local")
+ )
+ )
+
+ print("\nGlobal Search:")
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="global")
+ )
+ )
+
+ print("\nHybrid Search:")
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="hybrid")
+ )
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/lightrag_llamaindex_litellm_demo.py b/examples/lightrag_llamaindex_litellm_demo.py
index 3511ecf3..6e738628 100644
--- a/examples/lightrag_llamaindex_litellm_demo.py
+++ b/examples/lightrag_llamaindex_litellm_demo.py
@@ -8,6 +8,11 @@ from lightrag.utils import EmbeddingFunc
from llama_index.llms.litellm import LiteLLM
from llama_index.embeddings.litellm import LiteLLMEmbedding
import asyncio
+import nest_asyncio
+
+nest_asyncio.apply()
+
+from lightrag.kg.shared_storage import initialize_pipeline_status
# Configure working directory
WORKING_DIR = "./index_default"
@@ -79,38 +84,62 @@ async def get_embedding_dim():
return embedding_dim
-# Initialize RAG instance
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=asyncio.run(get_embedding_dim()),
- max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
- func=embedding_func,
- ),
-)
+async def initialize_rag():
+ embedding_dimension = await get_embedding_dim()
-# Insert example text
-with open("./book.txt", "r", encoding="utf-8") as f:
- rag.insert(f.read())
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=llm_model_func,
+ embedding_func=EmbeddingFunc(
+ embedding_dim=embedding_dimension,
+ max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
+ func=embedding_func,
+ ),
+ )
-# Test different query modes
-print("\nNaive Search:")
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
-print("\nLocal Search:")
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
+ return rag
-print("\nGlobal Search:")
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
-print("\nHybrid Search:")
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
+
+ # Insert example text
+ with open("./book.txt", "r", encoding="utf-8") as f:
+ rag.insert(f.read())
+
+ # Test different query modes
+ print("\nNaive Search:")
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="naive")
+ )
+ )
+
+ print("\nLocal Search:")
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="local")
+ )
+ )
+
+ print("\nGlobal Search:")
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="global")
+ )
+ )
+
+ print("\nHybrid Search:")
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="hybrid")
+ )
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/lightrag_lmdeploy_demo.py b/examples/lightrag_lmdeploy_demo.py
index d12eb564..ba118fc9 100644
--- a/examples/lightrag_lmdeploy_demo.py
+++ b/examples/lightrag_lmdeploy_demo.py
@@ -5,6 +5,12 @@ from lightrag.llm.lmdeploy import lmdeploy_model_if_cache
from lightrag.llm.hf import hf_embed
from lightrag.utils import EmbeddingFunc
from transformers import AutoModel, AutoTokenizer
+from lightrag.kg.shared_storage import initialize_pipeline_status
+
+import asyncio
+import nest_asyncio
+
+nest_asyncio.apply()
WORKING_DIR = "./dickens"
@@ -36,45 +42,69 @@ async def lmdeploy_model_complete(
)
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=lmdeploy_model_complete,
- llm_model_name="meta-llama/Llama-3.1-8B-Instruct", # please use definite path for local model
- embedding_func=EmbeddingFunc(
- embedding_dim=384,
- max_token_size=5000,
- func=lambda texts: hf_embed(
- texts,
- tokenizer=AutoTokenizer.from_pretrained(
- "sentence-transformers/all-MiniLM-L6-v2"
- ),
- embed_model=AutoModel.from_pretrained(
- "sentence-transformers/all-MiniLM-L6-v2"
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=lmdeploy_model_complete,
+        llm_model_name="meta-llama/Llama-3.1-8B-Instruct",  # use an absolute path for a local model
+ embedding_func=EmbeddingFunc(
+ embedding_dim=384,
+ max_token_size=5000,
+ func=lambda texts: hf_embed(
+ texts,
+ tokenizer=AutoTokenizer.from_pretrained(
+ "sentence-transformers/all-MiniLM-L6-v2"
+ ),
+ embed_model=AutoModel.from_pretrained(
+ "sentence-transformers/all-MiniLM-L6-v2"
+ ),
),
),
- ),
-)
+ )
+
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
-with open("./book.txt", "r", encoding="utf-8") as f:
- rag.insert(f.read())
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
-# Perform naive search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
+ # Insert example text
+ with open("./book.txt", "r", encoding="utf-8") as f:
+ rag.insert(f.read())
-# Perform local search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
+ # Test different query modes
+ print("\nNaive Search:")
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="naive")
+ )
+ )
-# Perform global search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
+ print("\nLocal Search:")
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="local")
+ )
+ )
-# Perform hybrid search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
+ print("\nGlobal Search:")
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="global")
+ )
+ )
+
+ print("\nHybrid Search:")
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="hybrid")
+ )
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/lightrag_nvidia_demo.py b/examples/lightrag_nvidia_demo.py
index da4b46ff..6de0814c 100644
--- a/examples/lightrag_nvidia_demo.py
+++ b/examples/lightrag_nvidia_demo.py
@@ -1,5 +1,9 @@
import os
import asyncio
+import nest_asyncio
+
+nest_asyncio.apply()
+
from lightrag import LightRAG, QueryParam
from lightrag.llm import (
openai_complete_if_cache,
@@ -7,10 +11,12 @@ from lightrag.llm import (
)
from lightrag.utils import EmbeddingFunc
import numpy as np
+from lightrag.kg.shared_storage import initialize_pipeline_status
# for custom llm_model_func
from lightrag.utils import locate_json_string_body_from_string
+
WORKING_DIR = "./dickens"
if not os.path.exists(WORKING_DIR):
@@ -92,41 +98,39 @@ async def test_funcs():
# asyncio.run(test_funcs())
+async def initialize_rag():
+ embedding_dimension = await get_embedding_dim()
+ print(f"Detected embedding dimension: {embedding_dimension}")
+
+ # lightRAG class during indexing
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=llm_model_func,
+        # llm_model_name="meta/llama3-70b-instruct",  # uncomment if needed
+ embedding_func=EmbeddingFunc(
+ embedding_dim=embedding_dimension,
+            max_token_size=512,  # maximum token size; inputs may still exceed the model's token limit,
+            # so the truncate (trunc) parameter on embedding_func handles it; examine the tokenizer used in LightRAG
+            # so you can adjust it to fit the NVIDIA model (future work)
+ func=indexing_embedding_func,
+ ),
+ )
+
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
+
+
async def main():
try:
- embedding_dimension = await get_embedding_dim()
- print(f"Detected embedding dimension: {embedding_dimension}")
-
- # lightRAG class during indexing
- rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=llm_model_func,
- # llm_model_name="meta/llama3-70b-instruct", #un comment if
- embedding_func=EmbeddingFunc(
- embedding_dim=embedding_dimension,
- max_token_size=512, # maximum token size, somehow it's still exceed maximum number of token
- # so truncate (trunc) parameter on embedding_func will handle it and try to examine the tokenizer used in LightRAG
- # so you can adjust to be able to fit the NVIDIA model (future work)
- func=indexing_embedding_func,
- ),
- )
+        # Initialize RAG instance
+        rag = await initialize_rag()
# reading file
with open("./book.txt", "r", encoding="utf-8") as f:
await rag.ainsert(f.read())
- # redefine rag to change embedding into query type
- rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=llm_model_func,
- # llm_model_name="meta/llama3-70b-instruct", #un comment if
- embedding_func=EmbeddingFunc(
- embedding_dim=embedding_dimension,
- max_token_size=512,
- func=query_embedding_func,
- ),
- )
-
# Perform naive search
print("==============Naive===============")
print(
diff --git a/examples/lightrag_ollama_age_demo.py b/examples/lightrag_ollama_age_demo.py
index d394ded4..b1b4607a 100644
--- a/examples/lightrag_ollama_age_demo.py
+++ b/examples/lightrag_ollama_age_demo.py
@@ -1,4 +1,8 @@
import asyncio
+import nest_asyncio
+
+nest_asyncio.apply()
+
import inspect
import logging
import os
@@ -6,6 +10,7 @@ import os
from lightrag import LightRAG, QueryParam
from lightrag.llm.ollama import ollama_embed, ollama_model_complete
from lightrag.utils import EmbeddingFunc
+from lightrag.kg.shared_storage import initialize_pipeline_status
WORKING_DIR = "./dickens_age"
@@ -22,51 +27,32 @@ os.environ["AGE_POSTGRES_HOST"] = "localhost"
os.environ["AGE_POSTGRES_PORT"] = "5455"
os.environ["AGE_GRAPH_NAME"] = "dickens"
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=ollama_model_complete,
- llm_model_name="llama3.1:8b",
- llm_model_max_async=4,
- llm_model_max_token_size=32768,
- llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 32768}},
- embedding_func=EmbeddingFunc(
- embedding_dim=768,
- max_token_size=8192,
- func=lambda texts: ollama_embed(
- texts, embed_model="nomic-embed-text", host="http://localhost:11434"
+
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=ollama_model_complete,
+ llm_model_name="llama3.1:8b",
+ llm_model_max_async=4,
+ llm_model_max_token_size=32768,
+ llm_model_kwargs={
+ "host": "http://localhost:11434",
+ "options": {"num_ctx": 32768},
+ },
+ embedding_func=EmbeddingFunc(
+ embedding_dim=768,
+ max_token_size=8192,
+ func=lambda texts: ollama_embed(
+ texts, embed_model="nomic-embed-text", host="http://localhost:11434"
+ ),
),
- ),
- graph_storage="AGEStorage",
-)
+ graph_storage="AGEStorage",
+ )
-with open("./book.txt", "r", encoding="utf-8") as f:
- rag.insert(f.read())
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
-# Perform naive search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
-
-# Perform local search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
-
-# Perform global search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
-
-# Perform hybrid search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
-
-# stream response
-resp = rag.query(
- "What are the top themes in this story?",
- param=QueryParam(mode="hybrid", stream=True),
-)
+ return rag
async def print_stream(stream):
@@ -74,7 +60,54 @@ async def print_stream(stream):
print(chunk, end="", flush=True)
-if inspect.isasyncgen(resp):
- asyncio.run(print_stream(resp))
-else:
- print(resp)
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
+
+ # Insert example text
+ with open("./book.txt", "r", encoding="utf-8") as f:
+ rag.insert(f.read())
+
+ # Test different query modes
+ print("\nNaive Search:")
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="naive")
+ )
+ )
+
+ print("\nLocal Search:")
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="local")
+ )
+ )
+
+ print("\nGlobal Search:")
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="global")
+ )
+ )
+
+ print("\nHybrid Search:")
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="hybrid")
+ )
+ )
+
+ # stream response
+ resp = rag.query(
+ "What are the top themes in this story?",
+ param=QueryParam(mode="hybrid", stream=True),
+ )
+
+ if inspect.isasyncgen(resp):
+ asyncio.run(print_stream(resp))
+ else:
+ print(resp)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/lightrag_ollama_demo.py b/examples/lightrag_ollama_demo.py
index 95856fa2..cf43aa4a 100644
--- a/examples/lightrag_ollama_demo.py
+++ b/examples/lightrag_ollama_demo.py
@@ -1,10 +1,14 @@
import asyncio
+import nest_asyncio
+
+nest_asyncio.apply()
import os
import inspect
import logging
from lightrag import LightRAG, QueryParam
from lightrag.llm.ollama import ollama_model_complete, ollama_embed
from lightrag.utils import EmbeddingFunc
+from lightrag.kg.shared_storage import initialize_pipeline_status
WORKING_DIR = "./dickens"
@@ -13,50 +17,31 @@ logging.basicConfig(format="%(levelname)s:%(message)s", level=logging.INFO)
if not os.path.exists(WORKING_DIR):
os.mkdir(WORKING_DIR)
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=ollama_model_complete,
- llm_model_name="gemma2:2b",
- llm_model_max_async=4,
- llm_model_max_token_size=32768,
- llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 32768}},
- embedding_func=EmbeddingFunc(
- embedding_dim=768,
- max_token_size=8192,
- func=lambda texts: ollama_embed(
- texts, embed_model="nomic-embed-text", host="http://localhost:11434"
+
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=ollama_model_complete,
+ llm_model_name="gemma2:2b",
+ llm_model_max_async=4,
+ llm_model_max_token_size=32768,
+ llm_model_kwargs={
+ "host": "http://localhost:11434",
+ "options": {"num_ctx": 32768},
+ },
+ embedding_func=EmbeddingFunc(
+ embedding_dim=768,
+ max_token_size=8192,
+ func=lambda texts: ollama_embed(
+ texts, embed_model="nomic-embed-text", host="http://localhost:11434"
+ ),
),
- ),
-)
+ )
-with open("./book.txt", "r", encoding="utf-8") as f:
- rag.insert(f.read())
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
-# Perform naive search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
-
-# Perform local search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
-
-# Perform global search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
-
-# Perform hybrid search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
-
-# stream response
-resp = rag.query(
- "What are the top themes in this story?",
- param=QueryParam(mode="hybrid", stream=True),
-)
+ return rag
async def print_stream(stream):
@@ -64,7 +49,54 @@ async def print_stream(stream):
print(chunk, end="", flush=True)
-if inspect.isasyncgen(resp):
- asyncio.run(print_stream(resp))
-else:
- print(resp)
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
+
+ # Insert example text
+ with open("./book.txt", "r", encoding="utf-8") as f:
+ rag.insert(f.read())
+
+ # Test different query modes
+ print("\nNaive Search:")
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="naive")
+ )
+ )
+
+ print("\nLocal Search:")
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="local")
+ )
+ )
+
+ print("\nGlobal Search:")
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="global")
+ )
+ )
+
+ print("\nHybrid Search:")
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="hybrid")
+ )
+ )
+
+ # stream response
+ resp = rag.query(
+ "What are the top themes in this story?",
+ param=QueryParam(mode="hybrid", stream=True),
+ )
+
+ if inspect.isasyncgen(resp):
+ asyncio.run(print_stream(resp))
+ else:
+ print(resp)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/lightrag_ollama_gremlin_demo.py b/examples/lightrag_ollama_gremlin_demo.py
index fa7d4fb5..893b5606 100644
--- a/examples/lightrag_ollama_gremlin_demo.py
+++ b/examples/lightrag_ollama_gremlin_demo.py
@@ -12,6 +12,7 @@ import os
from lightrag import LightRAG, QueryParam
from lightrag.llm.ollama import ollama_embed, ollama_model_complete
from lightrag.utils import EmbeddingFunc
+from lightrag.kg.shared_storage import initialize_pipeline_status
WORKING_DIR = "./dickens_gremlin"
@@ -31,51 +32,32 @@ os.environ["GREMLIN_TRAVERSE_SOURCE"] = "g"
os.environ["GREMLIN_USER"] = ""
os.environ["GREMLIN_PASSWORD"] = ""
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=ollama_model_complete,
- llm_model_name="llama3.1:8b",
- llm_model_max_async=4,
- llm_model_max_token_size=32768,
- llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 32768}},
- embedding_func=EmbeddingFunc(
- embedding_dim=768,
- max_token_size=8192,
- func=lambda texts: ollama_embed(
- texts, embed_model="nomic-embed-text", host="http://localhost:11434"
+
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=ollama_model_complete,
+ llm_model_name="llama3.1:8b",
+ llm_model_max_async=4,
+ llm_model_max_token_size=32768,
+ llm_model_kwargs={
+ "host": "http://localhost:11434",
+ "options": {"num_ctx": 32768},
+ },
+ embedding_func=EmbeddingFunc(
+ embedding_dim=768,
+ max_token_size=8192,
+ func=lambda texts: ollama_embed(
+ texts, embed_model="nomic-embed-text", host="http://localhost:11434"
+ ),
),
- ),
- graph_storage="GremlinStorage",
-)
+ graph_storage="GremlinStorage",
+ )
-with open("./book.txt", "r", encoding="utf-8") as f:
- rag.insert(f.read())
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
-# Perform naive search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
-
-# Perform local search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
-
-# Perform global search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
-
-# Perform hybrid search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
-
-# stream response
-resp = rag.query(
- "What are the top themes in this story?",
- param=QueryParam(mode="hybrid", stream=True),
-)
+ return rag
async def print_stream(stream):
@@ -83,7 +65,54 @@ async def print_stream(stream):
print(chunk, end="", flush=True)
-if inspect.isasyncgen(resp):
- asyncio.run(print_stream(resp))
-else:
- print(resp)
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
+
+ # Insert example text
+ with open("./book.txt", "r", encoding="utf-8") as f:
+ rag.insert(f.read())
+
+ # Test different query modes
+ print("\nNaive Search:")
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="naive")
+ )
+ )
+
+ print("\nLocal Search:")
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="local")
+ )
+ )
+
+ print("\nGlobal Search:")
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="global")
+ )
+ )
+
+ print("\nHybrid Search:")
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="hybrid")
+ )
+ )
+
+ # stream response
+ resp = rag.query(
+ "What are the top themes in this story?",
+ param=QueryParam(mode="hybrid", stream=True),
+ )
+
+ if inspect.isasyncgen(resp):
+ asyncio.run(print_stream(resp))
+ else:
+ print(resp)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/lightrag_ollama_neo4j_milvus_mongo_demo.py b/examples/lightrag_ollama_neo4j_milvus_mongo_demo.py
index 7e2d1026..b6cc931c 100644
--- a/examples/lightrag_ollama_neo4j_milvus_mongo_demo.py
+++ b/examples/lightrag_ollama_neo4j_milvus_mongo_demo.py
@@ -2,6 +2,11 @@ import os
from lightrag import LightRAG, QueryParam
from lightrag.llm.ollama import ollama_model_complete, ollama_embed
from lightrag.utils import EmbeddingFunc
+import asyncio
+import nest_asyncio
+
+nest_asyncio.apply()
+from lightrag.kg.shared_storage import initialize_pipeline_status
# WorkingDir
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
@@ -28,29 +33,72 @@ os.environ["MILVUS_PASSWORD"] = "root"
os.environ["MILVUS_DB_NAME"] = "lightrag"
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=ollama_model_complete,
- llm_model_name="qwen2.5:14b",
- llm_model_max_async=4,
- llm_model_max_token_size=32768,
- llm_model_kwargs={"host": "http://127.0.0.1:11434", "options": {"num_ctx": 32768}},
- embedding_func=EmbeddingFunc(
- embedding_dim=1024,
- max_token_size=8192,
- func=lambda texts: ollama_embed(
- texts=texts, embed_model="bge-m3:latest", host="http://127.0.0.1:11434"
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=ollama_model_complete,
+ llm_model_name="qwen2.5:14b",
+ llm_model_max_async=4,
+ llm_model_max_token_size=32768,
+ llm_model_kwargs={
+ "host": "http://127.0.0.1:11434",
+ "options": {"num_ctx": 32768},
+ },
+ embedding_func=EmbeddingFunc(
+ embedding_dim=1024,
+ max_token_size=8192,
+ func=lambda texts: ollama_embed(
+ texts=texts, embed_model="bge-m3:latest", host="http://127.0.0.1:11434"
+ ),
),
- ),
- kv_storage="MongoKVStorage",
- graph_storage="Neo4JStorage",
- vector_storage="MilvusVectorDBStorage",
-)
+ kv_storage="MongoKVStorage",
+ graph_storage="Neo4JStorage",
+ vector_storage="MilvusVectorDBStorage",
+ )
-file = "./book.txt"
-with open(file, "r") as f:
- rag.insert(f.read())
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
+ return rag
+
+
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
+
+ # Insert example text
+ with open("./book.txt", "r", encoding="utf-8") as f:
+ rag.insert(f.read())
+
+ # Test different query modes
+ print("\nNaive Search:")
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="naive")
+ )
+ )
+
+ print("\nLocal Search:")
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="local")
+ )
+ )
+
+ print("\nGlobal Search:")
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="global")
+ )
+ )
+
+ print("\nHybrid Search:")
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="hybrid")
+ )
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/lightrag_openai_compatible_demo.py b/examples/lightrag_openai_compatible_demo.py
index 09673dd8..1c4a7a92 100644
--- a/examples/lightrag_openai_compatible_demo.py
+++ b/examples/lightrag_openai_compatible_demo.py
@@ -4,6 +4,7 @@ from lightrag import LightRAG, QueryParam
from lightrag.llm.openai import openai_complete_if_cache, openai_embed
from lightrag.utils import EmbeddingFunc
import numpy as np
+from lightrag.kg.shared_storage import initialize_pipeline_status
WORKING_DIR = "./dickens"
@@ -53,20 +54,30 @@ async def test_funcs():
# asyncio.run(test_funcs())
+async def initialize_rag():
+ embedding_dimension = await get_embedding_dim()
+ print(f"Detected embedding dimension: {embedding_dimension}")
+
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=llm_model_func,
+ embedding_func=EmbeddingFunc(
+ embedding_dim=embedding_dimension,
+ max_token_size=8192,
+ func=embedding_func,
+ ),
+ )
+
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
+
+
async def main():
try:
- embedding_dimension = await get_embedding_dim()
- print(f"Detected embedding dimension: {embedding_dimension}")
-
- rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=embedding_dimension,
- max_token_size=8192,
- func=embedding_func,
- ),
- )
+        # Initialize RAG instance
+        rag = await initialize_rag()
with open("./book.txt", "r", encoding="utf-8") as f:
await rag.ainsert(f.read())
diff --git a/examples/lightrag_openai_compatible_demo_embedding_cache.py b/examples/lightrag_openai_compatible_demo_embedding_cache.py
index d696ce25..85408f3b 100644
--- a/examples/lightrag_openai_compatible_demo_embedding_cache.py
+++ b/examples/lightrag_openai_compatible_demo_embedding_cache.py
@@ -4,6 +4,7 @@ from lightrag import LightRAG, QueryParam
from lightrag.llm.openai import openai_complete_if_cache, openai_embed
from lightrag.utils import EmbeddingFunc
import numpy as np
+from lightrag.kg.shared_storage import initialize_pipeline_status
WORKING_DIR = "./dickens"
@@ -53,24 +54,34 @@ async def test_funcs():
# asyncio.run(test_funcs())
+async def initialize_rag():
+ embedding_dimension = await get_embedding_dim()
+ print(f"Detected embedding dimension: {embedding_dimension}")
+
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
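+        # Embedding-similarity cache: cached results are reused when similarity to a prior entry is at least 0.90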
+ embedding_cache_config={
+ "enabled": True,
+ "similarity_threshold": 0.90,
+ },
+ llm_model_func=llm_model_func,
+ embedding_func=EmbeddingFunc(
+ embedding_dim=embedding_dimension,
+ max_token_size=8192,
+ func=embedding_func,
+ ),
+ )
+
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
+
+
async def main():
try:
- embedding_dimension = await get_embedding_dim()
- print(f"Detected embedding dimension: {embedding_dimension}")
-
- rag = LightRAG(
- working_dir=WORKING_DIR,
- embedding_cache_config={
- "enabled": True,
- "similarity_threshold": 0.90,
- },
- llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=embedding_dimension,
- max_token_size=8192,
- func=embedding_func,
- ),
- )
+        # Initialize RAG instance
+        rag = await initialize_rag()
with open("./book.txt", "r", encoding="utf-8") as f:
await rag.ainsert(f.read())
diff --git a/examples/lightrag_openai_compatible_stream_demo.py b/examples/lightrag_openai_compatible_stream_demo.py
index a974ca14..ab3e73a5 100644
--- a/examples/lightrag_openai_compatible_stream_demo.py
+++ b/examples/lightrag_openai_compatible_stream_demo.py
@@ -1,9 +1,11 @@
import inspect
import os
+import asyncio
from lightrag import LightRAG
from lightrag.llm import openai_complete, openai_embed
from lightrag.utils import EmbeddingFunc, always_get_an_event_loop
from lightrag import QueryParam
+from lightrag.kg.shared_storage import initialize_pipeline_status
# WorkingDir
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
@@ -13,32 +15,32 @@ if not os.path.exists(WORKING_DIR):
print(f"WorkingDir: {WORKING_DIR}")
api_key = "empty"
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=openai_complete,
- llm_model_name="qwen2.5-14b-instruct@4bit",
- llm_model_max_async=4,
- llm_model_max_token_size=32768,
- llm_model_kwargs={"base_url": "http://127.0.0.1:1234/v1", "api_key": api_key},
- embedding_func=EmbeddingFunc(
- embedding_dim=1024,
- max_token_size=8192,
- func=lambda texts: openai_embed(
- texts=texts,
- model="text-embedding-bge-m3",
- base_url="http://127.0.0.1:1234/v1",
- api_key=api_key,
+
+
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=openai_complete,
+ llm_model_name="qwen2.5-14b-instruct@4bit",
+ llm_model_max_async=4,
+ llm_model_max_token_size=32768,
+ llm_model_kwargs={"base_url": "http://127.0.0.1:1234/v1", "api_key": api_key},
+ embedding_func=EmbeddingFunc(
+ embedding_dim=1024,
+ max_token_size=8192,
+ func=lambda texts: openai_embed(
+ texts=texts,
+ model="text-embedding-bge-m3",
+ base_url="http://127.0.0.1:1234/v1",
+ api_key=api_key,
+ ),
),
- ),
-)
+ )
-with open("./book.txt", "r", encoding="utf-8") as f:
- rag.insert(f.read())
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
-resp = rag.query(
- "What are the top themes in this story?",
- param=QueryParam(mode="hybrid", stream=True),
-)
+ return rag
async def print_stream(stream):
@@ -47,8 +49,24 @@ async def print_stream(stream):
print(chunk, end="", flush=True)
-loop = always_get_an_event_loop()
-if inspect.isasyncgen(resp):
- loop.run_until_complete(print_stream(resp))
-else:
- print(resp)
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
+
+ with open("./book.txt", "r", encoding="utf-8") as f:
+ rag.insert(f.read())
+
+ resp = rag.query(
+ "What are the top themes in this story?",
+ param=QueryParam(mode="hybrid", stream=True),
+ )
+
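+    # With stream=True the query may return an async generator; drain it on the event loop, otherwise print the string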
+ loop = always_get_an_event_loop()
+ if inspect.isasyncgen(resp):
+ loop.run_until_complete(print_stream(resp))
+ else:
+ print(resp)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/lightrag_openai_demo.py b/examples/lightrag_openai_demo.py
index c5393fc8..138b31a2 100644
--- a/examples/lightrag_openai_demo.py
+++ b/examples/lightrag_openai_demo.py
@@ -1,40 +1,64 @@
import os
-
+import asyncio
from lightrag import LightRAG, QueryParam
from lightrag.llm.openai import gpt_4o_mini_complete, openai_embed
+from lightrag.kg.shared_storage import initialize_pipeline_status
WORKING_DIR = "./dickens"
if not os.path.exists(WORKING_DIR):
os.mkdir(WORKING_DIR)
-rag = LightRAG(
- working_dir=WORKING_DIR,
- embedding_func=openai_embed,
- llm_model_func=gpt_4o_mini_complete,
- # llm_model_func=gpt_4o_complete
-)
+
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ embedding_func=openai_embed,
+ llm_model_func=gpt_4o_mini_complete,
+ # llm_model_func=gpt_4o_complete
+ )
+
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
-with open("./book.txt", "r", encoding="utf-8") as f:
- rag.insert(f.read())
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
-# Perform naive search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
+ with open("./book.txt", "r", encoding="utf-8") as f:
+ rag.insert(f.read())
-# Perform local search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
+ # Perform naive search
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="naive")
+ )
+ )
-# Perform global search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
+ # Perform local search
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="local")
+ )
+ )
-# Perform hybrid search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
+ # Perform global search
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="global")
+ )
+ )
+
+ # Perform hybrid search
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="hybrid")
+ )
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/lightrag_openai_mongodb_graph_demo.py b/examples/lightrag_openai_mongodb_graph_demo.py
index 775eb296..67c51892 100644
--- a/examples/lightrag_openai_mongodb_graph_demo.py
+++ b/examples/lightrag_openai_mongodb_graph_demo.py
@@ -4,6 +4,7 @@ from lightrag import LightRAG, QueryParam
from lightrag.llm.openai import gpt_4o_mini_complete, openai_embed
from lightrag.utils import EmbeddingFunc
import numpy as np
+from lightrag.kg.shared_storage import initialize_pipeline_status
#########
# Uncomment the below two lines if running in a jupyter notebook to handle the async nature of rag.insert()
@@ -52,7 +53,7 @@ async def create_embedding_function_instance():
async def initialize_rag():
embedding_func_instance = await create_embedding_function_instance()
- return LightRAG(
+ rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=gpt_4o_mini_complete,
embedding_func=embedding_func_instance,
@@ -60,14 +61,47 @@ async def initialize_rag():
log_level="DEBUG",
)
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
-# Run the initialization
-rag = asyncio.run(initialize_rag())
+ return rag
-with open("book.txt", "r", encoding="utf-8") as f:
- rag.insert(f.read())
-# Perform naive search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
+
+ with open("./book.txt", "r", encoding="utf-8") as f:
+ rag.insert(f.read())
+
+ # Perform naive search
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="naive")
+ )
+ )
+
+ # Perform local search
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="local")
+ )
+ )
+
+ # Perform global search
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="global")
+ )
+ )
+
+ # Perform hybrid search
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="hybrid")
+ )
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/lightrag_openai_neo4j_milvus_redis_demo.py b/examples/lightrag_openai_neo4j_milvus_redis_demo.py
index da5c5a8f..88a61246 100644
--- a/examples/lightrag_openai_neo4j_milvus_redis_demo.py
+++ b/examples/lightrag_openai_neo4j_milvus_redis_demo.py
@@ -1,7 +1,9 @@
import os
+import asyncio
from lightrag import LightRAG, QueryParam
from lightrag.llm.ollama import ollama_embed, openai_complete_if_cache
from lightrag.utils import EmbeddingFunc
+from lightrag.kg.shared_storage import initialize_pipeline_status
# WorkingDir
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
@@ -49,22 +51,62 @@ embedding_func = EmbeddingFunc(
),
)
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=llm_model_func,
- llm_model_max_token_size=32768,
- embedding_func=embedding_func,
- chunk_token_size=512,
- chunk_overlap_token_size=256,
- kv_storage="RedisKVStorage",
- graph_storage="Neo4JStorage",
- vector_storage="MilvusVectorDBStorage",
- doc_status_storage="RedisKVStorage",
-)
-file = "../book.txt"
-with open(file, "r", encoding="utf-8") as f:
- rag.insert(f.read())
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=llm_model_func,
+ llm_model_max_token_size=32768,
+ embedding_func=embedding_func,
+ chunk_token_size=512,
+ chunk_overlap_token_size=256,
+ kv_storage="RedisKVStorage",
+ graph_storage="Neo4JStorage",
+ vector_storage="MilvusVectorDBStorage",
+ doc_status_storage="RedisKVStorage",
+ )
+
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
-print(rag.query("谁会3D建模 ?", param=QueryParam(mode="mix")))
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
+
+ with open("./book.txt", "r", encoding="utf-8") as f:
+ rag.insert(f.read())
+
+ # Perform naive search
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="naive")
+ )
+ )
+
+ # Perform local search
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="local")
+ )
+ )
+
+ # Perform global search
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="global")
+ )
+ )
+
+ # Perform hybrid search
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="hybrid")
+ )
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/lightrag_oracle_demo.py b/examples/lightrag_oracle_demo.py
index c6121840..420f1af0 100644
--- a/examples/lightrag_oracle_demo.py
+++ b/examples/lightrag_oracle_demo.py
@@ -6,6 +6,7 @@ from lightrag import LightRAG, QueryParam
from lightrag.llm.openai import openai_complete_if_cache, openai_embed
from lightrag.utils import EmbeddingFunc
import numpy as np
+from lightrag.kg.shared_storage import initialize_pipeline_status
print(os.getcwd())
script_directory = Path(__file__).resolve().parent.parent
@@ -64,40 +65,49 @@ async def get_embedding_dim():
return embedding_dim
+async def initialize_rag():
+ # Detect embedding dimension
+ embedding_dimension = await get_embedding_dim()
+ print(f"Detected embedding dimension: {embedding_dimension}")
+
+ # Initialize LightRAG
+ # We use Oracle DB as the KV/vector/graph storage
+    # You can add `addon_params={"example_number": 1, "language": "Simplified Chinese"}` to control the prompt
+ rag = LightRAG(
+ # log_level="DEBUG",
+ working_dir=WORKING_DIR,
+ entity_extract_max_gleaning=1,
+ enable_llm_cache=True,
+ enable_llm_cache_for_entity_extract=True,
+ embedding_cache_config=None, # {"enabled": True,"similarity_threshold": 0.90},
+ chunk_token_size=CHUNK_TOKEN_SIZE,
+ llm_model_max_token_size=MAX_TOKENS,
+ llm_model_func=llm_model_func,
+ embedding_func=EmbeddingFunc(
+ embedding_dim=embedding_dimension,
+ max_token_size=500,
+ func=embedding_func,
+ ),
+ graph_storage="OracleGraphStorage",
+ kv_storage="OracleKVStorage",
+ vector_storage="OracleVectorDBStorage",
+ addon_params={
+ "example_number": 1,
+ "language": "Simplfied Chinese",
+ "entity_types": ["organization", "person", "geo", "event"],
+ "insert_batch_size": 2,
+ },
+ )
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
+
+
async def main():
try:
- # Detect embedding dimension
- embedding_dimension = await get_embedding_dim()
- print(f"Detected embedding dimension: {embedding_dimension}")
-
- # Initialize LightRAG
- # We use Oracle DB as the KV/vector/graph storage
- # You can add `addon_params={"example_number": 1, "language": "Simplfied Chinese"}` to control the prompt
- rag = LightRAG(
- # log_level="DEBUG",
- working_dir=WORKING_DIR,
- entity_extract_max_gleaning=1,
- enable_llm_cache=True,
- enable_llm_cache_for_entity_extract=True,
- embedding_cache_config=None, # {"enabled": True,"similarity_threshold": 0.90},
- chunk_token_size=CHUNK_TOKEN_SIZE,
- llm_model_max_token_size=MAX_TOKENS,
- llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=embedding_dimension,
- max_token_size=500,
- func=embedding_func,
- ),
- graph_storage="OracleGraphStorage",
- kv_storage="OracleKVStorage",
- vector_storage="OracleVectorDBStorage",
- addon_params={
- "example_number": 1,
- "language": "Simplfied Chinese",
- "entity_types": ["organization", "person", "geo", "event"],
- "insert_batch_size": 2,
- },
- )
+        # Initialize RAG instance
+        rag = await initialize_rag()
# Extract and Insert into LightRAG storage
with open(WORKING_DIR + "/docs.txt", "r", encoding="utf-8") as f:
diff --git a/examples/lightrag_siliconcloud_demo.py b/examples/lightrag_siliconcloud_demo.py
index 5f4f86a1..7a414aca 100644
--- a/examples/lightrag_siliconcloud_demo.py
+++ b/examples/lightrag_siliconcloud_demo.py
@@ -5,6 +5,7 @@ from lightrag.llm.openai import openai_complete_if_cache
from lightrag.llm.siliconcloud import siliconcloud_embedding
from lightrag.utils import EmbeddingFunc
import numpy as np
+from lightrag.kg.shared_storage import initialize_pipeline_status
WORKING_DIR = "./dickens"
@@ -47,34 +48,56 @@ async def test_funcs():
asyncio.run(test_funcs())
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=768, max_token_size=512, func=embedding_func
- ),
-)
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=llm_model_func,
+ embedding_func=EmbeddingFunc(
+ embedding_dim=768, max_token_size=512, func=embedding_func
+ ),
+ )
+
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
-with open("./book.txt") as f:
- rag.insert(f.read())
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
-# Perform naive search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
+ with open("./book.txt", "r", encoding="utf-8") as f:
+ rag.insert(f.read())
-# Perform local search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
+ # Perform naive search
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="naive")
+ )
+ )
-# Perform global search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
+ # Perform local search
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="local")
+ )
+ )
-# Perform hybrid search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
+ # Perform global search
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="global")
+ )
+ )
+
+ # Perform hybrid search
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="hybrid")
+ )
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/lightrag_tidb_demo.py b/examples/lightrag_tidb_demo.py
index f2ee9ad8..f167e9cc 100644
--- a/examples/lightrag_tidb_demo.py
+++ b/examples/lightrag_tidb_demo.py
@@ -6,6 +6,7 @@ import numpy as np
from lightrag import LightRAG, QueryParam
from lightrag.llm import siliconcloud_embedding, openai_complete_if_cache
from lightrag.utils import EmbeddingFunc
+from lightrag.kg.shared_storage import initialize_pipeline_status
WORKING_DIR = "./dickens"
@@ -55,32 +56,41 @@ async def get_embedding_dim():
return embedding_dim
+async def initialize_rag():
+ # Detect embedding dimension
+ embedding_dimension = await get_embedding_dim()
+ print(f"Detected embedding dimension: {embedding_dimension}")
+
+ # Initialize LightRAG
+    # We use TiDB as the KV/vector/graph storage
+ rag = LightRAG(
+ enable_llm_cache=False,
+ working_dir=WORKING_DIR,
+ chunk_token_size=512,
+ llm_model_func=llm_model_func,
+ embedding_func=EmbeddingFunc(
+ embedding_dim=embedding_dimension,
+ max_token_size=512,
+ func=embedding_func,
+ ),
+ kv_storage="TiDBKVStorage",
+ vector_storage="TiDBVectorDBStorage",
+ graph_storage="TiDBGraphStorage",
+ )
+
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
+
+
async def main():
try:
- # Detect embedding dimension
- embedding_dimension = await get_embedding_dim()
- print(f"Detected embedding dimension: {embedding_dimension}")
+        # Initialize RAG instance
+        rag = await initialize_rag()
- # Initialize LightRAG
- # We use TiDB DB as the KV/vector
- rag = LightRAG(
- enable_llm_cache=False,
- working_dir=WORKING_DIR,
- chunk_token_size=512,
- llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=embedding_dimension,
- max_token_size=512,
- func=embedding_func,
- ),
- kv_storage="TiDBKVStorage",
- vector_storage="TiDBVectorDBStorage",
- graph_storage="TiDBGraphStorage",
- )
-
- # Extract and Insert into LightRAG storage
- with open("./dickens/demo.txt", "r", encoding="utf-8") as f:
- await rag.ainsert(f.read())
+ with open("./book.txt", "r", encoding="utf-8") as f:
+            await rag.ainsert(f.read())
# Perform search in different modes
modes = ["naive", "local", "global", "hybrid"]
diff --git a/examples/lightrag_zhipu_demo.py b/examples/lightrag_zhipu_demo.py
index 97a5042e..fdc37c9c 100644
--- a/examples/lightrag_zhipu_demo.py
+++ b/examples/lightrag_zhipu_demo.py
@@ -1,10 +1,12 @@
import os
import logging
+import asyncio
from lightrag import LightRAG, QueryParam
from lightrag.llm.zhipu import zhipu_complete, zhipu_embedding
from lightrag.utils import EmbeddingFunc
+from lightrag.kg.shared_storage import initialize_pipeline_status
WORKING_DIR = "./dickens"
@@ -18,38 +20,61 @@ if api_key is None:
raise Exception("Please set ZHIPU_API_KEY in your environment")
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=zhipu_complete,
- llm_model_name="glm-4-flashx", # Using the most cost/performance balance model, but you can change it here.
- llm_model_max_async=4,
- llm_model_max_token_size=32768,
- embedding_func=EmbeddingFunc(
- embedding_dim=2048, # Zhipu embedding-3 dimension
- max_token_size=8192,
- func=lambda texts: zhipu_embedding(texts),
- ),
-)
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=zhipu_complete,
+ llm_model_name="glm-4-flashx", # Using the most cost/performance balance model, but you can change it here.
+ llm_model_max_async=4,
+ llm_model_max_token_size=32768,
+ embedding_func=EmbeddingFunc(
+ embedding_dim=2048, # Zhipu embedding-3 dimension
+ max_token_size=8192,
+ func=lambda texts: zhipu_embedding(texts),
+ ),
+ )
-with open("./book.txt", "r", encoding="utf-8") as f:
- rag.insert(f.read())
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
-# Perform naive search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
+ return rag
-# Perform local search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
-# Perform global search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
-# Perform hybrid search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
+ with open("./book.txt", "r", encoding="utf-8") as f:
+ rag.insert(f.read())
+
+ # Perform naive search
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="naive")
+ )
+ )
+
+ # Perform local search
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="local")
+ )
+ )
+
+ # Perform global search
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="global")
+ )
+ )
+
+ # Perform hybrid search
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="hybrid")
+ )
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/lightrag_zhipu_postgres_demo.py b/examples/lightrag_zhipu_postgres_demo.py
index 8f40690e..304c5f2c 100644
--- a/examples/lightrag_zhipu_postgres_demo.py
+++ b/examples/lightrag_zhipu_postgres_demo.py
@@ -8,6 +8,7 @@ from lightrag import LightRAG, QueryParam
from lightrag.llm.zhipu import zhipu_complete
from lightrag.llm.ollama import ollama_embedding
from lightrag.utils import EmbeddingFunc
+from lightrag.kg.shared_storage import initialize_pipeline_status
load_dotenv()
ROOT_DIR = os.environ.get("ROOT_DIR")
@@ -28,7 +29,7 @@ os.environ["POSTGRES_PASSWORD"] = "rag"
os.environ["POSTGRES_DATABASE"] = "rag"
-async def main():
+async def initialize_rag():
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=zhipu_complete,
@@ -50,9 +51,18 @@ async def main():
auto_manage_storages_states=False,
)
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
+
+
+async def main():
+    # Initialize RAG instance
+    rag = await initialize_rag()
+
# add embedding_func for graph database, it's deleted in commit 5661d76860436f7bf5aef2e50d9ee4a59660146c
rag.chunk_entity_relation_graph.embedding_func = rag.embedding_func
- await rag.initialize_storages()
with open(f"{ROOT_DIR}/book.txt", "r", encoding="utf-8") as f:
await rag.ainsert(f.read())
diff --git a/examples/query_keyword_separation_example.py b/examples/query_keyword_separation_example.py
index f11ce8c1..cbfdd930 100644
--- a/examples/query_keyword_separation_example.py
+++ b/examples/query_keyword_separation_example.py
@@ -6,6 +6,7 @@ import numpy as np
from dotenv import load_dotenv
import logging
from openai import AzureOpenAI
+from lightrag.kg.shared_storage import initialize_pipeline_status
logging.basicConfig(level=logging.INFO)
@@ -80,24 +81,33 @@ asyncio.run(test_funcs())
embedding_dimension = 3072
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=embedding_dimension,
- max_token_size=8192,
- func=embedding_func,
- ),
-)
-book1 = open("./book_1.txt", encoding="utf-8")
-book2 = open("./book_2.txt", encoding="utf-8")
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=llm_model_func,
+ embedding_func=EmbeddingFunc(
+ embedding_dim=embedding_dimension,
+ max_token_size=8192,
+ func=embedding_func,
+ ),
+ )
-rag.insert([book1.read(), book2.read()])
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
# Example function demonstrating the new query_with_separate_keyword_extraction usage
async def run_example():
+    # Initialize RAG instance
+    rag = await initialize_rag()
+
+ book1 = open("./book_1.txt", encoding="utf-8")
+ book2 = open("./book_2.txt", encoding="utf-8")
+
+    await rag.ainsert([book1.read(), book2.read()])
query = "What are the top themes in this story?"
prompt = "Please simplify the response for a young audience."
diff --git a/examples/test.py b/examples/test.py
index 67ee22eb..f2456436 100644
--- a/examples/test.py
+++ b/examples/test.py
@@ -1,6 +1,8 @@
import os
+import asyncio
from lightrag import LightRAG, QueryParam
from lightrag.llm.openai import gpt_4o_mini_complete
+from lightrag.kg.shared_storage import initialize_pipeline_status
#########
# Uncomment the below two lines if running in a jupyter notebook to handle the async nature of rag.insert()
# import nest_asyncio
@@ -12,31 +14,55 @@ WORKING_DIR = "./dickens"
if not os.path.exists(WORKING_DIR):
os.mkdir(WORKING_DIR)
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=gpt_4o_mini_complete, # Use gpt_4o_mini_complete LLM model
- # llm_model_func=gpt_4o_complete # Optionally, use a stronger model
-)
-with open("./dickens/book.txt", "r", encoding="utf-8") as f:
- rag.insert(f.read())
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=gpt_4o_mini_complete, # Use gpt_4o_mini_complete LLM model
+ # llm_model_func=gpt_4o_complete # Optionally, use a stronger model
+ )
-# Perform naive search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
-# Perform local search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
+ return rag
-# Perform global search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
-# Perform hybrid search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
+
+ with open("./book.txt", "r", encoding="utf-8") as f:
+ rag.insert(f.read())
+
+ # Perform naive search
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="naive")
+ )
+ )
+
+ # Perform local search
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="local")
+ )
+ )
+
+ # Perform global search
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="global")
+ )
+ )
+
+ # Perform hybrid search
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="hybrid")
+ )
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/test_chromadb.py b/examples/test_chromadb.py
index 99090a6d..e4e9b698 100644
--- a/examples/test_chromadb.py
+++ b/examples/test_chromadb.py
@@ -4,6 +4,7 @@ from lightrag import LightRAG, QueryParam
from lightrag.llm.openai import gpt_4o_mini_complete, openai_embed
from lightrag.utils import EmbeddingFunc
import numpy as np
+from lightrag.kg.shared_storage import initialize_pipeline_status
#########
# Uncomment the below two lines if running in a jupyter notebook to handle the async nature of rag.insert()
@@ -67,7 +68,7 @@ async def create_embedding_function_instance():
async def initialize_rag():
embedding_func_instance = await create_embedding_function_instance()
if CHROMADB_USE_LOCAL_PERSISTENT:
- return LightRAG(
+ rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=gpt_4o_mini_complete,
embedding_func=embedding_func_instance,
@@ -87,7 +88,7 @@ async def initialize_rag():
},
)
else:
- return LightRAG(
+ rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=gpt_4o_mini_complete,
embedding_func=embedding_func_instance,
@@ -111,29 +112,47 @@ async def initialize_rag():
},
)
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
-# Run the initialization
-rag = asyncio.run(initialize_rag())
+ return rag
-# with open("./dickens/book.txt", "r", encoding="utf-8") as f:
-# rag.insert(f.read())
-# Perform naive search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
-# Perform local search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
+ with open("./book.txt", "r", encoding="utf-8") as f:
+ rag.insert(f.read())
-# Perform global search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
+ # Perform naive search
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="naive")
+ )
+ )
-# Perform hybrid search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
+ # Perform local search
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="local")
+ )
+ )
+
+ # Perform global search
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="global")
+ )
+ )
+
+ # Perform hybrid search
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="hybrid")
+ )
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/test_faiss.py b/examples/test_faiss.py
index c3ac6f47..febdce14 100644
--- a/examples/test_faiss.py
+++ b/examples/test_faiss.py
@@ -1,5 +1,6 @@
import os
import logging
+import asyncio
import numpy as np
from dotenv import load_dotenv
@@ -8,7 +9,9 @@ from sentence_transformers import SentenceTransformer
from openai import AzureOpenAI
from lightrag import LightRAG, QueryParam
from lightrag.utils import EmbeddingFunc
+from lightrag.kg.shared_storage import initialize_pipeline_status
+WORKING_DIR = "./dickens"
# Configure Logging
logging.basicConfig(level=logging.INFO)
@@ -56,10 +59,7 @@ async def embedding_func(texts: list[str]) -> np.ndarray:
return embeddings
-def main():
- WORKING_DIR = "./dickens"
-
- # Initialize LightRAG with the LLM model function and embedding function
+async def initialize_rag():
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=llm_model_func,
@@ -74,6 +74,15 @@ def main():
},
)
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
+
+
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
# Insert the custom chunks into LightRAG
book1 = open("./book_1.txt", encoding="utf-8")
book2 = open("./book_2.txt", encoding="utf-8")
diff --git a/examples/test_neo4j.py b/examples/test_neo4j.py
index ac5f7fb7..7f620acc 100644
--- a/examples/test_neo4j.py
+++ b/examples/test_neo4j.py
@@ -1,7 +1,8 @@
import os
+import asyncio
from lightrag import LightRAG, QueryParam
from lightrag.llm.openai import gpt_4o_mini_complete
-
+from lightrag.kg.shared_storage import initialize_pipeline_status
#########
# Uncomment the below two lines if running in a jupyter notebook to handle the async nature of rag.insert()
@@ -14,33 +15,57 @@ WORKING_DIR = "./local_neo4jWorkDir"
if not os.path.exists(WORKING_DIR):
os.mkdir(WORKING_DIR)
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=gpt_4o_mini_complete, # Use gpt_4o_mini_complete LLM model
- graph_storage="Neo4JStorage",
- log_level="INFO",
- # llm_model_func=gpt_4o_complete # Optionally, use a stronger model
-)
-with open("./book.txt") as f:
- rag.insert(f.read())
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=gpt_4o_mini_complete, # Use gpt_4o_mini_complete LLM model
+ graph_storage="Neo4JStorage",
+ log_level="INFO",
+ # llm_model_func=gpt_4o_complete # Optionally, use a stronger model
+ )
-# Perform naive search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
-# Perform local search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
+ return rag
-# Perform global search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
-# Perform hybrid search
-print(
- rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
+
+ with open("./book.txt", "r", encoding="utf-8") as f:
+ rag.insert(f.read())
+
+ # Perform naive search
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="naive")
+ )
+ )
+
+ # Perform local search
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="local")
+ )
+ )
+
+ # Perform global search
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="global")
+ )
+ )
+
+ # Perform hybrid search
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="hybrid")
+ )
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/examples/test_split_by_character.ipynb b/examples/test_split_by_character.ipynb
deleted file mode 100644
index df5d938d..00000000
--- a/examples/test_split_by_character.ipynb
+++ /dev/null
@@ -1,1296 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": 1,
- "id": "4b5690db12e34685",
- "metadata": {
- "ExecuteTime": {
- "end_time": "2025-01-09T03:40:58.307102Z",
- "start_time": "2025-01-09T03:40:51.935233Z"
- }
- },
- "outputs": [],
- "source": [
- "import os\n",
- "import logging\n",
- "import numpy as np\n",
- "from lightrag import LightRAG, QueryParam\n",
- "from lightrag.llm.openai import openai_complete_if_cache, openai_embed\n",
- "from lightrag.utils import EmbeddingFunc\n",
- "import nest_asyncio"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "dd17956ec322b361",
- "metadata": {},
- "source": "#### split by character"
- },
- {
- "cell_type": "code",
- "execution_count": 3,
- "id": "8c8ee7c061bf9159",
- "metadata": {
- "ExecuteTime": {
- "end_time": "2025-01-09T03:41:13.961167Z",
- "start_time": "2025-01-09T03:41:13.958357Z"
- }
- },
- "outputs": [],
- "source": [
- "nest_asyncio.apply()\n",
- "WORKING_DIR = \"../../llm_rag/paper_db/R000088_test1\"\n",
- "logging.basicConfig(format=\"%(levelname)s:%(message)s\", level=logging.INFO)\n",
- "if not os.path.exists(WORKING_DIR):\n",
- " os.mkdir(WORKING_DIR)\n",
- "API = os.environ.get(\"DOUBAO_API_KEY\")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 4,
- "id": "a5009d16e0851dca",
- "metadata": {
- "ExecuteTime": {
- "end_time": "2025-01-09T03:41:16.862036Z",
- "start_time": "2025-01-09T03:41:16.859306Z"
- }
- },
- "outputs": [],
- "source": [
- "async def llm_model_func(\n",
- " prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs\n",
- ") -> str:\n",
- " return await openai_complete_if_cache(\n",
- " \"ep-20241218114828-2tlww\",\n",
- " prompt,\n",
- " system_prompt=system_prompt,\n",
- " history_messages=history_messages,\n",
- " api_key=API,\n",
- " base_url=\"https://ark.cn-beijing.volces.com/api/v3\",\n",
- " **kwargs,\n",
- " )\n",
- "\n",
- "\n",
- "async def embedding_func(texts: list[str]) -> np.ndarray:\n",
- " return await openai_embed(\n",
- " texts,\n",
- " model=\"ep-20241231173413-pgjmk\",\n",
- " api_key=API,\n",
- " base_url=\"https://ark.cn-beijing.volces.com/api/v3\",\n",
- " )"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 5,
- "id": "397fcad24ce4d0ed",
- "metadata": {
- "ExecuteTime": {
- "end_time": "2025-01-09T03:41:24.950307Z",
- "start_time": "2025-01-09T03:41:24.940353Z"
- }
- },
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "INFO:lightrag:Logger initialized for working directory: ../../llm_rag/paper_db/R000088_test1\n",
- "INFO:lightrag:Load KV llm_response_cache with 0 data\n",
- "INFO:lightrag:Load KV full_docs with 0 data\n",
- "INFO:lightrag:Load KV text_chunks with 0 data\n",
- "INFO:nano-vectordb:Init {'embedding_dim': 4096, 'metric': 'cosine', 'storage_file': '../../llm_rag/paper_db/R000088_test1/vdb_entities.json'} 0 data\n",
- "INFO:nano-vectordb:Init {'embedding_dim': 4096, 'metric': 'cosine', 'storage_file': '../../llm_rag/paper_db/R000088_test1/vdb_relationships.json'} 0 data\n",
- "INFO:nano-vectordb:Init {'embedding_dim': 4096, 'metric': 'cosine', 'storage_file': '../../llm_rag/paper_db/R000088_test1/vdb_chunks.json'} 0 data\n",
- "INFO:lightrag:Loaded document status storage with 0 records\n"
- ]
- }
- ],
- "source": [
- "rag = LightRAG(\n",
- " working_dir=WORKING_DIR,\n",
- " llm_model_func=llm_model_func,\n",
- " embedding_func=EmbeddingFunc(\n",
- " embedding_dim=4096, max_token_size=8192, func=embedding_func\n",
- " ),\n",
- " chunk_token_size=512,\n",
- ")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 6,
- "id": "1dc3603677f7484d",
- "metadata": {
- "ExecuteTime": {
- "end_time": "2025-01-09T03:41:37.947456Z",
- "start_time": "2025-01-09T03:41:37.941901Z"
- }
- },
- "outputs": [],
- "source": [
- "with open(\n",
- " \"../../llm_rag/example/R000088/auto/R000088_full_txt.md\", \"r\", encoding=\"utf-8\"\n",
- ") as f:\n",
- " content = f.read()\n",
- "\n",
- "\n",
- "async def embedding_func(texts: list[str]) -> np.ndarray:\n",
- " return await openai_embed(\n",
- " texts,\n",
- " model=\"ep-20241231173413-pgjmk\",\n",
- " api_key=API,\n",
- " base_url=\"https://ark.cn-beijing.volces.com/api/v3\",\n",
- " )\n",
- "\n",
- "\n",
- "async def get_embedding_dim():\n",
- " test_text = [\"This is a test sentence.\"]\n",
- " embedding = await embedding_func(test_text)\n",
- " embedding_dim = embedding.shape[1]\n",
- " return embedding_dim"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 7,
- "id": "6844202606acfbe5",
- "metadata": {
- "ExecuteTime": {
- "end_time": "2025-01-09T03:41:39.608541Z",
- "start_time": "2025-01-09T03:41:39.165057Z"
- }
- },
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n"
- ]
- }
- ],
- "source": [
- "embedding_dimension = await get_embedding_dim()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 8,
- "id": "d6273839d9681403",
- "metadata": {
- "ExecuteTime": {
- "end_time": "2025-01-09T03:44:34.295345Z",
- "start_time": "2025-01-09T03:41:48.324171Z"
- }
- },
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "INFO:lightrag:Processing 1 new unique documents\n",
- "Processing batch 1: 0%| | 0/1 [00:00, ?it/s]INFO:lightrag:Inserting 35 vectors to chunks\n",
- "\n",
- "Generating embeddings: 0%| | 0/2 [00:00, ?batch/s]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "\n",
- "Generating embeddings: 50%|█████ | 1/2 [00:00<00:00, 1.36batch/s]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "\n",
- "Generating embeddings: 100%|██████████| 2/2 [00:04<00:00, 2.25s/batch]\u001b[A\n",
- "\n",
- "Extracting entities from chunks: 0%| | 0/35 [00:00, ?chunk/s]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠙ Processed 1 chunks, 1 entities(duplicated), 0 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 3%|▎ | 1/35 [00:04<02:47, 4.93s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠹ Processed 2 chunks, 2 entities(duplicated), 0 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 6%|▌ | 2/35 [00:05<01:18, 2.37s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠸ Processed 3 chunks, 9 entities(duplicated), 5 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 9%|▊ | 3/35 [00:26<05:43, 10.73s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠼ Processed 4 chunks, 16 entities(duplicated), 11 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 11%|█▏ | 4/35 [00:26<03:24, 6.60s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠴ Processed 5 chunks, 24 entities(duplicated), 18 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 14%|█▍ | 5/35 [00:33<03:24, 6.82s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠦ Processed 6 chunks, 35 entities(duplicated), 28 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 17%|█▋ | 6/35 [00:42<03:38, 7.53s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠧ Processed 7 chunks, 47 entities(duplicated), 36 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 20%|██ | 7/35 [00:43<02:28, 5.31s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠇ Processed 8 chunks, 61 entities(duplicated), 49 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 23%|██▎ | 8/35 [00:45<01:52, 4.16s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠏ Processed 9 chunks, 81 entities(duplicated), 49 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠋ Processed 10 chunks, 90 entities(duplicated), 62 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 29%|██▊ | 10/35 [00:46<01:06, 2.64s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠙ Processed 11 chunks, 101 entities(duplicated), 80 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 31%|███▏ | 11/35 [00:52<01:19, 3.31s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠹ Processed 12 chunks, 108 entities(duplicated), 85 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 34%|███▍ | 12/35 [00:54<01:11, 3.12s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠸ Processed 13 chunks, 120 entities(duplicated), 100 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 37%|███▋ | 13/35 [00:59<01:18, 3.55s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠼ Processed 14 chunks, 131 entities(duplicated), 110 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 40%|████ | 14/35 [01:00<00:59, 2.82s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠴ Processed 15 chunks, 143 entities(duplicated), 110 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 43%|████▎ | 15/35 [01:02<00:52, 2.64s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠦ Processed 16 chunks, 162 entities(duplicated), 124 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 46%|████▌ | 16/35 [01:05<00:53, 2.80s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠧ Processed 17 chunks, 174 entities(duplicated), 132 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 49%|████▊ | 17/35 [01:06<00:39, 2.19s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠇ Processed 18 chunks, 185 entities(duplicated), 137 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 51%|█████▏ | 18/35 [01:12<00:53, 3.15s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠏ Processed 19 chunks, 193 entities(duplicated), 149 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 54%|█████▍ | 19/35 [01:18<01:06, 4.14s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠋ Processed 20 chunks, 205 entities(duplicated), 158 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 57%|█████▋ | 20/35 [01:19<00:50, 3.35s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠙ Processed 21 chunks, 220 entities(duplicated), 187 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 60%|██████ | 21/35 [01:27<01:02, 4.47s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠹ Processed 22 chunks, 247 entities(duplicated), 216 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 63%|██████▎ | 22/35 [01:30<00:54, 4.16s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠸ Processed 23 chunks, 260 entities(duplicated), 230 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 66%|██████▌ | 23/35 [01:34<00:48, 4.05s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠼ Processed 24 chunks, 291 entities(duplicated), 253 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 69%|██████▊ | 24/35 [01:38<00:44, 4.03s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠴ Processed 25 chunks, 304 entities(duplicated), 262 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 71%|███████▏ | 25/35 [01:41<00:36, 3.67s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠦ Processed 26 chunks, 313 entities(duplicated), 271 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 74%|███████▍ | 26/35 [01:41<00:24, 2.76s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠧ Processed 27 chunks, 321 entities(duplicated), 283 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 77%|███████▋ | 27/35 [01:47<00:28, 3.52s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠇ Processed 28 chunks, 333 entities(duplicated), 290 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 80%|████████ | 28/35 [01:52<00:28, 4.08s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠏ Processed 29 chunks, 348 entities(duplicated), 307 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 83%|████████▎ | 29/35 [01:59<00:29, 4.88s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠋ Processed 30 chunks, 362 entities(duplicated), 329 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 86%|████████▌ | 30/35 [02:02<00:21, 4.29s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠙ Processed 31 chunks, 373 entities(duplicated), 337 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 89%|████████▊ | 31/35 [02:03<00:13, 3.28s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠹ Processed 32 chunks, 390 entities(duplicated), 369 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 91%|█████████▏| 32/35 [02:03<00:07, 2.55s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠸ Processed 33 chunks, 405 entities(duplicated), 378 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 94%|█████████▍| 33/35 [02:07<00:05, 2.84s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠼ Processed 34 chunks, 435 entities(duplicated), 395 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 97%|█████████▋| 34/35 [02:10<00:02, 2.94s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠴ Processed 35 chunks, 456 entities(duplicated), 440 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 100%|██████████| 35/35 [02:23<00:00, 4.10s/chunk]\u001b[A\n",
- "INFO:lightrag:Inserting entities into storage...\n",
- "\n",
- "Inserting entities: 100%|██████████| 324/324 [00:00<00:00, 17456.96entity/s]\n",
- "INFO:lightrag:Inserting relationships into storage...\n",
- "\n",
- "Inserting relationships: 100%|██████████| 427/427 [00:00<00:00, 29956.31relationship/s]\n",
- "INFO:lightrag:Inserting 324 vectors to entities\n",
- "\n",
- "Generating embeddings: 0%| | 0/11 [00:00, ?batch/s]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "\n",
- "Generating embeddings: 9%|▉ | 1/11 [00:00<00:06, 1.48batch/s]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "\n",
- "Generating embeddings: 18%|█▊ | 2/11 [00:02<00:11, 1.25s/batch]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "\n",
- "Generating embeddings: 27%|██▋ | 3/11 [00:02<00:06, 1.17batch/s]\u001b[A\n",
- "Generating embeddings: 36%|███▋ | 4/11 [00:03<00:04, 1.50batch/s]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "\n",
- "Generating embeddings: 45%|████▌ | 5/11 [00:03<00:03, 1.78batch/s]\u001b[A\n",
- "Generating embeddings: 55%|█████▍ | 6/11 [00:03<00:02, 2.01batch/s]\u001b[A\n",
- "Generating embeddings: 64%|██████▎ | 7/11 [00:04<00:01, 2.19batch/s]\u001b[A\n",
- "Generating embeddings: 73%|███████▎ | 8/11 [00:04<00:01, 2.31batch/s]\u001b[A\n",
- "Generating embeddings: 82%|████████▏ | 9/11 [00:04<00:00, 2.41batch/s]\u001b[A\n",
- "Generating embeddings: 91%|█████████ | 10/11 [00:05<00:00, 2.48batch/s]\u001b[A\n",
- "Generating embeddings: 100%|██████████| 11/11 [00:05<00:00, 1.91batch/s]\u001b[A\n",
- "INFO:lightrag:Inserting 427 vectors to relationships\n",
- "\n",
- "Generating embeddings: 0%| | 0/14 [00:00, ?batch/s]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "\n",
- "Generating embeddings: 7%|▋ | 1/14 [00:01<00:14, 1.11s/batch]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "\n",
- "Generating embeddings: 14%|█▍ | 2/14 [00:02<00:14, 1.18s/batch]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "\n",
- "Generating embeddings: 21%|██▏ | 3/14 [00:02<00:08, 1.23batch/s]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "\n",
- "Generating embeddings: 29%|██▊ | 4/14 [00:03<00:06, 1.56batch/s]\u001b[A\n",
- "Generating embeddings: 36%|███▌ | 5/14 [00:03<00:04, 1.85batch/s]\u001b[A\n",
- "Generating embeddings: 43%|████▎ | 6/14 [00:03<00:03, 2.05batch/s]\u001b[A\n",
- "Generating embeddings: 50%|█████ | 7/14 [00:04<00:03, 2.23batch/s]\u001b[A\n",
- "Generating embeddings: 57%|█████▋ | 8/14 [00:04<00:02, 2.37batch/s]\u001b[A\n",
- "Generating embeddings: 64%|██████▍ | 9/14 [00:04<00:02, 2.46batch/s]\u001b[A\n",
- "Generating embeddings: 71%|███████▏ | 10/14 [00:05<00:01, 2.54batch/s]\u001b[A\n",
- "Generating embeddings: 79%|███████▊ | 11/14 [00:05<00:01, 2.59batch/s]\u001b[A\n",
- "Generating embeddings: 86%|████████▌ | 12/14 [00:06<00:00, 2.64batch/s]\u001b[A\n",
- "Generating embeddings: 93%|█████████▎| 13/14 [00:06<00:00, 2.65batch/s]\u001b[A\n",
- "Generating embeddings: 100%|██████████| 14/14 [00:06<00:00, 2.05batch/s]\u001b[A\n",
- "INFO:lightrag:Writing graph with 333 nodes, 427 edges\n",
- "Processing batch 1: 100%|██████████| 1/1 [02:45<00:00, 165.90s/it]\n"
- ]
- }
- ],
- "source": [
- "# rag.insert(content)\n",
- "rag.insert(content, split_by_character=\"\\n#\")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 9,
- "id": "c4f9ae517151a01d",
- "metadata": {
- "ExecuteTime": {
- "end_time": "2025-01-09T03:45:11.668987Z",
- "start_time": "2025-01-09T03:45:11.664744Z"
- }
- },
- "outputs": [],
- "source": [
- "prompt1 = \"\"\"你是一名经验丰富的论文分析科学家,你的任务是对一篇英文学术研究论文进行关键信息提取并深入分析。\n",
- "请按照以下步骤进行分析:\n",
- "1. 该文献主要研究的问题是什么?\n",
- "2. 该文献采用什么方法进行分析?\n",
- "3. 该文献的主要结论是什么?\n",
- "首先在<分析>标签中,针对每个问题详细分析你的思考过程。然后在<回答>标签中给出所有问题的最终答案。\"\"\""
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 10,
- "id": "7a6491385b050095",
- "metadata": {
- "ExecuteTime": {
- "end_time": "2025-01-09T03:45:40.829111Z",
- "start_time": "2025-01-09T03:45:13.530298Z"
- }
- },
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "INFO:lightrag:Local query uses 5 entites, 12 relations, 3 text units\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "INFO:lightrag:Global query uses 8 entites, 5 relations, 4 text units\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "<分析>\n",
- "1. **该文献主要研究的问题是什么?**\n",
- " - 思考过程:通过浏览论文内容,查找作者明确阐述研究目的的部分。文中多处提及“Our study was performed to explore whether folic acid treatment was associated with cancer outcomes and all-cause mortality after extended follow-up”,表明作者旨在探究叶酸治疗与癌症结局及全因死亡率之间的关系,尤其是在经过长期随访后。\n",
- "2. **该文献采用什么方法进行分析?**\n",
- " - 思考过程:寻找描述研究方法和数据分析过程的段落。文中提到“Survival curves were constructed using the Kaplan-Meier method and differences in survival between groups were analyzed using the log-rank test. Estimates of hazard ratios (HRs) with 95% CIs were obtained by using Cox proportional hazards regression models stratified by trial”,可以看出作者使用了Kaplan-Meier法构建生存曲线、log-rank检验分析组间生存差异以及Cox比例风险回归模型估计风险比等方法。\n",
- "3. **该文献的主要结论是什么?**\n",
- " - 思考过程:定位到论文中总结结论的部分,如“Conclusion Treatment with folic acid plus vitamin $\\mathsf{B}_{12}$ was associated with increased cancer outcomes and all-cause mortality in patients with ischemic heart disease in Norway, where there is no folic acid fortification of foods”,可知作者得出叶酸加维生素$\\mathsf{B}_{12}$治疗与癌症结局和全因死亡率增加有关的结论。\n",
- "<回答>\n",
- "1. 该文献主要研究的问题是:叶酸治疗与癌症结局及全因死亡率之间的关系,尤其是在经过长期随访后,叶酸治疗是否与癌症结局和全因死亡率相关。\n",
- "2. 该文献采用的分析方法包括:使用Kaplan-Meier法构建生存曲线、log-rank检验分析组间生存差异、Cox比例风险回归模型估计风险比等。\n",
- "3. 该文献的主要结论是:在挪威没有叶酸强化食品的情况下,叶酸加维生素$\\mathsf{B}_{12}$治疗与缺血性心脏病患者的癌症结局和全因死亡率增加有关。\n",
- "\n",
- "**参考文献**\n",
- "- [VD] In2Norwegianhomocysteine-lowering trialsamongpatientswithischemicheart disease, there was a statistically nonsignificantincreaseincancerincidenceinthe groupsassignedtofolicacidtreatment.15,16 Our study was performed to explore whetherfolicacidtreatmentwasassociatedwithcanceroutcomesandall-cause mortality after extended follow-up.\n",
- "- [VD] Survivalcurveswereconstructedusing theKaplan-Meiermethodanddifferences insurvivalbetweengroupswereanalyzed usingthelog-ranktest.Estimatesofhazard ratios (HRs) with $95\\%$ CIs were obtainedbyusingCoxproportionalhazards regressionmodelsstratifiedbytrial.\n",
- "- [VD] Conclusion Treatment with folic acid plus vitamin $\\mathsf{B}_{12}$ was associated with increased cancer outcomes and all-cause mortality in patients with ischemic heart disease in Norway, where there is no folic acid fortification of foods.\n"
- ]
- }
- ],
- "source": [
- "resp = rag.query(prompt1, param=QueryParam(mode=\"mix\", top_k=5))\n",
- "print(resp)"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "4e5bfad24cb721a8",
- "metadata": {},
- "source": "#### split by character only"
- },
- {
- "cell_type": "code",
- "execution_count": 11,
- "id": "44e2992dc95f8ce0",
- "metadata": {
- "ExecuteTime": {
- "end_time": "2025-01-09T03:47:40.988796Z",
- "start_time": "2025-01-09T03:47:40.982648Z"
- }
- },
- "outputs": [],
- "source": [
- "WORKING_DIR = \"../../llm_rag/paper_db/R000088_test2\"\n",
- "if not os.path.exists(WORKING_DIR):\n",
- " os.mkdir(WORKING_DIR)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 12,
- "id": "62c63385d2d973d5",
- "metadata": {
- "ExecuteTime": {
- "end_time": "2025-01-09T03:51:39.951329Z",
- "start_time": "2025-01-09T03:49:15.218976Z"
- }
- },
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "INFO:lightrag:Logger initialized for working directory: ../../llm_rag/paper_db/R000088_test2\n",
- "INFO:lightrag:Load KV llm_response_cache with 0 data\n",
- "INFO:lightrag:Load KV full_docs with 0 data\n",
- "INFO:lightrag:Load KV text_chunks with 0 data\n",
- "INFO:nano-vectordb:Init {'embedding_dim': 4096, 'metric': 'cosine', 'storage_file': '../../llm_rag/paper_db/R000088_test2/vdb_entities.json'} 0 data\n",
- "INFO:nano-vectordb:Init {'embedding_dim': 4096, 'metric': 'cosine', 'storage_file': '../../llm_rag/paper_db/R000088_test2/vdb_relationships.json'} 0 data\n",
- "INFO:nano-vectordb:Init {'embedding_dim': 4096, 'metric': 'cosine', 'storage_file': '../../llm_rag/paper_db/R000088_test2/vdb_chunks.json'} 0 data\n",
- "INFO:lightrag:Loaded document status storage with 0 records\n",
- "INFO:lightrag:Processing 1 new unique documents\n",
- "Processing batch 1: 0%| | 0/1 [00:00, ?it/s]INFO:lightrag:Inserting 12 vectors to chunks\n",
- "\n",
- "Generating embeddings: 0%| | 0/1 [00:00, ?batch/s]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "\n",
- "Generating embeddings: 100%|██████████| 1/1 [00:02<00:00, 2.95s/batch]\u001b[A\n",
- "\n",
- "Extracting entities from chunks: 0%| | 0/12 [00:00, ?chunk/s]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠙ Processed 1 chunks, 0 entities(duplicated), 0 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 8%|▊ | 1/12 [00:03<00:43, 3.93s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠹ Processed 2 chunks, 8 entities(duplicated), 8 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 17%|█▋ | 2/12 [00:29<02:44, 16.46s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠸ Processed 3 chunks, 17 entities(duplicated), 15 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 25%|██▌ | 3/12 [00:30<01:25, 9.45s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠼ Processed 4 chunks, 27 entities(duplicated), 22 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 33%|███▎ | 4/12 [00:39<01:16, 9.52s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠴ Processed 5 chunks, 36 entities(duplicated), 33 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 42%|████▏ | 5/12 [00:40<00:43, 6.24s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠦ Processed 6 chunks, 49 entities(duplicated), 42 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 50%|█████ | 6/12 [00:49<00:43, 7.33s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠧ Processed 7 chunks, 62 entities(duplicated), 65 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 58%|█████▊ | 7/12 [01:05<00:50, 10.05s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠇ Processed 8 chunks, 81 entities(duplicated), 90 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 67%|██████▋ | 8/12 [01:23<00:50, 12.69s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠏ Processed 9 chunks, 99 entities(duplicated), 117 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 75%|███████▌ | 9/12 [01:32<00:34, 11.54s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠋ Processed 10 chunks, 123 entities(duplicated), 140 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 83%|████████▎ | 10/12 [01:48<00:25, 12.79s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠙ Processed 11 chunks, 158 entities(duplicated), 174 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 92%|█████████▏| 11/12 [02:03<00:13, 13.50s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "⠹ Processed 12 chunks, 194 entities(duplicated), 221 relations(duplicated)\r"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n",
- "Extracting entities from chunks: 100%|██████████| 12/12 [02:13<00:00, 11.15s/chunk]\u001b[A\n",
- "INFO:lightrag:Inserting entities into storage...\n",
- "\n",
- "Inserting entities: 100%|██████████| 170/170 [00:00<00:00, 11610.25entity/s]\n",
- "INFO:lightrag:Inserting relationships into storage...\n",
- "\n",
- "Inserting relationships: 100%|██████████| 218/218 [00:00<00:00, 15913.51relationship/s]\n",
- "INFO:lightrag:Inserting 170 vectors to entities\n",
- "\n",
- "Generating embeddings: 0%| | 0/6 [00:00, ?batch/s]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "\n",
- "Generating embeddings: 17%|█▋ | 1/6 [00:01<00:05, 1.10s/batch]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "\n",
- "Generating embeddings: 33%|███▎ | 2/6 [00:02<00:04, 1.07s/batch]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "\n",
- "Generating embeddings: 50%|█████ | 3/6 [00:02<00:02, 1.33batch/s]\u001b[A\n",
- "Generating embeddings: 67%|██████▋ | 4/6 [00:02<00:01, 1.67batch/s]\u001b[A\n",
- "Generating embeddings: 83%|████████▎ | 5/6 [00:03<00:00, 1.95batch/s]\u001b[A\n",
- "Generating embeddings: 100%|██████████| 6/6 [00:03<00:00, 1.66batch/s]\u001b[A\n",
- "INFO:lightrag:Inserting 218 vectors to relationships\n",
- "\n",
- "Generating embeddings: 0%| | 0/7 [00:00, ?batch/s]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "\n",
- "Generating embeddings: 14%|█▍ | 1/7 [00:01<00:10, 1.74s/batch]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "\n",
- "Generating embeddings: 29%|██▊ | 2/7 [00:02<00:05, 1.04s/batch]\u001b[A\n",
- "Generating embeddings: 43%|████▎ | 3/7 [00:02<00:02, 1.35batch/s]\u001b[A\n",
- "Generating embeddings: 57%|█████▋ | 4/7 [00:03<00:01, 1.69batch/s]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "\n",
- "Generating embeddings: 71%|███████▏ | 5/7 [00:03<00:01, 1.96batch/s]\u001b[A\n",
- "Generating embeddings: 86%|████████▌ | 6/7 [00:03<00:00, 2.17batch/s]\u001b[A\n",
- "Generating embeddings: 100%|██████████| 7/7 [00:04<00:00, 1.68batch/s]\u001b[A\n",
- "INFO:lightrag:Writing graph with 174 nodes, 218 edges\n",
- "Processing batch 1: 100%|██████████| 1/1 [02:24<00:00, 144.69s/it]\n"
- ]
- }
- ],
- "source": [
- "rag = LightRAG(\n",
- " working_dir=WORKING_DIR,\n",
- " llm_model_func=llm_model_func,\n",
- " embedding_func=EmbeddingFunc(\n",
- " embedding_dim=4096, max_token_size=8192, func=embedding_func\n",
- " ),\n",
- " chunk_token_size=512,\n",
- ")\n",
- "\n",
- "# rag.insert(content)\n",
- "rag.insert(content, split_by_character=\"\\n#\", split_by_character_only=True)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 13,
- "id": "3c7aa9836d8d43c7",
- "metadata": {
- "ExecuteTime": {
- "end_time": "2025-01-09T03:52:37.000418Z",
- "start_time": "2025-01-09T03:52:09.933584Z"
- }
- },
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "INFO:lightrag:Local query uses 5 entites, 3 relations, 2 text units\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
- "INFO:lightrag:Global query uses 9 entites, 5 relations, 4 text units\n",
- "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "<分析>\n",
- "- **该文献主要研究的问题是什么?**\n",
- " - **思考过程**:通过浏览论文的标题、摘要、引言等部分,寻找关于研究目的和问题的描述。论文标题为“Cancer Incidence and Mortality After Treatment With Folic Acid and Vitamin B12”,摘要中的“Objective”部分明确指出研究目的是“To evaluate effects of treatment with B vitamins on cancer outcomes and all-cause mortality in 2 randomized controlled trials”。因此,可以确定该文献主要研究的问题是评估B族维生素治疗对两项随机对照试验中癌症结局和全因死亡率的影响。\n",
- "- **该文献采用什么方法进行分析?**\n",
- " - **思考过程**:在论文的“METHODS”部分详细描述了研究方法。文中提到这是一个对两项随机、双盲、安慰剂对照临床试验(Norwegian Vitamin [NORVIT] trial和Western Norway B Vitamin Intervention Trial [WENBIT])数据的联合分析,并进行了观察性的试验后随访。具体包括对参与者进行分组干预(不同剂量的叶酸、维生素B12、维生素B6或安慰剂),收集临床信息和血样,分析循环B族维生素、同型半胱氨酸和可替宁等指标,并进行基因分型等,还涉及到多种统计分析方法,如计算预期癌症发生率、构建生存曲线、进行Cox比例风险回归模型分析等。\n",
- "- **该文献的主要结论是什么?**\n",
- " - **思考过程**:在论文的“Results”和“Conclusion”部分寻找主要结论。研究结果表明,在治疗期间,接受叶酸加维生素B12治疗的参与者血清叶酸浓度显著增加,且在后续随访中,该组癌症发病率、癌症死亡率和全因死亡率均有所上升,主要是肺癌发病率增加,而维生素B6治疗未显示出显著影响。结论部分明确指出“Treatment with folic acid plus vitamin $\\mathsf{B}_{12}$ was associated with increased cancer outcomes and all-cause mortality in patients with ischemic heart disease in Norway, where there is no folic acid fortification of foods”。\n",
- "分析>\n",
- "\n",
- "<回答>\n",
- "- **主要研究问题**:评估B族维生素治疗对两项随机对照试验中癌症结局和全因死亡率的影响。\n",
- "- **研究方法**:采用对两项随机、双盲、安慰剂对照临床试验(Norwegian Vitamin [NORVIT] trial和Western Norway B Vitamin Intervention Trial [WENBIT])数据的联合分析,并进行观察性的试验后随访,涉及分组干预、多种指标检测以及多种统计分析方法。\n",
- "- **主要结论**:在挪威(食品中未添加叶酸),对于缺血性心脏病患者,叶酸加维生素B12治疗与癌症结局和全因死亡率的增加有关,而维生素B6治疗未显示出显著影响。\n",
- "\n",
- "**参考文献**\n",
- "- [VD] Cancer Incidence and Mortality After Treatment With Folic Acid and Vitamin B12\n",
- "- [VD] METHODS Study Design, Participants, and Study Intervention\n",
- "- [VD] RESULTS\n",
- "- [VD] Conclusion\n",
- "- [VD] Objective To evaluate effects of treatment with B vitamins on cancer outcomes and all-cause mortality in 2 randomized controlled trials.\n"
- ]
- }
- ],
- "source": [
- "resp = rag.query(prompt1, param=QueryParam(mode=\"mix\", top_k=5))\n",
- "print(resp)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "7ba6fa79a2550d10",
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 2
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython2",
- "version": "2.7.6"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 5
-}
diff --git a/examples/vram_management_demo.py b/examples/vram_management_demo.py
index b8d0872e..36eb5468 100644
--- a/examples/vram_management_demo.py
+++ b/examples/vram_management_demo.py
@@ -1,8 +1,10 @@
import os
import time
+import asyncio
from lightrag import LightRAG, QueryParam
from lightrag.llm.ollama import ollama_model_complete, ollama_embed
from lightrag.utils import EmbeddingFunc
+from lightrag.kg.shared_storage import initialize_pipeline_status
# Working directory and the directory path for text files
WORKING_DIR = "./dickens"
@@ -12,17 +14,24 @@ TEXT_FILES_DIR = "/llm/mt"
if not os.path.exists(WORKING_DIR):
os.mkdir(WORKING_DIR)
-# Initialize LightRAG
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=ollama_model_complete,
- llm_model_name="qwen2.5:3b-instruct-max-context",
- embedding_func=EmbeddingFunc(
- embedding_dim=768,
- max_token_size=8192,
- func=lambda texts: ollama_embed(texts, embed_model="nomic-embed-text"),
- ),
-)
+
+async def initialize_rag():
+ # Initialize LightRAG
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=ollama_model_complete,
+ llm_model_name="qwen2.5:3b-instruct-max-context",
+ embedding_func=EmbeddingFunc(
+ embedding_dim=768,
+ max_token_size=8192,
+ func=lambda texts: ollama_embed(texts, embed_model="nomic-embed-text"),
+ ),
+ )
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
+
# Read all .txt files from the TEXT_FILES_DIR directory
texts = []
@@ -47,58 +56,66 @@ def insert_texts_with_retry(rag, texts, retries=3, delay=5):
raise RuntimeError("Failed to insert texts after multiple retries.")
-insert_texts_with_retry(rag, texts)
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
-# Perform different types of queries and handle potential errors
-try:
- print(
- rag.query(
- "What are the top themes in this story?", param=QueryParam(mode="naive")
+ insert_texts_with_retry(rag, texts)
+
+ # Perform different types of queries and handle potential errors
+ try:
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="naive")
+ )
)
- )
-except Exception as e:
- print(f"Error performing naive search: {e}")
+ except Exception as e:
+ print(f"Error performing naive search: {e}")
-try:
- print(
- rag.query(
- "What are the top themes in this story?", param=QueryParam(mode="local")
+ try:
+ print(
+ rag.query(
+ "What are the top themes in this story?", param=QueryParam(mode="local")
+ )
)
- )
-except Exception as e:
- print(f"Error performing local search: {e}")
+ except Exception as e:
+ print(f"Error performing local search: {e}")
-try:
- print(
- rag.query(
- "What are the top themes in this story?", param=QueryParam(mode="global")
+ try:
+ print(
+ rag.query(
+ "What are the top themes in this story?",
+ param=QueryParam(mode="global"),
+ )
)
- )
-except Exception as e:
- print(f"Error performing global search: {e}")
+ except Exception as e:
+ print(f"Error performing global search: {e}")
-try:
- print(
- rag.query(
- "What are the top themes in this story?", param=QueryParam(mode="hybrid")
+ try:
+ print(
+ rag.query(
+ "What are the top themes in this story?",
+ param=QueryParam(mode="hybrid"),
+ )
)
- )
-except Exception as e:
- print(f"Error performing hybrid search: {e}")
+ except Exception as e:
+ print(f"Error performing hybrid search: {e}")
+
+ # Function to clear VRAM resources
+ def clear_vram():
+ os.system("sudo nvidia-smi --gpu-reset")
+
+ # Regularly clear VRAM to prevent overflow
+ clear_vram_interval = 3600 # Clear once every hour
+ start_time = time.time()
+
+ while True:
+ current_time = time.time()
+ if current_time - start_time > clear_vram_interval:
+ clear_vram()
+ start_time = current_time
+ time.sleep(60) # Check the time every minute
-# Function to clear VRAM resources
-def clear_vram():
- os.system("sudo nvidia-smi --gpu-reset")
-
-
-# Regularly clear VRAM to prevent overflow
-clear_vram_interval = 3600 # Clear once every hour
-start_time = time.time()
-
-while True:
- current_time = time.time()
- if current_time - start_time > clear_vram_interval:
- clear_vram()
- start_time = current_time
- time.sleep(60) # Check the time every minute
+if __name__ == "__main__":
+ main()
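
The `main()` wrapper above still relies on `insert_texts_with_retry(rag, texts, retries=3, delay=5)`, whose body lies outside this hunk. The sketch below is only an illustration of what such a retry helper typically looks like; the backoff behaviour and error handling are assumptions, not the repository's exact implementation:

```python
import time


def insert_texts_with_retry(rag, texts, retries=3, delay=5):
    """Illustrative sketch: retry a bulk insert a few times before giving up."""
    for attempt in range(1, retries + 1):
        try:
            rag.insert(texts)  # LightRAG.insert also accepts a list of documents
            return
        except Exception as e:  # assumption: retry on any insertion error
            print(f"Insert attempt {attempt}/{retries} failed: {e}")
            if attempt < retries:
                time.sleep(delay)
    raise RuntimeError("Failed to insert texts after multiple retries.")
```
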
diff --git a/lightrag/lightrag.py b/lightrag/lightrag.py
index adcb1029..4dacac08 100644
--- a/lightrag/lightrag.py
+++ b/lightrag/lightrag.py
@@ -392,6 +392,7 @@ class LightRAG:
namespace=make_namespace(
self.namespace_prefix, NameSpace.KV_STORE_LLM_RESPONSE_CACHE
),
+ global_config=asdict(self),
embedding_func=self.embedding_func,
)
@@ -949,17 +950,21 @@ class LightRAG:
pipeline_status["latest_message"] = log_message
pipeline_status["history_messages"].append(log_message)
- def insert_custom_kg(self, custom_kg: dict[str, Any]) -> None:
+ def insert_custom_kg(
+        self, custom_kg: dict[str, Any], full_doc_id: str | None = None
+ ) -> None:
loop = always_get_an_event_loop()
- loop.run_until_complete(self.ainsert_custom_kg(custom_kg))
+ loop.run_until_complete(self.ainsert_custom_kg(custom_kg, full_doc_id))
- async def ainsert_custom_kg(self, custom_kg: dict[str, Any]) -> None:
+ async def ainsert_custom_kg(
+        self, custom_kg: dict[str, Any], full_doc_id: str | None = None
+ ) -> None:
update_storage = False
try:
# Insert chunks into vector storage
all_chunks_data: dict[str, dict[str, str]] = {}
chunk_to_source_map: dict[str, str] = {}
- for chunk_data in custom_kg.get("chunks", {}):
+ for chunk_data in custom_kg.get("chunks", []):
chunk_content = self.clean_text(chunk_data["content"])
source_id = chunk_data["source_id"]
tokens = len(
@@ -979,7 +984,9 @@ class LightRAG:
"source_id": source_id,
"tokens": tokens,
"chunk_order_index": chunk_order_index,
- "full_doc_id": source_id,
+ "full_doc_id": full_doc_id
+ if full_doc_id is not None
+ else source_id,
"status": DocStatus.PROCESSED,
}
all_chunks_data[chunk_id] = chunk_entry
@@ -987,9 +994,10 @@ class LightRAG:
update_storage = True
if all_chunks_data:
- await self.chunks_vdb.upsert(all_chunks_data)
- if all_chunks_data:
- await self.text_chunks.upsert(all_chunks_data)
+ await asyncio.gather(
+ self.chunks_vdb.upsert(all_chunks_data),
+ self.text_chunks.upsert(all_chunks_data),
+ )
# Insert entities into knowledge graph
all_entities_data: list[dict[str, str]] = []
@@ -997,7 +1005,6 @@ class LightRAG:
entity_name = entity_data["entity_name"]
entity_type = entity_data.get("entity_type", "UNKNOWN")
description = entity_data.get("description", "No description provided")
- # source_id = entity_data["source_id"]
source_chunk_id = entity_data.get("source_id", "UNKNOWN")
source_id = chunk_to_source_map.get(source_chunk_id, "UNKNOWN")
@@ -1029,7 +1036,6 @@ class LightRAG:
description = relationship_data["description"]
keywords = relationship_data["keywords"]
weight = relationship_data.get("weight", 1.0)
- # source_id = relationship_data["source_id"]
source_chunk_id = relationship_data.get("source_id", "UNKNOWN")
source_id = chunk_to_source_map.get(source_chunk_id, "UNKNOWN")
@@ -1069,34 +1075,43 @@ class LightRAG:
"tgt_id": tgt_id,
"description": description,
"keywords": keywords,
+ "source_id": source_id,
+ "weight": weight,
}
all_relationships_data.append(edge_data)
update_storage = True
- # Insert entities into vector storage if needed
+ # Insert entities into vector storage with consistent format
data_for_vdb = {
compute_mdhash_id(dp["entity_name"], prefix="ent-"): {
- "content": dp["entity_name"] + dp["description"],
+ "content": dp["entity_name"] + "\n" + dp["description"],
"entity_name": dp["entity_name"],
+ "source_id": dp["source_id"],
+ "description": dp["description"],
+ "entity_type": dp["entity_type"],
}
for dp in all_entities_data
}
await self.entities_vdb.upsert(data_for_vdb)
- # Insert relationships into vector storage if needed
+ # Insert relationships into vector storage with consistent format
data_for_vdb = {
compute_mdhash_id(dp["src_id"] + dp["tgt_id"], prefix="rel-"): {
"src_id": dp["src_id"],
"tgt_id": dp["tgt_id"],
- "content": dp["keywords"]
- + dp["src_id"]
- + dp["tgt_id"]
- + dp["description"],
+ "source_id": dp["source_id"],
+ "content": f"{dp['keywords']}\t{dp['src_id']}\n{dp['tgt_id']}\n{dp['description']}",
+ "keywords": dp["keywords"],
+ "description": dp["description"],
+ "weight": dp["weight"],
}
for dp in all_relationships_data
}
await self.relationships_vdb.upsert(data_for_vdb)
+ except Exception as e:
+ logger.error(f"Error in ainsert_custom_kg: {e}")
+ raise
finally:
if update_storage:
await self._insert_done()
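
With this change `ainsert_custom_kg` iterates `custom_kg.get("chunks", [])` as a list, propagates `source_id` and `weight` into the relationship records, and accepts an optional `full_doc_id` that overrides each chunk's own `source_id` as the parent document id. A minimal, purely illustrative payload using only the keys visible in this hunk (all names and values below are made up):

```python
custom_kg = {
    "chunks": [
        {"content": "Folic acid plus vitamin B12 was evaluated ...", "source_id": "chunk-1"},
    ],
    "entities": [
        {
            "entity_name": "FOLIC ACID",
            "entity_type": "DRUG",  # falls back to "UNKNOWN" if omitted
            "description": "B vitamin evaluated in the trials",
            "source_id": "chunk-1",
        },
        {
            "entity_name": "NORVIT TRIAL",
            "entity_type": "STUDY",
            "description": "Norwegian Vitamin trial",
            "source_id": "chunk-1",
        },
    ],
    "relationships": [
        {
            "src_id": "FOLIC ACID",
            "tgt_id": "NORVIT TRIAL",
            "description": "Intervention evaluated in the trial",
            "keywords": "intervention, randomized trial",
            "weight": 1.0,  # optional, defaults to 1.0
            "source_id": "chunk-1",
        },
    ],
}

# Group every chunk under one document id; without full_doc_id each chunk's
# own source_id is reused as its full_doc_id.
rag.insert_custom_kg(custom_kg, full_doc_id="doc-folic-acid-study")
```
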
@@ -1412,17 +1427,19 @@ class LightRAG:
# 3. Before deleting, check the related entities and relationships for these chunks
for chunk_id in chunk_ids:
# Check entities
+ entities_storage = await self.entities_vdb.client_storage
entities = [
dp
- for dp in self.entities_vdb.client_storage["data"]
+ for dp in entities_storage["data"]
if chunk_id in dp.get("source_id")
]
logger.debug(f"Chunk {chunk_id} has {len(entities)} related entities")
# Check relationships
+ relationships_storage = await self.relationships_vdb.client_storage
relations = [
dp
- for dp in self.relationships_vdb.client_storage["data"]
+ for dp in relationships_storage["data"]
if chunk_id in dp.get("source_id")
]
logger.debug(f"Chunk {chunk_id} has {len(relations)} related relations")
@@ -1486,7 +1503,9 @@ class LightRAG:
for entity in entities_to_delete:
await self.entities_vdb.delete_entity(entity)
logger.debug(f"Deleted entity {entity} from vector DB")
- self.chunk_entity_relation_graph.remove_nodes(list(entities_to_delete))
+ await self.chunk_entity_relation_graph.remove_nodes(
+ list(entities_to_delete)
+ )
logger.debug(f"Deleted {len(entities_to_delete)} entities from graph")
# Update entities
@@ -1505,7 +1524,7 @@ class LightRAG:
rel_id_1 = compute_mdhash_id(tgt + src, prefix="rel-")
await self.relationships_vdb.delete([rel_id_0, rel_id_1])
logger.debug(f"Deleted relationship {src}-{tgt} from vector DB")
- self.chunk_entity_relation_graph.remove_edges(
+ await self.chunk_entity_relation_graph.remove_edges(
list(relationships_to_delete)
)
logger.debug(
@@ -1536,9 +1555,10 @@ class LightRAG:
async def process_data(data_type, vdb, chunk_id):
# Check data (entities or relationships)
+ storage = await vdb.client_storage
data_with_chunk = [
dp
- for dp in vdb.client_storage["data"]
+ for dp in storage["data"]
if chunk_id in (dp.get("source_id") or "").split(GRAPH_FIELD_SEP)
]
@@ -1744,3 +1764,461 @@ class LightRAG:
def clear_cache(self, modes: list[str] | None = None) -> None:
"""Synchronous version of aclear_cache."""
return always_get_an_event_loop().run_until_complete(self.aclear_cache(modes))
+
+ async def aedit_entity(
+ self, entity_name: str, updated_data: dict[str, str], allow_rename: bool = True
+ ) -> dict[str, Any]:
+ """Asynchronously edit entity information.
+
+ Updates entity information in the knowledge graph and re-embeds the entity in the vector database.
+
+ Args:
+ entity_name: Name of the entity to edit
+ updated_data: Dictionary containing updated attributes, e.g. {"description": "new description", "entity_type": "new type"}
+ allow_rename: Whether to allow entity renaming, defaults to True
+
+ Returns:
+ Dictionary containing updated entity information
+ """
+ try:
+ # 1. Get current entity information
+ node_data = await self.chunk_entity_relation_graph.get_node(entity_name)
+ if not node_data:
+ raise ValueError(f"Entity '{entity_name}' does not exist")
+
+ # Check if entity is being renamed
+ new_entity_name = updated_data.get("entity_name", entity_name)
+ is_renaming = new_entity_name != entity_name
+
+ # If renaming, check if new name already exists
+ if is_renaming:
+ if not allow_rename:
+ raise ValueError(
+ "Entity renaming is not allowed. Set allow_rename=True to enable this feature"
+ )
+
+ existing_node = await self.chunk_entity_relation_graph.get_node(
+ new_entity_name
+ )
+ if existing_node:
+ raise ValueError(
+ f"Entity name '{new_entity_name}' already exists, cannot rename"
+ )
+
+ # 2. Update entity information in the graph
+ new_node_data = {**node_data, **updated_data}
+ if "entity_name" in new_node_data:
+ del new_node_data[
+ "entity_name"
+ ] # Node data should not contain entity_name field
+
+ # If renaming entity
+ if is_renaming:
+ logger.info(f"Renaming entity '{entity_name}' to '{new_entity_name}'")
+
+ # Create new entity
+ await self.chunk_entity_relation_graph.upsert_node(
+ new_entity_name, new_node_data
+ )
+
+ # Get all edges related to the original entity
+ edges = await self.chunk_entity_relation_graph.get_node_edges(
+ entity_name
+ )
+ if edges:
+ # Recreate edges for the new entity
+ for source, target in edges:
+ edge_data = await self.chunk_entity_relation_graph.get_edge(
+ source, target
+ )
+ if edge_data:
+ if source == entity_name:
+ await self.chunk_entity_relation_graph.upsert_edge(
+ new_entity_name, target, edge_data
+ )
+ else: # target == entity_name
+ await self.chunk_entity_relation_graph.upsert_edge(
+ source, new_entity_name, edge_data
+ )
+
+ # Delete old entity
+ await self.chunk_entity_relation_graph.delete_node(entity_name)
+
+ # Delete old entity record from vector database
+ old_entity_id = compute_mdhash_id(entity_name, prefix="ent-")
+ await self.entities_vdb.delete([old_entity_id])
+
+ # Update working entity name to new name
+ entity_name = new_entity_name
+ else:
+ # If not renaming, directly update node data
+ await self.chunk_entity_relation_graph.upsert_node(
+ entity_name, new_node_data
+ )
+
+ # 3. Recalculate entity's vector representation and update vector database
+ description = new_node_data.get("description", "")
+ source_id = new_node_data.get("source_id", "")
+ entity_type = new_node_data.get("entity_type", "")
+ content = entity_name + "\n" + description
+
+ # Calculate entity ID
+ entity_id = compute_mdhash_id(entity_name, prefix="ent-")
+
+ # Prepare data for vector database update
+ entity_data = {
+ entity_id: {
+ "content": content,
+ "entity_name": entity_name,
+ "source_id": source_id,
+ "description": description,
+ "entity_type": entity_type,
+ }
+ }
+
+ # Update vector database
+ await self.entities_vdb.upsert(entity_data)
+
+ # 4. Save changes
+ await self._edit_entity_done()
+
+ logger.info(f"Entity '{entity_name}' successfully updated")
+ return await self.get_entity_info(entity_name, include_vector_data=True)
+ except Exception as e:
+ logger.error(f"Error while editing entity '{entity_name}': {e}")
+ raise
+
+ def edit_entity(
+ self, entity_name: str, updated_data: dict[str, str], allow_rename: bool = True
+ ) -> dict[str, Any]:
+ """Synchronously edit entity information.
+
+ Updates entity information in the knowledge graph and re-embeds the entity in the vector database.
+
+ Args:
+ entity_name: Name of the entity to edit
+ updated_data: Dictionary containing updated attributes, e.g. {"description": "new description", "entity_type": "new type"}
+ allow_rename: Whether to allow entity renaming, defaults to True
+
+ Returns:
+ Dictionary containing updated entity information
+ """
+ loop = always_get_an_event_loop()
+ return loop.run_until_complete(
+ self.aedit_entity(entity_name, updated_data, allow_rename)
+ )
+
+ async def _edit_entity_done(self) -> None:
+ """Callback after entity editing is complete, ensures updates are persisted"""
+ await asyncio.gather(
+ *[
+ cast(StorageNameSpace, storage_inst).index_done_callback()
+ for storage_inst in [ # type: ignore
+ self.entities_vdb,
+ self.chunk_entity_relation_graph,
+ ]
+ ]
+ )
+
+ async def aedit_relation(
+ self, source_entity: str, target_entity: str, updated_data: dict[str, Any]
+ ) -> dict[str, Any]:
+ """Asynchronously edit relation information.
+
+ Updates relation (edge) information in the knowledge graph and re-embeds the relation in the vector database.
+
+ Args:
+ source_entity: Name of the source entity
+ target_entity: Name of the target entity
+ updated_data: Dictionary containing updated attributes, e.g. {"description": "new description", "keywords": "new keywords"}
+
+ Returns:
+ Dictionary containing updated relation information
+ """
+ try:
+ # 1. Get current relation information
+ edge_data = await self.chunk_entity_relation_graph.get_edge(
+ source_entity, target_entity
+ )
+ if not edge_data:
+ raise ValueError(
+ f"Relation from '{source_entity}' to '{target_entity}' does not exist"
+ )
+
+ # 2. Update relation information in the graph
+ new_edge_data = {**edge_data, **updated_data}
+ await self.chunk_entity_relation_graph.upsert_edge(
+ source_entity, target_entity, new_edge_data
+ )
+
+ # 3. Recalculate relation's vector representation and update vector database
+ description = new_edge_data.get("description", "")
+ keywords = new_edge_data.get("keywords", "")
+ source_id = new_edge_data.get("source_id", "")
+ weight = float(new_edge_data.get("weight", 1.0))
+
+ # Create content for embedding
+ content = f"{keywords}\t{source_entity}\n{target_entity}\n{description}"
+
+ # Calculate relation ID
+ relation_id = compute_mdhash_id(
+ source_entity + target_entity, prefix="rel-"
+ )
+
+ # Prepare data for vector database update
+ relation_data = {
+ relation_id: {
+ "content": content,
+ "src_id": source_entity,
+ "tgt_id": target_entity,
+ "source_id": source_id,
+ "description": description,
+ "keywords": keywords,
+ "weight": weight,
+ }
+ }
+
+ # Update vector database
+ await self.relationships_vdb.upsert(relation_data)
+
+ # 4. Save changes
+ await self._edit_relation_done()
+
+ logger.info(
+ f"Relation from '{source_entity}' to '{target_entity}' successfully updated"
+ )
+ return await self.get_relation_info(
+ source_entity, target_entity, include_vector_data=True
+ )
+ except Exception as e:
+ logger.error(
+ f"Error while editing relation from '{source_entity}' to '{target_entity}': {e}"
+ )
+ raise
+
+ def edit_relation(
+ self, source_entity: str, target_entity: str, updated_data: dict[str, Any]
+ ) -> dict[str, Any]:
+ """Synchronously edit relation information.
+
+ Updates relation (edge) information in the knowledge graph and re-embeds the relation in the vector database.
+
+ Args:
+ source_entity: Name of the source entity
+ target_entity: Name of the target entity
+            updated_data: Dictionary containing updated attributes, e.g. {"description": "new description", "keywords": "new keywords"}
+
+ Returns:
+ Dictionary containing updated relation information
+ """
+ loop = always_get_an_event_loop()
+ return loop.run_until_complete(
+ self.aedit_relation(source_entity, target_entity, updated_data)
+ )
+
+ async def _edit_relation_done(self) -> None:
+ """Callback after relation editing is complete, ensures updates are persisted"""
+ await asyncio.gather(
+ *[
+ cast(StorageNameSpace, storage_inst).index_done_callback()
+ for storage_inst in [ # type: ignore
+ self.relationships_vdb,
+ self.chunk_entity_relation_graph,
+ ]
+ ]
+ )
+
+ async def acreate_entity(
+ self, entity_name: str, entity_data: dict[str, Any]
+ ) -> dict[str, Any]:
+ """Asynchronously create a new entity.
+
+ Creates a new entity in the knowledge graph and adds it to the vector database.
+
+ Args:
+ entity_name: Name of the new entity
+ entity_data: Dictionary containing entity attributes, e.g. {"description": "description", "entity_type": "type"}
+
+ Returns:
+ Dictionary containing created entity information
+ """
+ try:
+ # Check if entity already exists
+ existing_node = await self.chunk_entity_relation_graph.get_node(entity_name)
+ if existing_node:
+ raise ValueError(f"Entity '{entity_name}' already exists")
+
+ # Prepare node data with defaults if missing
+ node_data = {
+ "entity_type": entity_data.get("entity_type", "UNKNOWN"),
+ "description": entity_data.get("description", ""),
+ "source_id": entity_data.get("source_id", "manual"),
+ }
+
+ # Add entity to knowledge graph
+ await self.chunk_entity_relation_graph.upsert_node(entity_name, node_data)
+
+ # Prepare content for entity
+ description = node_data.get("description", "")
+ source_id = node_data.get("source_id", "")
+ entity_type = node_data.get("entity_type", "")
+ content = entity_name + "\n" + description
+
+ # Calculate entity ID
+ entity_id = compute_mdhash_id(entity_name, prefix="ent-")
+
+ # Prepare data for vector database update
+ entity_data_for_vdb = {
+ entity_id: {
+ "content": content,
+ "entity_name": entity_name,
+ "source_id": source_id,
+ "description": description,
+ "entity_type": entity_type,
+ }
+ }
+
+ # Update vector database
+ await self.entities_vdb.upsert(entity_data_for_vdb)
+
+ # Save changes
+ await self._edit_entity_done()
+
+ logger.info(f"Entity '{entity_name}' successfully created")
+ return await self.get_entity_info(entity_name, include_vector_data=True)
+ except Exception as e:
+ logger.error(f"Error while creating entity '{entity_name}': {e}")
+ raise
+
+ def create_entity(
+ self, entity_name: str, entity_data: dict[str, Any]
+ ) -> dict[str, Any]:
+ """Synchronously create a new entity.
+
+ Creates a new entity in the knowledge graph and adds it to the vector database.
+
+ Args:
+ entity_name: Name of the new entity
+ entity_data: Dictionary containing entity attributes, e.g. {"description": "description", "entity_type": "type"}
+
+ Returns:
+ Dictionary containing created entity information
+ """
+ loop = always_get_an_event_loop()
+ return loop.run_until_complete(self.acreate_entity(entity_name, entity_data))
+
+ async def acreate_relation(
+ self, source_entity: str, target_entity: str, relation_data: dict[str, Any]
+ ) -> dict[str, Any]:
+ """Asynchronously create a new relation between entities.
+
+ Creates a new relation (edge) in the knowledge graph and adds it to the vector database.
+
+ Args:
+ source_entity: Name of the source entity
+ target_entity: Name of the target entity
+ relation_data: Dictionary containing relation attributes, e.g. {"description": "description", "keywords": "keywords"}
+
+ Returns:
+ Dictionary containing created relation information
+ """
+ try:
+ # Check if both entities exist
+ source_exists = await self.chunk_entity_relation_graph.has_node(
+ source_entity
+ )
+ target_exists = await self.chunk_entity_relation_graph.has_node(
+ target_entity
+ )
+
+ if not source_exists:
+ raise ValueError(f"Source entity '{source_entity}' does not exist")
+ if not target_exists:
+ raise ValueError(f"Target entity '{target_entity}' does not exist")
+
+ # Check if relation already exists
+ existing_edge = await self.chunk_entity_relation_graph.get_edge(
+ source_entity, target_entity
+ )
+ if existing_edge:
+ raise ValueError(
+ f"Relation from '{source_entity}' to '{target_entity}' already exists"
+ )
+
+ # Prepare edge data with defaults if missing
+ edge_data = {
+ "description": relation_data.get("description", ""),
+ "keywords": relation_data.get("keywords", ""),
+ "source_id": relation_data.get("source_id", "manual"),
+ "weight": float(relation_data.get("weight", 1.0)),
+ }
+
+ # Add relation to knowledge graph
+ await self.chunk_entity_relation_graph.upsert_edge(
+ source_entity, target_entity, edge_data
+ )
+
+ # Prepare content for embedding
+ description = edge_data.get("description", "")
+ keywords = edge_data.get("keywords", "")
+ source_id = edge_data.get("source_id", "")
+ weight = edge_data.get("weight", 1.0)
+
+ # Create content for embedding
+ content = f"{keywords}\t{source_entity}\n{target_entity}\n{description}"
+
+ # Calculate relation ID
+ relation_id = compute_mdhash_id(
+ source_entity + target_entity, prefix="rel-"
+ )
+
+ # Prepare data for vector database update
+ relation_data_for_vdb = {
+ relation_id: {
+ "content": content,
+ "src_id": source_entity,
+ "tgt_id": target_entity,
+ "source_id": source_id,
+ "description": description,
+ "keywords": keywords,
+ "weight": weight,
+ }
+ }
+
+ # Update vector database
+ await self.relationships_vdb.upsert(relation_data_for_vdb)
+
+ # Save changes
+ await self._edit_relation_done()
+
+ logger.info(
+ f"Relation from '{source_entity}' to '{target_entity}' successfully created"
+ )
+ return await self.get_relation_info(
+ source_entity, target_entity, include_vector_data=True
+ )
+ except Exception as e:
+ logger.error(
+ f"Error while creating relation from '{source_entity}' to '{target_entity}': {e}"
+ )
+ raise
+
+ def create_relation(
+ self, source_entity: str, target_entity: str, relation_data: dict[str, Any]
+ ) -> dict[str, Any]:
+ """Synchronously create a new relation between entities.
+
+ Creates a new relation (edge) in the knowledge graph and adds it to the vector database.
+
+ Args:
+ source_entity: Name of the source entity
+ target_entity: Name of the target entity
+ relation_data: Dictionary containing relation attributes, e.g. {"description": "description", "keywords": "keywords"}
+
+ Returns:
+ Dictionary containing created relation information
+ """
+ loop = always_get_an_event_loop()
+ return loop.run_until_complete(
+ self.acreate_relation(source_entity, target_entity, relation_data)
+ )
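
The block above introduces synchronous wrappers (`create_entity`, `create_relation`, `edit_entity`, `edit_relation`) around the new async editing API. A short usage sketch, assuming an already initialized `rag` instance; entity names and attribute values are illustrative only:

```python
# Create two entities and connect them.
rag.create_entity("FOLIC ACID", {"entity_type": "DRUG", "description": "B vitamin"})
rag.create_entity("NORVIT TRIAL", {"entity_type": "STUDY", "description": "Norwegian Vitamin trial"})
rag.create_relation(
    "FOLIC ACID",
    "NORVIT TRIAL",
    {"description": "Intervention evaluated in the trial", "keywords": "intervention, trial"},
)

# Rename an entity while updating its description; allow_rename defaults to True,
# and existing edges are re-attached to the new name.
rag.edit_entity(
    "FOLIC ACID",
    {"entity_name": "FOLIC ACID SUPPLEMENT", "description": "Folic acid treatment arm"},
)

# Update the relation from the renamed entity.
rag.edit_relation(
    "FOLIC ACID SUPPLEMENT",
    "NORVIT TRIAL",
    {"keywords": "intervention, randomized trial", "weight": 2.0},
)
```
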
diff --git a/reproduce/Step_1.py b/reproduce/Step_1.py
index e318c145..c94015ad 100644
--- a/reproduce/Step_1.py
+++ b/reproduce/Step_1.py
@@ -1,8 +1,10 @@
import os
import json
import time
+import asyncio
from lightrag import LightRAG
+from lightrag.kg.shared_storage import initialize_pipeline_status
def insert_text(rag, file_path):
@@ -29,6 +31,21 @@ WORKING_DIR = f"../{cls}"
if not os.path.exists(WORKING_DIR):
os.mkdir(WORKING_DIR)
-rag = LightRAG(working_dir=WORKING_DIR)
-insert_text(rag, f"../datasets/unique_contexts/{cls}_unique_contexts.json")
+async def initialize_rag():
+ rag = LightRAG(working_dir=WORKING_DIR)
+
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
+
+
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
+ insert_text(rag, f"../datasets/unique_contexts/{cls}_unique_contexts.json")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/reproduce/Step_1_openai_compatible.py b/reproduce/Step_1_openai_compatible.py
index 09ca78c5..3b9944eb 100644
--- a/reproduce/Step_1_openai_compatible.py
+++ b/reproduce/Step_1_openai_compatible.py
@@ -1,11 +1,13 @@
import os
import json
import time
+import asyncio
import numpy as np
from lightrag import LightRAG
from lightrag.utils import EmbeddingFunc
from lightrag.llm.openai import openai_complete_if_cache, openai_embed
+from lightrag.kg.shared_storage import initialize_pipeline_status
## For Upstage API
@@ -60,12 +62,27 @@ WORKING_DIR = f"../{cls}"
if not os.path.exists(WORKING_DIR):
os.mkdir(WORKING_DIR)
-rag = LightRAG(
- working_dir=WORKING_DIR,
- llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=4096, max_token_size=8192, func=embedding_func
- ),
-)
-insert_text(rag, f"../datasets/unique_contexts/{cls}_unique_contexts.json")
+async def initialize_rag():
+ rag = LightRAG(
+ working_dir=WORKING_DIR,
+ llm_model_func=llm_model_func,
+ embedding_func=EmbeddingFunc(
+ embedding_dim=4096, max_token_size=8192, func=embedding_func
+ ),
+ )
+
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+
+ return rag
+
+
+def main():
+ # Initialize RAG instance
+ rag = asyncio.run(initialize_rag())
+ insert_text(rag, f"../datasets/unique_contexts/{cls}_unique_contexts.json")
+
+
+if __name__ == "__main__":
+ main()