Merge branch 'main' into standalone-logger-setup
This commit is contained in:
.gitignore (vendored): 2 changes
@@ -57,7 +57,7 @@ ignore_this.txt
*.ignore.*

# Project-specific files
dickens/
dickens*/
book.txt
lightrag-dev/
gui/
README.md: 289 changes
@@ -102,33 +102,47 @@ Use the below Python snippet (in a script) to initialize LightRAG and perform qu

```python
import os
import asyncio
from lightrag import LightRAG, QueryParam
from lightrag.llm.openai import gpt_4o_mini_complete, gpt_4o_complete, openai_embed
from lightrag.kg.shared_storage import initialize_pipeline_status

rag = LightRAG(
working_dir="your/path",
embedding_func=openai_embed,
llm_model_func=gpt_4o_mini_complete
)
async def initialize_rag():
rag = LightRAG(
working_dir="your/path",
embedding_func=openai_embed,
llm_model_func=gpt_4o_mini_complete
)

# Insert text
rag.insert("Your text")
await rag.initialize_storages()
await initialize_pipeline_status()

# Perform naive search
mode="naive"
# Perform local search
mode="local"
# Perform global search
mode="global"
# Perform hybrid search
mode="hybrid"
# Mix mode Integrates knowledge graph and vector retrieval.
mode="mix"
return rag

rag.query(
"What are the top themes in this story?",
param=QueryParam(mode=mode)
)
def main():
# Initialize RAG instance
rag = asyncio.run(initialize_rag())
# Insert text
rag.insert("Your text")

# Perform naive search
mode="naive"
# Perform local search
mode="local"
# Perform global search
mode="global"
# Perform hybrid search
mode="hybrid"
# Mix mode Integrates knowledge graph and vector retrieval.
mode="mix"

rag.query(
"What are the top themes in this story?",
param=QueryParam(mode=mode)
)

if __name__ == "__main__":
main()
```
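A minimal consolidated sketch of the new initialization flow shown above; the working directory, the inserted text, and the chosen query mode are placeholders, everything else is taken from the snippet.

```python
import asyncio

from lightrag import LightRAG, QueryParam
from lightrag.llm.openai import gpt_4o_mini_complete, openai_embed
from lightrag.kg.shared_storage import initialize_pipeline_status


async def initialize_rag():
    rag = LightRAG(
        working_dir="your/path",  # placeholder path
        embedding_func=openai_embed,
        llm_model_func=gpt_4o_mini_complete,
    )
    # Storages and pipeline status must be initialized before inserting or querying
    await rag.initialize_storages()
    await initialize_pipeline_status()
    return rag


def main():
    rag = asyncio.run(initialize_rag())
    rag.insert("Your text")  # placeholder document
    # mode can be "naive", "local", "global", "hybrid", or "mix"
    print(rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid")))


if __name__ == "__main__":
    main()
```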
### Query Param
@@ -190,15 +204,21 @@ async def embedding_func(texts: list[str]) -> np.ndarray:
base_url="https://api.upstage.ai/v1/solar"
)

rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=llm_model_func,
embedding_func=EmbeddingFunc(
embedding_dim=4096,
max_token_size=8192,
func=embedding_func
async def initialize_rag():
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=llm_model_func,
embedding_func=EmbeddingFunc(
embedding_dim=4096,
max_token_size=8192,
func=embedding_func
)
)
)

await rag.initialize_storages()
await initialize_pipeline_status()

return rag
```
</details>
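The `initialize_rag()` coroutine above returns a ready-to-use instance; a short sketch of consuming it from an async context (the query text is a placeholder, and `aquery` follows the async query calls used elsewhere in these examples):

```python
import asyncio


async def run_query():
    rag = await initialize_rag()
    # Async counterpart of rag.query, mirroring the aquery usage in the example scripts
    answer = await rag.aquery(
        "What are the top themes in this story?",
        param=QueryParam(mode="hybrid"),
    )
    print(answer)


asyncio.run(run_query())
```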
@@ -210,10 +230,6 @@ rag = LightRAG(
See `lightrag_hf_demo.py`

```python
from lightrag.llm import hf_model_complete, hf_embed
from transformers import AutoModel, AutoTokenizer
from lightrag.utils import EmbeddingFunc

# Initialize LightRAG with Hugging Face model
rag = LightRAG(
working_dir=WORKING_DIR,
@@ -242,9 +258,6 @@ If you want to use Ollama models, you need to pull model you plan to use and emb
Then you only need to set LightRAG as follows:

```python
from lightrag.llm.ollama import ollama_model_complete, ollama_embed
from lightrag.utils import EmbeddingFunc

# Initialize LightRAG with Ollama model
rag = LightRAG(
working_dir=WORKING_DIR,
@@ -325,20 +338,58 @@ LightRAG supports integration with LlamaIndex.

```python
# Using LlamaIndex with direct OpenAI access
import asyncio
from lightrag import LightRAG
from lightrag.llm.llama_index_impl import llama_index_complete_if_cache, llama_index_embed
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from lightrag.kg.shared_storage import initialize_pipeline_status

rag = LightRAG(
working_dir="your/path",
llm_model_func=llama_index_complete_if_cache, # LlamaIndex-compatible completion function
embedding_func=EmbeddingFunc( # LlamaIndex-compatible embedding function
embedding_dim=1536,
max_token_size=8192,
func=lambda texts: llama_index_embed(texts, embed_model=embed_model)
),
)
async def initialize_rag():
rag = LightRAG(
working_dir="your/path",
llm_model_func=llama_index_complete_if_cache, # LlamaIndex-compatible completion function
embedding_func=EmbeddingFunc( # LlamaIndex-compatible embedding function
embedding_dim=1536,
max_token_size=8192,
func=lambda texts: llama_index_embed(texts, embed_model=embed_model)
),
)

await rag.initialize_storages()
await initialize_pipeline_status()

return rag

def main():
# Initialize RAG instance
rag = asyncio.run(initialize_rag())

with open("./book.txt", "r", encoding="utf-8") as f:
rag.insert(f.read())

# Perform naive search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
)

# Perform local search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
)

# Perform global search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
)

# Perform hybrid search
print(
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
)

if __name__ == "__main__":
main()
```
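The lambda above references an `embed_model` object that is not defined in this hunk; a minimal sketch of how it might be constructed with the `OpenAIEmbedding` class imported in the snippet (the model name is an assumption, chosen only because its default 1536 dimensions match `embedding_dim=1536` above):

```python
# Hypothetical embed_model setup; the model name is an assumption, not taken from the README
embed_model = OpenAIEmbedding(model="text-embedding-3-small")
```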
#### For detailed documentation and examples, see:
@@ -353,11 +404,6 @@ rag = LightRAG(
LightRAG now supports multi-turn dialogue through the conversation history feature. Here's how to use it:

```python
from lightrag import LightRAG, QueryParam

# Initialize LightRAG
rag = LightRAG(working_dir=WORKING_DIR)

# Create conversation history
conversation_history = [
{"role": "user", "content": "What is the main character's attitude towards Christmas?"},
@@ -387,11 +433,6 @@ response = rag.query(
LightRAG now supports custom prompts for fine-tuned control over the system's behavior. Here's how to use it:

```python
from lightrag import LightRAG, QueryParam

# Initialize LightRAG
rag = LightRAG(working_dir=WORKING_DIR)

# Create query parameters
query_param = QueryParam(
mode="hybrid", # or other mode: "local", "global", "hybrid", "mix" and "naive"
@@ -456,16 +497,6 @@ rag.query_with_separate_keyword_extraction(
<summary> <b>Insert Custom KG</b> </summary>

```python
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=llm_model_func,
embedding_func=EmbeddingFunc(
embedding_dim=embedding_dimension,
max_token_size=8192,
func=embedding_func,
),
)

custom_kg = {
"entities": [
{
@@ -534,6 +565,7 @@ rag = LightRAG(
"insert_batch_size": 20 # Process 20 documents per batch
}
)

rag.insert(["TEXT1", "TEXT2", "TEXT3", ...]) # Documents will be processed in batches of 20
```
@@ -560,27 +592,6 @@ rag.insert(["TEXT1", "TEXT2",...], ids=["ID_FOR_TEXT1", "ID_FOR_TEXT2"])

</details>

<details>
<summary><b>Incremental Insert</b></summary>

```python
# Incremental Insert: Insert new documents into an existing LightRAG instance
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=llm_model_func,
embedding_func=EmbeddingFunc(
embedding_dim=embedding_dimension,
max_token_size=8192,
func=embedding_func,
),
)

with open("./newText.txt") as f:
rag.insert(f.read())
```

</details>

<details>
<summary><b>Insert using Pipeline</b></summary>
@@ -592,6 +603,7 @@ And using a routine to process news documents.

```python
rag = LightRAG(..)

await rag.apipeline_enqueue_documents(input)
# Your routine in loop
await rag.apipeline_process_enqueue_documents(input)
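# A hedged sketch of one possible driving routine: only the two apipeline_* calls
# above come from the README, and their argument shapes are copied from the snippet;
# the loop structure and the sleep interval are assumptions.
import asyncio

async def processing_routine(rag, docs):
    await rag.apipeline_enqueue_documents(docs)  # queue the incoming documents once
    while True:
        # Drain whatever is currently enqueued, then wait before polling again
        await rag.apipeline_process_enqueue_documents(docs)
        await asyncio.sleep(10)  # polling interval is an assumption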
@@ -633,8 +645,6 @@ export NEO4J_PASSWORD="password"

# Note: Default settings use NetworkX
# Initialize LightRAG with Neo4J implementation.
WORKING_DIR = "./local_neo4jWorkDir"

rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=gpt_4o_mini_complete, # Use gpt_4o_mini_complete LLM model
@@ -706,26 +716,26 @@ You can also install `faiss-gpu` if you have GPU support.

- Here we are using `sentence-transformers` but you can also use `OpenAIEmbedding` model with `3072` dimensions.

```
```python
async def embedding_func(texts: list[str]) -> np.ndarray:
model = SentenceTransformer('all-MiniLM-L6-v2')
embeddings = model.encode(texts, convert_to_numpy=True)
return embeddings

# Initialize LightRAG with the LLM model function and embedding function
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=llm_model_func,
embedding_func=EmbeddingFunc(
embedding_dim=384,
max_token_size=8192,
func=embedding_func,
),
vector_storage="FaissVectorDBStorage",
vector_db_storage_cls_kwargs={
"cosine_better_than_threshold": 0.3 # Your desired threshold
}
)
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=llm_model_func,
embedding_func=EmbeddingFunc(
embedding_dim=384,
max_token_size=8192,
func=embedding_func,
),
vector_storage="FaissVectorDBStorage",
vector_db_storage_cls_kwargs={
"cosine_better_than_threshold": 0.3 # Your desired threshold
}
)
```

</details>
@@ -733,17 +743,6 @@ async def embedding_func(texts: list[str]) -> np.ndarray:
## Delete

```python

rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=llm_model_func,
embedding_func=EmbeddingFunc(
embedding_dim=embedding_dimension,
max_token_size=8192,
func=embedding_func,
),
)

# Delete Entity: Deleting entities by their names
rag.delete_by_entity("Project Gutenberg")
@@ -751,6 +750,70 @@ rag.delete_by_entity("Project Gutenberg")
rag.delete_by_doc_id("doc_id")
```

## Edit Entities and Relations

LightRAG now supports comprehensive knowledge graph management capabilities, allowing you to create, edit, and delete entities and relationships within your knowledge graph.

### Create Entities and Relations

```python
# Create new entity
entity = rag.create_entity("Google", {
"description": "Google is a multinational technology company specializing in internet-related services and products.",
"entity_type": "company"
})

# Create another entity
product = rag.create_entity("Gmail", {
"description": "Gmail is an email service developed by Google.",
"entity_type": "product"
})

# Create relation between entities
relation = rag.create_relation("Google", "Gmail", {
"description": "Google develops and operates Gmail.",
"keywords": "develops operates service",
"weight": 2.0
})
```

### Edit Entities and Relations

```python
# Edit an existing entity
updated_entity = rag.edit_entity("Google", {
"description": "Google is a subsidiary of Alphabet Inc., founded in 1998.",
"entity_type": "tech_company"
})

# Rename an entity (with all its relationships properly migrated)
renamed_entity = rag.edit_entity("Gmail", {
"entity_name": "Google Mail",
"description": "Google Mail (formerly Gmail) is an email service."
})

# Edit a relation between entities
updated_relation = rag.edit_relation("Google", "Google Mail", {
"description": "Google created and maintains Google Mail service.",
"keywords": "creates maintains email service",
"weight": 3.0
})
```

All operations are available in both synchronous and asynchronous versions. The asynchronous versions have the prefix "a" (e.g., `acreate_entity`, `aedit_relation`).
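A short sketch of the asynchronous variants; the method names follow the "a"-prefix convention stated above, and the argument shapes are assumed to mirror the synchronous examples:

```python
import asyncio


async def update_graph():
    # Assumed to mirror rag.create_entity / rag.edit_relation shown above
    await rag.acreate_entity("Google", {
        "description": "Google is a multinational technology company.",
        "entity_type": "company",
    })
    await rag.aedit_relation("Google", "Google Mail", {
        "description": "Google created and maintains Google Mail service.",
        "keywords": "creates maintains email service",
        "weight": 3.0,
    })


asyncio.run(update_graph())
```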

#### Entity Operations

- **create_entity**: Creates a new entity with specified attributes
- **edit_entity**: Updates an existing entity's attributes or renames it

#### Relation Operations

- **create_relation**: Creates a new relation between existing entities
- **edit_relation**: Updates an existing relation's attributes

These operations maintain data consistency across both the graph database and vector database components, ensuring your knowledge graph remains coherent.

## Cache

<details>
@@ -10,7 +10,7 @@ import os
from dotenv import load_dotenv

from lightrag.kg.postgres_impl import PostgreSQLDB, PGKVStorage
from lightrag.storage import JsonKVStorage
from lightrag.kg.json_kv_impl import JsonKVStorage
from lightrag.namespace import NameSpace

load_dotenv()
@@ -1,4 +1,5 @@
from fastapi import FastAPI, HTTPException, File, UploadFile
from contextlib import asynccontextmanager
from pydantic import BaseModel
import os
from lightrag import LightRAG, QueryParam
@@ -8,12 +9,12 @@ from typing import Optional
import asyncio
import nest_asyncio
import aiofiles
from lightrag.kg.shared_storage import initialize_pipeline_status

# Apply nest_asyncio to solve event loop issues
nest_asyncio.apply()

DEFAULT_RAG_DIR = "index_default"
app = FastAPI(title="LightRAG API", description="API for RAG operations")

DEFAULT_INPUT_FILE = "book.txt"
INPUT_FILE = os.environ.get("INPUT_FILE", f"{DEFAULT_INPUT_FILE}")
@@ -28,20 +29,43 @@ if not os.path.exists(WORKING_DIR):
os.mkdir(WORKING_DIR)


rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=ollama_model_complete,
llm_model_name="gemma2:9b",
llm_model_max_async=4,
llm_model_max_token_size=8192,
llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 8192}},
embedding_func=EmbeddingFunc(
embedding_dim=768,
max_token_size=8192,
func=lambda texts: ollama_embed(
texts, embed_model="nomic-embed-text", host="http://localhost:11434"
async def init():
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=ollama_model_complete,
llm_model_name="gemma2:9b",
llm_model_max_async=4,
llm_model_max_token_size=8192,
llm_model_kwargs={
"host": "http://localhost:11434",
"options": {"num_ctx": 8192},
},
embedding_func=EmbeddingFunc(
embedding_dim=768,
max_token_size=8192,
func=lambda texts: ollama_embed(
texts, embed_model="nomic-embed-text", host="http://localhost:11434"
),
),
),
)

# Add initialization code
await rag.initialize_storages()
await initialize_pipeline_status()

return rag


@asynccontextmanager
async def lifespan(app: FastAPI):
global rag
rag = await init()
print("done!")
yield


app = FastAPI(
title="LightRAG API", description="API for RAG operations", lifespan=lifespan
)
@@ -1,4 +1,5 @@
from fastapi import FastAPI, HTTPException, File, UploadFile
from contextlib import asynccontextmanager
from pydantic import BaseModel
import os
from lightrag import LightRAG, QueryParam
@@ -8,6 +9,7 @@ import numpy as np
from typing import Optional
import asyncio
import nest_asyncio
from lightrag.kg.shared_storage import initialize_pipeline_status

# Apply nest_asyncio to solve event loop issues
nest_asyncio.apply()
@@ -71,16 +73,36 @@ async def get_embedding_dim():


# Initialize RAG instance
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=llm_model_func,
embedding_func=EmbeddingFunc(
embedding_dim=asyncio.run(get_embedding_dim()),
max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
func=embedding_func,
),
)
async def init():
embedding_dimension = await get_embedding_dim()

rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=llm_model_func,
embedding_func=EmbeddingFunc(
embedding_dim=embedding_dimension,
max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
func=embedding_func,
),
)

await rag.initialize_storages()
await initialize_pipeline_status()

return rag


@asynccontextmanager
async def lifespan(app: FastAPI):
global rag
rag = await init()
print("done!")
yield


app = FastAPI(
title="LightRAG API", description="API for RAG operations", lifespan=lifespan
)

# Data models
@@ -1,101 +0,0 @@
|
||||
import os
|
||||
from lightrag import LightRAG, QueryParam
|
||||
from lightrag.llm.openai import openai_complete_if_cache, openai_embed
|
||||
from lightrag.utils import EmbeddingFunc
|
||||
import numpy as np
|
||||
import asyncio
|
||||
import nest_asyncio
|
||||
|
||||
# Apply nest_asyncio to solve event loop issues
|
||||
nest_asyncio.apply()
|
||||
|
||||
DEFAULT_RAG_DIR = "index_default"
|
||||
|
||||
# Configure working directory
|
||||
WORKING_DIR = os.environ.get("RAG_DIR", f"{DEFAULT_RAG_DIR}")
|
||||
print(f"WORKING_DIR: {WORKING_DIR}")
|
||||
LLM_MODEL = os.environ.get("LLM_MODEL", "gpt-4o-mini")
|
||||
print(f"LLM_MODEL: {LLM_MODEL}")
|
||||
EMBEDDING_MODEL = os.environ.get("EMBEDDING_MODEL", "text-embedding-3-small")
|
||||
print(f"EMBEDDING_MODEL: {EMBEDDING_MODEL}")
|
||||
EMBEDDING_MAX_TOKEN_SIZE = int(os.environ.get("EMBEDDING_MAX_TOKEN_SIZE", 8192))
|
||||
print(f"EMBEDDING_MAX_TOKEN_SIZE: {EMBEDDING_MAX_TOKEN_SIZE}")
|
||||
BASE_URL = os.environ.get("BASE_URL", "https://api.openai.com/v1")
|
||||
print(f"BASE_URL: {BASE_URL}")
|
||||
API_KEY = os.environ.get("API_KEY", "xxxxxxxx")
|
||||
print(f"API_KEY: {API_KEY}")
|
||||
|
||||
if not os.path.exists(WORKING_DIR):
|
||||
os.mkdir(WORKING_DIR)
|
||||
|
||||
|
||||
# LLM model function
|
||||
|
||||
|
||||
async def llm_model_func(
|
||||
prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
|
||||
) -> str:
|
||||
return await openai_complete_if_cache(
|
||||
model=LLM_MODEL,
|
||||
prompt=prompt,
|
||||
system_prompt=system_prompt,
|
||||
history_messages=history_messages,
|
||||
base_url=BASE_URL,
|
||||
api_key=API_KEY,
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
|
||||
# Embedding function
|
||||
|
||||
|
||||
async def embedding_func(texts: list[str]) -> np.ndarray:
|
||||
return await openai_embed(
|
||||
texts=texts,
|
||||
model=EMBEDDING_MODEL,
|
||||
base_url=BASE_URL,
|
||||
api_key=API_KEY,
|
||||
)
|
||||
|
||||
|
||||
async def get_embedding_dim():
|
||||
test_text = ["This is a test sentence."]
|
||||
embedding = await embedding_func(test_text)
|
||||
embedding_dim = embedding.shape[1]
|
||||
print(f"{embedding_dim=}")
|
||||
return embedding_dim
|
||||
|
||||
|
||||
# Initialize RAG instance
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=llm_model_func,
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=asyncio.run(get_embedding_dim()),
|
||||
max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
|
||||
func=embedding_func,
|
||||
),
|
||||
)
|
||||
|
||||
with open("./book.txt", "r", encoding="utf-8") as f:
|
||||
rag.insert(f.read())
|
||||
|
||||
# Perform naive search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
|
||||
)
|
||||
|
||||
# Perform local search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
|
||||
)
|
||||
|
||||
# Perform global search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
|
||||
)
|
||||
|
||||
# Perform hybrid search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
|
||||
)
|
@@ -16,6 +16,7 @@ from lightrag import LightRAG, QueryParam
|
||||
from lightrag.llm.openai import openai_complete_if_cache, openai_embed
|
||||
from lightrag.utils import EmbeddingFunc
|
||||
import numpy as np
|
||||
from lightrag.kg.shared_storage import initialize_pipeline_status
|
||||
|
||||
|
||||
print(os.getcwd())
|
||||
@@ -113,6 +114,9 @@ async def init():
|
||||
vector_storage="OracleVectorDBStorage",
|
||||
)
|
||||
|
||||
await rag.initialize_storages()
|
||||
await initialize_pipeline_status()
|
||||
|
||||
return rag
|
||||
|
||||
|
||||
|
@@ -6,6 +6,7 @@ import numpy as np
|
||||
from dotenv import load_dotenv
|
||||
import logging
|
||||
from openai import AzureOpenAI
|
||||
from lightrag.kg.shared_storage import initialize_pipeline_status
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
@@ -90,6 +91,9 @@ rag = LightRAG(
|
||||
),
|
||||
)
|
||||
|
||||
rag.initialize_storages()
|
||||
initialize_pipeline_status()
|
||||
|
||||
book1 = open("./book_1.txt", encoding="utf-8")
|
||||
book2 = open("./book_2.txt", encoding="utf-8")
|
||||
|
||||
|
@@ -8,6 +8,12 @@ import logging
|
||||
from lightrag import LightRAG, QueryParam
|
||||
from lightrag.llm.bedrock import bedrock_complete, bedrock_embed
|
||||
from lightrag.utils import EmbeddingFunc
|
||||
from lightrag.kg.shared_storage import initialize_pipeline_status
|
||||
|
||||
import asyncio
|
||||
import nest_asyncio
|
||||
|
||||
nest_asyncio.apply()
|
||||
|
||||
logging.getLogger("aiobotocore").setLevel(logging.WARNING)
|
||||
|
||||
@@ -15,22 +21,35 @@ WORKING_DIR = "./dickens"
|
||||
if not os.path.exists(WORKING_DIR):
|
||||
os.mkdir(WORKING_DIR)
|
||||
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=bedrock_complete,
|
||||
llm_model_name="Anthropic Claude 3 Haiku // Amazon Bedrock",
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=1024, max_token_size=8192, func=bedrock_embed
|
||||
),
|
||||
)
|
||||
|
||||
with open("./book.txt", "r", encoding="utf-8") as f:
|
||||
rag.insert(f.read())
|
||||
|
||||
for mode in ["naive", "local", "global", "hybrid"]:
|
||||
print("\n+-" + "-" * len(mode) + "-+")
|
||||
print(f"| {mode.capitalize()} |")
|
||||
print("+-" + "-" * len(mode) + "-+\n")
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode=mode))
|
||||
async def initialize_rag():
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=bedrock_complete,
|
||||
llm_model_name="Anthropic Claude 3 Haiku // Amazon Bedrock",
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=1024, max_token_size=8192, func=bedrock_embed
|
||||
),
|
||||
)
|
||||
|
||||
await rag.initialize_storages()
|
||||
await initialize_pipeline_status()
|
||||
|
||||
return rag
|
||||
|
||||
|
||||
def main():
|
||||
rag = asyncio.run(initialize_rag())
|
||||
|
||||
with open("./book.txt", "r", encoding="utf-8") as f:
|
||||
rag.insert(f.read())
|
||||
|
||||
for mode in ["naive", "local", "global", "hybrid"]:
|
||||
print("\n+-" + "-" * len(mode) + "-+")
|
||||
print(f"| {mode.capitalize()} |")
|
||||
print("+-" + "-" * len(mode) + "-+\n")
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode=mode)
|
||||
)
|
||||
)
|
||||
|
@@ -8,6 +8,13 @@ from dotenv import load_dotenv
|
||||
from lightrag.utils import EmbeddingFunc
|
||||
from lightrag import LightRAG, QueryParam
|
||||
from sentence_transformers import SentenceTransformer
|
||||
from lightrag.kg.shared_storage import initialize_pipeline_status
|
||||
|
||||
import asyncio
|
||||
import nest_asyncio
|
||||
|
||||
# Apply nest_asyncio to solve event loop issues
|
||||
nest_asyncio.apply()
|
||||
|
||||
load_dotenv()
|
||||
gemini_api_key = os.getenv("GEMINI_API_KEY")
|
||||
@@ -60,25 +67,39 @@ async def embedding_func(texts: list[str]) -> np.ndarray:
|
||||
return embeddings
|
||||
|
||||
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=llm_model_func,
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=384,
|
||||
max_token_size=8192,
|
||||
func=embedding_func,
|
||||
),
|
||||
)
|
||||
async def initialize_rag():
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=llm_model_func,
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=384,
|
||||
max_token_size=8192,
|
||||
func=embedding_func,
|
||||
),
|
||||
)
|
||||
|
||||
file_path = "story.txt"
|
||||
with open(file_path, "r") as file:
|
||||
text = file.read()
|
||||
await rag.initialize_storages()
|
||||
await initialize_pipeline_status()
|
||||
|
||||
rag.insert(text)
|
||||
return rag
|
||||
|
||||
response = rag.query(
|
||||
query="What is the main theme of the story?",
|
||||
param=QueryParam(mode="hybrid", top_k=5, response_type="single line"),
|
||||
)
|
||||
|
||||
print(response)
|
||||
def main():
|
||||
# Initialize RAG instance
|
||||
rag = asyncio.run(initialize_rag())
|
||||
file_path = "story.txt"
|
||||
with open(file_path, "r") as file:
|
||||
text = file.read()
|
||||
|
||||
rag.insert(text)
|
||||
|
||||
response = rag.query(
|
||||
query="What is the main theme of the story?",
|
||||
param=QueryParam(mode="hybrid", top_k=5, response_type="single line"),
|
||||
)
|
||||
|
||||
print(response)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
@@ -4,51 +4,79 @@ from lightrag import LightRAG, QueryParam
|
||||
from lightrag.llm.hf import hf_model_complete, hf_embed
|
||||
from lightrag.utils import EmbeddingFunc
|
||||
from transformers import AutoModel, AutoTokenizer
|
||||
from lightrag.kg.shared_storage import initialize_pipeline_status
|
||||
|
||||
import asyncio
|
||||
import nest_asyncio
|
||||
|
||||
nest_asyncio.apply()
|
||||
|
||||
WORKING_DIR = "./dickens"
|
||||
|
||||
if not os.path.exists(WORKING_DIR):
|
||||
os.mkdir(WORKING_DIR)
|
||||
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=hf_model_complete,
|
||||
llm_model_name="meta-llama/Llama-3.1-8B-Instruct",
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=384,
|
||||
max_token_size=5000,
|
||||
func=lambda texts: hf_embed(
|
||||
texts,
|
||||
tokenizer=AutoTokenizer.from_pretrained(
|
||||
"sentence-transformers/all-MiniLM-L6-v2"
|
||||
),
|
||||
embed_model=AutoModel.from_pretrained(
|
||||
"sentence-transformers/all-MiniLM-L6-v2"
|
||||
|
||||
async def initialize_rag():
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=hf_model_complete,
|
||||
llm_model_name="meta-llama/Llama-3.1-8B-Instruct",
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=384,
|
||||
max_token_size=5000,
|
||||
func=lambda texts: hf_embed(
|
||||
texts,
|
||||
tokenizer=AutoTokenizer.from_pretrained(
|
||||
"sentence-transformers/all-MiniLM-L6-v2"
|
||||
),
|
||||
embed_model=AutoModel.from_pretrained(
|
||||
"sentence-transformers/all-MiniLM-L6-v2"
|
||||
),
|
||||
),
|
||||
),
|
||||
),
|
||||
)
|
||||
)
|
||||
|
||||
await rag.initialize_storages()
|
||||
await initialize_pipeline_status()
|
||||
|
||||
return rag
|
||||
|
||||
|
||||
with open("./book.txt", "r", encoding="utf-8") as f:
|
||||
rag.insert(f.read())
|
||||
def main():
|
||||
rag = asyncio.run(initialize_rag())
|
||||
|
||||
# Perform naive search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
|
||||
)
|
||||
with open("./book.txt", "r", encoding="utf-8") as f:
|
||||
rag.insert(f.read())
|
||||
|
||||
# Perform local search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
|
||||
)
|
||||
# Perform naive search
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="naive")
|
||||
)
|
||||
)
|
||||
|
||||
# Perform global search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
|
||||
)
|
||||
# Perform local search
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="local")
|
||||
)
|
||||
)
|
||||
|
||||
# Perform hybrid search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
|
||||
)
|
||||
# Perform global search
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="global")
|
||||
)
|
||||
)
|
||||
|
||||
# Perform hybrid search
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="hybrid")
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
@@ -1,115 +0,0 @@
|
||||
import numpy as np
|
||||
from lightrag import LightRAG, QueryParam
|
||||
from lightrag.utils import EmbeddingFunc
|
||||
from lightrag.llm.jina import jina_embed
|
||||
from lightrag.llm.openai import openai_complete_if_cache
|
||||
import os
|
||||
import asyncio
|
||||
|
||||
|
||||
async def embedding_func(texts: list[str]) -> np.ndarray:
|
||||
return await jina_embed(texts, api_key="YourJinaAPIKey")
|
||||
|
||||
|
||||
WORKING_DIR = "./dickens"
|
||||
|
||||
if not os.path.exists(WORKING_DIR):
|
||||
os.mkdir(WORKING_DIR)
|
||||
|
||||
|
||||
async def llm_model_func(
|
||||
prompt, system_prompt=None, history_messages=[], **kwargs
|
||||
) -> str:
|
||||
return await openai_complete_if_cache(
|
||||
"solar-mini",
|
||||
prompt,
|
||||
system_prompt=system_prompt,
|
||||
history_messages=history_messages,
|
||||
api_key=os.getenv("UPSTAGE_API_KEY"),
|
||||
base_url="https://api.upstage.ai/v1/solar",
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=llm_model_func,
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=1024, max_token_size=8192, func=embedding_func
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
async def lightraginsert(file_path, semaphore):
|
||||
async with semaphore:
|
||||
try:
|
||||
with open(file_path, "r", encoding="utf-8") as f:
|
||||
content = f.read()
|
||||
except UnicodeDecodeError:
|
||||
# If UTF-8 decoding fails, try other encodings
|
||||
with open(file_path, "r", encoding="gbk") as f:
|
||||
content = f.read()
|
||||
await rag.ainsert(content)
|
||||
|
||||
|
||||
async def process_files(directory, concurrency_limit):
|
||||
semaphore = asyncio.Semaphore(concurrency_limit)
|
||||
tasks = []
|
||||
for root, dirs, files in os.walk(directory):
|
||||
for f in files:
|
||||
file_path = os.path.join(root, f)
|
||||
if f.startswith("."):
|
||||
continue
|
||||
tasks.append(lightraginsert(file_path, semaphore))
|
||||
await asyncio.gather(*tasks)
|
||||
|
||||
|
||||
async def main():
|
||||
try:
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=llm_model_func,
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=1024,
|
||||
max_token_size=8192,
|
||||
func=embedding_func,
|
||||
),
|
||||
)
|
||||
|
||||
asyncio.run(process_files(WORKING_DIR, concurrency_limit=4))
|
||||
|
||||
# Perform naive search
|
||||
print(
|
||||
await rag.aquery(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="naive")
|
||||
)
|
||||
)
|
||||
|
||||
# Perform local search
|
||||
print(
|
||||
await rag.aquery(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="local")
|
||||
)
|
||||
)
|
||||
|
||||
# Perform global search
|
||||
print(
|
||||
await rag.aquery(
|
||||
"What are the top themes in this story?",
|
||||
param=QueryParam(mode="global"),
|
||||
)
|
||||
)
|
||||
|
||||
# Perform hybrid search
|
||||
print(
|
||||
await rag.aquery(
|
||||
"What are the top themes in this story?",
|
||||
param=QueryParam(mode="hybrid"),
|
||||
)
|
||||
)
|
||||
except Exception as e:
|
||||
print(f"An error occurred: {e}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
@@ -8,6 +8,11 @@ from lightrag.utils import EmbeddingFunc
|
||||
from llama_index.llms.openai import OpenAI
|
||||
from llama_index.embeddings.openai import OpenAIEmbedding
|
||||
import asyncio
|
||||
import nest_asyncio
|
||||
|
||||
nest_asyncio.apply()
|
||||
|
||||
from lightrag.kg.shared_storage import initialize_pipeline_status
|
||||
|
||||
# Configure working directory
|
||||
WORKING_DIR = "./index_default"
|
||||
@@ -76,38 +81,62 @@ async def get_embedding_dim():
|
||||
return embedding_dim
|
||||
|
||||
|
||||
# Initialize RAG instance
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=llm_model_func,
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=asyncio.run(get_embedding_dim()),
|
||||
max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
|
||||
func=embedding_func,
|
||||
),
|
||||
)
|
||||
async def initialize_rag():
|
||||
embedding_dimension = await get_embedding_dim()
|
||||
|
||||
# Insert example text
|
||||
with open("./book.txt", "r", encoding="utf-8") as f:
|
||||
rag.insert(f.read())
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=llm_model_func,
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=embedding_dimension,
|
||||
max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
|
||||
func=embedding_func,
|
||||
),
|
||||
)
|
||||
|
||||
# Test different query modes
|
||||
print("\nNaive Search:")
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
|
||||
)
|
||||
await rag.initialize_storages()
|
||||
await initialize_pipeline_status()
|
||||
|
||||
print("\nLocal Search:")
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
|
||||
)
|
||||
return rag
|
||||
|
||||
print("\nGlobal Search:")
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
|
||||
)
|
||||
|
||||
print("\nHybrid Search:")
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
|
||||
)
|
||||
def main():
|
||||
# Initialize RAG instance
|
||||
rag = asyncio.run(initialize_rag())
|
||||
|
||||
# Insert example text
|
||||
with open("./book.txt", "r", encoding="utf-8") as f:
|
||||
rag.insert(f.read())
|
||||
|
||||
# Test different query modes
|
||||
print("\nNaive Search:")
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="naive")
|
||||
)
|
||||
)
|
||||
|
||||
print("\nLocal Search:")
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="local")
|
||||
)
|
||||
)
|
||||
|
||||
print("\nGlobal Search:")
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="global")
|
||||
)
|
||||
)
|
||||
|
||||
print("\nHybrid Search:")
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="hybrid")
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
@@ -8,6 +8,11 @@ from lightrag.utils import EmbeddingFunc
|
||||
from llama_index.llms.litellm import LiteLLM
|
||||
from llama_index.embeddings.litellm import LiteLLMEmbedding
|
||||
import asyncio
|
||||
import nest_asyncio
|
||||
|
||||
nest_asyncio.apply()
|
||||
|
||||
from lightrag.kg.shared_storage import initialize_pipeline_status
|
||||
|
||||
# Configure working directory
|
||||
WORKING_DIR = "./index_default"
|
||||
@@ -79,38 +84,62 @@ async def get_embedding_dim():
|
||||
return embedding_dim
|
||||
|
||||
|
||||
# Initialize RAG instance
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=llm_model_func,
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=asyncio.run(get_embedding_dim()),
|
||||
max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
|
||||
func=embedding_func,
|
||||
),
|
||||
)
|
||||
async def initialize_rag():
|
||||
embedding_dimension = await get_embedding_dim()
|
||||
|
||||
# Insert example text
|
||||
with open("./book.txt", "r", encoding="utf-8") as f:
|
||||
rag.insert(f.read())
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=llm_model_func,
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=embedding_dimension,
|
||||
max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
|
||||
func=embedding_func,
|
||||
),
|
||||
)
|
||||
|
||||
# Test different query modes
|
||||
print("\nNaive Search:")
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
|
||||
)
|
||||
await rag.initialize_storages()
|
||||
await initialize_pipeline_status()
|
||||
|
||||
print("\nLocal Search:")
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
|
||||
)
|
||||
return rag
|
||||
|
||||
print("\nGlobal Search:")
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
|
||||
)
|
||||
|
||||
print("\nHybrid Search:")
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
|
||||
)
|
||||
def main():
|
||||
# Initialize RAG instance
|
||||
rag = asyncio.run(initialize_rag())
|
||||
|
||||
# Insert example text
|
||||
with open("./book.txt", "r", encoding="utf-8") as f:
|
||||
rag.insert(f.read())
|
||||
|
||||
# Test different query modes
|
||||
print("\nNaive Search:")
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="naive")
|
||||
)
|
||||
)
|
||||
|
||||
print("\nLocal Search:")
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="local")
|
||||
)
|
||||
)
|
||||
|
||||
print("\nGlobal Search:")
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="global")
|
||||
)
|
||||
)
|
||||
|
||||
print("\nHybrid Search:")
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="hybrid")
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
@@ -5,6 +5,12 @@ from lightrag.llm.lmdeploy import lmdeploy_model_if_cache
|
||||
from lightrag.llm.hf import hf_embed
|
||||
from lightrag.utils import EmbeddingFunc
|
||||
from transformers import AutoModel, AutoTokenizer
|
||||
from lightrag.kg.shared_storage import initialize_pipeline_status
|
||||
|
||||
import asyncio
|
||||
import nest_asyncio
|
||||
|
||||
nest_asyncio.apply()
|
||||
|
||||
WORKING_DIR = "./dickens"
|
||||
|
||||
@@ -36,45 +42,69 @@ async def lmdeploy_model_complete(
|
||||
)
|
||||
|
||||
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=lmdeploy_model_complete,
|
||||
llm_model_name="meta-llama/Llama-3.1-8B-Instruct", # please use definite path for local model
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=384,
|
||||
max_token_size=5000,
|
||||
func=lambda texts: hf_embed(
|
||||
texts,
|
||||
tokenizer=AutoTokenizer.from_pretrained(
|
||||
"sentence-transformers/all-MiniLM-L6-v2"
|
||||
),
|
||||
embed_model=AutoModel.from_pretrained(
|
||||
"sentence-transformers/all-MiniLM-L6-v2"
|
||||
async def initialize_rag():
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=lmdeploy_model_complete,
|
||||
llm_model_name="meta-llama/Llama-3.1-8B-Instruct", # please use definite path for local model
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=384,
|
||||
max_token_size=5000,
|
||||
func=lambda texts: hf_embed(
|
||||
texts,
|
||||
tokenizer=AutoTokenizer.from_pretrained(
|
||||
"sentence-transformers/all-MiniLM-L6-v2"
|
||||
),
|
||||
embed_model=AutoModel.from_pretrained(
|
||||
"sentence-transformers/all-MiniLM-L6-v2"
|
||||
),
|
||||
),
|
||||
),
|
||||
),
|
||||
)
|
||||
)
|
||||
|
||||
await rag.initialize_storages()
|
||||
await initialize_pipeline_status()
|
||||
|
||||
return rag
|
||||
|
||||
|
||||
with open("./book.txt", "r", encoding="utf-8") as f:
|
||||
rag.insert(f.read())
|
||||
def main():
|
||||
# Initialize RAG instance
|
||||
rag = asyncio.run(initialize_rag())
|
||||
|
||||
# Perform naive search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
|
||||
)
|
||||
# Insert example text
|
||||
with open("./book.txt", "r", encoding="utf-8") as f:
|
||||
rag.insert(f.read())
|
||||
|
||||
# Perform local search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
|
||||
)
|
||||
# Test different query modes
|
||||
print("\nNaive Search:")
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="naive")
|
||||
)
|
||||
)
|
||||
|
||||
# Perform global search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
|
||||
)
|
||||
print("\nLocal Search:")
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="local")
|
||||
)
|
||||
)
|
||||
|
||||
# Perform hybrid search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
|
||||
)
|
||||
print("\nGlobal Search:")
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="global")
|
||||
)
|
||||
)
|
||||
|
||||
print("\nHybrid Search:")
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="hybrid")
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
@@ -1,5 +1,9 @@
|
||||
import os
|
||||
import asyncio
|
||||
import nest_asyncio
|
||||
|
||||
nest_asyncio.apply()
|
||||
|
||||
from lightrag import LightRAG, QueryParam
|
||||
from lightrag.llm import (
|
||||
openai_complete_if_cache,
|
||||
@@ -7,10 +11,12 @@ from lightrag.llm import (
|
||||
)
|
||||
from lightrag.utils import EmbeddingFunc
|
||||
import numpy as np
|
||||
from lightrag.kg.shared_storage import initialize_pipeline_status
|
||||
|
||||
# for custom llm_model_func
|
||||
from lightrag.utils import locate_json_string_body_from_string
|
||||
|
||||
|
||||
WORKING_DIR = "./dickens"
|
||||
|
||||
if not os.path.exists(WORKING_DIR):
|
||||
@@ -92,41 +98,39 @@ async def test_funcs():
|
||||
# asyncio.run(test_funcs())
|
||||
|
||||
|
||||
async def initialize_rag():
|
||||
embedding_dimension = await get_embedding_dim()
|
||||
print(f"Detected embedding dimension: {embedding_dimension}")
|
||||
|
||||
# lightRAG class during indexing
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=llm_model_func,
|
||||
# llm_model_name="meta/llama3-70b-instruct", #un comment if
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=embedding_dimension,
|
||||
max_token_size=512, # maximum token size, somehow it's still exceed maximum number of token
|
||||
# so truncate (trunc) parameter on embedding_func will handle it and try to examine the tokenizer used in LightRAG
|
||||
# so you can adjust to be able to fit the NVIDIA model (future work)
|
||||
func=indexing_embedding_func,
|
||||
),
|
||||
)
|
||||
|
||||
await rag.initialize_storages()
|
||||
await initialize_pipeline_status()
|
||||
|
||||
return rag
|
||||
|
||||
|
||||
async def main():
|
||||
try:
|
||||
embedding_dimension = await get_embedding_dim()
|
||||
print(f"Detected embedding dimension: {embedding_dimension}")
|
||||
|
||||
# lightRAG class during indexing
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=llm_model_func,
|
||||
# llm_model_name="meta/llama3-70b-instruct", #un comment if
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=embedding_dimension,
|
||||
max_token_size=512, # maximum token size, somehow it's still exceed maximum number of token
|
||||
# so truncate (trunc) parameter on embedding_func will handle it and try to examine the tokenizer used in LightRAG
|
||||
# so you can adjust to be able to fit the NVIDIA model (future work)
|
||||
func=indexing_embedding_func,
|
||||
),
|
||||
)
|
||||
# Initialize RAG instance
|
||||
rag = asyncio.run(initialize_rag())
|
||||
|
||||
# reading file
|
||||
with open("./book.txt", "r", encoding="utf-8") as f:
|
||||
await rag.ainsert(f.read())
|
||||
|
||||
# redefine rag to change embedding into query type
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=llm_model_func,
|
||||
# llm_model_name="meta/llama3-70b-instruct", #un comment if
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=embedding_dimension,
|
||||
max_token_size=512,
|
||||
func=query_embedding_func,
|
||||
),
|
||||
)
|
||||
|
||||
# Perform naive search
|
||||
print("==============Naive===============")
|
||||
print(
|
||||
|
@@ -1,4 +1,8 @@
|
||||
import asyncio
|
||||
import nest_asyncio
|
||||
|
||||
nest_asyncio.apply()
|
||||
|
||||
import inspect
|
||||
import logging
|
||||
import os
|
||||
@@ -6,6 +10,7 @@ import os
|
||||
from lightrag import LightRAG, QueryParam
|
||||
from lightrag.llm.ollama import ollama_embed, ollama_model_complete
|
||||
from lightrag.utils import EmbeddingFunc
|
||||
from lightrag.kg.shared_storage import initialize_pipeline_status
|
||||
|
||||
WORKING_DIR = "./dickens_age"
|
||||
|
||||
@@ -22,51 +27,32 @@ os.environ["AGE_POSTGRES_HOST"] = "localhost"
|
||||
os.environ["AGE_POSTGRES_PORT"] = "5455"
|
||||
os.environ["AGE_GRAPH_NAME"] = "dickens"
|
||||
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=ollama_model_complete,
|
||||
llm_model_name="llama3.1:8b",
|
||||
llm_model_max_async=4,
|
||||
llm_model_max_token_size=32768,
|
||||
llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 32768}},
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=768,
|
||||
max_token_size=8192,
|
||||
func=lambda texts: ollama_embed(
|
||||
texts, embed_model="nomic-embed-text", host="http://localhost:11434"
|
||||
|
||||
async def initialize_rag():
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=ollama_model_complete,
|
||||
llm_model_name="llama3.1:8b",
|
||||
llm_model_max_async=4,
|
||||
llm_model_max_token_size=32768,
|
||||
llm_model_kwargs={
|
||||
"host": "http://localhost:11434",
|
||||
"options": {"num_ctx": 32768},
|
||||
},
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=768,
|
||||
max_token_size=8192,
|
||||
func=lambda texts: ollama_embed(
|
||||
texts, embed_model="nomic-embed-text", host="http://localhost:11434"
|
||||
),
|
||||
),
|
||||
),
|
||||
graph_storage="AGEStorage",
|
||||
)
|
||||
graph_storage="AGEStorage",
|
||||
)
|
||||
|
||||
with open("./book.txt", "r", encoding="utf-8") as f:
|
||||
rag.insert(f.read())
|
||||
await rag.initialize_storages()
|
||||
await initialize_pipeline_status()
|
||||
|
||||
# Perform naive search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
|
||||
)
|
||||
|
||||
# Perform local search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
|
||||
)
|
||||
|
||||
# Perform global search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
|
||||
)
|
||||
|
||||
# Perform hybrid search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
|
||||
)
|
||||
|
||||
# stream response
|
||||
resp = rag.query(
|
||||
"What are the top themes in this story?",
|
||||
param=QueryParam(mode="hybrid", stream=True),
|
||||
)
|
||||
return rag
|
||||
|
||||
|
||||
async def print_stream(stream):
|
||||
@@ -74,7 +60,54 @@ async def print_stream(stream):
|
||||
print(chunk, end="", flush=True)
|
||||
|
||||
|
||||
if inspect.isasyncgen(resp):
|
||||
asyncio.run(print_stream(resp))
|
||||
else:
|
||||
print(resp)
|
||||
def main():
|
||||
# Initialize RAG instance
|
||||
rag = asyncio.run(initialize_rag())
|
||||
|
||||
# Insert example text
|
||||
with open("./book.txt", "r", encoding="utf-8") as f:
|
||||
rag.insert(f.read())
|
||||
|
||||
# Test different query modes
|
||||
print("\nNaive Search:")
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="naive")
|
||||
)
|
||||
)
|
||||
|
||||
print("\nLocal Search:")
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="local")
|
||||
)
|
||||
)
|
||||
|
||||
print("\nGlobal Search:")
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="global")
|
||||
)
|
||||
)
|
||||
|
||||
print("\nHybrid Search:")
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="hybrid")
|
||||
)
|
||||
)
|
||||
|
||||
# stream response
|
||||
resp = rag.query(
|
||||
"What are the top themes in this story?",
|
||||
param=QueryParam(mode="hybrid", stream=True),
|
||||
)
|
||||
|
||||
if inspect.isasyncgen(resp):
|
||||
asyncio.run(print_stream(resp))
|
||||
else:
|
||||
print(resp)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
@@ -1,10 +1,14 @@
|
||||
import asyncio
|
||||
import nest_asyncio
|
||||
|
||||
nest_asyncio.apply()
|
||||
import os
|
||||
import inspect
|
||||
import logging
|
||||
from lightrag import LightRAG, QueryParam
|
||||
from lightrag.llm.ollama import ollama_model_complete, ollama_embed
|
||||
from lightrag.utils import EmbeddingFunc
|
||||
from lightrag.kg.shared_storage import initialize_pipeline_status
|
||||
|
||||
WORKING_DIR = "./dickens"
|
||||
|
||||
@@ -13,50 +17,31 @@ logging.basicConfig(format="%(levelname)s:%(message)s", level=logging.INFO)
|
||||
if not os.path.exists(WORKING_DIR):
|
||||
os.mkdir(WORKING_DIR)
|
||||
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=ollama_model_complete,
|
||||
llm_model_name="gemma2:2b",
|
||||
llm_model_max_async=4,
|
||||
llm_model_max_token_size=32768,
|
||||
llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 32768}},
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=768,
|
||||
max_token_size=8192,
|
||||
func=lambda texts: ollama_embed(
|
||||
texts, embed_model="nomic-embed-text", host="http://localhost:11434"
|
||||
|
||||
async def initialize_rag():
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=ollama_model_complete,
|
||||
llm_model_name="gemma2:2b",
|
||||
llm_model_max_async=4,
|
||||
llm_model_max_token_size=32768,
|
||||
llm_model_kwargs={
|
||||
"host": "http://localhost:11434",
|
||||
"options": {"num_ctx": 32768},
|
||||
},
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=768,
|
||||
max_token_size=8192,
|
||||
func=lambda texts: ollama_embed(
|
||||
texts, embed_model="nomic-embed-text", host="http://localhost:11434"
|
||||
),
|
||||
),
|
||||
),
|
||||
)
|
||||
)
|
||||
|
||||
with open("./book.txt", "r", encoding="utf-8") as f:
|
||||
rag.insert(f.read())
|
||||
await rag.initialize_storages()
|
||||
await initialize_pipeline_status()
|
||||
|
||||
# Perform naive search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
|
||||
)
|
||||
|
||||
# Perform local search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
|
||||
)
|
||||
|
||||
# Perform global search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
|
||||
)
|
||||
|
||||
# Perform hybrid search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
|
||||
)
|
||||
|
||||
# stream response
|
||||
resp = rag.query(
|
||||
"What are the top themes in this story?",
|
||||
param=QueryParam(mode="hybrid", stream=True),
|
||||
)
|
||||
return rag
|
||||
|
||||
|
||||
async def print_stream(stream):
|
||||
@@ -64,7 +49,54 @@ async def print_stream(stream):
|
||||
print(chunk, end="", flush=True)
|
||||
|
||||
|
||||
if inspect.isasyncgen(resp):
|
||||
asyncio.run(print_stream(resp))
|
||||
else:
|
||||
print(resp)
|
||||
def main():
|
||||
# Initialize RAG instance
|
||||
rag = asyncio.run(initialize_rag())
|
||||
|
||||
# Insert example text
|
||||
with open("./book.txt", "r", encoding="utf-8") as f:
|
||||
rag.insert(f.read())
|
||||
|
||||
# Test different query modes
|
||||
print("\nNaive Search:")
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="naive")
|
||||
)
|
||||
)
|
||||
|
||||
print("\nLocal Search:")
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="local")
|
||||
)
|
||||
)
|
||||
|
||||
print("\nGlobal Search:")
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="global")
|
||||
)
|
||||
)
|
||||
|
||||
print("\nHybrid Search:")
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="hybrid")
|
||||
)
|
||||
)
|
||||
|
||||
# stream response
|
||||
resp = rag.query(
|
||||
"What are the top themes in this story?",
|
||||
param=QueryParam(mode="hybrid", stream=True),
|
||||
)
|
||||
|
||||
if inspect.isasyncgen(resp):
|
||||
asyncio.run(print_stream(resp))
|
||||
else:
|
||||
print(resp)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
@@ -12,6 +12,7 @@ import os
|
||||
from lightrag import LightRAG, QueryParam
|
||||
from lightrag.llm.ollama import ollama_embed, ollama_model_complete
|
||||
from lightrag.utils import EmbeddingFunc
|
||||
from lightrag.kg.shared_storage import initialize_pipeline_status
|
||||
|
||||
WORKING_DIR = "./dickens_gremlin"
|
||||
|
||||
@@ -31,51 +32,32 @@ os.environ["GREMLIN_TRAVERSE_SOURCE"] = "g"
|
||||
os.environ["GREMLIN_USER"] = ""
|
||||
os.environ["GREMLIN_PASSWORD"] = ""
|
||||
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=ollama_model_complete,
|
||||
llm_model_name="llama3.1:8b",
|
||||
llm_model_max_async=4,
|
||||
llm_model_max_token_size=32768,
|
||||
llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 32768}},
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=768,
|
||||
max_token_size=8192,
|
||||
func=lambda texts: ollama_embed(
|
||||
texts, embed_model="nomic-embed-text", host="http://localhost:11434"
|
||||
|
||||
async def initialize_rag():
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=ollama_model_complete,
|
||||
llm_model_name="llama3.1:8b",
|
||||
llm_model_max_async=4,
|
||||
llm_model_max_token_size=32768,
|
||||
llm_model_kwargs={
|
||||
"host": "http://localhost:11434",
|
||||
"options": {"num_ctx": 32768},
|
||||
},
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=768,
|
||||
max_token_size=8192,
|
||||
func=lambda texts: ollama_embed(
|
||||
texts, embed_model="nomic-embed-text", host="http://localhost:11434"
|
||||
),
|
||||
),
|
||||
),
|
||||
graph_storage="GremlinStorage",
|
||||
)
|
||||
graph_storage="GremlinStorage",
|
||||
)
|
||||
|
||||
with open("./book.txt", "r", encoding="utf-8") as f:
|
||||
rag.insert(f.read())
|
||||
await rag.initialize_storages()
|
||||
await initialize_pipeline_status()
|
||||
|
||||
# Perform naive search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
|
||||
)
|
||||
|
||||
# Perform local search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
|
||||
)
|
||||
|
||||
# Perform global search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
|
||||
)
|
||||
|
||||
# Perform hybrid search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
|
||||
)
|
||||
|
||||
# stream response
|
||||
resp = rag.query(
|
||||
"What are the top themes in this story?",
|
||||
param=QueryParam(mode="hybrid", stream=True),
|
||||
)
|
||||
return rag
|
||||
|
||||
|
||||
async def print_stream(stream):
|
||||
@@ -83,7 +65,54 @@ async def print_stream(stream):
|
||||
print(chunk, end="", flush=True)
|
||||
|
||||
|
||||
if inspect.isasyncgen(resp):
|
||||
asyncio.run(print_stream(resp))
|
||||
else:
|
||||
print(resp)
|
||||
def main():
|
||||
# Initialize RAG instance
|
||||
rag = asyncio.run(initialize_rag())
|
||||
|
||||
# Insert example text
|
||||
with open("./book.txt", "r", encoding="utf-8") as f:
|
||||
rag.insert(f.read())
|
||||
|
||||
# Test different query modes
|
||||
print("\nNaive Search:")
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="naive")
|
||||
)
|
||||
)
|
||||
|
||||
print("\nLocal Search:")
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="local")
|
||||
)
|
||||
)
|
||||
|
||||
print("\nGlobal Search:")
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="global")
|
||||
)
|
||||
)
|
||||
|
||||
print("\nHybrid Search:")
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="hybrid")
|
||||
)
|
||||
)
|
||||
|
||||
# stream response
|
||||
resp = rag.query(
|
||||
"What are the top themes in this story?",
|
||||
param=QueryParam(mode="hybrid", stream=True),
|
||||
)
|
||||
|
||||
if inspect.isasyncgen(resp):
|
||||
asyncio.run(print_stream(resp))
|
||||
else:
|
||||
print(resp)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
@@ -2,6 +2,11 @@ import os
|
||||
from lightrag import LightRAG, QueryParam
|
||||
from lightrag.llm.ollama import ollama_model_complete, ollama_embed
|
||||
from lightrag.utils import EmbeddingFunc
|
||||
import asyncio
|
||||
import nest_asyncio
|
||||
|
||||
nest_asyncio.apply()
|
||||
from lightrag.kg.shared_storage import initialize_pipeline_status
|
||||
|
||||
# WorkingDir
|
||||
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
|
||||
@@ -28,29 +33,72 @@ os.environ["MILVUS_PASSWORD"] = "root"
|
||||
os.environ["MILVUS_DB_NAME"] = "lightrag"
|
||||
|
||||
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=ollama_model_complete,
|
||||
llm_model_name="qwen2.5:14b",
|
||||
llm_model_max_async=4,
|
||||
llm_model_max_token_size=32768,
|
||||
llm_model_kwargs={"host": "http://127.0.0.1:11434", "options": {"num_ctx": 32768}},
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=1024,
|
||||
max_token_size=8192,
|
||||
func=lambda texts: ollama_embed(
|
||||
texts=texts, embed_model="bge-m3:latest", host="http://127.0.0.1:11434"
|
||||
async def initialize_rag():
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=ollama_model_complete,
|
||||
llm_model_name="qwen2.5:14b",
|
||||
llm_model_max_async=4,
|
||||
llm_model_max_token_size=32768,
|
||||
llm_model_kwargs={
|
||||
"host": "http://127.0.0.1:11434",
|
||||
"options": {"num_ctx": 32768},
|
||||
},
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=1024,
|
||||
max_token_size=8192,
|
||||
func=lambda texts: ollama_embed(
|
||||
texts=texts, embed_model="bge-m3:latest", host="http://127.0.0.1:11434"
|
||||
),
|
||||
),
|
||||
),
|
||||
kv_storage="MongoKVStorage",
|
||||
graph_storage="Neo4JStorage",
|
||||
vector_storage="MilvusVectorDBStorage",
|
||||
)
|
||||
kv_storage="MongoKVStorage",
|
||||
graph_storage="Neo4JStorage",
|
||||
vector_storage="MilvusVectorDBStorage",
|
||||
)
|
||||
|
||||
file = "./book.txt"
|
||||
with open(file, "r") as f:
|
||||
rag.insert(f.read())
|
||||
await rag.initialize_storages()
|
||||
await initialize_pipeline_status()
|
||||
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
|
||||
)
|
||||
return rag
|
||||
|
||||
|
||||
def main():
|
||||
# Initialize RAG instance
|
||||
rag = asyncio.run(initialize_rag())
|
||||
|
||||
# Insert example text
|
||||
with open("./book.txt", "r", encoding="utf-8") as f:
|
||||
rag.insert(f.read())
|
||||
|
||||
# Test different query modes
|
||||
print("\nNaive Search:")
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="naive")
|
||||
)
|
||||
)
|
||||
|
||||
print("\nLocal Search:")
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="local")
|
||||
)
|
||||
)
|
||||
|
||||
print("\nGlobal Search:")
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="global")
|
||||
)
|
||||
)
|
||||
|
||||
print("\nHybrid Search:")
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="hybrid")
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
@@ -4,6 +4,7 @@ from lightrag import LightRAG, QueryParam
|
||||
from lightrag.llm.openai import openai_complete_if_cache, openai_embed
|
||||
from lightrag.utils import EmbeddingFunc
|
||||
import numpy as np
|
||||
from lightrag.kg.shared_storage import initialize_pipeline_status
|
||||
|
||||
WORKING_DIR = "./dickens"
|
||||
|
||||
@@ -53,20 +54,30 @@ async def test_funcs():
|
||||
# asyncio.run(test_funcs())
|
||||
|
||||
|
||||
async def initialize_rag():
|
||||
embedding_dimension = await get_embedding_dim()
|
||||
print(f"Detected embedding dimension: {embedding_dimension}")
|
||||
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=llm_model_func,
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=embedding_dimension,
|
||||
max_token_size=8192,
|
||||
func=embedding_func,
|
||||
),
|
||||
)
|
||||
|
||||
await rag.initialize_storages()
|
||||
await initialize_pipeline_status()
|
||||
|
||||
return rag
|
||||
|
||||
|
||||
async def main():
|
||||
try:
|
||||
embedding_dimension = await get_embedding_dim()
|
||||
print(f"Detected embedding dimension: {embedding_dimension}")
|
||||
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=llm_model_func,
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=embedding_dimension,
|
||||
max_token_size=8192,
|
||||
func=embedding_func,
|
||||
),
|
||||
)
|
||||
# Initialize RAG instance
|
||||
rag = asyncio.run(initialize_rag())
|
||||
|
||||
with open("./book.txt", "r", encoding="utf-8") as f:
|
||||
await rag.ainsert(f.read())
|
||||
|
@@ -4,6 +4,7 @@ from lightrag import LightRAG, QueryParam
|
||||
from lightrag.llm.openai import openai_complete_if_cache, openai_embed
|
||||
from lightrag.utils import EmbeddingFunc
|
||||
import numpy as np
|
||||
from lightrag.kg.shared_storage import initialize_pipeline_status
|
||||
|
||||
WORKING_DIR = "./dickens"
|
||||
|
||||
@@ -53,24 +54,34 @@ async def test_funcs():
|
||||
# asyncio.run(test_funcs())
|
||||
|
||||
|
||||
async def initialize_rag():
|
||||
embedding_dimension = await get_embedding_dim()
|
||||
print(f"Detected embedding dimension: {embedding_dimension}")
|
||||
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
embedding_cache_config={
|
||||
"enabled": True,
|
||||
"similarity_threshold": 0.90,
|
||||
},
|
||||
llm_model_func=llm_model_func,
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=embedding_dimension,
|
||||
max_token_size=8192,
|
||||
func=embedding_func,
|
||||
),
|
||||
)
|
||||
|
||||
await rag.initialize_storages()
|
||||
await initialize_pipeline_status()
|
||||
|
||||
return rag
|
||||
|
||||
|
||||
async def main():
|
||||
try:
|
||||
embedding_dimension = await get_embedding_dim()
|
||||
print(f"Detected embedding dimension: {embedding_dimension}")
|
||||
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
embedding_cache_config={
|
||||
"enabled": True,
|
||||
"similarity_threshold": 0.90,
|
||||
},
|
||||
llm_model_func=llm_model_func,
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=embedding_dimension,
|
||||
max_token_size=8192,
|
||||
func=embedding_func,
|
||||
),
|
||||
)
|
||||
# Initialize RAG instance
|
||||
rag = asyncio.run(initialize_rag())
|
||||
|
||||
with open("./book.txt", "r", encoding="utf-8") as f:
|
||||
await rag.ainsert(f.read())
|
||||
|
@@ -1,9 +1,11 @@
|
||||
import inspect
|
||||
import os
|
||||
import asyncio
|
||||
from lightrag import LightRAG
|
||||
from lightrag.llm import openai_complete, openai_embed
|
||||
from lightrag.utils import EmbeddingFunc, always_get_an_event_loop
|
||||
from lightrag import QueryParam
|
||||
from lightrag.kg.shared_storage import initialize_pipeline_status
|
||||
|
||||
# WorkingDir
|
||||
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
|
||||
@@ -13,32 +15,32 @@ if not os.path.exists(WORKING_DIR):
|
||||
print(f"WorkingDir: {WORKING_DIR}")
|
||||
|
||||
api_key = "empty"
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=openai_complete,
|
||||
llm_model_name="qwen2.5-14b-instruct@4bit",
|
||||
llm_model_max_async=4,
|
||||
llm_model_max_token_size=32768,
|
||||
llm_model_kwargs={"base_url": "http://127.0.0.1:1234/v1", "api_key": api_key},
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=1024,
|
||||
max_token_size=8192,
|
||||
func=lambda texts: openai_embed(
|
||||
texts=texts,
|
||||
model="text-embedding-bge-m3",
|
||||
base_url="http://127.0.0.1:1234/v1",
|
||||
api_key=api_key,
|
||||
|
||||
|
||||
async def initialize_rag():
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=openai_complete,
|
||||
llm_model_name="qwen2.5-14b-instruct@4bit",
|
||||
llm_model_max_async=4,
|
||||
llm_model_max_token_size=32768,
|
||||
llm_model_kwargs={"base_url": "http://127.0.0.1:1234/v1", "api_key": api_key},
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=1024,
|
||||
max_token_size=8192,
|
||||
func=lambda texts: openai_embed(
|
||||
texts=texts,
|
||||
model="text-embedding-bge-m3",
|
||||
base_url="http://127.0.0.1:1234/v1",
|
||||
api_key=api_key,
|
||||
),
|
||||
),
|
||||
),
|
||||
)
|
||||
)
|
||||
|
||||
with open("./book.txt", "r", encoding="utf-8") as f:
|
||||
rag.insert(f.read())
|
||||
await rag.initialize_storages()
|
||||
await initialize_pipeline_status()
|
||||
|
||||
resp = rag.query(
|
||||
"What are the top themes in this story?",
|
||||
param=QueryParam(mode="hybrid", stream=True),
|
||||
)
|
||||
return rag
|
||||
|
||||
|
||||
async def print_stream(stream):
|
||||
@@ -47,8 +49,24 @@ async def print_stream(stream):
|
||||
print(chunk, end="", flush=True)
|
||||
|
||||
|
||||
loop = always_get_an_event_loop()
|
||||
if inspect.isasyncgen(resp):
|
||||
loop.run_until_complete(print_stream(resp))
|
||||
else:
|
||||
print(resp)
|
||||
def main():
|
||||
# Initialize RAG instance
|
||||
rag = asyncio.run(initialize_rag())
|
||||
|
||||
with open("./book.txt", "r", encoding="utf-8") as f:
|
||||
rag.insert(f.read())
|
||||
|
||||
resp = rag.query(
|
||||
"What are the top themes in this story?",
|
||||
param=QueryParam(mode="hybrid", stream=True),
|
||||
)
|
||||
|
||||
loop = always_get_an_event_loop()
|
||||
if inspect.isasyncgen(resp):
|
||||
loop.run_until_complete(print_stream(resp))
|
||||
else:
|
||||
print(resp)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
@@ -1,40 +1,64 @@
|
||||
import os
|
||||
|
||||
import asyncio
|
||||
from lightrag import LightRAG, QueryParam
|
||||
from lightrag.llm.openai import gpt_4o_mini_complete, openai_embed
|
||||
from lightrag.kg.shared_storage import initialize_pipeline_status
|
||||
|
||||
WORKING_DIR = "./dickens"
|
||||
|
||||
if not os.path.exists(WORKING_DIR):
|
||||
os.mkdir(WORKING_DIR)
|
||||
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
embedding_func=openai_embed,
|
||||
llm_model_func=gpt_4o_mini_complete,
|
||||
# llm_model_func=gpt_4o_complete
|
||||
)
|
||||
|
||||
async def initialize_rag():
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
embedding_func=openai_embed,
|
||||
llm_model_func=gpt_4o_mini_complete,
|
||||
# llm_model_func=gpt_4o_complete
|
||||
)
|
||||
|
||||
await rag.initialize_storages()
|
||||
await initialize_pipeline_status()
|
||||
|
||||
return rag
|
||||
|
||||
|
||||
with open("./book.txt", "r", encoding="utf-8") as f:
|
||||
rag.insert(f.read())
|
||||
def main():
|
||||
# Initialize RAG instance
|
||||
rag = asyncio.run(initialize_rag())
|
||||
|
||||
# Perform naive search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
|
||||
)
|
||||
with open("./book.txt", "r", encoding="utf-8") as f:
|
||||
rag.insert(f.read())
|
||||
|
||||
# Perform local search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
|
||||
)
|
||||
# Perform naive search
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="naive")
|
||||
)
|
||||
)
|
||||
|
||||
# Perform global search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
|
||||
)
|
||||
# Perform local search
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="local")
|
||||
)
|
||||
)
|
||||
|
||||
# Perform hybrid search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
|
||||
)
|
||||
# Perform global search
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="global")
|
||||
)
|
||||
)
|
||||
|
||||
# Perform hybrid search
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="hybrid")
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
@@ -4,6 +4,7 @@ from lightrag import LightRAG, QueryParam
|
||||
from lightrag.llm.openai import gpt_4o_mini_complete, openai_embed
|
||||
from lightrag.utils import EmbeddingFunc
|
||||
import numpy as np
|
||||
from lightrag.kg.shared_storage import initialize_pipeline_status
|
||||
|
||||
#########
|
||||
# Uncomment the below two lines if running in a jupyter notebook to handle the async nature of rag.insert()
|
||||
@@ -52,7 +53,7 @@ async def create_embedding_function_instance():
|
||||
async def initialize_rag():
|
||||
embedding_func_instance = await create_embedding_function_instance()
|
||||
|
||||
return LightRAG(
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=gpt_4o_mini_complete,
|
||||
embedding_func=embedding_func_instance,
|
||||
@@ -60,14 +61,47 @@ async def initialize_rag():
|
||||
log_level="DEBUG",
|
||||
)
|
||||
|
||||
await rag.initialize_storages()
|
||||
await initialize_pipeline_status()
|
||||
|
||||
# Run the initialization
|
||||
rag = asyncio.run(initialize_rag())
|
||||
return rag
|
||||
|
||||
with open("book.txt", "r", encoding="utf-8") as f:
|
||||
rag.insert(f.read())
|
||||
|
||||
# Perform naive search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
|
||||
)
|
||||
def main():
|
||||
# Initialize RAG instance
|
||||
rag = asyncio.run(initialize_rag())
|
||||
|
||||
with open("./book.txt", "r", encoding="utf-8") as f:
|
||||
rag.insert(f.read())
|
||||
|
||||
# Perform naive search
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="naive")
|
||||
)
|
||||
)
|
||||
|
||||
# Perform local search
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="local")
|
||||
)
|
||||
)
|
||||
|
||||
# Perform global search
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="global")
|
||||
)
|
||||
)
|
||||
|
||||
# Perform hybrid search
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="hybrid")
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
@@ -1,7 +1,9 @@
|
||||
import os
|
||||
import asyncio
|
||||
from lightrag import LightRAG, QueryParam
|
||||
from lightrag.llm.ollama import ollama_embed, openai_complete_if_cache
|
||||
from lightrag.utils import EmbeddingFunc
|
||||
from lightrag.kg.shared_storage import initialize_pipeline_status
|
||||
|
||||
# WorkingDir
|
||||
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
|
||||
@@ -49,22 +51,62 @@ embedding_func = EmbeddingFunc(
|
||||
),
|
||||
)
|
||||
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=llm_model_func,
|
||||
llm_model_max_token_size=32768,
|
||||
embedding_func=embedding_func,
|
||||
chunk_token_size=512,
|
||||
chunk_overlap_token_size=256,
|
||||
kv_storage="RedisKVStorage",
|
||||
graph_storage="Neo4JStorage",
|
||||
vector_storage="MilvusVectorDBStorage",
|
||||
doc_status_storage="RedisKVStorage",
|
||||
)
|
||||
|
||||
file = "../book.txt"
|
||||
with open(file, "r", encoding="utf-8") as f:
|
||||
rag.insert(f.read())
|
||||
async def initialize_rag():
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=llm_model_func,
|
||||
llm_model_max_token_size=32768,
|
||||
embedding_func=embedding_func,
|
||||
chunk_token_size=512,
|
||||
chunk_overlap_token_size=256,
|
||||
kv_storage="RedisKVStorage",
|
||||
graph_storage="Neo4JStorage",
|
||||
vector_storage="MilvusVectorDBStorage",
|
||||
doc_status_storage="RedisKVStorage",
|
||||
)
|
||||
|
||||
await rag.initialize_storages()
|
||||
await initialize_pipeline_status()
|
||||
|
||||
return rag
|
||||
|
||||
|
||||
print(rag.query("谁会3D建模 ?", param=QueryParam(mode="mix")))
|
||||
def main():
|
||||
# Initialize RAG instance
|
||||
rag = asyncio.run(initialize_rag())
|
||||
|
||||
with open("./book.txt", "r", encoding="utf-8") as f:
|
||||
rag.insert(f.read())
|
||||
|
||||
# Perform naive search
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="naive")
|
||||
)
|
||||
)
|
||||
|
||||
# Perform local search
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="local")
|
||||
)
|
||||
)
|
||||
|
||||
# Perform global search
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="global")
|
||||
)
|
||||
)
|
||||
|
||||
# Perform hybrid search
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="hybrid")
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
@@ -6,6 +6,7 @@ from lightrag import LightRAG, QueryParam
|
||||
from lightrag.llm.openai import openai_complete_if_cache, openai_embed
|
||||
from lightrag.utils import EmbeddingFunc
|
||||
import numpy as np
|
||||
from lightrag.kg.shared_storage import initialize_pipeline_status
|
||||
|
||||
print(os.getcwd())
|
||||
script_directory = Path(__file__).resolve().parent.parent
|
||||
@@ -64,40 +65,49 @@ async def get_embedding_dim():
|
||||
return embedding_dim
|
||||
|
||||
|
||||
async def initialize_rag():
|
||||
# Detect embedding dimension
|
||||
embedding_dimension = await get_embedding_dim()
|
||||
print(f"Detected embedding dimension: {embedding_dimension}")
|
||||
|
||||
# Initialize LightRAG
|
||||
# We use Oracle DB as the KV/vector/graph storage
|
||||
# You can add `addon_params={"example_number": 1, "language": "Simplfied Chinese"}` to control the prompt
|
||||
rag = LightRAG(
|
||||
# log_level="DEBUG",
|
||||
working_dir=WORKING_DIR,
|
||||
entity_extract_max_gleaning=1,
|
||||
enable_llm_cache=True,
|
||||
enable_llm_cache_for_entity_extract=True,
|
||||
embedding_cache_config=None, # {"enabled": True,"similarity_threshold": 0.90},
|
||||
chunk_token_size=CHUNK_TOKEN_SIZE,
|
||||
llm_model_max_token_size=MAX_TOKENS,
|
||||
llm_model_func=llm_model_func,
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=embedding_dimension,
|
||||
max_token_size=500,
|
||||
func=embedding_func,
|
||||
),
|
||||
graph_storage="OracleGraphStorage",
|
||||
kv_storage="OracleKVStorage",
|
||||
vector_storage="OracleVectorDBStorage",
|
||||
addon_params={
|
||||
"example_number": 1,
|
||||
"language": "Simplfied Chinese",
|
||||
"entity_types": ["organization", "person", "geo", "event"],
|
||||
"insert_batch_size": 2,
|
||||
},
|
||||
)
|
||||
await rag.initialize_storages()
|
||||
await initialize_pipeline_status()
|
||||
|
||||
return rag
|
||||
|
||||
|
||||
async def main():
|
||||
try:
|
||||
# Detect embedding dimension
|
||||
embedding_dimension = await get_embedding_dim()
|
||||
print(f"Detected embedding dimension: {embedding_dimension}")
|
||||
|
||||
# Initialize LightRAG
|
||||
# We use Oracle DB as the KV/vector/graph storage
|
||||
# You can add `addon_params={"example_number": 1, "language": "Simplfied Chinese"}` to control the prompt
|
||||
rag = LightRAG(
|
||||
# log_level="DEBUG",
|
||||
working_dir=WORKING_DIR,
|
||||
entity_extract_max_gleaning=1,
|
||||
enable_llm_cache=True,
|
||||
enable_llm_cache_for_entity_extract=True,
|
||||
embedding_cache_config=None, # {"enabled": True,"similarity_threshold": 0.90},
|
||||
chunk_token_size=CHUNK_TOKEN_SIZE,
|
||||
llm_model_max_token_size=MAX_TOKENS,
|
||||
llm_model_func=llm_model_func,
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=embedding_dimension,
|
||||
max_token_size=500,
|
||||
func=embedding_func,
|
||||
),
|
||||
graph_storage="OracleGraphStorage",
|
||||
kv_storage="OracleKVStorage",
|
||||
vector_storage="OracleVectorDBStorage",
|
||||
addon_params={
|
||||
"example_number": 1,
|
||||
"language": "Simplfied Chinese",
|
||||
"entity_types": ["organization", "person", "geo", "event"],
|
||||
"insert_batch_size": 2,
|
||||
},
|
||||
)
|
||||
# Initialize RAG instance
|
||||
rag = asyncio.run(initialize_rag())
|
||||
|
||||
# Extract and Insert into LightRAG storage
|
||||
with open(WORKING_DIR + "/docs.txt", "r", encoding="utf-8") as f:
|
||||
|
@@ -5,6 +5,7 @@ from lightrag.llm.openai import openai_complete_if_cache
|
||||
from lightrag.llm.siliconcloud import siliconcloud_embedding
|
||||
from lightrag.utils import EmbeddingFunc
|
||||
import numpy as np
|
||||
from lightrag.kg.shared_storage import initialize_pipeline_status
|
||||
|
||||
WORKING_DIR = "./dickens"
|
||||
|
||||
@@ -47,34 +48,56 @@ async def test_funcs():
|
||||
asyncio.run(test_funcs())
|
||||
|
||||
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=llm_model_func,
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=768, max_token_size=512, func=embedding_func
|
||||
),
|
||||
)
|
||||
async def initialize_rag():
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=llm_model_func,
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=768, max_token_size=512, func=embedding_func
|
||||
),
|
||||
)
|
||||
|
||||
await rag.initialize_storages()
|
||||
await initialize_pipeline_status()
|
||||
|
||||
return rag
|
||||
|
||||
|
||||
with open("./book.txt") as f:
|
||||
rag.insert(f.read())
|
||||
def main():
|
||||
# Initialize RAG instance
|
||||
rag = asyncio.run(initialize_rag())
|
||||
|
||||
# Perform naive search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
|
||||
)
|
||||
with open("./book.txt", "r", encoding="utf-8") as f:
|
||||
rag.insert(f.read())
|
||||
|
||||
# Perform local search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
|
||||
)
|
||||
# Perform naive search
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="naive")
|
||||
)
|
||||
)
|
||||
|
||||
# Perform global search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
|
||||
)
|
||||
# Perform local search
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="local")
|
||||
)
|
||||
)
|
||||
|
||||
# Perform hybrid search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
|
||||
)
|
||||
# Perform global search
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="global")
|
||||
)
|
||||
)
|
||||
|
||||
# Perform hybrid search
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="hybrid")
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
@@ -6,6 +6,7 @@ import numpy as np
|
||||
from lightrag import LightRAG, QueryParam
|
||||
from lightrag.llm import siliconcloud_embedding, openai_complete_if_cache
|
||||
from lightrag.utils import EmbeddingFunc
|
||||
from lightrag.kg.shared_storage import initialize_pipeline_status
|
||||
|
||||
WORKING_DIR = "./dickens"
|
||||
|
||||
@@ -55,32 +56,41 @@ async def get_embedding_dim():
|
||||
return embedding_dim
|
||||
|
||||
|
||||
async def initialize_rag():
|
||||
# Detect embedding dimension
|
||||
embedding_dimension = await get_embedding_dim()
|
||||
print(f"Detected embedding dimension: {embedding_dimension}")
|
||||
|
||||
# Initialize LightRAG
|
||||
# We use TiDB DB as the KV/vector
|
||||
rag = LightRAG(
|
||||
enable_llm_cache=False,
|
||||
working_dir=WORKING_DIR,
|
||||
chunk_token_size=512,
|
||||
llm_model_func=llm_model_func,
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=embedding_dimension,
|
||||
max_token_size=512,
|
||||
func=embedding_func,
|
||||
),
|
||||
kv_storage="TiDBKVStorage",
|
||||
vector_storage="TiDBVectorDBStorage",
|
||||
graph_storage="TiDBGraphStorage",
|
||||
)
|
||||
|
||||
await rag.initialize_storages()
|
||||
await initialize_pipeline_status()
|
||||
|
||||
return rag
|
||||
|
||||
|
||||
async def main():
|
||||
try:
|
||||
# Detect embedding dimension
|
||||
embedding_dimension = await get_embedding_dim()
|
||||
print(f"Detected embedding dimension: {embedding_dimension}")
|
||||
# Initialize RAG instance
|
||||
rag = asyncio.run(initialize_rag())
|
||||
|
||||
# Initialize LightRAG
|
||||
# We use TiDB DB as the KV/vector
|
||||
rag = LightRAG(
|
||||
enable_llm_cache=False,
|
||||
working_dir=WORKING_DIR,
|
||||
chunk_token_size=512,
|
||||
llm_model_func=llm_model_func,
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=embedding_dimension,
|
||||
max_token_size=512,
|
||||
func=embedding_func,
|
||||
),
|
||||
kv_storage="TiDBKVStorage",
|
||||
vector_storage="TiDBVectorDBStorage",
|
||||
graph_storage="TiDBGraphStorage",
|
||||
)
|
||||
|
||||
# Extract and Insert into LightRAG storage
|
||||
with open("./dickens/demo.txt", "r", encoding="utf-8") as f:
|
||||
await rag.ainsert(f.read())
|
||||
with open("./book.txt", "r", encoding="utf-8") as f:
|
||||
rag.insert(f.read())
|
||||
|
||||
# Perform search in different modes
|
||||
modes = ["naive", "local", "global", "hybrid"]
|
||||
|
@@ -1,10 +1,12 @@
|
||||
import os
|
||||
import logging
|
||||
import asyncio
|
||||
|
||||
|
||||
from lightrag import LightRAG, QueryParam
|
||||
from lightrag.llm.zhipu import zhipu_complete, zhipu_embedding
|
||||
from lightrag.utils import EmbeddingFunc
|
||||
from lightrag.kg.shared_storage import initialize_pipeline_status
|
||||
|
||||
WORKING_DIR = "./dickens"
|
||||
|
||||
@@ -18,38 +20,61 @@ if api_key is None:
|
||||
raise Exception("Please set ZHIPU_API_KEY in your environment")
|
||||
|
||||
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=zhipu_complete,
|
||||
llm_model_name="glm-4-flashx", # Using the most cost/performance balance model, but you can change it here.
|
||||
llm_model_max_async=4,
|
||||
llm_model_max_token_size=32768,
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=2048, # Zhipu embedding-3 dimension
|
||||
max_token_size=8192,
|
||||
func=lambda texts: zhipu_embedding(texts),
|
||||
),
|
||||
)
|
||||
async def initialize_rag():
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=zhipu_complete,
|
||||
llm_model_name="glm-4-flashx", # Using the most cost/performance balance model, but you can change it here.
|
||||
llm_model_max_async=4,
|
||||
llm_model_max_token_size=32768,
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=2048, # Zhipu embedding-3 dimension
|
||||
max_token_size=8192,
|
||||
func=lambda texts: zhipu_embedding(texts),
|
||||
),
|
||||
)
|
||||
|
||||
with open("./book.txt", "r", encoding="utf-8") as f:
|
||||
rag.insert(f.read())
|
||||
await rag.initialize_storages()
|
||||
await initialize_pipeline_status()
|
||||
|
||||
# Perform naive search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
|
||||
)
|
||||
return rag
|
||||
|
||||
# Perform local search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
|
||||
)
|
||||
|
||||
# Perform global search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
|
||||
)
|
||||
def main():
|
||||
# Initialize RAG instance
|
||||
rag = asyncio.run(initialize_rag())
|
||||
|
||||
# Perform hybrid search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
|
||||
)
|
||||
with open("./book.txt", "r", encoding="utf-8") as f:
|
||||
rag.insert(f.read())
|
||||
|
||||
# Perform naive search
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="naive")
|
||||
)
|
||||
)
|
||||
|
||||
# Perform local search
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="local")
|
||||
)
|
||||
)
|
||||
|
||||
# Perform global search
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="global")
|
||||
)
|
||||
)
|
||||
|
||||
# Perform hybrid search
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="hybrid")
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
@@ -8,6 +8,7 @@ from lightrag import LightRAG, QueryParam
|
||||
from lightrag.llm.zhipu import zhipu_complete
|
||||
from lightrag.llm.ollama import ollama_embedding
|
||||
from lightrag.utils import EmbeddingFunc
|
||||
from lightrag.kg.shared_storage import initialize_pipeline_status
|
||||
|
||||
load_dotenv()
|
||||
ROOT_DIR = os.environ.get("ROOT_DIR")
|
||||
@@ -28,7 +29,7 @@ os.environ["POSTGRES_PASSWORD"] = "rag"
|
||||
os.environ["POSTGRES_DATABASE"] = "rag"
|
||||
|
||||
|
||||
async def main():
|
||||
async def initialize_rag():
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=zhipu_complete,
|
||||
@@ -50,9 +51,18 @@ async def main():
|
||||
auto_manage_storages_states=False,
|
||||
)
|
||||
|
||||
await rag.initialize_storages()
|
||||
await initialize_pipeline_status()
|
||||
|
||||
return rag
|
||||
|
||||
|
||||
async def main():
|
||||
# Initialize RAG instance
|
||||
rag = asyncio.run(initialize_rag())
|
||||
|
||||
# add embedding_func for graph database, it's deleted in commit 5661d76860436f7bf5aef2e50d9ee4a59660146c
|
||||
rag.chunk_entity_relation_graph.embedding_func = rag.embedding_func
|
||||
await rag.initialize_storages()
|
||||
|
||||
with open(f"{ROOT_DIR}/book.txt", "r", encoding="utf-8") as f:
|
||||
await rag.ainsert(f.read())
|
||||
|
@@ -6,6 +6,7 @@ import numpy as np
|
||||
from dotenv import load_dotenv
|
||||
import logging
|
||||
from openai import AzureOpenAI
|
||||
from lightrag.kg.shared_storage import initialize_pipeline_status
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
@@ -80,24 +81,33 @@ asyncio.run(test_funcs())
|
||||
|
||||
embedding_dimension = 3072
|
||||
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=llm_model_func,
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=embedding_dimension,
|
||||
max_token_size=8192,
|
||||
func=embedding_func,
|
||||
),
|
||||
)
|
||||
|
||||
book1 = open("./book_1.txt", encoding="utf-8")
|
||||
book2 = open("./book_2.txt", encoding="utf-8")
|
||||
async def initialize_rag():
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=llm_model_func,
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=embedding_dimension,
|
||||
max_token_size=8192,
|
||||
func=embedding_func,
|
||||
),
|
||||
)
|
||||
|
||||
rag.insert([book1.read(), book2.read()])
|
||||
await rag.initialize_storages()
|
||||
await initialize_pipeline_status()
|
||||
|
||||
return rag
|
||||
|
||||
|
||||
# Example function demonstrating the new query_with_separate_keyword_extraction usage
|
||||
async def run_example():
|
||||
# Initialize RAG instance
|
||||
rag = asyncio.run(initialize_rag())
|
||||
|
||||
book1 = open("./book_1.txt", encoding="utf-8")
|
||||
book2 = open("./book_2.txt", encoding="utf-8")
|
||||
|
||||
rag.insert([book1.read(), book2.read()])
|
||||
query = "What are the top themes in this story?"
|
||||
prompt = "Please simplify the response for a young audience."
|
||||
|
||||
|
@@ -1,6 +1,8 @@
|
||||
import os
|
||||
import asyncio
|
||||
from lightrag import LightRAG, QueryParam
|
||||
from lightrag.llm.openai import gpt_4o_mini_complete
|
||||
from lightrag.kg.shared_storage import initialize_pipeline_status
|
||||
#########
|
||||
# Uncomment the below two lines if running in a jupyter notebook to handle the async nature of rag.insert()
|
||||
# import nest_asyncio
|
||||
@@ -12,31 +14,55 @@ WORKING_DIR = "./dickens"
|
||||
if not os.path.exists(WORKING_DIR):
|
||||
os.mkdir(WORKING_DIR)
|
||||
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=gpt_4o_mini_complete, # Use gpt_4o_mini_complete LLM model
|
||||
# llm_model_func=gpt_4o_complete # Optionally, use a stronger model
|
||||
)
|
||||
|
||||
with open("./dickens/book.txt", "r", encoding="utf-8") as f:
|
||||
rag.insert(f.read())
|
||||
async def initialize_rag():
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=gpt_4o_mini_complete, # Use gpt_4o_mini_complete LLM model
|
||||
# llm_model_func=gpt_4o_complete # Optionally, use a stronger model
|
||||
)
|
||||
|
||||
# Perform naive search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
|
||||
)
|
||||
await rag.initialize_storages()
|
||||
await initialize_pipeline_status()
|
||||
|
||||
# Perform local search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
|
||||
)
|
||||
return rag
|
||||
|
||||
# Perform global search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
|
||||
)
|
||||
|
||||
# Perform hybrid search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
|
||||
)
|
||||
def main():
|
||||
# Initialize RAG instance
|
||||
rag = asyncio.run(initialize_rag())
|
||||
|
||||
with open("./book.txt", "r", encoding="utf-8") as f:
|
||||
rag.insert(f.read())
|
||||
|
||||
# Perform naive search
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="naive")
|
||||
)
|
||||
)
|
||||
|
||||
# Perform local search
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="local")
|
||||
)
|
||||
)
|
||||
|
||||
# Perform global search
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="global")
|
||||
)
|
||||
)
|
||||
|
||||
# Perform hybrid search
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="hybrid")
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
@@ -4,6 +4,7 @@ from lightrag import LightRAG, QueryParam
|
||||
from lightrag.llm.openai import gpt_4o_mini_complete, openai_embed
|
||||
from lightrag.utils import EmbeddingFunc
|
||||
import numpy as np
|
||||
from lightrag.kg.shared_storage import initialize_pipeline_status
|
||||
|
||||
#########
|
||||
# Uncomment the below two lines if running in a jupyter notebook to handle the async nature of rag.insert()
|
||||
@@ -67,7 +68,7 @@ async def create_embedding_function_instance():
|
||||
async def initialize_rag():
|
||||
embedding_func_instance = await create_embedding_function_instance()
|
||||
if CHROMADB_USE_LOCAL_PERSISTENT:
|
||||
return LightRAG(
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=gpt_4o_mini_complete,
|
||||
embedding_func=embedding_func_instance,
|
||||
@@ -87,7 +88,7 @@ async def initialize_rag():
|
||||
},
|
||||
)
|
||||
else:
|
||||
return LightRAG(
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=gpt_4o_mini_complete,
|
||||
embedding_func=embedding_func_instance,
|
||||
@@ -111,29 +112,47 @@ async def initialize_rag():
|
||||
},
|
||||
)
|
||||
|
||||
await rag.initialize_storages()
|
||||
await initialize_pipeline_status()
|
||||
|
||||
# Run the initialization
|
||||
rag = asyncio.run(initialize_rag())
|
||||
return rag
|
||||
|
||||
# with open("./dickens/book.txt", "r", encoding="utf-8") as f:
|
||||
# rag.insert(f.read())
|
||||
|
||||
# Perform naive search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
|
||||
)
|
||||
def main():
|
||||
# Initialize RAG instance
|
||||
rag = asyncio.run(initialize_rag())
|
||||
|
||||
# Perform local search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
|
||||
)
|
||||
with open("./book.txt", "r", encoding="utf-8") as f:
|
||||
rag.insert(f.read())
|
||||
|
||||
# Perform global search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
|
||||
)
|
||||
# Perform naive search
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="naive")
|
||||
)
|
||||
)
|
||||
|
||||
# Perform hybrid search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
|
||||
)
|
||||
# Perform local search
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="local")
|
||||
)
|
||||
)
|
||||
|
||||
# Perform global search
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="global")
|
||||
)
|
||||
)
|
||||
|
||||
# Perform hybrid search
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="hybrid")
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
@@ -1,5 +1,6 @@
|
||||
import os
|
||||
import logging
|
||||
import asyncio
|
||||
import numpy as np
|
||||
|
||||
from dotenv import load_dotenv
|
||||
@@ -8,7 +9,9 @@ from sentence_transformers import SentenceTransformer
|
||||
from openai import AzureOpenAI
|
||||
from lightrag import LightRAG, QueryParam
|
||||
from lightrag.utils import EmbeddingFunc
|
||||
from lightrag.kg.shared_storage import initialize_pipeline_status
|
||||
|
||||
WORKING_DIR = "./dickens"
|
||||
# Configure Logging
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
@@ -56,10 +59,7 @@ async def embedding_func(texts: list[str]) -> np.ndarray:
|
||||
return embeddings
|
||||
|
||||
|
||||
def main():
|
||||
WORKING_DIR = "./dickens"
|
||||
|
||||
# Initialize LightRAG with the LLM model function and embedding function
|
||||
async def initialize_rag():
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=llm_model_func,
|
||||
@@ -74,6 +74,15 @@ def main():
|
||||
},
|
||||
)
|
||||
|
||||
await rag.initialize_storages()
|
||||
await initialize_pipeline_status()
|
||||
|
||||
return rag
|
||||
|
||||
|
||||
def main():
|
||||
# Initialize RAG instance
|
||||
rag = asyncio.run(initialize_rag())
|
||||
# Insert the custom chunks into LightRAG
|
||||
book1 = open("./book_1.txt", encoding="utf-8")
|
||||
book2 = open("./book_2.txt", encoding="utf-8")
|
||||
|
@@ -1,7 +1,8 @@
|
||||
import os
|
||||
import asyncio
|
||||
from lightrag import LightRAG, QueryParam
|
||||
from lightrag.llm.openai import gpt_4o_mini_complete
|
||||
|
||||
from lightrag.kg.shared_storage import initialize_pipeline_status
|
||||
|
||||
#########
|
||||
# Uncomment the below two lines if running in a jupyter notebook to handle the async nature of rag.insert()
|
||||
@@ -14,33 +15,57 @@ WORKING_DIR = "./local_neo4jWorkDir"
|
||||
if not os.path.exists(WORKING_DIR):
|
||||
os.mkdir(WORKING_DIR)
|
||||
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=gpt_4o_mini_complete, # Use gpt_4o_mini_complete LLM model
|
||||
graph_storage="Neo4JStorage",
|
||||
log_level="INFO",
|
||||
# llm_model_func=gpt_4o_complete # Optionally, use a stronger model
|
||||
)
|
||||
|
||||
with open("./book.txt") as f:
|
||||
rag.insert(f.read())
|
||||
async def initialize_rag():
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=gpt_4o_mini_complete, # Use gpt_4o_mini_complete LLM model
|
||||
graph_storage="Neo4JStorage",
|
||||
log_level="INFO",
|
||||
# llm_model_func=gpt_4o_complete # Optionally, use a stronger model
|
||||
)
|
||||
|
||||
# Perform naive search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
|
||||
)
|
||||
await rag.initialize_storages()
|
||||
await initialize_pipeline_status()
|
||||
|
||||
# Perform local search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
|
||||
)
|
||||
return rag
|
||||
|
||||
# Perform global search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
|
||||
)
|
||||
|
||||
# Perform hybrid search
|
||||
print(
|
||||
rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
|
||||
)
|
||||
def main():
|
||||
# Initialize RAG instance
|
||||
rag = asyncio.run(initialize_rag())
|
||||
|
||||
with open("./book.txt", "r", encoding="utf-8") as f:
|
||||
rag.insert(f.read())
|
||||
|
||||
# Perform naive search
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="naive")
|
||||
)
|
||||
)
|
||||
|
||||
# Perform local search
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="local")
|
||||
)
|
||||
)
|
||||
|
||||
# Perform global search
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="global")
|
||||
)
|
||||
)
|
||||
|
||||
# Perform hybrid search
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="hybrid")
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
File diff suppressed because it is too large
@@ -1,8 +1,10 @@
|
||||
import os
|
||||
import time
|
||||
import asyncio
|
||||
from lightrag import LightRAG, QueryParam
|
||||
from lightrag.llm.ollama import ollama_model_complete, ollama_embed
|
||||
from lightrag.utils import EmbeddingFunc
|
||||
from lightrag.kg.shared_storage import initialize_pipeline_status
|
||||
|
||||
# Working directory and the directory path for text files
|
||||
WORKING_DIR = "./dickens"
|
||||
@@ -12,17 +14,24 @@ TEXT_FILES_DIR = "/llm/mt"
|
||||
if not os.path.exists(WORKING_DIR):
|
||||
os.mkdir(WORKING_DIR)
|
||||
|
||||
# Initialize LightRAG
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=ollama_model_complete,
|
||||
llm_model_name="qwen2.5:3b-instruct-max-context",
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=768,
|
||||
max_token_size=8192,
|
||||
func=lambda texts: ollama_embed(texts, embed_model="nomic-embed-text"),
|
||||
),
|
||||
)
|
||||
|
||||
async def initialize_rag():
|
||||
# Initialize LightRAG
|
||||
rag = LightRAG(
|
||||
working_dir=WORKING_DIR,
|
||||
llm_model_func=ollama_model_complete,
|
||||
llm_model_name="qwen2.5:3b-instruct-max-context",
|
||||
embedding_func=EmbeddingFunc(
|
||||
embedding_dim=768,
|
||||
max_token_size=8192,
|
||||
func=lambda texts: ollama_embed(texts, embed_model="nomic-embed-text"),
|
||||
),
|
||||
)
|
||||
await rag.initialize_storages()
|
||||
await initialize_pipeline_status()
|
||||
|
||||
return rag
|
||||
|
||||
|
||||
# Read all .txt files from the TEXT_FILES_DIR directory
|
||||
texts = []
|
||||
@@ -47,58 +56,66 @@ def insert_texts_with_retry(rag, texts, retries=3, delay=5):
|
||||
raise RuntimeError("Failed to insert texts after multiple retries.")
|
||||
|
||||
|
||||
insert_texts_with_retry(rag, texts)
|
||||
def main():
|
||||
# Initialize RAG instance
|
||||
rag = asyncio.run(initialize_rag())
|
||||
|
||||
# Perform different types of queries and handle potential errors
|
||||
try:
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="naive")
|
||||
insert_texts_with_retry(rag, texts)
|
||||
|
||||
# Perform different types of queries and handle potential errors
|
||||
try:
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="naive")
|
||||
)
|
||||
)
|
||||
)
|
||||
except Exception as e:
|
||||
print(f"Error performing naive search: {e}")
|
||||
except Exception as e:
|
||||
print(f"Error performing naive search: {e}")
|
||||
|
||||
try:
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="local")
|
||||
try:
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="local")
|
||||
)
|
||||
)
|
||||
)
|
||||
except Exception as e:
|
||||
print(f"Error performing local search: {e}")
|
||||
except Exception as e:
|
||||
print(f"Error performing local search: {e}")
|
||||
|
||||
try:
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="global")
|
||||
try:
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?",
|
||||
param=QueryParam(mode="global"),
|
||||
)
|
||||
)
|
||||
)
|
||||
except Exception as e:
|
||||
print(f"Error performing global search: {e}")
|
||||
except Exception as e:
|
||||
print(f"Error performing global search: {e}")
|
||||
|
||||
try:
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?", param=QueryParam(mode="hybrid")
|
||||
try:
|
||||
print(
|
||||
rag.query(
|
||||
"What are the top themes in this story?",
|
||||
param=QueryParam(mode="hybrid"),
|
||||
)
|
||||
)
|
||||
)
|
||||
except Exception as e:
|
||||
print(f"Error performing hybrid search: {e}")
|
||||
except Exception as e:
|
||||
print(f"Error performing hybrid search: {e}")
|
||||
|
||||
# Function to clear VRAM resources
|
||||
def clear_vram():
|
||||
os.system("sudo nvidia-smi --gpu-reset")
|
||||
|
||||
# Regularly clear VRAM to prevent overflow
|
||||
clear_vram_interval = 3600 # Clear once every hour
|
||||
start_time = time.time()
|
||||
|
||||
while True:
|
||||
current_time = time.time()
|
||||
if current_time - start_time > clear_vram_interval:
|
||||
clear_vram()
|
||||
start_time = current_time
|
||||
time.sleep(60) # Check the time every minute
|
||||
|
||||
|
||||
# Function to clear VRAM resources
|
||||
def clear_vram():
|
||||
os.system("sudo nvidia-smi --gpu-reset")
|
||||
|
||||
|
||||
# Regularly clear VRAM to prevent overflow
|
||||
clear_vram_interval = 3600 # Clear once every hour
|
||||
start_time = time.time()
|
||||
|
||||
while True:
|
||||
current_time = time.time()
|
||||
if current_time - start_time > clear_vram_interval:
|
||||
clear_vram()
|
||||
start_time = current_time
|
||||
time.sleep(60) # Check the time every minute
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
@@ -392,6 +392,7 @@ class LightRAG:
            namespace=make_namespace(
                self.namespace_prefix, NameSpace.KV_STORE_LLM_RESPONSE_CACHE
            ),
            global_config=asdict(self),
            embedding_func=self.embedding_func,
        )

@@ -949,17 +950,21 @@ class LightRAG:
            pipeline_status["latest_message"] = log_message
            pipeline_status["history_messages"].append(log_message)

    def insert_custom_kg(self, custom_kg: dict[str, Any]) -> None:
    def insert_custom_kg(
        self, custom_kg: dict[str, Any], full_doc_id: str = None
    ) -> None:
        loop = always_get_an_event_loop()
        loop.run_until_complete(self.ainsert_custom_kg(custom_kg))
        loop.run_until_complete(self.ainsert_custom_kg(custom_kg, full_doc_id))

    async def ainsert_custom_kg(self, custom_kg: dict[str, Any]) -> None:
    async def ainsert_custom_kg(
        self, custom_kg: dict[str, Any], full_doc_id: str = None
    ) -> None:
        update_storage = False
        try:
            # Insert chunks into vector storage
            all_chunks_data: dict[str, dict[str, str]] = {}
            chunk_to_source_map: dict[str, str] = {}
            for chunk_data in custom_kg.get("chunks", {}):
            for chunk_data in custom_kg.get("chunks", []):
                chunk_content = self.clean_text(chunk_data["content"])
                source_id = chunk_data["source_id"]
                tokens = len(
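A minimal usage sketch for the new optional `full_doc_id` parameter, assuming an already-initialized `rag` instance; the sample ids and text are illustrative, and the dictionary keys are the ones this method reads (`chunks`, `entities`, `relationships`, with the fields shown in the hunks below):

```python
custom_kg = {
    "chunks": [
        {"content": "Alice founded Acme in 1999.", "source_id": "chunk-1"},
    ],
    "entities": [
        {
            "entity_name": "Alice",
            "entity_type": "person",
            "description": "Founder of Acme.",
            "source_id": "chunk-1",
        },
    ],
    "relationships": [
        {
            "src_id": "Alice",
            "tgt_id": "Acme",
            "description": "Alice founded Acme.",
            "keywords": "founded",
            "weight": 1.0,
            "source_id": "chunk-1",
        },
    ],
}

# New optional argument: group every chunk under one parent document id.
rag.insert_custom_kg(custom_kg, full_doc_id="doc-acme-history")

# Without it, each chunk's own source_id is stored as its full_doc_id (previous behaviour).
rag.insert_custom_kg(custom_kg)
```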
@@ -979,7 +984,9 @@
                    "source_id": source_id,
                    "tokens": tokens,
                    "chunk_order_index": chunk_order_index,
                    "full_doc_id": source_id,
                    "full_doc_id": full_doc_id
                    if full_doc_id is not None
                    else source_id,
                    "status": DocStatus.PROCESSED,
                }
                all_chunks_data[chunk_id] = chunk_entry
@@ -987,9 +994,10 @@
                update_storage = True

            if all_chunks_data:
                await self.chunks_vdb.upsert(all_chunks_data)
            if all_chunks_data:
                await self.text_chunks.upsert(all_chunks_data)
                await asyncio.gather(
                    self.chunks_vdb.upsert(all_chunks_data),
                    self.text_chunks.upsert(all_chunks_data),
                )

            # Insert entities into knowledge graph
            all_entities_data: list[dict[str, str]] = []
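The gather-based version above runs both upserts concurrently rather than back to back. An isolated sketch of the same pattern, with the storage arguments standing in for `self.chunks_vdb` and `self.text_chunks`:

```python
import asyncio


async def upsert_chunks(chunks_vdb, text_chunks, all_chunks_data):
    # Both coroutines are scheduled together; gather waits for both awaitables
    # and raises the first exception encountered.
    await asyncio.gather(
        chunks_vdb.upsert(all_chunks_data),
        text_chunks.upsert(all_chunks_data),
    )
```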
@@ -997,7 +1005,6 @@
                entity_name = entity_data["entity_name"]
                entity_type = entity_data.get("entity_type", "UNKNOWN")
                description = entity_data.get("description", "No description provided")
                # source_id = entity_data["source_id"]
                source_chunk_id = entity_data.get("source_id", "UNKNOWN")
                source_id = chunk_to_source_map.get(source_chunk_id, "UNKNOWN")

@@ -1029,7 +1036,6 @@
                description = relationship_data["description"]
                keywords = relationship_data["keywords"]
                weight = relationship_data.get("weight", 1.0)
                # source_id = relationship_data["source_id"]
                source_chunk_id = relationship_data.get("source_id", "UNKNOWN")
                source_id = chunk_to_source_map.get(source_chunk_id, "UNKNOWN")

@@ -1069,34 +1075,43 @@
                    "tgt_id": tgt_id,
                    "description": description,
                    "keywords": keywords,
                    "source_id": source_id,
                    "weight": weight,
                }
                all_relationships_data.append(edge_data)
                update_storage = True

            # Insert entities into vector storage if needed
            # Insert entities into vector storage with consistent format
            data_for_vdb = {
                compute_mdhash_id(dp["entity_name"], prefix="ent-"): {
                    "content": dp["entity_name"] + dp["description"],
                    "content": dp["entity_name"] + "\n" + dp["description"],
                    "entity_name": dp["entity_name"],
                    "source_id": dp["source_id"],
                    "description": dp["description"],
                    "entity_type": dp["entity_type"],
                }
                for dp in all_entities_data
            }
            await self.entities_vdb.upsert(data_for_vdb)

            # Insert relationships into vector storage if needed
            # Insert relationships into vector storage with consistent format
            data_for_vdb = {
                compute_mdhash_id(dp["src_id"] + dp["tgt_id"], prefix="rel-"): {
                    "src_id": dp["src_id"],
                    "tgt_id": dp["tgt_id"],
                    "content": dp["keywords"]
                    + dp["src_id"]
                    + dp["tgt_id"]
                    + dp["description"],
                    "source_id": dp["source_id"],
                    "content": f"{dp['keywords']}\t{dp['src_id']}\n{dp['tgt_id']}\n{dp['description']}",
                    "keywords": dp["keywords"],
                    "description": dp["description"],
                    "weight": dp["weight"],
                }
                for dp in all_relationships_data
            }
            await self.relationships_vdb.upsert(data_for_vdb)

        except Exception as e:
            logger.error(f"Error in ainsert_custom_kg: {e}")
            raise
        finally:
            if update_storage:
                await self._insert_done()
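This hunk standardises the strings that get embedded: entity content becomes name and description joined by a newline, and relationship content becomes keywords, source, target and description joined by tab/newline separators. A small sketch with illustrative values, using the exact separators from the diff:

```python
entity = {"entity_name": "Alice", "description": "Founder of Acme."}
entity_content = entity["entity_name"] + "\n" + entity["description"]

rel = {
    "keywords": "founded",
    "src_id": "Alice",
    "tgt_id": "Acme",
    "description": "Alice founded Acme.",
}
rel_content = f"{rel['keywords']}\t{rel['src_id']}\n{rel['tgt_id']}\n{rel['description']}"
```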
@@ -1412,17 +1427,19 @@
            # 3. Before deleting, check the related entities and relationships for these chunks
            for chunk_id in chunk_ids:
                # Check entities
                entities_storage = await self.entities_vdb.client_storage
                entities = [
                    dp
                    for dp in self.entities_vdb.client_storage["data"]
                    for dp in entities_storage["data"]
                    if chunk_id in dp.get("source_id")
                ]
                logger.debug(f"Chunk {chunk_id} has {len(entities)} related entities")

                # Check relationships
                relationships_storage = await self.relationships_vdb.client_storage
                relations = [
                    dp
                    for dp in self.relationships_vdb.client_storage["data"]
                    for dp in relationships_storage["data"]
                    if chunk_id in dp.get("source_id")
                ]
                logger.debug(f"Chunk {chunk_id} has {len(relations)} related relations")
@@ -1486,7 +1503,9 @@
            for entity in entities_to_delete:
                await self.entities_vdb.delete_entity(entity)
                logger.debug(f"Deleted entity {entity} from vector DB")
            self.chunk_entity_relation_graph.remove_nodes(list(entities_to_delete))
            await self.chunk_entity_relation_graph.remove_nodes(
                list(entities_to_delete)
            )
            logger.debug(f"Deleted {len(entities_to_delete)} entities from graph")

            # Update entities
@@ -1505,7 +1524,7 @@
                rel_id_1 = compute_mdhash_id(tgt + src, prefix="rel-")
                await self.relationships_vdb.delete([rel_id_0, rel_id_1])
                logger.debug(f"Deleted relationship {src}-{tgt} from vector DB")
            self.chunk_entity_relation_graph.remove_edges(
            await self.chunk_entity_relation_graph.remove_edges(
                list(relationships_to_delete)
            )
            logger.debug(
@@ -1536,9 +1555,10 @@

        async def process_data(data_type, vdb, chunk_id):
            # Check data (entities or relationships)
            storage = await vdb.client_storage
            data_with_chunk = [
                dp
                for dp in vdb.client_storage["data"]
                for dp in storage["data"]
                if chunk_id in (dp.get("source_id") or "").split(GRAPH_FIELD_SEP)
            ]

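These hunks switch the deletion path to the async storage API: `client_storage` is awaited once and the snapshot reused, and `remove_nodes`/`remove_edges` are awaited. A minimal sketch of the calling convention, with free functions standing in for the methods in the diff and `entities_vdb`/`graph_storage` for the corresponding storage objects:

```python
async def related_entities(entities_vdb, chunk_id):
    # client_storage is now a coroutine: await it once and reuse the snapshot
    # instead of reading it as a plain attribute inside the comprehension.
    storage = await entities_vdb.client_storage
    return [dp for dp in storage["data"] if chunk_id in dp.get("source_id")]


async def drop_entities(graph_storage, entities_to_delete):
    # remove_nodes (and remove_edges) are coroutines after this change,
    # so the caller must await them.
    await graph_storage.remove_nodes(list(entities_to_delete))
```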
@@ -1744,3 +1764,461 @@ class LightRAG:
|
||||
def clear_cache(self, modes: list[str] | None = None) -> None:
|
||||
"""Synchronous version of aclear_cache."""
|
||||
return always_get_an_event_loop().run_until_complete(self.aclear_cache(modes))
|
||||
|
||||
    async def aedit_entity(
        self, entity_name: str, updated_data: dict[str, str], allow_rename: bool = True
    ) -> dict[str, Any]:
        """Asynchronously edit entity information.

        Updates entity information in the knowledge graph and re-embeds the entity in the vector database.

        Args:
            entity_name: Name of the entity to edit
            updated_data: Dictionary containing updated attributes, e.g. {"description": "new description", "entity_type": "new type"}
            allow_rename: Whether to allow entity renaming, defaults to True

        Returns:
            Dictionary containing updated entity information
        """
        try:
            # 1. Get current entity information
            node_data = await self.chunk_entity_relation_graph.get_node(entity_name)
            if not node_data:
                raise ValueError(f"Entity '{entity_name}' does not exist")

            # Check if entity is being renamed
            new_entity_name = updated_data.get("entity_name", entity_name)
            is_renaming = new_entity_name != entity_name

            # If renaming, check if new name already exists
            if is_renaming:
                if not allow_rename:
                    raise ValueError(
                        "Entity renaming is not allowed. Set allow_rename=True to enable this feature"
                    )

                existing_node = await self.chunk_entity_relation_graph.get_node(
                    new_entity_name
                )
                if existing_node:
                    raise ValueError(
                        f"Entity name '{new_entity_name}' already exists, cannot rename"
                    )

            # 2. Update entity information in the graph
            new_node_data = {**node_data, **updated_data}
            if "entity_name" in new_node_data:
                del new_node_data[
                    "entity_name"
                ]  # Node data should not contain entity_name field

            # If renaming entity
            if is_renaming:
                logger.info(f"Renaming entity '{entity_name}' to '{new_entity_name}'")

                # Create new entity
                await self.chunk_entity_relation_graph.upsert_node(
                    new_entity_name, new_node_data
                )

                # Get all edges related to the original entity
                edges = await self.chunk_entity_relation_graph.get_node_edges(
                    entity_name
                )
                if edges:
                    # Recreate edges for the new entity
                    for source, target in edges:
                        edge_data = await self.chunk_entity_relation_graph.get_edge(
                            source, target
                        )
                        if edge_data:
                            if source == entity_name:
                                await self.chunk_entity_relation_graph.upsert_edge(
                                    new_entity_name, target, edge_data
                                )
                            else:  # target == entity_name
                                await self.chunk_entity_relation_graph.upsert_edge(
                                    source, new_entity_name, edge_data
                                )

                # Delete old entity
                await self.chunk_entity_relation_graph.delete_node(entity_name)

                # Delete old entity record from vector database
                old_entity_id = compute_mdhash_id(entity_name, prefix="ent-")
                await self.entities_vdb.delete([old_entity_id])

                # Update working entity name to new name
                entity_name = new_entity_name
            else:
                # If not renaming, directly update node data
                await self.chunk_entity_relation_graph.upsert_node(
                    entity_name, new_node_data
                )

            # 3. Recalculate entity's vector representation and update vector database
            description = new_node_data.get("description", "")
            source_id = new_node_data.get("source_id", "")
            entity_type = new_node_data.get("entity_type", "")
            content = entity_name + "\n" + description

            # Calculate entity ID
            entity_id = compute_mdhash_id(entity_name, prefix="ent-")

            # Prepare data for vector database update
            entity_data = {
                entity_id: {
                    "content": content,
                    "entity_name": entity_name,
                    "source_id": source_id,
                    "description": description,
                    "entity_type": entity_type,
                }
            }

            # Update vector database
            await self.entities_vdb.upsert(entity_data)

            # 4. Save changes
            await self._edit_entity_done()

            logger.info(f"Entity '{entity_name}' successfully updated")
            return await self.get_entity_info(entity_name, include_vector_data=True)
        except Exception as e:
            logger.error(f"Error while editing entity '{entity_name}': {e}")
            raise

    def edit_entity(
        self, entity_name: str, updated_data: dict[str, str], allow_rename: bool = True
    ) -> dict[str, Any]:
        """Synchronously edit entity information.

        Updates entity information in the knowledge graph and re-embeds the entity in the vector database.

        Args:
            entity_name: Name of the entity to edit
            updated_data: Dictionary containing updated attributes, e.g. {"description": "new description", "entity_type": "new type"}
            allow_rename: Whether to allow entity renaming, defaults to True

        Returns:
            Dictionary containing updated entity information
        """
        loop = always_get_an_event_loop()
        return loop.run_until_complete(
            self.aedit_entity(entity_name, updated_data, allow_rename)
        )

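A minimal usage sketch for the new entity-editing API; `rag` is assumed to be an already-initialized `LightRAG` instance, and the entity name and attribute values below are illustrative only:

```python
# Hedged sketch: assumes `rag` is an already-initialized LightRAG instance
# and that an entity named "Scrooge" already exists in the knowledge graph.
updated = rag.edit_entity(
    "Scrooge",
    {"description": "A miserly London money-lender", "entity_type": "PERSON"},
)
print(updated)

# Renaming is driven by the reserved "entity_name" key together with allow_rename=True.
rag.edit_entity("Scrooge", {"entity_name": "Ebenezer Scrooge"}, allow_rename=True)
```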
    async def _edit_entity_done(self) -> None:
        """Callback after entity editing is complete, ensures updates are persisted"""
        await asyncio.gather(
            *[
                cast(StorageNameSpace, storage_inst).index_done_callback()
                for storage_inst in [  # type: ignore
                    self.entities_vdb,
                    self.chunk_entity_relation_graph,
                ]
            ]
        )

    async def aedit_relation(
        self, source_entity: str, target_entity: str, updated_data: dict[str, Any]
    ) -> dict[str, Any]:
        """Asynchronously edit relation information.

        Updates relation (edge) information in the knowledge graph and re-embeds the relation in the vector database.

        Args:
            source_entity: Name of the source entity
            target_entity: Name of the target entity
            updated_data: Dictionary containing updated attributes, e.g. {"description": "new description", "keywords": "new keywords"}

        Returns:
            Dictionary containing updated relation information
        """
        try:
            # 1. Get current relation information
            edge_data = await self.chunk_entity_relation_graph.get_edge(
                source_entity, target_entity
            )
            if not edge_data:
                raise ValueError(
                    f"Relation from '{source_entity}' to '{target_entity}' does not exist"
                )

            # 2. Update relation information in the graph
            new_edge_data = {**edge_data, **updated_data}
            await self.chunk_entity_relation_graph.upsert_edge(
                source_entity, target_entity, new_edge_data
            )

            # 3. Recalculate relation's vector representation and update vector database
            description = new_edge_data.get("description", "")
            keywords = new_edge_data.get("keywords", "")
            source_id = new_edge_data.get("source_id", "")
            weight = float(new_edge_data.get("weight", 1.0))

            # Create content for embedding
            content = f"{keywords}\t{source_entity}\n{target_entity}\n{description}"

            # Calculate relation ID
            relation_id = compute_mdhash_id(
                source_entity + target_entity, prefix="rel-"
            )

            # Prepare data for vector database update
            relation_data = {
                relation_id: {
                    "content": content,
                    "src_id": source_entity,
                    "tgt_id": target_entity,
                    "source_id": source_id,
                    "description": description,
                    "keywords": keywords,
                    "weight": weight,
                }
            }

            # Update vector database
            await self.relationships_vdb.upsert(relation_data)

            # 4. Save changes
            await self._edit_relation_done()

            logger.info(
                f"Relation from '{source_entity}' to '{target_entity}' successfully updated"
            )
            return await self.get_relation_info(
                source_entity, target_entity, include_vector_data=True
            )
        except Exception as e:
            logger.error(
                f"Error while editing relation from '{source_entity}' to '{target_entity}': {e}"
            )
            raise

    def edit_relation(
        self, source_entity: str, target_entity: str, updated_data: dict[str, Any]
    ) -> dict[str, Any]:
        """Synchronously edit relation information.

        Updates relation (edge) information in the knowledge graph and re-embeds the relation in the vector database.

        Args:
            source_entity: Name of the source entity
            target_entity: Name of the target entity
            updated_data: Dictionary containing updated attributes, e.g. {"description": "new description", "keywords": "keywords"}

        Returns:
            Dictionary containing updated relation information
        """
        loop = always_get_an_event_loop()
        return loop.run_until_complete(
            self.aedit_relation(source_entity, target_entity, updated_data)
        )

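A minimal usage sketch for the relation-editing API; `rag` is assumed to be an already-initialized `LightRAG` instance, and the entity names and attribute values are illustrative only:

```python
# Hedged sketch: assumes `rag` is an already-initialized LightRAG instance
# and that a relation from "Scrooge" to "Bob Cratchit" already exists in the graph.
updated_relation = rag.edit_relation(
    "Scrooge",
    "Bob Cratchit",
    {
        "description": "Employer and underpaid clerk",
        "keywords": "employment, wages",
        "weight": 2.0,
    },
)
print(updated_relation)
```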
    async def _edit_relation_done(self) -> None:
        """Callback after relation editing is complete, ensures updates are persisted"""
        await asyncio.gather(
            *[
                cast(StorageNameSpace, storage_inst).index_done_callback()
                for storage_inst in [  # type: ignore
                    self.relationships_vdb,
                    self.chunk_entity_relation_graph,
                ]
            ]
        )

    async def acreate_entity(
        self, entity_name: str, entity_data: dict[str, Any]
    ) -> dict[str, Any]:
        """Asynchronously create a new entity.

        Creates a new entity in the knowledge graph and adds it to the vector database.

        Args:
            entity_name: Name of the new entity
            entity_data: Dictionary containing entity attributes, e.g. {"description": "description", "entity_type": "type"}

        Returns:
            Dictionary containing created entity information
        """
        try:
            # Check if entity already exists
            existing_node = await self.chunk_entity_relation_graph.get_node(entity_name)
            if existing_node:
                raise ValueError(f"Entity '{entity_name}' already exists")

            # Prepare node data with defaults if missing
            node_data = {
                "entity_type": entity_data.get("entity_type", "UNKNOWN"),
                "description": entity_data.get("description", ""),
                "source_id": entity_data.get("source_id", "manual"),
            }

            # Add entity to knowledge graph
            await self.chunk_entity_relation_graph.upsert_node(entity_name, node_data)

            # Prepare content for entity
            description = node_data.get("description", "")
            source_id = node_data.get("source_id", "")
            entity_type = node_data.get("entity_type", "")
            content = entity_name + "\n" + description

            # Calculate entity ID
            entity_id = compute_mdhash_id(entity_name, prefix="ent-")

            # Prepare data for vector database update
            entity_data_for_vdb = {
                entity_id: {
                    "content": content,
                    "entity_name": entity_name,
                    "source_id": source_id,
                    "description": description,
                    "entity_type": entity_type,
                }
            }

            # Update vector database
            await self.entities_vdb.upsert(entity_data_for_vdb)

            # Save changes
            await self._edit_entity_done()

            logger.info(f"Entity '{entity_name}' successfully created")
            return await self.get_entity_info(entity_name, include_vector_data=True)
        except Exception as e:
            logger.error(f"Error while creating entity '{entity_name}': {e}")
            raise

    def create_entity(
        self, entity_name: str, entity_data: dict[str, Any]
    ) -> dict[str, Any]:
        """Synchronously create a new entity.

        Creates a new entity in the knowledge graph and adds it to the vector database.

        Args:
            entity_name: Name of the new entity
            entity_data: Dictionary containing entity attributes, e.g. {"description": "description", "entity_type": "type"}

        Returns:
            Dictionary containing created entity information
        """
        loop = always_get_an_event_loop()
        return loop.run_until_complete(self.acreate_entity(entity_name, entity_data))

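A minimal usage sketch for the entity-creation API; `rag` is assumed to be an already-initialized `LightRAG` instance, and the entity name and attributes are illustrative only:

```python
# Hedged sketch: assumes `rag` is an already-initialized LightRAG instance
# and that no entity named "Fezziwig" exists yet (otherwise a ValueError is raised).
new_entity = rag.create_entity(
    "Fezziwig",
    {"description": "Scrooge's generous former employer", "entity_type": "PERSON"},
)
print(new_entity)
```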
    async def acreate_relation(
        self, source_entity: str, target_entity: str, relation_data: dict[str, Any]
    ) -> dict[str, Any]:
        """Asynchronously create a new relation between entities.

        Creates a new relation (edge) in the knowledge graph and adds it to the vector database.

        Args:
            source_entity: Name of the source entity
            target_entity: Name of the target entity
            relation_data: Dictionary containing relation attributes, e.g. {"description": "description", "keywords": "keywords"}

        Returns:
            Dictionary containing created relation information
        """
        try:
            # Check if both entities exist
            source_exists = await self.chunk_entity_relation_graph.has_node(
                source_entity
            )
            target_exists = await self.chunk_entity_relation_graph.has_node(
                target_entity
            )

            if not source_exists:
                raise ValueError(f"Source entity '{source_entity}' does not exist")
            if not target_exists:
                raise ValueError(f"Target entity '{target_entity}' does not exist")

            # Check if relation already exists
            existing_edge = await self.chunk_entity_relation_graph.get_edge(
                source_entity, target_entity
            )
            if existing_edge:
                raise ValueError(
                    f"Relation from '{source_entity}' to '{target_entity}' already exists"
                )

            # Prepare edge data with defaults if missing
            edge_data = {
                "description": relation_data.get("description", ""),
                "keywords": relation_data.get("keywords", ""),
                "source_id": relation_data.get("source_id", "manual"),
                "weight": float(relation_data.get("weight", 1.0)),
            }

            # Add relation to knowledge graph
            await self.chunk_entity_relation_graph.upsert_edge(
                source_entity, target_entity, edge_data
            )

            # Prepare content for embedding
            description = edge_data.get("description", "")
            keywords = edge_data.get("keywords", "")
            source_id = edge_data.get("source_id", "")
            weight = edge_data.get("weight", 1.0)

            # Create content for embedding
            content = f"{keywords}\t{source_entity}\n{target_entity}\n{description}"

            # Calculate relation ID
            relation_id = compute_mdhash_id(
                source_entity + target_entity, prefix="rel-"
            )

            # Prepare data for vector database update
            relation_data_for_vdb = {
                relation_id: {
                    "content": content,
                    "src_id": source_entity,
                    "tgt_id": target_entity,
                    "source_id": source_id,
                    "description": description,
                    "keywords": keywords,
                    "weight": weight,
                }
            }

            # Update vector database
            await self.relationships_vdb.upsert(relation_data_for_vdb)

            # Save changes
            await self._edit_relation_done()

            logger.info(
                f"Relation from '{source_entity}' to '{target_entity}' successfully created"
            )
            return await self.get_relation_info(
                source_entity, target_entity, include_vector_data=True
            )
        except Exception as e:
            logger.error(
                f"Error while creating relation from '{source_entity}' to '{target_entity}': {e}"
            )
            raise

    def create_relation(
        self, source_entity: str, target_entity: str, relation_data: dict[str, Any]
    ) -> dict[str, Any]:
        """Synchronously create a new relation between entities.

        Creates a new relation (edge) in the knowledge graph and adds it to the vector database.

        Args:
            source_entity: Name of the source entity
            target_entity: Name of the target entity
            relation_data: Dictionary containing relation attributes, e.g. {"description": "description", "keywords": "keywords"}

        Returns:
            Dictionary containing created relation information
        """
        loop = always_get_an_event_loop()
        return loop.run_until_complete(
            self.acreate_relation(source_entity, target_entity, relation_data)
        )

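A minimal usage sketch for the relation-creation API; `rag` is assumed to be an already-initialized `LightRAG` instance, and the entity names and attributes are illustrative only:

```python
# Hedged sketch: assumes `rag` is an already-initialized LightRAG instance
# and that both endpoint entities already exist in the knowledge graph.
new_relation = rag.create_relation(
    "Scrooge",
    "Fezziwig",
    {
        "description": "Scrooge apprenticed under Fezziwig",
        "keywords": "apprenticeship, mentorship",
        "weight": 1.0,
    },
)
print(new_relation)
```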
@@ -1,8 +1,10 @@
import os
import json
import time
import asyncio

from lightrag import LightRAG
from lightrag.kg.shared_storage import initialize_pipeline_status


def insert_text(rag, file_path):
@@ -29,6 +31,21 @@ WORKING_DIR = f"../{cls}"
if not os.path.exists(WORKING_DIR):
    os.mkdir(WORKING_DIR)

rag = LightRAG(working_dir=WORKING_DIR)

insert_text(rag, f"../datasets/unique_contexts/{cls}_unique_contexts.json")
async def initialize_rag():
    rag = LightRAG(working_dir=WORKING_DIR)

    await rag.initialize_storages()
    await initialize_pipeline_status()

    return rag


def main():
    # Initialize RAG instance
    rag = asyncio.run(initialize_rag())
    insert_text(rag, f"../datasets/unique_contexts/{cls}_unique_contexts.json")


if __name__ == "__main__":
    main()

@@ -1,11 +1,13 @@
import os
import json
import time
import asyncio
import numpy as np

from lightrag import LightRAG
from lightrag.utils import EmbeddingFunc
from lightrag.llm.openai import openai_complete_if_cache, openai_embed
from lightrag.kg.shared_storage import initialize_pipeline_status


## For Upstage API
@@ -60,12 +62,27 @@ WORKING_DIR = f"../{cls}"
if not os.path.exists(WORKING_DIR):
    os.mkdir(WORKING_DIR)

rag = LightRAG(
    working_dir=WORKING_DIR,
    llm_model_func=llm_model_func,
    embedding_func=EmbeddingFunc(
        embedding_dim=4096, max_token_size=8192, func=embedding_func
    ),
)

insert_text(rag, f"../datasets/unique_contexts/{cls}_unique_contexts.json")
async def initialize_rag():
    rag = LightRAG(
        working_dir=WORKING_DIR,
        llm_model_func=llm_model_func,
        embedding_func=EmbeddingFunc(
            embedding_dim=4096, max_token_size=8192, func=embedding_func
        ),
    )

    await rag.initialize_storages()
    await initialize_pipeline_status()

    return rag


def main():
    # Initialize RAG instance
    rag = asyncio.run(initialize_rag())
    insert_text(rag, f"../datasets/unique_contexts/{cls}_unique_contexts.json")


if __name__ == "__main__":
    main()