fix demo
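
Update the README and demo scripts for the new two-step startup: a LightRAG
instance must now initialize its storage backends and the shared pipeline
status before any insert or query. Every example therefore builds the
instance inside an async initialize_rag() helper that awaits
rag.initialize_storages() and initialize_pipeline_status() before returning
it. The .gitignore pattern is also widened from dickens/ to dickens*/ so
sibling demo working directories such as ./dickens_age are ignored, and one
example switches its JsonKVStorage import from lightrag.storage to
lightrag.kg.json_kv_impl.

The pattern repeated throughout the diff, as a minimal sketch (the model and
embedding arguments mirror the README hunk below; adapt them to your setup):

    import asyncio
    from lightrag import LightRAG
    from lightrag.llm.openai import gpt_4o_mini_complete, openai_embed
    from lightrag.kg.shared_storage import initialize_pipeline_status

    async def initialize_rag():
        # Constructing LightRAG alone is no longer enough.
        rag = LightRAG(
            working_dir="your/path",
            embedding_func=openai_embed,
            llm_model_func=gpt_4o_mini_complete,
        )
        # Both steps below are required before insert/query:
        await rag.initialize_storages()     # storage backends
        await initialize_pipeline_status()  # shared document-pipeline status
        return rag

    rag = asyncio.run(initialize_rag())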
.gitignore (vendored, 2 changed lines)

@@ -57,7 +57,7 @@ ignore_this.txt
 *.ignore.*
 
 # Project-specific files
-dickens/
+dickens*/
 book.txt
 lightrag-dev/
 gui/

README.md (225 changed lines)

@@ -102,33 +102,47 @@ Use the below Python snippet (in a script) to initialize LightRAG and perform qu
 
 ```python
 import os
+import asyncio
 from lightrag import LightRAG, QueryParam
 from lightrag.llm.openai import gpt_4o_mini_complete, gpt_4o_complete, openai_embed
+from lightrag.kg.shared_storage import initialize_pipeline_status
 
-rag = LightRAG(
-    working_dir="your/path",
-    embedding_func=openai_embed,
-    llm_model_func=gpt_4o_mini_complete
-)
-
-# Insert text
-rag.insert("Your text")
-
-# Perform naive search
-mode="naive"
-# Perform local search
-mode="local"
-# Perform global search
-mode="global"
-# Perform hybrid search
-mode="hybrid"
-# Mix mode Integrates knowledge graph and vector retrieval.
-mode="mix"
-
-rag.query(
-    "What are the top themes in this story?",
-    param=QueryParam(mode=mode)
-)
+async def initialize_rag():
+    rag = LightRAG(
+        working_dir="your/path",
+        embedding_func=openai_embed,
+        llm_model_func=gpt_4o_mini_complete
+    )
+
+    await rag.initialize_storages()
+    await initialize_pipeline_status()
+
+    return rag
+
+def main():
+    # Initialize RAG instance
+    rag = asyncio.run(initialize_rag())
+    # Insert text
+    rag.insert("Your text")
+
+    # Perform naive search
+    mode="naive"
+    # Perform local search
+    mode="local"
+    # Perform global search
+    mode="global"
+    # Perform hybrid search
+    mode="hybrid"
+    # Mix mode Integrates knowledge graph and vector retrieval.
+    mode="mix"
+
+    rag.query(
+        "What are the top themes in this story?",
+        param=QueryParam(mode=mode)
+    )
+
+if __name__ == "__main__":
+    main()
 ```
 
 ### Query Param
@@ -190,15 +204,21 @@ async def embedding_func(texts: list[str]) -> np.ndarray:
     base_url="https://api.upstage.ai/v1/solar"
 )
 
-rag = LightRAG(
-    working_dir=WORKING_DIR,
-    llm_model_func=llm_model_func,
-    embedding_func=EmbeddingFunc(
-        embedding_dim=4096,
-        max_token_size=8192,
-        func=embedding_func
-    )
-)
+async def initialize_rag():
+    rag = LightRAG(
+        working_dir=WORKING_DIR,
+        llm_model_func=llm_model_func,
+        embedding_func=EmbeddingFunc(
+            embedding_dim=4096,
+            max_token_size=8192,
+            func=embedding_func
+        )
+    )
+
+    await rag.initialize_storages()
+    await initialize_pipeline_status()
+
+    return rag
 ```
 
 </details>
 
@@ -210,10 +230,6 @@ rag = LightRAG(
 See `lightrag_hf_demo.py`
 
 ```python
-from lightrag.llm import hf_model_complete, hf_embed
-from transformers import AutoModel, AutoTokenizer
-from lightrag.utils import EmbeddingFunc
-
 # Initialize LightRAG with Hugging Face model
 rag = LightRAG(
     working_dir=WORKING_DIR,
@@ -242,9 +258,6 @@ If you want to use Ollama models, you need to pull model you plan to use and emb
 Then you only need to set LightRAG as follows:
 
 ```python
-from lightrag.llm.ollama import ollama_model_complete, ollama_embed
-from lightrag.utils import EmbeddingFunc
-
 # Initialize LightRAG with Ollama model
 rag = LightRAG(
     working_dir=WORKING_DIR,
@@ -325,20 +338,58 @@ LightRAG supports integration with LlamaIndex.
 
 ```python
 # Using LlamaIndex with direct OpenAI access
+import asyncio
 from lightrag import LightRAG
 from lightrag.llm.llama_index_impl import llama_index_complete_if_cache, llama_index_embed
 from llama_index.embeddings.openai import OpenAIEmbedding
 from llama_index.llms.openai import OpenAI
+from lightrag.kg.shared_storage import initialize_pipeline_status
 
-rag = LightRAG(
-    working_dir="your/path",
-    llm_model_func=llama_index_complete_if_cache,  # LlamaIndex-compatible completion function
-    embedding_func=EmbeddingFunc(  # LlamaIndex-compatible embedding function
-        embedding_dim=1536,
-        max_token_size=8192,
-        func=lambda texts: llama_index_embed(texts, embed_model=embed_model)
-    ),
-)
+async def initialize_rag():
+    rag = LightRAG(
+        working_dir="your/path",
+        llm_model_func=llama_index_complete_if_cache,  # LlamaIndex-compatible completion function
+        embedding_func=EmbeddingFunc(  # LlamaIndex-compatible embedding function
+            embedding_dim=1536,
+            max_token_size=8192,
+            func=lambda texts: llama_index_embed(texts, embed_model=embed_model)
+        ),
+    )
+
+    await rag.initialize_storages()
+    await initialize_pipeline_status()
+
+    return rag
+
+def main():
+    # Initialize RAG instance
+    rag = asyncio.run(initialize_rag())
+
+    with open("./book.txt", "r", encoding="utf-8") as f:
+        rag.insert(f.read())
+
+    # Perform naive search
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+    )
+
+    # Perform local search
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+    )
+
+    # Perform global search
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+    )
+
+    # Perform hybrid search
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+    )
+
+if __name__ == "__main__":
+    main()
 ```
 
 #### For detailed documentation and examples, see:
@@ -353,11 +404,6 @@ rag = LightRAG(
 LightRAG now supports multi-turn dialogue through the conversation history feature. Here's how to use it:
 
 ```python
-from lightrag import LightRAG, QueryParam
-
-# Initialize LightRAG
-rag = LightRAG(working_dir=WORKING_DIR)
-
 # Create conversation history
 conversation_history = [
     {"role": "user", "content": "What is the main character's attitude towards Christmas?"},
@@ -387,11 +433,6 @@ response = rag.query(
 LightRAG now supports custom prompts for fine-tuned control over the system's behavior. Here's how to use it:
 
 ```python
-from lightrag import LightRAG, QueryParam
-
-# Initialize LightRAG
-rag = LightRAG(working_dir=WORKING_DIR)
-
 # Create query parameters
 query_param = QueryParam(
     mode="hybrid",  # or other mode: "local", "global", "hybrid", "mix" and "naive"
@@ -456,16 +497,6 @@ rag.query_with_separate_keyword_extraction(
 <summary> <b>Insert Custom KG</b> </summary>
 
 ```python
-rag = LightRAG(
-    working_dir=WORKING_DIR,
-    llm_model_func=llm_model_func,
-    embedding_func=EmbeddingFunc(
-        embedding_dim=embedding_dimension,
-        max_token_size=8192,
-        func=embedding_func,
-    ),
-)
-
 custom_kg = {
     "entities": [
         {
@@ -534,6 +565,7 @@ rag = LightRAG(
         "insert_batch_size": 20  # Process 20 documents per batch
     }
 )
+
 rag.insert(["TEXT1", "TEXT2", "TEXT3", ...])  # Documents will be processed in batches of 20
 ```
 
@@ -560,27 +592,6 @@ rag.insert(["TEXT1", "TEXT2",...], ids=["ID_FOR_TEXT1", "ID_FOR_TEXT2"])
 
 </details>
 
-<details>
-<summary><b>Incremental Insert</b></summary>
-
-```python
-# Incremental Insert: Insert new documents into an existing LightRAG instance
-rag = LightRAG(
-    working_dir=WORKING_DIR,
-    llm_model_func=llm_model_func,
-    embedding_func=EmbeddingFunc(
-        embedding_dim=embedding_dimension,
-        max_token_size=8192,
-        func=embedding_func,
-    ),
-)
-
-with open("./newText.txt") as f:
-    rag.insert(f.read())
-```
-
-</details>
-
 <details>
 <summary><b>Insert using Pipeline</b></summary>
 
@@ -592,6 +603,7 @@ And using a routine to process news documents.
 
 ```python
 rag = LightRAG(..)
+
 await rag.apipeline_enqueue_documents(input)
 # Your routine in loop
 await rag.apipeline_process_enqueue_documents(input)
@@ -633,8 +645,6 @@ export NEO4J_PASSWORD="password"
 
 # Note: Default settings use NetworkX
 # Initialize LightRAG with Neo4J implementation.
-WORKING_DIR = "./local_neo4jWorkDir"
-
 rag = LightRAG(
     working_dir=WORKING_DIR,
     llm_model_func=gpt_4o_mini_complete,  # Use gpt_4o_mini_complete LLM model
@@ -706,26 +716,26 @@ You can also install `faiss-gpu` if you have GPU support.
 
 - Here we are using `sentence-transformers` but you can also use `OpenAIEmbedding` model with `3072` dimensions.
 
-```
+```python
 async def embedding_func(texts: list[str]) -> np.ndarray:
     model = SentenceTransformer('all-MiniLM-L6-v2')
     embeddings = model.encode(texts, convert_to_numpy=True)
     return embeddings
 
 # Initialize LightRAG with the LLM model function and embedding function
 rag = LightRAG(
     working_dir=WORKING_DIR,
     llm_model_func=llm_model_func,
     embedding_func=EmbeddingFunc(
         embedding_dim=384,
         max_token_size=8192,
         func=embedding_func,
     ),
     vector_storage="FaissVectorDBStorage",
     vector_db_storage_cls_kwargs={
         "cosine_better_than_threshold": 0.3  # Your desired threshold
     }
 )
 ```
 
 </details>
@@ -733,17 +743,6 @@ async def embedding_func(texts: list[str]) -> np.ndarray:
 ## Delete
 
 ```python
-
-rag = LightRAG(
-    working_dir=WORKING_DIR,
-    llm_model_func=llm_model_func,
-    embedding_func=EmbeddingFunc(
-        embedding_dim=embedding_dimension,
-        max_token_size=8192,
-        func=embedding_func,
-    ),
-)
-
 # Delete Entity: Deleting entities by their names
 rag.delete_by_entity("Project Gutenberg")
 

@@ -10,7 +10,7 @@ import os
 from dotenv import load_dotenv
 
 from lightrag.kg.postgres_impl import PostgreSQLDB, PGKVStorage
-from lightrag.storage import JsonKVStorage
+from lightrag.kg.json_kv_impl import JsonKVStorage
 from lightrag.namespace import NameSpace
 
 load_dotenv()

@@ -1,4 +1,5 @@
 from fastapi import FastAPI, HTTPException, File, UploadFile
+from contextlib import asynccontextmanager
 from pydantic import BaseModel
 import os
 from lightrag import LightRAG, QueryParam
@@ -8,12 +9,12 @@ from typing import Optional
 import asyncio
 import nest_asyncio
 import aiofiles
+from lightrag.kg.shared_storage import initialize_pipeline_status
 
 # Apply nest_asyncio to solve event loop issues
 nest_asyncio.apply()
 
 DEFAULT_RAG_DIR = "index_default"
-app = FastAPI(title="LightRAG API", description="API for RAG operations")
 
 DEFAULT_INPUT_FILE = "book.txt"
 INPUT_FILE = os.environ.get("INPUT_FILE", f"{DEFAULT_INPUT_FILE}")
@@ -28,23 +29,41 @@ if not os.path.exists(WORKING_DIR):
     os.mkdir(WORKING_DIR)
 
 
-rag = LightRAG(
-    working_dir=WORKING_DIR,
-    llm_model_func=ollama_model_complete,
-    llm_model_name="gemma2:9b",
-    llm_model_max_async=4,
-    llm_model_max_token_size=8192,
-    llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 8192}},
-    embedding_func=EmbeddingFunc(
-        embedding_dim=768,
-        max_token_size=8192,
-        func=lambda texts: ollama_embed(
-            texts, embed_model="nomic-embed-text", host="http://localhost:11434"
-        ),
-    ),
-)
+async def init():
+    rag = LightRAG(
+        working_dir=WORKING_DIR,
+        llm_model_func=ollama_model_complete,
+        llm_model_name="gemma2:9b",
+        llm_model_max_async=4,
+        llm_model_max_token_size=8192,
+        llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 8192}},
+        embedding_func=EmbeddingFunc(
+            embedding_dim=768,
+            max_token_size=8192,
+            func=lambda texts: ollama_embed(
+                texts, embed_model="nomic-embed-text", host="http://localhost:11434"
+            ),
+        ),
+    )
+
+    # Add initialization code
+    await rag.initialize_storages()
+    await initialize_pipeline_status()
+
+    return rag
+
+
+@asynccontextmanager
+async def lifespan(app: FastAPI):
+    global rag
+    rag = await init()
+    print("done!")
+    yield
+
+
+app = FastAPI(
+    title="LightRAG API", description="API for RAG operations", lifespan=lifespan
+)
 
 
 # Data models
 class QueryRequest(BaseModel):
     query: str

@@ -1,4 +1,5 @@
 from fastapi import FastAPI, HTTPException, File, UploadFile
+from contextlib import asynccontextmanager
 from pydantic import BaseModel
 import os
 from lightrag import LightRAG, QueryParam
@@ -8,6 +9,7 @@ import numpy as np
 from typing import Optional
 import asyncio
 import nest_asyncio
+from lightrag.kg.shared_storage import initialize_pipeline_status
 
 # Apply nest_asyncio to solve event loop issues
 nest_asyncio.apply()
@@ -71,16 +73,35 @@ async def get_embedding_dim():
 
 
 # Initialize RAG instance
-rag = LightRAG(
-    working_dir=WORKING_DIR,
-    llm_model_func=llm_model_func,
-    embedding_func=EmbeddingFunc(
-        embedding_dim=asyncio.run(get_embedding_dim()),
-        max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
-        func=embedding_func,
-    ),
-)
+async def init():
+    embedding_dimension = await get_embedding_dim()
+
+    rag = LightRAG(
+        working_dir=WORKING_DIR,
+        llm_model_func=llm_model_func,
+        embedding_func=EmbeddingFunc(
+            embedding_dim=embedding_dimension,
+            max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
+            func=embedding_func,
+        ),
+    )
+
+    await rag.initialize_storages()
+    await initialize_pipeline_status()
+
+    return rag
+
+
+@asynccontextmanager
+async def lifespan(app: FastAPI):
+    global rag
+    rag = await init()
+    print("done!")
+    yield
+
+
+app = FastAPI(
+    title="LightRAG API", description="API for RAG operations", lifespan=lifespan
+)
 
 # Data models
 

@@ -1,101 +0,0 @@
-import os
-from lightrag import LightRAG, QueryParam
-from lightrag.llm.openai import openai_complete_if_cache, openai_embed
-from lightrag.utils import EmbeddingFunc
-import numpy as np
-import asyncio
-import nest_asyncio
-
-# Apply nest_asyncio to solve event loop issues
-nest_asyncio.apply()
-
-DEFAULT_RAG_DIR = "index_default"
-
-# Configure working directory
-WORKING_DIR = os.environ.get("RAG_DIR", f"{DEFAULT_RAG_DIR}")
-print(f"WORKING_DIR: {WORKING_DIR}")
-LLM_MODEL = os.environ.get("LLM_MODEL", "gpt-4o-mini")
-print(f"LLM_MODEL: {LLM_MODEL}")
-EMBEDDING_MODEL = os.environ.get("EMBEDDING_MODEL", "text-embedding-3-small")
-print(f"EMBEDDING_MODEL: {EMBEDDING_MODEL}")
-EMBEDDING_MAX_TOKEN_SIZE = int(os.environ.get("EMBEDDING_MAX_TOKEN_SIZE", 8192))
-print(f"EMBEDDING_MAX_TOKEN_SIZE: {EMBEDDING_MAX_TOKEN_SIZE}")
-BASE_URL = os.environ.get("BASE_URL", "https://api.openai.com/v1")
-print(f"BASE_URL: {BASE_URL}")
-API_KEY = os.environ.get("API_KEY", "xxxxxxxx")
-print(f"API_KEY: {API_KEY}")
-
-if not os.path.exists(WORKING_DIR):
-    os.mkdir(WORKING_DIR)
-
-
-# LLM model function
-
-
-async def llm_model_func(
-    prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
-) -> str:
-    return await openai_complete_if_cache(
-        model=LLM_MODEL,
-        prompt=prompt,
-        system_prompt=system_prompt,
-        history_messages=history_messages,
-        base_url=BASE_URL,
-        api_key=API_KEY,
-        **kwargs,
-    )
-
-
-# Embedding function
-
-
-async def embedding_func(texts: list[str]) -> np.ndarray:
-    return await openai_embed(
-        texts=texts,
-        model=EMBEDDING_MODEL,
-        base_url=BASE_URL,
-        api_key=API_KEY,
-    )
-
-
-async def get_embedding_dim():
-    test_text = ["This is a test sentence."]
-    embedding = await embedding_func(test_text)
-    embedding_dim = embedding.shape[1]
-    print(f"{embedding_dim=}")
-    return embedding_dim
-
-
-# Initialize RAG instance
-rag = LightRAG(
-    working_dir=WORKING_DIR,
-    llm_model_func=llm_model_func,
-    embedding_func=EmbeddingFunc(
-        embedding_dim=asyncio.run(get_embedding_dim()),
-        max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
-        func=embedding_func,
-    ),
-)
-
-with open("./book.txt", "r", encoding="utf-8") as f:
-    rag.insert(f.read())
-
-# Perform naive search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
-
-# Perform local search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
-
-# Perform global search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
-
-# Perform hybrid search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)

@@ -16,6 +16,7 @@ from lightrag import LightRAG, QueryParam
 from lightrag.llm.openai import openai_complete_if_cache, openai_embed
 from lightrag.utils import EmbeddingFunc
 import numpy as np
+from lightrag.kg.shared_storage import initialize_pipeline_status
 
 
 print(os.getcwd())
@@ -113,6 +114,9 @@ async def init():
         vector_storage="OracleVectorDBStorage",
     )
 
+    await rag.initialize_storages()
+    await initialize_pipeline_status()
+
     return rag
 
 

@@ -6,6 +6,7 @@ import numpy as np
 from dotenv import load_dotenv
 import logging
 from openai import AzureOpenAI
+from lightrag.kg.shared_storage import initialize_pipeline_status
 
 logging.basicConfig(level=logging.INFO)
 
@@ -90,6 +91,9 @@ rag = LightRAG(
     ),
 )
 
+rag.initialize_storages()
+initialize_pipeline_status()
+
 book1 = open("./book_1.txt", encoding="utf-8")
 book2 = open("./book_2.txt", encoding="utf-8")
 

@@ -8,6 +8,12 @@ import logging
 from lightrag import LightRAG, QueryParam
 from lightrag.llm.bedrock import bedrock_complete, bedrock_embed
 from lightrag.utils import EmbeddingFunc
+from lightrag.kg.shared_storage import initialize_pipeline_status
+
+import asyncio
+import nest_asyncio
+
+nest_asyncio.apply()
 
 logging.getLogger("aiobotocore").setLevel(logging.WARNING)
 
@@ -15,22 +21,31 @@ WORKING_DIR = "./dickens"
 if not os.path.exists(WORKING_DIR):
     os.mkdir(WORKING_DIR)
 
-rag = LightRAG(
-    working_dir=WORKING_DIR,
-    llm_model_func=bedrock_complete,
-    llm_model_name="Anthropic Claude 3 Haiku // Amazon Bedrock",
-    embedding_func=EmbeddingFunc(
-        embedding_dim=1024, max_token_size=8192, func=bedrock_embed
-    ),
-)
-
-with open("./book.txt", "r", encoding="utf-8") as f:
-    rag.insert(f.read())
-
-for mode in ["naive", "local", "global", "hybrid"]:
-    print("\n+-" + "-" * len(mode) + "-+")
-    print(f"| {mode.capitalize()} |")
-    print("+-" + "-" * len(mode) + "-+\n")
-    print(
-        rag.query("What are the top themes in this story?", param=QueryParam(mode=mode))
-    )
+async def initialize_rag():
+    rag = LightRAG(
+        working_dir=WORKING_DIR,
+        llm_model_func=bedrock_complete,
+        llm_model_name="Anthropic Claude 3 Haiku // Amazon Bedrock",
+        embedding_func=EmbeddingFunc(
+            embedding_dim=1024, max_token_size=8192, func=bedrock_embed
+        ),
+    )
+
+    await rag.initialize_storages()
+    await initialize_pipeline_status()
+
+    return rag
+
+def main():
+    rag = asyncio.run(initialize_rag())
+
+    with open("./book.txt", "r", encoding="utf-8") as f:
+        rag.insert(f.read())
+
+    for mode in ["naive", "local", "global", "hybrid"]:
+        print("\n+-" + "-" * len(mode) + "-+")
+        print(f"| {mode.capitalize()} |")
+        print("+-" + "-" * len(mode) + "-+\n")
+        print(
+            rag.query("What are the top themes in this story?", param=QueryParam(mode=mode))
+        )

@@ -8,6 +8,12 @@ from dotenv import load_dotenv
 from lightrag.utils import EmbeddingFunc
 from lightrag import LightRAG, QueryParam
 from sentence_transformers import SentenceTransformer
+from lightrag.kg.shared_storage import initialize_pipeline_status
+
+import asyncio
+import nest_asyncio
+# Apply nest_asyncio to solve event loop issues
+nest_asyncio.apply()
 
 load_dotenv()
 gemini_api_key = os.getenv("GEMINI_API_KEY")
@@ -60,25 +66,37 @@ async def embedding_func(texts: list[str]) -> np.ndarray:
     return embeddings
 
 
-rag = LightRAG(
-    working_dir=WORKING_DIR,
-    llm_model_func=llm_model_func,
-    embedding_func=EmbeddingFunc(
-        embedding_dim=384,
-        max_token_size=8192,
-        func=embedding_func,
-    ),
-)
-
-file_path = "story.txt"
-with open(file_path, "r") as file:
-    text = file.read()
-
-rag.insert(text)
-
-response = rag.query(
-    query="What is the main theme of the story?",
-    param=QueryParam(mode="hybrid", top_k=5, response_type="single line"),
-)
-
-print(response)
+async def initialize_rag():
+    rag = LightRAG(
+        working_dir=WORKING_DIR,
+        llm_model_func=llm_model_func,
+        embedding_func=EmbeddingFunc(
+            embedding_dim=384,
+            max_token_size=8192,
+            func=embedding_func,
+        ),
+    )
+
+    await rag.initialize_storages()
+    await initialize_pipeline_status()
+
+    return rag
+
+def main():
+    # Initialize RAG instance
+    rag = asyncio.run(initialize_rag())
+    file_path = "story.txt"
+    with open(file_path, "r") as file:
+        text = file.read()
+
+    rag.insert(text)
+
+    response = rag.query(
+        query="What is the main theme of the story?",
+        param=QueryParam(mode="hybrid", top_k=5, response_type="single line"),
+    )
+
+    print(response)
+
+if __name__ == "__main__":
+    main()

@@ -4,51 +4,68 @@ from lightrag import LightRAG, QueryParam
 from lightrag.llm.hf import hf_model_complete, hf_embed
 from lightrag.utils import EmbeddingFunc
 from transformers import AutoModel, AutoTokenizer
+from lightrag.kg.shared_storage import initialize_pipeline_status
+
+import asyncio
+import nest_asyncio
+
+nest_asyncio.apply()
 
 WORKING_DIR = "./dickens"
 
 if not os.path.exists(WORKING_DIR):
     os.mkdir(WORKING_DIR)
 
-rag = LightRAG(
-    working_dir=WORKING_DIR,
-    llm_model_func=hf_model_complete,
-    llm_model_name="meta-llama/Llama-3.1-8B-Instruct",
-    embedding_func=EmbeddingFunc(
-        embedding_dim=384,
-        max_token_size=5000,
-        func=lambda texts: hf_embed(
-            texts,
-            tokenizer=AutoTokenizer.from_pretrained(
-                "sentence-transformers/all-MiniLM-L6-v2"
-            ),
-            embed_model=AutoModel.from_pretrained(
-                "sentence-transformers/all-MiniLM-L6-v2"
-            ),
-        ),
-    ),
-)
-
-with open("./book.txt", "r", encoding="utf-8") as f:
-    rag.insert(f.read())
-
-# Perform naive search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
-
-# Perform local search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
-
-# Perform global search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
-
-# Perform hybrid search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
+async def initialize_rag():
+    rag = LightRAG(
+        working_dir=WORKING_DIR,
+        llm_model_func=hf_model_complete,
+        llm_model_name="meta-llama/Llama-3.1-8B-Instruct",
+        embedding_func=EmbeddingFunc(
+            embedding_dim=384,
+            max_token_size=5000,
+            func=lambda texts: hf_embed(
+                texts,
+                tokenizer=AutoTokenizer.from_pretrained(
+                    "sentence-transformers/all-MiniLM-L6-v2"
+                ),
+                embed_model=AutoModel.from_pretrained(
+                    "sentence-transformers/all-MiniLM-L6-v2"
+                ),
+            ),
+        ),
+    )
+
+    await rag.initialize_storages()
+    await initialize_pipeline_status()
+
+    return rag
+
+
+def main():
+    rag = asyncio.run(initialize_rag())
+
+    with open("./book.txt", "r", encoding="utf-8") as f:
+        rag.insert(f.read())
+
+    # Perform naive search
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+    )
+
+    # Perform local search
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+    )
+
+    # Perform global search
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+    )
+
+    # Perform hybrid search
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+    )
+
+if __name__ == "__main__":
+    main()

@@ -1,115 +0,0 @@
-import numpy as np
-from lightrag import LightRAG, QueryParam
-from lightrag.utils import EmbeddingFunc
-from lightrag.llm.jina import jina_embed
-from lightrag.llm.openai import openai_complete_if_cache
-import os
-import asyncio
-
-
-async def embedding_func(texts: list[str]) -> np.ndarray:
-    return await jina_embed(texts, api_key="YourJinaAPIKey")
-
-
-WORKING_DIR = "./dickens"
-
-if not os.path.exists(WORKING_DIR):
-    os.mkdir(WORKING_DIR)
-
-
-async def llm_model_func(
-    prompt, system_prompt=None, history_messages=[], **kwargs
-) -> str:
-    return await openai_complete_if_cache(
-        "solar-mini",
-        prompt,
-        system_prompt=system_prompt,
-        history_messages=history_messages,
-        api_key=os.getenv("UPSTAGE_API_KEY"),
-        base_url="https://api.upstage.ai/v1/solar",
-        **kwargs,
-    )
-
-
-rag = LightRAG(
-    working_dir=WORKING_DIR,
-    llm_model_func=llm_model_func,
-    embedding_func=EmbeddingFunc(
-        embedding_dim=1024, max_token_size=8192, func=embedding_func
-    ),
-)
-
-
-async def lightraginsert(file_path, semaphore):
-    async with semaphore:
-        try:
-            with open(file_path, "r", encoding="utf-8") as f:
-                content = f.read()
-        except UnicodeDecodeError:
-            # If UTF-8 decoding fails, try other encodings
-            with open(file_path, "r", encoding="gbk") as f:
-                content = f.read()
-        await rag.ainsert(content)
-
-
-async def process_files(directory, concurrency_limit):
-    semaphore = asyncio.Semaphore(concurrency_limit)
-    tasks = []
-    for root, dirs, files in os.walk(directory):
-        for f in files:
-            file_path = os.path.join(root, f)
-            if f.startswith("."):
-                continue
-            tasks.append(lightraginsert(file_path, semaphore))
-    await asyncio.gather(*tasks)
-
-
-async def main():
-    try:
-        rag = LightRAG(
-            working_dir=WORKING_DIR,
-            llm_model_func=llm_model_func,
-            embedding_func=EmbeddingFunc(
-                embedding_dim=1024,
-                max_token_size=8192,
-                func=embedding_func,
-            ),
-        )
-
-        asyncio.run(process_files(WORKING_DIR, concurrency_limit=4))
-
-        # Perform naive search
-        print(
-            await rag.aquery(
-                "What are the top themes in this story?", param=QueryParam(mode="naive")
-            )
-        )
-
-        # Perform local search
-        print(
-            await rag.aquery(
-                "What are the top themes in this story?", param=QueryParam(mode="local")
-            )
-        )
-
-        # Perform global search
-        print(
-            await rag.aquery(
-                "What are the top themes in this story?",
-                param=QueryParam(mode="global"),
-            )
-        )
-
-        # Perform hybrid search
-        print(
-            await rag.aquery(
-                "What are the top themes in this story?",
-                param=QueryParam(mode="hybrid"),
-            )
-        )
-    except Exception as e:
-        print(f"An error occurred: {e}")
-
-
-if __name__ == "__main__":
-    asyncio.run(main())

@@ -8,6 +8,11 @@ from lightrag.utils import EmbeddingFunc
 from llama_index.llms.openai import OpenAI
 from llama_index.embeddings.openai import OpenAIEmbedding
 import asyncio
+import nest_asyncio
+
+nest_asyncio.apply()
+
+from lightrag.kg.shared_storage import initialize_pipeline_status
 
 # Configure working directory
 WORKING_DIR = "./index_default"
@@ -76,38 +81,53 @@ async def get_embedding_dim():
     return embedding_dim
 
 
-# Initialize RAG instance
-rag = LightRAG(
-    working_dir=WORKING_DIR,
-    llm_model_func=llm_model_func,
-    embedding_func=EmbeddingFunc(
-        embedding_dim=asyncio.run(get_embedding_dim()),
-        max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
-        func=embedding_func,
-    ),
-)
-
-# Insert example text
-with open("./book.txt", "r", encoding="utf-8") as f:
-    rag.insert(f.read())
-
-# Test different query modes
-print("\nNaive Search:")
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
-
-print("\nLocal Search:")
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
-
-print("\nGlobal Search:")
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
-
-print("\nHybrid Search:")
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
+async def initialize_rag():
+    embedding_dimension = await get_embedding_dim()
+
+    rag = LightRAG(
+        working_dir=WORKING_DIR,
+        llm_model_func=llm_model_func,
+        embedding_func=EmbeddingFunc(
+            embedding_dim=embedding_dimension,
+            max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
+            func=embedding_func,
+        ),
+    )
+
+    await rag.initialize_storages()
+    await initialize_pipeline_status()
+
+    return rag
+
+
+def main():
+    # Initialize RAG instance
+    rag = asyncio.run(initialize_rag())
+
+    # Insert example text
+    with open("./book.txt", "r", encoding="utf-8") as f:
+        rag.insert(f.read())
+
+    # Test different query modes
+    print("\nNaive Search:")
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+    )
+
+    print("\nLocal Search:")
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+    )
+
+    print("\nGlobal Search:")
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+    )
+
+    print("\nHybrid Search:")
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+    )
+
+if __name__ == "__main__":
+    main()

@@ -8,6 +8,11 @@ from lightrag.utils import EmbeddingFunc
 from llama_index.llms.litellm import LiteLLM
 from llama_index.embeddings.litellm import LiteLLMEmbedding
 import asyncio
+import nest_asyncio
+
+nest_asyncio.apply()
+
+from lightrag.kg.shared_storage import initialize_pipeline_status
 
 # Configure working directory
 WORKING_DIR = "./index_default"
@@ -79,38 +84,53 @@ async def get_embedding_dim():
     return embedding_dim
 
 
-# Initialize RAG instance
-rag = LightRAG(
-    working_dir=WORKING_DIR,
-    llm_model_func=llm_model_func,
-    embedding_func=EmbeddingFunc(
-        embedding_dim=asyncio.run(get_embedding_dim()),
-        max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
-        func=embedding_func,
-    ),
-)
-
-# Insert example text
-with open("./book.txt", "r", encoding="utf-8") as f:
-    rag.insert(f.read())
-
-# Test different query modes
-print("\nNaive Search:")
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
-
-print("\nLocal Search:")
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
-
-print("\nGlobal Search:")
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
-
-print("\nHybrid Search:")
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
+async def initialize_rag():
+    embedding_dimension = await get_embedding_dim()
+
+    rag = LightRAG(
+        working_dir=WORKING_DIR,
+        llm_model_func=llm_model_func,
+        embedding_func=EmbeddingFunc(
+            embedding_dim=embedding_dimension,
+            max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
+            func=embedding_func,
+        ),
+    )
+
+    await rag.initialize_storages()
+    await initialize_pipeline_status()
+
+    return rag
+
+
+def main():
+    # Initialize RAG instance
+    rag = asyncio.run(initialize_rag())
+
+    # Insert example text
+    with open("./book.txt", "r", encoding="utf-8") as f:
+        rag.insert(f.read())
+
+    # Test different query modes
+    print("\nNaive Search:")
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+    )
+
+    print("\nLocal Search:")
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+    )
+
+    print("\nGlobal Search:")
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+    )
+
+    print("\nHybrid Search:")
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+    )
+
+if __name__ == "__main__":
+    main()

@@ -5,6 +5,12 @@ from lightrag.llm.lmdeploy import lmdeploy_model_if_cache
 from lightrag.llm.hf import hf_embed
 from lightrag.utils import EmbeddingFunc
 from transformers import AutoModel, AutoTokenizer
+from lightrag.kg.shared_storage import initialize_pipeline_status
+
+import asyncio
+import nest_asyncio
+
+nest_asyncio.apply()
 
 WORKING_DIR = "./dickens"
 
@@ -35,46 +41,59 @@ async def lmdeploy_model_complete(
     **kwargs,
 )
 
-rag = LightRAG(
-    working_dir=WORKING_DIR,
-    llm_model_func=lmdeploy_model_complete,
-    llm_model_name="meta-llama/Llama-3.1-8B-Instruct",  # please use definite path for local model
-    embedding_func=EmbeddingFunc(
-        embedding_dim=384,
-        max_token_size=5000,
-        func=lambda texts: hf_embed(
-            texts,
-            tokenizer=AutoTokenizer.from_pretrained(
-                "sentence-transformers/all-MiniLM-L6-v2"
-            ),
-            embed_model=AutoModel.from_pretrained(
-                "sentence-transformers/all-MiniLM-L6-v2"
-            ),
-        ),
-    ),
-)
-
-with open("./book.txt", "r", encoding="utf-8") as f:
-    rag.insert(f.read())
-
-# Perform naive search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
-
-# Perform local search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
-
-# Perform global search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
-
-# Perform hybrid search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
+async def initialize_rag():
+    rag = LightRAG(
+        working_dir=WORKING_DIR,
+        llm_model_func=lmdeploy_model_complete,
+        llm_model_name="meta-llama/Llama-3.1-8B-Instruct",  # please use definite path for local model
+        embedding_func=EmbeddingFunc(
+            embedding_dim=384,
+            max_token_size=5000,
+            func=lambda texts: hf_embed(
+                texts,
+                tokenizer=AutoTokenizer.from_pretrained(
+                    "sentence-transformers/all-MiniLM-L6-v2"
+                ),
+                embed_model=AutoModel.from_pretrained(
+                    "sentence-transformers/all-MiniLM-L6-v2"
+                ),
+            ),
+        ),
+    )
+
+    await rag.initialize_storages()
+    await initialize_pipeline_status()
+
+    return rag
+
+
+def main():
+    # Initialize RAG instance
+    rag = asyncio.run(initialize_rag())
+
+    # Insert example text
+    with open("./book.txt", "r", encoding="utf-8") as f:
+        rag.insert(f.read())
+
+    # Test different query modes
+    print("\nNaive Search:")
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+    )
+
+    print("\nLocal Search:")
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+    )
+
+    print("\nGlobal Search:")
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+    )
+
+    print("\nHybrid Search:")
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+    )
+
+if __name__ == "__main__":
+    main()

@@ -1,5 +1,9 @@
 import os
 import asyncio
+import nest_asyncio
+
+nest_asyncio.apply()
+
 from lightrag import LightRAG, QueryParam
 from lightrag.llm import (
     openai_complete_if_cache,
@@ -7,10 +11,12 @@ from lightrag.llm import (
 )
 from lightrag.utils import EmbeddingFunc
 import numpy as np
+from lightrag.kg.shared_storage import initialize_pipeline_status
+
 # for custom llm_model_func
 from lightrag.utils import locate_json_string_body_from_string
 
 
 WORKING_DIR = "./dickens"
 
 if not os.path.exists(WORKING_DIR):
@@ -91,42 +97,37 @@ async def test_funcs():
 
 # asyncio.run(test_funcs())
 
+async def initialize_rag():
+    embedding_dimension = await get_embedding_dim()
+    print(f"Detected embedding dimension: {embedding_dimension}")
+
+    # lightRAG class during indexing
+    rag = LightRAG(
+        working_dir=WORKING_DIR,
+        llm_model_func=llm_model_func,
+        # llm_model_name="meta/llama3-70b-instruct", #un comment if
+        embedding_func=EmbeddingFunc(
+            embedding_dim=embedding_dimension,
+            max_token_size=512,  # maximum token size, somehow it's still exceed maximum number of token
+            # so truncate (trunc) parameter on embedding_func will handle it and try to examine the tokenizer used in LightRAG
+            # so you can adjust to be able to fit the NVIDIA model (future work)
+            func=indexing_embedding_func,
+        ),
+    )
+
+    await rag.initialize_storages()
+    await initialize_pipeline_status()
+
+    return rag
 
 async def main():
     try:
-        embedding_dimension = await get_embedding_dim()
-        print(f"Detected embedding dimension: {embedding_dimension}")
-
-        # lightRAG class during indexing
-        rag = LightRAG(
-            working_dir=WORKING_DIR,
-            llm_model_func=llm_model_func,
-            # llm_model_name="meta/llama3-70b-instruct", #un comment if
-            embedding_func=EmbeddingFunc(
-                embedding_dim=embedding_dimension,
-                max_token_size=512,  # maximum token size, somehow it's still exceed maximum number of token
-                # so truncate (trunc) parameter on embedding_func will handle it and try to examine the tokenizer used in LightRAG
-                # so you can adjust to be able to fit the NVIDIA model (future work)
-                func=indexing_embedding_func,
-            ),
-        )
+        # Initialize RAG instance
+        rag = asyncio.run(initialize_rag())
 
         # reading file
         with open("./book.txt", "r", encoding="utf-8") as f:
             await rag.ainsert(f.read())
 
-        # redefine rag to change embedding into query type
-        rag = LightRAG(
-            working_dir=WORKING_DIR,
-            llm_model_func=llm_model_func,
-            # llm_model_name="meta/llama3-70b-instruct", #un comment if
-            embedding_func=EmbeddingFunc(
-                embedding_dim=embedding_dimension,
-                max_token_size=512,
-                func=query_embedding_func,
-            ),
-        )
-
         # Perform naive search
         print("==============Naive===============")
         print(

@@ -1,4 +1,8 @@
 import asyncio
+import nest_asyncio
+
+nest_asyncio.apply()
+
 import inspect
 import logging
 import os
@@ -6,6 +10,7 @@ import os
 from lightrag import LightRAG, QueryParam
 from lightrag.llm.ollama import ollama_embed, ollama_model_complete
 from lightrag.utils import EmbeddingFunc
+from lightrag.kg.shared_storage import initialize_pipeline_status
 
 WORKING_DIR = "./dickens_age"
 
@@ -22,59 +27,72 @@ os.environ["AGE_POSTGRES_HOST"] = "localhost"
 os.environ["AGE_POSTGRES_PORT"] = "5455"
 os.environ["AGE_GRAPH_NAME"] = "dickens"
 
-rag = LightRAG(
-    working_dir=WORKING_DIR,
-    llm_model_func=ollama_model_complete,
-    llm_model_name="llama3.1:8b",
-    llm_model_max_async=4,
-    llm_model_max_token_size=32768,
-    llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 32768}},
-    embedding_func=EmbeddingFunc(
-        embedding_dim=768,
-        max_token_size=8192,
-        func=lambda texts: ollama_embed(
-            texts, embed_model="nomic-embed-text", host="http://localhost:11434"
-        ),
-    ),
-    graph_storage="AGEStorage",
-)
-
-with open("./book.txt", "r", encoding="utf-8") as f:
-    rag.insert(f.read())
-
-# Perform naive search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
-
-# Perform local search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
-
-# Perform global search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
-
-# Perform hybrid search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
-
-# stream response
-resp = rag.query(
-    "What are the top themes in this story?",
-    param=QueryParam(mode="hybrid", stream=True),
-)
+async def initialize_rag():
+    rag = LightRAG(
+        working_dir=WORKING_DIR,
+        llm_model_func=ollama_model_complete,
+        llm_model_name="llama3.1:8b",
+        llm_model_max_async=4,
+        llm_model_max_token_size=32768,
+        llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 32768}},
+        embedding_func=EmbeddingFunc(
+            embedding_dim=768,
+            max_token_size=8192,
+            func=lambda texts: ollama_embed(
+                texts, embed_model="nomic-embed-text", host="http://localhost:11434"
+            ),
+        ),
+        graph_storage="AGEStorage",
+    )
+
+    await rag.initialize_storages()
+    await initialize_pipeline_status()
+
+    return rag
 
 async def print_stream(stream):
     async for chunk in stream:
         print(chunk, end="", flush=True)
 
-if inspect.isasyncgen(resp):
-    asyncio.run(print_stream(resp))
-else:
-    print(resp)
+def main():
+    # Initialize RAG instance
+    rag = asyncio.run(initialize_rag())
+
+    # Insert example text
+    with open("./book.txt", "r", encoding="utf-8") as f:
+        rag.insert(f.read())
+
+    # Test different query modes
+    print("\nNaive Search:")
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+    )
+
+    print("\nLocal Search:")
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+    )
+
+    print("\nGlobal Search:")
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+    )
+
+    print("\nHybrid Search:")
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+    )
+
+    # stream response
+    resp = rag.query(
+        "What are the top themes in this story?",
+        param=QueryParam(mode="hybrid", stream=True),
+    )
+
+    if inspect.isasyncgen(resp):
+        asyncio.run(print_stream(resp))
+    else:
+        print(resp)
+
+if __name__ == "__main__":
+    main()
@@ -1,10 +1,14 @@
 import asyncio
+import nest_asyncio
+
+nest_asyncio.apply()
 import os
 import inspect
 import logging
 from lightrag import LightRAG, QueryParam
 from lightrag.llm.ollama import ollama_model_complete, ollama_embed
 from lightrag.utils import EmbeddingFunc
+from lightrag.kg.shared_storage import initialize_pipeline_status
 
 WORKING_DIR = "./dickens"
 
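A side note on the `nest_asyncio` lines added in the hunk above: these example scripts call synchronous wrappers such as `rag.insert()`, which run coroutines internally, and that fails when an event loop is already running (for example in Jupyter). The following minimal sketch shows what `nest_asyncio.apply()` changes; the function names here are illustrative and not part of LightRAG:

```python
import asyncio
import nest_asyncio

nest_asyncio.apply()  # patch asyncio to tolerate re-entrant event loops


async def inner():
    return 42


async def outer():
    # Without nest_asyncio.apply() this line raises
    # "RuntimeError: asyncio.run() cannot be called from a running event loop"
    return asyncio.run(inner())


print(asyncio.run(outer()))  # prints 42
```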
@@ -13,58 +17,71 @@ logging.basicConfig(format="%(levelname)s:%(message)s", level=logging.INFO)
 if not os.path.exists(WORKING_DIR):
     os.mkdir(WORKING_DIR)
 
-rag = LightRAG(
-    working_dir=WORKING_DIR,
-    llm_model_func=ollama_model_complete,
-    llm_model_name="gemma2:2b",
-    llm_model_max_async=4,
-    llm_model_max_token_size=32768,
-    llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 32768}},
-    embedding_func=EmbeddingFunc(
-        embedding_dim=768,
-        max_token_size=8192,
-        func=lambda texts: ollama_embed(
-            texts, embed_model="nomic-embed-text", host="http://localhost:11434"
-        ),
-    ),
-)
-
-with open("./book.txt", "r", encoding="utf-8") as f:
-    rag.insert(f.read())
-
-# Perform naive search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
-
-# Perform local search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
-
-# Perform global search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
-
-# Perform hybrid search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
-
-# stream response
-resp = rag.query(
-    "What are the top themes in this story?",
-    param=QueryParam(mode="hybrid", stream=True),
-)
+async def initialize_rag():
+    rag = LightRAG(
+        working_dir=WORKING_DIR,
+        llm_model_func=ollama_model_complete,
+        llm_model_name="gemma2:2b",
+        llm_model_max_async=4,
+        llm_model_max_token_size=32768,
+        llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 32768}},
+        embedding_func=EmbeddingFunc(
+            embedding_dim=768,
+            max_token_size=8192,
+            func=lambda texts: ollama_embed(
+                texts, embed_model="nomic-embed-text", host="http://localhost:11434"
+            ),
+        ),
+    )
+
+    await rag.initialize_storages()
+    await initialize_pipeline_status()
+
+    return rag
 
 
 async def print_stream(stream):
     async for chunk in stream:
         print(chunk, end="", flush=True)
 
-if inspect.isasyncgen(resp):
-    asyncio.run(print_stream(resp))
-else:
-    print(resp)
+
+def main():
+    # Initialize RAG instance
+    rag = asyncio.run(initialize_rag())
+
+    # Insert example text
+    with open("./book.txt", "r", encoding="utf-8") as f:
+        rag.insert(f.read())
+
+    # Test different query modes
+    print("\nNaive Search:")
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+    )
+
+    print("\nLocal Search:")
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+    )
+
+    print("\nGlobal Search:")
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+    )
+
+    print("\nHybrid Search:")
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+    )
+
+    # stream response
+    resp = rag.query(
+        "What are the top themes in this story?",
+        param=QueryParam(mode="hybrid", stream=True),
+    )
+
+    if inspect.isasyncgen(resp):
+        asyncio.run(print_stream(resp))
+    else:
+        print(resp)
+
+
+if __name__ == "__main__":
+    main()
@@ -12,6 +12,7 @@ import os
 from lightrag import LightRAG, QueryParam
 from lightrag.llm.ollama import ollama_embed, ollama_model_complete
 from lightrag.utils import EmbeddingFunc
+from lightrag.kg.shared_storage import initialize_pipeline_status
 
 WORKING_DIR = "./dickens_gremlin"
 
@@ -31,59 +32,72 @@ os.environ["GREMLIN_TRAVERSE_SOURCE"] = "g"
 os.environ["GREMLIN_USER"] = ""
 os.environ["GREMLIN_PASSWORD"] = ""
 
-rag = LightRAG(
-    working_dir=WORKING_DIR,
-    llm_model_func=ollama_model_complete,
-    llm_model_name="llama3.1:8b",
-    llm_model_max_async=4,
-    llm_model_max_token_size=32768,
-    llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 32768}},
-    embedding_func=EmbeddingFunc(
-        embedding_dim=768,
-        max_token_size=8192,
-        func=lambda texts: ollama_embed(
-            texts, embed_model="nomic-embed-text", host="http://localhost:11434"
-        ),
-    ),
-    graph_storage="GremlinStorage",
-)
-
-with open("./book.txt", "r", encoding="utf-8") as f:
-    rag.insert(f.read())
-
-# Perform naive search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
-
-# Perform local search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
-
-# Perform global search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
-
-# Perform hybrid search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
-
-# stream response
-resp = rag.query(
-    "What are the top themes in this story?",
-    param=QueryParam(mode="hybrid", stream=True),
-)
+async def initialize_rag():
+    rag = LightRAG(
+        working_dir=WORKING_DIR,
+        llm_model_func=ollama_model_complete,
+        llm_model_name="llama3.1:8b",
+        llm_model_max_async=4,
+        llm_model_max_token_size=32768,
+        llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 32768}},
+        embedding_func=EmbeddingFunc(
+            embedding_dim=768,
+            max_token_size=8192,
+            func=lambda texts: ollama_embed(
+                texts, embed_model="nomic-embed-text", host="http://localhost:11434"
+            ),
+        ),
+        graph_storage="GremlinStorage",
+    )
+
+    await rag.initialize_storages()
+    await initialize_pipeline_status()
+
+    return rag
 
 
 async def print_stream(stream):
     async for chunk in stream:
         print(chunk, end="", flush=True)
 
-if inspect.isasyncgen(resp):
-    asyncio.run(print_stream(resp))
-else:
-    print(resp)
+
+def main():
+    # Initialize RAG instance
+    rag = asyncio.run(initialize_rag())
+
+    # Insert example text
+    with open("./book.txt", "r", encoding="utf-8") as f:
+        rag.insert(f.read())
+
+    # Test different query modes
+    print("\nNaive Search:")
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+    )
+
+    print("\nLocal Search:")
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+    )
+
+    print("\nGlobal Search:")
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+    )
+
+    print("\nHybrid Search:")
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+    )
+
+    # stream response
+    resp = rag.query(
+        "What are the top themes in this story?",
+        param=QueryParam(mode="hybrid", stream=True),
+    )
+
+    if inspect.isasyncgen(resp):
+        asyncio.run(print_stream(resp))
+    else:
+        print(resp)
+
+
+if __name__ == "__main__":
+    main()
@@ -2,6 +2,11 @@ import os
 from lightrag import LightRAG, QueryParam
 from lightrag.llm.ollama import ollama_model_complete, ollama_embed
 from lightrag.utils import EmbeddingFunc
+import asyncio
+import nest_asyncio
+
+nest_asyncio.apply()
+from lightrag.kg.shared_storage import initialize_pipeline_status
 
 # WorkingDir
 ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
@@ -27,30 +32,59 @@ os.environ["MILVUS_USER"] = "root"
 os.environ["MILVUS_PASSWORD"] = "root"
 os.environ["MILVUS_DB_NAME"] = "lightrag"
 
-rag = LightRAG(
-    working_dir=WORKING_DIR,
-    llm_model_func=ollama_model_complete,
-    llm_model_name="qwen2.5:14b",
-    llm_model_max_async=4,
-    llm_model_max_token_size=32768,
-    llm_model_kwargs={"host": "http://127.0.0.1:11434", "options": {"num_ctx": 32768}},
-    embedding_func=EmbeddingFunc(
-        embedding_dim=1024,
-        max_token_size=8192,
-        func=lambda texts: ollama_embed(
-            texts=texts, embed_model="bge-m3:latest", host="http://127.0.0.1:11434"
-        ),
-    ),
-    kv_storage="MongoKVStorage",
-    graph_storage="Neo4JStorage",
-    vector_storage="MilvusVectorDBStorage",
-)
-
-file = "./book.txt"
-with open(file, "r") as f:
-    rag.insert(f.read())
-
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
+async def initialize_rag():
+    rag = LightRAG(
+        working_dir=WORKING_DIR,
+        llm_model_func=ollama_model_complete,
+        llm_model_name="qwen2.5:14b",
+        llm_model_max_async=4,
+        llm_model_max_token_size=32768,
+        llm_model_kwargs={"host": "http://127.0.0.1:11434", "options": {"num_ctx": 32768}},
+        embedding_func=EmbeddingFunc(
+            embedding_dim=1024,
+            max_token_size=8192,
+            func=lambda texts: ollama_embed(
+                texts=texts, embed_model="bge-m3:latest", host="http://127.0.0.1:11434"
+            ),
+        ),
+        kv_storage="MongoKVStorage",
+        graph_storage="Neo4JStorage",
+        vector_storage="MilvusVectorDBStorage",
+    )
+
+    await rag.initialize_storages()
+    await initialize_pipeline_status()
+
+    return rag
+
+
+def main():
+    # Initialize RAG instance
+    rag = asyncio.run(initialize_rag())
+
+    # Insert example text
+    with open("./book.txt", "r", encoding="utf-8") as f:
+        rag.insert(f.read())
+
+    # Test different query modes
+    print("\nNaive Search:")
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+    )
+
+    print("\nLocal Search:")
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+    )
+
+    print("\nGlobal Search:")
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+    )
+
+    print("\nHybrid Search:")
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+    )
+
+
+if __name__ == "__main__":
+    main()
@@ -4,6 +4,7 @@ from lightrag import LightRAG, QueryParam
 from lightrag.llm.openai import openai_complete_if_cache, openai_embed
 from lightrag.utils import EmbeddingFunc
 import numpy as np
+from lightrag.kg.shared_storage import initialize_pipeline_status
 
 WORKING_DIR = "./dickens"
 
@@ -52,21 +53,28 @@ async def test_funcs():
 
 # asyncio.run(test_funcs())
 
+async def initialize_rag():
+    embedding_dimension = await get_embedding_dim()
+    print(f"Detected embedding dimension: {embedding_dimension}")
+
+    rag = LightRAG(
+        working_dir=WORKING_DIR,
+        llm_model_func=llm_model_func,
+        embedding_func=EmbeddingFunc(
+            embedding_dim=embedding_dimension,
+            max_token_size=8192,
+            func=embedding_func,
+        ),
+    )
+
+    await rag.initialize_storages()
+    await initialize_pipeline_status()
+
+    return rag
+
+
 async def main():
     try:
-        embedding_dimension = await get_embedding_dim()
-        print(f"Detected embedding dimension: {embedding_dimension}")
-
-        rag = LightRAG(
-            working_dir=WORKING_DIR,
-            llm_model_func=llm_model_func,
-            embedding_func=EmbeddingFunc(
-                embedding_dim=embedding_dimension,
-                max_token_size=8192,
-                func=embedding_func,
-            ),
-        )
+        # Initialize RAG instance
+        rag = asyncio.run(initialize_rag())
 
         with open("./book.txt", "r", encoding="utf-8") as f:
             await rag.ainsert(f.read())
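One caveat worth noting for the pattern in the hunk above: `initialize_rag()` is a coroutine, and `asyncio.run()` cannot normally be invoked from inside an already-running coroutine such as `async def main()`. A minimal sketch of the fully asynchronous variant, reusing the `initialize_rag()` defined above (this variant is an assumption for illustration, not part of this commit):

```python
import asyncio


async def main():
    # Await the coroutine directly instead of starting a nested event loop
    rag = await initialize_rag()  # initialize_rag() as defined in the example above

    with open("./book.txt", "r", encoding="utf-8") as f:
        await rag.ainsert(f.read())


if __name__ == "__main__":
    asyncio.run(main())
```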
@@ -4,6 +4,7 @@ from lightrag import LightRAG, QueryParam
 from lightrag.llm.openai import openai_complete_if_cache, openai_embed
 from lightrag.utils import EmbeddingFunc
 import numpy as np
+from lightrag.kg.shared_storage import initialize_pipeline_status
 
 WORKING_DIR = "./dickens"
 
@@ -52,25 +53,33 @@ async def test_funcs():
 
 # asyncio.run(test_funcs())
 
+async def initialize_rag():
+    embedding_dimension = await get_embedding_dim()
+    print(f"Detected embedding dimension: {embedding_dimension}")
+
+    rag = LightRAG(
+        working_dir=WORKING_DIR,
+        embedding_cache_config={
+            "enabled": True,
+            "similarity_threshold": 0.90,
+        },
+        llm_model_func=llm_model_func,
+        embedding_func=EmbeddingFunc(
+            embedding_dim=embedding_dimension,
+            max_token_size=8192,
+            func=embedding_func,
+        ),
+    )
+
+    await rag.initialize_storages()
+    await initialize_pipeline_status()
+
+    return rag
+
+
 async def main():
     try:
-        embedding_dimension = await get_embedding_dim()
-        print(f"Detected embedding dimension: {embedding_dimension}")
-
-        rag = LightRAG(
-            working_dir=WORKING_DIR,
-            embedding_cache_config={
-                "enabled": True,
-                "similarity_threshold": 0.90,
-            },
-            llm_model_func=llm_model_func,
-            embedding_func=EmbeddingFunc(
-                embedding_dim=embedding_dimension,
-                max_token_size=8192,
-                func=embedding_func,
-            ),
-        )
+        # Initialize RAG instance
+        rag = asyncio.run(initialize_rag())
 
         with open("./book.txt", "r", encoding="utf-8") as f:
             await rag.ainsert(f.read())
@@ -1,9 +1,11 @@
 import inspect
 import os
+import asyncio
 from lightrag import LightRAG
 from lightrag.llm import openai_complete, openai_embed
 from lightrag.utils import EmbeddingFunc, always_get_an_event_loop
 from lightrag import QueryParam
+from lightrag.kg.shared_storage import initialize_pipeline_status
 
 # WorkingDir
 ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
@@ -13,42 +15,54 @@ if not os.path.exists(WORKING_DIR):
 print(f"WorkingDir: {WORKING_DIR}")
 
 api_key = "empty"
-rag = LightRAG(
-    working_dir=WORKING_DIR,
-    llm_model_func=openai_complete,
-    llm_model_name="qwen2.5-14b-instruct@4bit",
-    llm_model_max_async=4,
-    llm_model_max_token_size=32768,
-    llm_model_kwargs={"base_url": "http://127.0.0.1:1234/v1", "api_key": api_key},
-    embedding_func=EmbeddingFunc(
-        embedding_dim=1024,
-        max_token_size=8192,
-        func=lambda texts: openai_embed(
-            texts=texts,
-            model="text-embedding-bge-m3",
-            base_url="http://127.0.0.1:1234/v1",
-            api_key=api_key,
-        ),
-    ),
-)
-
-with open("./book.txt", "r", encoding="utf-8") as f:
-    rag.insert(f.read())
-
-resp = rag.query(
-    "What are the top themes in this story?",
-    param=QueryParam(mode="hybrid", stream=True),
-)
+async def initialize_rag():
+    rag = LightRAG(
+        working_dir=WORKING_DIR,
+        llm_model_func=openai_complete,
+        llm_model_name="qwen2.5-14b-instruct@4bit",
+        llm_model_max_async=4,
+        llm_model_max_token_size=32768,
+        llm_model_kwargs={"base_url": "http://127.0.0.1:1234/v1", "api_key": api_key},
+        embedding_func=EmbeddingFunc(
+            embedding_dim=1024,
+            max_token_size=8192,
+            func=lambda texts: openai_embed(
+                texts=texts,
+                model="text-embedding-bge-m3",
+                base_url="http://127.0.0.1:1234/v1",
+                api_key=api_key,
+            ),
+        ),
+    )
+
+    await rag.initialize_storages()
+    await initialize_pipeline_status()
+
+    return rag
 
 
 async def print_stream(stream):
     async for chunk in stream:
         if chunk:
             print(chunk, end="", flush=True)
 
-loop = always_get_an_event_loop()
-if inspect.isasyncgen(resp):
-    loop.run_until_complete(print_stream(resp))
-else:
-    print(resp)
+
+def main():
+    # Initialize RAG instance
+    rag = asyncio.run(initialize_rag())
+
+    with open("./book.txt", "r", encoding="utf-8") as f:
+        rag.insert(f.read())
+
+    resp = rag.query(
+        "What are the top themes in this story?",
+        param=QueryParam(mode="hybrid", stream=True),
+    )
+
+    loop = always_get_an_event_loop()
+    if inspect.isasyncgen(resp):
+        loop.run_until_complete(print_stream(resp))
+    else:
+        print(resp)
+
+
+if __name__ == "__main__":
+    main()
@@ -1,40 +1,54 @@
 import os
+import asyncio
 from lightrag import LightRAG, QueryParam
 from lightrag.llm.openai import gpt_4o_mini_complete, openai_embed
+from lightrag.kg.shared_storage import initialize_pipeline_status
 
 WORKING_DIR = "./dickens"
 
 if not os.path.exists(WORKING_DIR):
     os.mkdir(WORKING_DIR)
 
-rag = LightRAG(
-    working_dir=WORKING_DIR,
-    embedding_func=openai_embed,
-    llm_model_func=gpt_4o_mini_complete,
-    # llm_model_func=gpt_4o_complete
-)
-
-with open("./book.txt", "r", encoding="utf-8") as f:
-    rag.insert(f.read())
-
-# Perform naive search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
-
-# Perform local search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
-
-# Perform global search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
-
-# Perform hybrid search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
+async def initialize_rag():
+    rag = LightRAG(
+        working_dir=WORKING_DIR,
+        embedding_func=openai_embed,
+        llm_model_func=gpt_4o_mini_complete,
+        # llm_model_func=gpt_4o_complete
+    )
+
+    await rag.initialize_storages()
+    await initialize_pipeline_status()
+
+    return rag
+
+
+def main():
+    # Initialize RAG instance
+    rag = asyncio.run(initialize_rag())
+
+    with open("./book.txt", "r", encoding="utf-8") as f:
+        rag.insert(f.read())
+
+    # Perform naive search
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+    )
+
+    # Perform local search
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+    )
+
+    # Perform global search
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+    )
+
+    # Perform hybrid search
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+    )
+
+
+if __name__ == "__main__":
+    main()
@@ -4,6 +4,7 @@ from lightrag import LightRAG, QueryParam
 from lightrag.llm.openai import gpt_4o_mini_complete, openai_embed
 from lightrag.utils import EmbeddingFunc
 import numpy as np
+from lightrag.kg.shared_storage import initialize_pipeline_status
 
 #########
 # Uncomment the below two lines if running in a jupyter notebook to handle the async nature of rag.insert()
@@ -52,7 +53,7 @@ async def create_embedding_function_instance():
 async def initialize_rag():
     embedding_func_instance = await create_embedding_function_instance()
 
-    return LightRAG(
+    rag = LightRAG(
         working_dir=WORKING_DIR,
         llm_model_func=gpt_4o_mini_complete,
         embedding_func=embedding_func_instance,
@@ -60,14 +61,38 @@ async def initialize_rag():
         log_level="DEBUG",
     )
 
-# Run the initialization
-rag = asyncio.run(initialize_rag())
-
-with open("book.txt", "r", encoding="utf-8") as f:
-    rag.insert(f.read())
-
-# Perform naive search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
+    await rag.initialize_storages()
+    await initialize_pipeline_status()
+
+    return rag
+
+
+def main():
+    # Initialize RAG instance
+    rag = asyncio.run(initialize_rag())
+
+    with open("./book.txt", "r", encoding="utf-8") as f:
+        rag.insert(f.read())
+
+    # Perform naive search
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+    )
+
+    # Perform local search
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+    )
+
+    # Perform global search
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+    )
+
+    # Perform hybrid search
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+    )
+
+
+if __name__ == "__main__":
+    main()
@@ -1,7 +1,9 @@
 import os
+import asyncio
 from lightrag import LightRAG, QueryParam
 from lightrag.llm.ollama import ollama_embed, openai_complete_if_cache
 from lightrag.utils import EmbeddingFunc
+from lightrag.kg.shared_storage import initialize_pipeline_status
 
 # WorkingDir
 ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
@@ -48,23 +50,52 @@ embedding_func = EmbeddingFunc(
         texts, embed_model="shaw/dmeta-embedding-zh", host="http://117.50.173.35:11434"
     ),
 )
 
-rag = LightRAG(
-    working_dir=WORKING_DIR,
-    llm_model_func=llm_model_func,
-    llm_model_max_token_size=32768,
-    embedding_func=embedding_func,
-    chunk_token_size=512,
-    chunk_overlap_token_size=256,
-    kv_storage="RedisKVStorage",
-    graph_storage="Neo4JStorage",
-    vector_storage="MilvusVectorDBStorage",
-    doc_status_storage="RedisKVStorage",
-)
-
-file = "../book.txt"
-with open(file, "r", encoding="utf-8") as f:
-    rag.insert(f.read())
-
-print(rag.query("谁会3D建模 ?", param=QueryParam(mode="mix")))
+async def initialize_rag():
+    rag = LightRAG(
+        working_dir=WORKING_DIR,
+        llm_model_func=llm_model_func,
+        llm_model_max_token_size=32768,
+        embedding_func=embedding_func,
+        chunk_token_size=512,
+        chunk_overlap_token_size=256,
+        kv_storage="RedisKVStorage",
+        graph_storage="Neo4JStorage",
+        vector_storage="MilvusVectorDBStorage",
+        doc_status_storage="RedisKVStorage",
+    )
+
+    await rag.initialize_storages()
+    await initialize_pipeline_status()
+
+    return rag
+
+
+def main():
+    # Initialize RAG instance
+    rag = asyncio.run(initialize_rag())
+
+    with open("./book.txt", "r", encoding="utf-8") as f:
+        rag.insert(f.read())
+
+    # Perform naive search
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+    )
+
+    # Perform local search
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+    )
+
+    # Perform global search
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+    )
+
+    # Perform hybrid search
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+    )
+
+
+if __name__ == "__main__":
+    main()
@@ -6,6 +6,7 @@ from lightrag import LightRAG, QueryParam
 from lightrag.llm.openai import openai_complete_if_cache, openai_embed
 from lightrag.utils import EmbeddingFunc
 import numpy as np
+from lightrag.kg.shared_storage import initialize_pipeline_status
 
 print(os.getcwd())
 script_directory = Path(__file__).resolve().parent.parent
@@ -63,41 +64,48 @@ async def get_embedding_dim():
     embedding_dim = embedding.shape[1]
     return embedding_dim
 
+async def initialize_rag():
+    # Detect embedding dimension
+    embedding_dimension = await get_embedding_dim()
+    print(f"Detected embedding dimension: {embedding_dimension}")
+
+    # Initialize LightRAG
+    # We use Oracle DB as the KV/vector/graph storage
+    # You can add `addon_params={"example_number": 1, "language": "Simplfied Chinese"}` to control the prompt
+    rag = LightRAG(
+        # log_level="DEBUG",
+        working_dir=WORKING_DIR,
+        entity_extract_max_gleaning=1,
+        enable_llm_cache=True,
+        enable_llm_cache_for_entity_extract=True,
+        embedding_cache_config=None,  # {"enabled": True,"similarity_threshold": 0.90},
+        chunk_token_size=CHUNK_TOKEN_SIZE,
+        llm_model_max_token_size=MAX_TOKENS,
+        llm_model_func=llm_model_func,
+        embedding_func=EmbeddingFunc(
+            embedding_dim=embedding_dimension,
+            max_token_size=500,
+            func=embedding_func,
+        ),
+        graph_storage="OracleGraphStorage",
+        kv_storage="OracleKVStorage",
+        vector_storage="OracleVectorDBStorage",
+        addon_params={
+            "example_number": 1,
+            "language": "Simplfied Chinese",
+            "entity_types": ["organization", "person", "geo", "event"],
+            "insert_batch_size": 2,
+        },
+    )
+    await rag.initialize_storages()
+    await initialize_pipeline_status()
+
+    return rag
+
+
 async def main():
     try:
-        # Detect embedding dimension
-        embedding_dimension = await get_embedding_dim()
-        print(f"Detected embedding dimension: {embedding_dimension}")
-
-        # Initialize LightRAG
-        # We use Oracle DB as the KV/vector/graph storage
-        # You can add `addon_params={"example_number": 1, "language": "Simplfied Chinese"}` to control the prompt
-        rag = LightRAG(
-            # log_level="DEBUG",
-            working_dir=WORKING_DIR,
-            entity_extract_max_gleaning=1,
-            enable_llm_cache=True,
-            enable_llm_cache_for_entity_extract=True,
-            embedding_cache_config=None,  # {"enabled": True,"similarity_threshold": 0.90},
-            chunk_token_size=CHUNK_TOKEN_SIZE,
-            llm_model_max_token_size=MAX_TOKENS,
-            llm_model_func=llm_model_func,
-            embedding_func=EmbeddingFunc(
-                embedding_dim=embedding_dimension,
-                max_token_size=500,
-                func=embedding_func,
-            ),
-            graph_storage="OracleGraphStorage",
-            kv_storage="OracleKVStorage",
-            vector_storage="OracleVectorDBStorage",
-            addon_params={
-                "example_number": 1,
-                "language": "Simplfied Chinese",
-                "entity_types": ["organization", "person", "geo", "event"],
-                "insert_batch_size": 2,
-            },
-        )
+        # Initialize RAG instance
+        rag = asyncio.run(initialize_rag())
 
         # Extract and Insert into LightRAG storage
         with open(WORKING_DIR + "/docs.txt", "r", encoding="utf-8") as f:
@@ -5,6 +5,7 @@ from lightrag.llm.openai import openai_complete_if_cache
 from lightrag.llm.siliconcloud import siliconcloud_embedding
 from lightrag.utils import EmbeddingFunc
 import numpy as np
+from lightrag.kg.shared_storage import initialize_pipeline_status
 
 WORKING_DIR = "./dickens"
 
@@ -46,35 +47,48 @@ async def test_funcs():
 
 asyncio.run(test_funcs())
 
-rag = LightRAG(
-    working_dir=WORKING_DIR,
-    llm_model_func=llm_model_func,
-    embedding_func=EmbeddingFunc(
-        embedding_dim=768, max_token_size=512, func=embedding_func
-    ),
-)
-
-with open("./book.txt") as f:
-    rag.insert(f.read())
-
-# Perform naive search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
-
-# Perform local search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
-
-# Perform global search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
-
-# Perform hybrid search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
+async def initialize_rag():
+    rag = LightRAG(
+        working_dir=WORKING_DIR,
+        llm_model_func=llm_model_func,
+        embedding_func=EmbeddingFunc(
+            embedding_dim=768, max_token_size=512, func=embedding_func
+        ),
+    )
+
+    await rag.initialize_storages()
+    await initialize_pipeline_status()
+
+    return rag
+
+
+def main():
+    # Initialize RAG instance
+    rag = asyncio.run(initialize_rag())
+
+    with open("./book.txt", "r", encoding="utf-8") as f:
+        rag.insert(f.read())
+
+    # Perform naive search
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+    )
+
+    # Perform local search
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+    )
+
+    # Perform global search
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+    )
+
+    # Perform hybrid search
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+    )
+
+
+if __name__ == "__main__":
+    main()
@@ -6,6 +6,7 @@ import numpy as np
 from lightrag import LightRAG, QueryParam
 from lightrag.llm import siliconcloud_embedding, openai_complete_if_cache
 from lightrag.utils import EmbeddingFunc
+from lightrag.kg.shared_storage import initialize_pipeline_status
 
 WORKING_DIR = "./dickens"
 
@@ -54,33 +55,40 @@ async def get_embedding_dim():
     embedding_dim = embedding.shape[1]
     return embedding_dim
 
+async def initialize_rag():
+    # Detect embedding dimension
+    embedding_dimension = await get_embedding_dim()
+    print(f"Detected embedding dimension: {embedding_dimension}")
+
+    # Initialize LightRAG
+    # We use TiDB DB as the KV/vector
+    rag = LightRAG(
+        enable_llm_cache=False,
+        working_dir=WORKING_DIR,
+        chunk_token_size=512,
+        llm_model_func=llm_model_func,
+        embedding_func=EmbeddingFunc(
+            embedding_dim=embedding_dimension,
+            max_token_size=512,
+            func=embedding_func,
+        ),
+        kv_storage="TiDBKVStorage",
+        vector_storage="TiDBVectorDBStorage",
+        graph_storage="TiDBGraphStorage",
+    )
+
+    await rag.initialize_storages()
+    await initialize_pipeline_status()
+
+    return rag
+
+
 async def main():
     try:
-        # Detect embedding dimension
-        embedding_dimension = await get_embedding_dim()
-        print(f"Detected embedding dimension: {embedding_dimension}")
-
-        # Initialize LightRAG
-        # We use TiDB DB as the KV/vector
-        rag = LightRAG(
-            enable_llm_cache=False,
-            working_dir=WORKING_DIR,
-            chunk_token_size=512,
-            llm_model_func=llm_model_func,
-            embedding_func=EmbeddingFunc(
-                embedding_dim=embedding_dimension,
-                max_token_size=512,
-                func=embedding_func,
-            ),
-            kv_storage="TiDBKVStorage",
-            vector_storage="TiDBVectorDBStorage",
-            graph_storage="TiDBGraphStorage",
-        )
-
-        # Extract and Insert into LightRAG storage
-        with open("./dickens/demo.txt", "r", encoding="utf-8") as f:
-            await rag.ainsert(f.read())
+        # Initialize RAG instance
+        rag = asyncio.run(initialize_rag())
+
+        with open("./book.txt", "r", encoding="utf-8") as f:
+            rag.insert(f.read())
 
         # Perform search in different modes
         modes = ["naive", "local", "global", "hybrid"]
@@ -1,10 +1,12 @@
 import os
 import logging
+import asyncio
 
 
 from lightrag import LightRAG, QueryParam
 from lightrag.llm.zhipu import zhipu_complete, zhipu_embedding
 from lightrag.utils import EmbeddingFunc
+from lightrag.kg.shared_storage import initialize_pipeline_status
 
 WORKING_DIR = "./dickens"
 
@@ -17,39 +19,51 @@ api_key = os.environ.get("ZHIPUAI_API_KEY")
 if api_key is None:
     raise Exception("Please set ZHIPU_API_KEY in your environment")
 
-rag = LightRAG(
-    working_dir=WORKING_DIR,
-    llm_model_func=zhipu_complete,
-    llm_model_name="glm-4-flashx",  # Using the most cost/performance balance model, but you can change it here.
-    llm_model_max_async=4,
-    llm_model_max_token_size=32768,
-    embedding_func=EmbeddingFunc(
-        embedding_dim=2048,  # Zhipu embedding-3 dimension
-        max_token_size=8192,
-        func=lambda texts: zhipu_embedding(texts),
-    ),
-)
-
-with open("./book.txt", "r", encoding="utf-8") as f:
-    rag.insert(f.read())
-
-# Perform naive search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
-
-# Perform local search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
-
-# Perform global search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
-
-# Perform hybrid search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
+async def initialize_rag():
+    rag = LightRAG(
+        working_dir=WORKING_DIR,
+        llm_model_func=zhipu_complete,
+        llm_model_name="glm-4-flashx",  # Using the most cost/performance balance model, but you can change it here.
+        llm_model_max_async=4,
+        llm_model_max_token_size=32768,
+        embedding_func=EmbeddingFunc(
+            embedding_dim=2048,  # Zhipu embedding-3 dimension
+            max_token_size=8192,
+            func=lambda texts: zhipu_embedding(texts),
+        ),
+    )
+
+    await rag.initialize_storages()
+    await initialize_pipeline_status()
+
+    return rag
+
+
+def main():
+    # Initialize RAG instance
+    rag = asyncio.run(initialize_rag())
+
+    with open("./book.txt", "r", encoding="utf-8") as f:
+        rag.insert(f.read())
+
+    # Perform naive search
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+    )
+
+    # Perform local search
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+    )
+
+    # Perform global search
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+    )
+
+    # Perform hybrid search
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+    )
+
+
+if __name__ == "__main__":
+    main()
@@ -8,6 +8,7 @@ from lightrag import LightRAG, QueryParam
 from lightrag.llm.zhipu import zhipu_complete
 from lightrag.llm.ollama import ollama_embedding
 from lightrag.utils import EmbeddingFunc
+from lightrag.kg.shared_storage import initialize_pipeline_status
 
 load_dotenv()
 ROOT_DIR = os.environ.get("ROOT_DIR")
@@ -27,8 +28,7 @@ os.environ["POSTGRES_USER"] = "rag"
 os.environ["POSTGRES_PASSWORD"] = "rag"
 os.environ["POSTGRES_DATABASE"] = "rag"
 
-
-async def main():
+async def initialize_rag():
     rag = LightRAG(
         working_dir=WORKING_DIR,
         llm_model_func=zhipu_complete,
@@ -50,9 +50,17 @@ async def main():
         auto_manage_storages_states=False,
     )
 
+    await rag.initialize_storages()
+    await initialize_pipeline_status()
+
+    return rag
+
+
+async def main():
+    # Initialize RAG instance
+    rag = asyncio.run(initialize_rag())
+
     # add embedding_func for graph database, it's deleted in commit 5661d76860436f7bf5aef2e50d9ee4a59660146c
     rag.chunk_entity_relation_graph.embedding_func = rag.embedding_func
-    await rag.initialize_storages()
 
     with open(f"{ROOT_DIR}/book.txt", "r", encoding="utf-8") as f:
         await rag.ainsert(f.read())
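The `auto_manage_storages_states=False` flag visible in the context above means this script owns the storage lifecycle itself, which is why `initialize_storages()` is now called explicitly inside `initialize_rag()`. A hedged sketch of the symmetric teardown, reusing `initialize_rag()` and `ROOT_DIR` from the example above (the `finalize_storages()` call is an assumption about the LightRAG API, so verify it against the installed version):

```python
async def run_with_explicit_lifecycle():
    # Reuses initialize_rag() and ROOT_DIR defined in the example above
    rag = await initialize_rag()
    try:
        with open(f"{ROOT_DIR}/book.txt", "r", encoding="utf-8") as f:
            await rag.ainsert(f.read())
    finally:
        # Assumption: explicit counterpart to initialize_storages()
        await rag.finalize_storages()
```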
@@ -6,6 +6,7 @@ import numpy as np
 from dotenv import load_dotenv
 import logging
 from openai import AzureOpenAI
+from lightrag.kg.shared_storage import initialize_pipeline_status
 
 logging.basicConfig(level=logging.INFO)
 
@@ -79,25 +80,32 @@ async def test_funcs():
 asyncio.run(test_funcs())
 
 embedding_dimension = 3072
 
-rag = LightRAG(
-    working_dir=WORKING_DIR,
-    llm_model_func=llm_model_func,
-    embedding_func=EmbeddingFunc(
-        embedding_dim=embedding_dimension,
-        max_token_size=8192,
-        func=embedding_func,
-    ),
-)
-
-book1 = open("./book_1.txt", encoding="utf-8")
-book2 = open("./book_2.txt", encoding="utf-8")
-
-rag.insert([book1.read(), book2.read()])
+async def initialize_rag():
+    rag = LightRAG(
+        working_dir=WORKING_DIR,
+        llm_model_func=llm_model_func,
+        embedding_func=EmbeddingFunc(
+            embedding_dim=embedding_dimension,
+            max_token_size=8192,
+            func=embedding_func,
+        ),
+    )
+
+    await rag.initialize_storages()
+    await initialize_pipeline_status()
+
+    return rag
 
 
 # Example function demonstrating the new query_with_separate_keyword_extraction usage
 async def run_example():
+    # Initialize RAG instance
+    rag = asyncio.run(initialize_rag())
+
+    book1 = open("./book_1.txt", encoding="utf-8")
+    book2 = open("./book_2.txt", encoding="utf-8")
+
+    rag.insert([book1.read(), book2.read()])
     query = "What are the top themes in this story?"
     prompt = "Please simplify the response for a young audience."
 
@@ -1,6 +1,7 @@
 import os
 from lightrag import LightRAG, QueryParam
 from lightrag.llm.openai import gpt_4o_mini_complete
+from lightrag.kg.shared_storage import initialize_pipeline_status
 #########
 # Uncomment the below two lines if running in a jupyter notebook to handle the async nature of rag.insert()
 # import nest_asyncio
@@ -12,31 +13,45 @@ WORKING_DIR = "./dickens"
 if not os.path.exists(WORKING_DIR):
     os.mkdir(WORKING_DIR)
 
-rag = LightRAG(
-    working_dir=WORKING_DIR,
-    llm_model_func=gpt_4o_mini_complete,  # Use gpt_4o_mini_complete LLM model
-    # llm_model_func=gpt_4o_complete  # Optionally, use a stronger model
-)
-
-with open("./dickens/book.txt", "r", encoding="utf-8") as f:
-    rag.insert(f.read())
-
-# Perform naive search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
-
-# Perform local search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
-
-# Perform global search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
-
-# Perform hybrid search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
+async def initialize_rag():
+    rag = LightRAG(
+        working_dir=WORKING_DIR,
+        llm_model_func=gpt_4o_mini_complete,  # Use gpt_4o_mini_complete LLM model
+        # llm_model_func=gpt_4o_complete  # Optionally, use a stronger model
+    )
+
+    await rag.initialize_storages()
+    await initialize_pipeline_status()
+
+    return rag
+
+
+def main():
+    # Initialize RAG instance
+    rag = asyncio.run(initialize_rag())
+
+    with open("./book.txt", "r", encoding="utf-8") as f:
+        rag.insert(f.read())
+
+    # Perform naive search
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+    )
+
+    # Perform local search
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+    )
+
+    # Perform global search
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+    )
+
+    # Perform hybrid search
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+    )
+
+
+if __name__ == "__main__":
+    main()
@@ -4,6 +4,7 @@ from lightrag import LightRAG, QueryParam
 from lightrag.llm.openai import gpt_4o_mini_complete, openai_embed
 from lightrag.utils import EmbeddingFunc
 import numpy as np
+from lightrag.kg.shared_storage import initialize_pipeline_status
 
 #########
 # Uncomment the below two lines if running in a jupyter notebook to handle the async nature of rag.insert()
@@ -67,7 +68,7 @@ async def create_embedding_function_instance():
 async def initialize_rag():
     embedding_func_instance = await create_embedding_function_instance()
     if CHROMADB_USE_LOCAL_PERSISTENT:
-        return LightRAG(
+        rag = LightRAG(
             working_dir=WORKING_DIR,
             llm_model_func=gpt_4o_mini_complete,
             embedding_func=embedding_func_instance,
@@ -87,7 +88,7 @@ async def initialize_rag():
             },
         )
     else:
-        return LightRAG(
+        rag = LightRAG(
             working_dir=WORKING_DIR,
             llm_model_func=gpt_4o_mini_complete,
             embedding_func=embedding_func_instance,
@@ -112,28 +113,36 @@ async def initialize_rag():
         )
 
 
-# Run the initialization
-rag = asyncio.run(initialize_rag())
-
-# with open("./dickens/book.txt", "r", encoding="utf-8") as f:
-#     rag.insert(f.read())
+    await rag.initialize_storages()
+    await initialize_pipeline_status()
+
+    return rag
+
+
+# Initialize RAG instance
+rag = asyncio.run(initialize_rag())
+
+with open("./book.txt", "r", encoding="utf-8") as f:
+    rag.insert(f.read())
 
 # Perform naive search
 print(
     rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
 )
 
 # Perform local search
 print(
     rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
 )
 
 # Perform global search
 print(
     rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
 )
 
 # Perform hybrid search
 print(
     rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
 )
+
+if __name__ == "__main__":
+    main()
@@ -1,5 +1,6 @@
 import os
 import logging
+import asyncio
 import numpy as np
 
 from dotenv import load_dotenv
@@ -8,7 +9,9 @@ from sentence_transformers import SentenceTransformer
 from openai import AzureOpenAI
 from lightrag import LightRAG, QueryParam
 from lightrag.utils import EmbeddingFunc
+from lightrag.kg.shared_storage import initialize_pipeline_status
 
+WORKING_DIR = "./dickens"
 # Configure Logging
 logging.basicConfig(level=logging.INFO)
 
@@ -55,11 +58,7 @@ async def embedding_func(texts: list[str]) -> np.ndarray:
     embeddings = model.encode(texts, convert_to_numpy=True)
     return embeddings
 
-def main():
-    WORKING_DIR = "./dickens"
-
-    # Initialize LightRAG with the LLM model function and embedding function
+async def initialize_rag():
     rag = LightRAG(
         working_dir=WORKING_DIR,
         llm_model_func=llm_model_func,
@@ -74,6 +73,15 @@ def main():
         },
     )
 
+    await rag.initialize_storages()
+    await initialize_pipeline_status()
+
+    return rag
+
+
+def main():
+    # Initialize RAG instance
+    rag = asyncio.run(initialize_rag())
     # Insert the custom chunks into LightRAG
     book1 = open("./book_1.txt", encoding="utf-8")
     book2 = open("./book_2.txt", encoding="utf-8")
@@ -1,7 +1,8 @@
 import os
+import asyncio
 from lightrag import LightRAG, QueryParam
 from lightrag.llm.openai import gpt_4o_mini_complete
+from lightrag.kg.shared_storage import initialize_pipeline_status
 
 #########
 # Uncomment the below two lines if running in a jupyter notebook to handle the async nature of rag.insert()
@@ -14,33 +15,46 @@ WORKING_DIR = "./local_neo4jWorkDir"
 if not os.path.exists(WORKING_DIR):
     os.mkdir(WORKING_DIR)
 
-rag = LightRAG(
-    working_dir=WORKING_DIR,
-    llm_model_func=gpt_4o_mini_complete,  # Use gpt_4o_mini_complete LLM model
-    graph_storage="Neo4JStorage",
-    log_level="INFO",
-    # llm_model_func=gpt_4o_complete  # Optionally, use a stronger model
-)
-
-with open("./book.txt") as f:
-    rag.insert(f.read())
-
-# Perform naive search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
-)
-
-# Perform local search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
-)
-
-# Perform global search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
-)
-
-# Perform hybrid search
-print(
-    rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
-)
+async def initialize_rag():
+    rag = LightRAG(
+        working_dir=WORKING_DIR,
+        llm_model_func=gpt_4o_mini_complete,  # Use gpt_4o_mini_complete LLM model
+        graph_storage="Neo4JStorage",
+        log_level="INFO",
+        # llm_model_func=gpt_4o_complete  # Optionally, use a stronger model
+    )
+
+    await rag.initialize_storages()
+    await initialize_pipeline_status()
+
+    return rag
+
+def main():
+    # Initialize RAG instance
+    rag = asyncio.run(initialize_rag())
+
+    with open("./book.txt", "r", encoding="utf-8") as f:
+        rag.insert(f.read())
+
+    # Perform naive search
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+    )
+
+    # Perform local search
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+    )
+
+    # Perform global search
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+    )
+
+    # Perform hybrid search
+    print(
+        rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+    )
+
+if __name__ == "__main__":
+    main()
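Worth noting for the Neo4j variant above: `graph_storage="Neo4JStorage"` only selects the backend, while the connection itself is configured out of band. A short sketch of the usual setup, assuming the backend reads `NEO4J_URI`, `NEO4J_USERNAME`, and `NEO4J_PASSWORD` from the environment (these names are not shown in this diff, so treat them as assumptions to confirm against the Neo4JStorage implementation):

```python
import os

# Assumed variable names; verify against lightrag's Neo4j storage backend.
os.environ["NEO4J_URI"] = "neo4j://localhost:7687"
os.environ["NEO4J_USERNAME"] = "neo4j"
os.environ["NEO4J_PASSWORD"] = "your-password"
```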
@@ -18,6 +18,7 @@
     "from lightrag import LightRAG, QueryParam\n",
     "from lightrag.llm.openai import openai_complete_if_cache, openai_embed\n",
     "from lightrag.utils import EmbeddingFunc\n",
+    "from lightrag.kg.shared_storage import initialize_pipeline_status\n",
     "import nest_asyncio"
    ]
   },
@@ -25,7 +26,9 @@
    "cell_type": "markdown",
    "id": "dd17956ec322b361",
    "metadata": {},
-   "source": "#### split by character"
+   "source": [
+    "#### split by character"
+   ]
   },
   {
    "cell_type": "code",
@@ -109,14 +112,26 @@
    }
   ],
   "source": [
-   "rag = LightRAG(\n",
-   "    working_dir=WORKING_DIR,\n",
-   "    llm_model_func=llm_model_func,\n",
-   "    embedding_func=EmbeddingFunc(\n",
-   "        embedding_dim=4096, max_token_size=8192, func=embedding_func\n",
-   "    ),\n",
-   "    chunk_token_size=512,\n",
-   ")"
+   "import asyncio\n",
+   "import nest_asyncio\n",
+   "\n",
+   "nest_asyncio.apply()\n",
+   "\n",
+   "async def initialize_rag():\n",
+   "    rag = LightRAG(\n",
+   "        working_dir=WORKING_DIR,\n",
+   "        llm_model_func=llm_model_func,\n",
+   "        embedding_func=EmbeddingFunc(\n",
+   "            embedding_dim=4096, max_token_size=8192, func=embedding_func\n",
+   "        ),\n",
+   "        chunk_token_size=512,\n",
+   "    )\n",
+   "    await rag.initialize_storages()\n",
+   "    await initialize_pipeline_status()\n",
+   "\n",
+   "    return rag\n",
+   "\n",
+   "rag = asyncio.run(initialize_rag())"
   ]
  },
 {
@@ -908,7 +923,9 @@
    "cell_type": "markdown",
    "id": "4e5bfad24cb721a8",
    "metadata": {},
-   "source": "#### split by character only"
+   "source": [
+    "#### split by character only"
+   ]
   },
   {
    "cell_type": "code",
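The notebook cell above can call `asyncio.run(initialize_rag())` directly because `nest_asyncio.apply()` runs first: Jupyter already has an event loop running, and the patch lets `asyncio.run()` re-enter it instead of raising. A compact standalone illustration of why the patch is needed (not code from the notebook itself):

```python
import asyncio
import nest_asyncio

# Inside Jupyter, a loop is already running, so a bare asyncio.run()
# raises "asyncio.run() cannot be called from a running event loop".
# nest_asyncio patches the loop to tolerate the nested call.
nest_asyncio.apply()


async def ready():
    return "initialized"


print(asyncio.run(ready()))  # safe after the patch, in or out of Jupyter
```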
@@ -1,8 +1,10 @@
 import os
 import time
+import asyncio
 from lightrag import LightRAG, QueryParam
 from lightrag.llm.ollama import ollama_model_complete, ollama_embed
 from lightrag.utils import EmbeddingFunc
+from lightrag.kg.shared_storage import initialize_pipeline_status
 
 # Working directory and the directory path for text files
 WORKING_DIR = "./dickens"
@@ -12,17 +14,22 @@ TEXT_FILES_DIR = "/llm/mt"
 if not os.path.exists(WORKING_DIR):
     os.mkdir(WORKING_DIR)
 
-# Initialize LightRAG
-rag = LightRAG(
-    working_dir=WORKING_DIR,
-    llm_model_func=ollama_model_complete,
-    llm_model_name="qwen2.5:3b-instruct-max-context",
-    embedding_func=EmbeddingFunc(
-        embedding_dim=768,
-        max_token_size=8192,
-        func=lambda texts: ollama_embed(texts, embed_model="nomic-embed-text"),
-    ),
-)
+async def initialize_rag():
+    # Initialize LightRAG
+    rag = LightRAG(
+        working_dir=WORKING_DIR,
+        llm_model_func=ollama_model_complete,
+        llm_model_name="qwen2.5:3b-instruct-max-context",
+        embedding_func=EmbeddingFunc(
+            embedding_dim=768,
+            max_token_size=8192,
+            func=lambda texts: ollama_embed(texts, embed_model="nomic-embed-text"),
+        ),
+    )
+    await rag.initialize_storages()
+    await initialize_pipeline_status()
+
+    return rag
 
 # Read all .txt files from the TEXT_FILES_DIR directory
 texts = []
@@ -47,58 +54,65 @@ def insert_texts_with_retry(rag, texts, retries=3, delay=5):
     raise RuntimeError("Failed to insert texts after multiple retries.")
 
 
-insert_texts_with_retry(rag, texts)
-
-# Perform different types of queries and handle potential errors
-try:
-    print(
-        rag.query(
-            "What are the top themes in this story?", param=QueryParam(mode="naive")
-        )
-    )
-except Exception as e:
-    print(f"Error performing naive search: {e}")
-
-try:
-    print(
-        rag.query(
-            "What are the top themes in this story?", param=QueryParam(mode="local")
-        )
-    )
-except Exception as e:
-    print(f"Error performing local search: {e}")
-
-try:
-    print(
-        rag.query(
-            "What are the top themes in this story?", param=QueryParam(mode="global")
-        )
-    )
-except Exception as e:
-    print(f"Error performing global search: {e}")
-
-try:
-    print(
-        rag.query(
-            "What are the top themes in this story?", param=QueryParam(mode="hybrid")
-        )
-    )
-except Exception as e:
-    print(f"Error performing hybrid search: {e}")
+def main():
+    # Initialize RAG instance
+    rag = asyncio.run(initialize_rag())
+
+    insert_texts_with_retry(rag, texts)
+
+    # Perform different types of queries and handle potential errors
+    try:
+        print(
+            rag.query(
+                "What are the top themes in this story?", param=QueryParam(mode="naive")
+            )
+        )
+    except Exception as e:
+        print(f"Error performing naive search: {e}")
+
+    try:
+        print(
+            rag.query(
+                "What are the top themes in this story?", param=QueryParam(mode="local")
+            )
+        )
+    except Exception as e:
+        print(f"Error performing local search: {e}")
+
+    try:
+        print(
+            rag.query(
+                "What are the top themes in this story?", param=QueryParam(mode="global")
+            )
+        )
+    except Exception as e:
+        print(f"Error performing global search: {e}")
+
+    try:
+        print(
+            rag.query(
+                "What are the top themes in this story?", param=QueryParam(mode="hybrid")
+            )
+        )
+    except Exception as e:
+        print(f"Error performing hybrid search: {e}")
 
 
 # Function to clear VRAM resources
 def clear_vram():
     os.system("sudo nvidia-smi --gpu-reset")
 
 
 # Regularly clear VRAM to prevent overflow
 clear_vram_interval = 3600  # Clear once every hour
 start_time = time.time()
 
 while True:
     current_time = time.time()
     if current_time - start_time > clear_vram_interval:
         clear_vram()
         start_time = current_time
     time.sleep(60)  # Check the time every minute
+
+if __name__ == "__main__":
+    main()
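The hunk above changes only where `insert_texts_with_retry` is called; of the helper itself, just the signature in the hunk header and the final `raise` are visible. The body below is therefore a plausible reconstruction of such a retry helper, not the file's actual code:

```python
import time


def insert_texts_with_retry(rag, texts, retries=3, delay=5):
    """Retry rag.insert() a few times before giving up (sketch, assumed body)."""
    for attempt in range(retries):
        try:
            rag.insert(texts)
            return
        except Exception as e:
            print(f"Insert attempt {attempt + 1} failed: {e}; retrying in {delay}s")
            time.sleep(delay)
    raise RuntimeError("Failed to insert texts after multiple retries.")
```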
@@ -1,8 +1,10 @@
 import os
 import json
 import time
+import asyncio
 
 from lightrag import LightRAG
+from lightrag.kg.shared_storage import initialize_pipeline_status
 
 
 def insert_text(rag, file_path):
@@ -29,6 +31,19 @@ WORKING_DIR = f"../{cls}"
 if not os.path.exists(WORKING_DIR):
     os.mkdir(WORKING_DIR)
 
-rag = LightRAG(working_dir=WORKING_DIR)
-
-insert_text(rag, f"../datasets/unique_contexts/{cls}_unique_contexts.json")
+async def initialize_rag():
+    rag = LightRAG(working_dir=WORKING_DIR)
+
+    await rag.initialize_storages()
+    await initialize_pipeline_status()
+
+    return rag
+
+def main():
+    # Initialize RAG instance
+    rag = asyncio.run(initialize_rag())
+    insert_text(rag, f"../datasets/unique_contexts/{cls}_unique_contexts.json")
+
+
+if __name__ == "__main__":
+    main()
@@ -1,11 +1,13 @@
 import os
 import json
 import time
+import asyncio
 import numpy as np
 
 from lightrag import LightRAG
 from lightrag.utils import EmbeddingFunc
 from lightrag.llm.openai import openai_complete_if_cache, openai_embed
+from lightrag.kg.shared_storage import initialize_pipeline_status
 
 
 ## For Upstage API
@@ -60,12 +62,25 @@ WORKING_DIR = f"../{cls}"
 if not os.path.exists(WORKING_DIR):
     os.mkdir(WORKING_DIR)
 
-rag = LightRAG(
-    working_dir=WORKING_DIR,
-    llm_model_func=llm_model_func,
-    embedding_func=EmbeddingFunc(
-        embedding_dim=4096, max_token_size=8192, func=embedding_func
-    ),
-)
-
-insert_text(rag, f"../datasets/unique_contexts/{cls}_unique_contexts.json")
+async def initialize_rag():
+    rag = LightRAG(
+        working_dir=WORKING_DIR,
+        llm_model_func=llm_model_func,
+        embedding_func=EmbeddingFunc(
+            embedding_dim=4096, max_token_size=8192, func=embedding_func
+        ),
+    )
+
+    await rag.initialize_storages()
+    await initialize_pipeline_status()
+
+    return rag
+
+def main():
+    # Initialize RAG instance
+    rag = asyncio.run(initialize_rag())
+    insert_text(rag, f"../datasets/unique_contexts/{cls}_unique_contexts.json")
+
+
+if __name__ == "__main__":
+    main()
@@ -1,203 +0,0 @@
-#!/usr/bin/env python
-"""
-Start LightRAG server with Gunicorn
-"""
-
-import os
-import sys
-import signal
-import pipmaster as pm
-from lightrag.api.utils_api import parse_args, display_splash_screen
-from lightrag.kg.shared_storage import initialize_share_data, finalize_share_data
-
-
-def check_and_install_dependencies():
-    """Check and install required dependencies"""
-    required_packages = [
-        "gunicorn",
-        "tiktoken",
-        "psutil",
-        # Add other required packages here
-    ]
-
-    for package in required_packages:
-        if not pm.is_installed(package):
-            print(f"Installing {package}...")
-            pm.install(package)
-            print(f"{package} installed successfully")
-
-
-# Signal handler for graceful shutdown
-def signal_handler(sig, frame):
-    print("\n\n" + "=" * 80)
-    print("RECEIVED TERMINATION SIGNAL")
-    print(f"Process ID: {os.getpid()}")
-    print("=" * 80 + "\n")
-
-    # Release shared resources
-    finalize_share_data()
-
-    # Exit with success status
-    sys.exit(0)
-
-
-def main():
-    # Check and install dependencies
-    check_and_install_dependencies()
-
-    # Register signal handlers for graceful shutdown
-    signal.signal(signal.SIGINT, signal_handler)  # Ctrl+C
-    signal.signal(signal.SIGTERM, signal_handler)  # kill command
-
-    # Parse all arguments using parse_args
-    args = parse_args(is_uvicorn_mode=False)
-
-    # Display startup information
-    display_splash_screen(args)
-
-    print("🚀 Starting LightRAG with Gunicorn")
-    print(f"🔄 Worker management: Gunicorn (workers={args.workers})")
-    print("🔍 Preloading app: Enabled")
-    print("📝 Note: Using Gunicorn's preload feature for shared data initialization")
-    print("\n\n" + "=" * 80)
-    print("MAIN PROCESS INITIALIZATION")
-    print(f"Process ID: {os.getpid()}")
-    print(f"Workers setting: {args.workers}")
-    print("=" * 80 + "\n")
-
-    # Import Gunicorn's StandaloneApplication
-    from gunicorn.app.base import BaseApplication
-
-    # Define a custom application class that loads our config
-    class GunicornApp(BaseApplication):
-        def __init__(self, app, options=None):
-            self.options = options or {}
-            self.application = app
-            super().__init__()
-
-        def load_config(self):
-            # Define valid Gunicorn configuration options
-            valid_options = {
-                "bind",
-                "workers",
-                "worker_class",
-                "timeout",
-                "keepalive",
-                "preload_app",
-                "errorlog",
-                "accesslog",
-                "loglevel",
-                "certfile",
-                "keyfile",
-                "limit_request_line",
-                "limit_request_fields",
-                "limit_request_field_size",
-                "graceful_timeout",
-                "max_requests",
-                "max_requests_jitter",
-            }
-
-            # Special hooks that need to be set separately
-            special_hooks = {
-                "on_starting",
-                "on_reload",
-                "on_exit",
-                "pre_fork",
-                "post_fork",
-                "pre_exec",
-                "pre_request",
-                "post_request",
-                "worker_init",
-                "worker_exit",
-                "nworkers_changed",
-                "child_exit",
-            }
-
-            # Import and configure the gunicorn_config module
-            import gunicorn_config
-
-            # Set configuration variables in gunicorn_config, prioritizing command line arguments
-            gunicorn_config.workers = (
-                args.workers if args.workers else int(os.getenv("WORKERS", 1))
-            )
-
-            # Bind configuration prioritizes command line arguments
-            host = args.host if args.host != "0.0.0.0" else os.getenv("HOST", "0.0.0.0")
-            port = args.port if args.port != 9621 else int(os.getenv("PORT", 9621))
-            gunicorn_config.bind = f"{host}:{port}"
-
-            # Log level configuration prioritizes command line arguments
-            gunicorn_config.loglevel = (
-                args.log_level.lower()
-                if args.log_level
-                else os.getenv("LOG_LEVEL", "info")
-            )
-
-            # Timeout configuration prioritizes command line arguments
-            gunicorn_config.timeout = (
-                args.timeout if args.timeout else int(os.getenv("TIMEOUT", 150))
-            )
-
-            # Keepalive configuration
-            gunicorn_config.keepalive = int(os.getenv("KEEPALIVE", 5))
-
-            # SSL configuration prioritizes command line arguments
-            if args.ssl or os.getenv("SSL", "").lower() in (
-                "true",
-                "1",
-                "yes",
-                "t",
-                "on",
-            ):
-                gunicorn_config.certfile = (
-                    args.ssl_certfile
-                    if args.ssl_certfile
-                    else os.getenv("SSL_CERTFILE")
-                )
-                gunicorn_config.keyfile = (
-                    args.ssl_keyfile if args.ssl_keyfile else os.getenv("SSL_KEYFILE")
-                )
-
-            # Set configuration options from the module
-            for key in dir(gunicorn_config):
-                if key in valid_options:
-                    value = getattr(gunicorn_config, key)
-                    # Skip functions like on_starting and None values
-                    if not callable(value) and value is not None:
-                        self.cfg.set(key, value)
-                # Set special hooks
-                elif key in special_hooks:
-                    value = getattr(gunicorn_config, key)
-                    if callable(value):
-                        self.cfg.set(key, value)
-
-            if hasattr(gunicorn_config, "logconfig_dict"):
-                self.cfg.set(
-                    "logconfig_dict", getattr(gunicorn_config, "logconfig_dict")
-                )
-
-        def load(self):
-            # Import the application
-            from lightrag.api.lightrag_server import get_application
-
-            return get_application(args)
-
-    # Create the application
-    app = GunicornApp("")
-
-    # Force workers to be an integer and greater than 1 for multi-process mode
-    workers_count = int(args.workers)
-    if workers_count > 1:
-        # Set a flag to indicate we're in the main process
-        os.environ["LIGHTRAG_MAIN_PROCESS"] = "1"
-        initialize_share_data(workers_count)
-    else:
-        initialize_share_data(1)
-
-    # Run the application
-    print("\nStarting Gunicorn with direct Python API...")
-    app.run()
-
-
-if __name__ == "__main__":
-    main()
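The deleted launcher above is built on Gunicorn's documented embedding pattern: subclass `BaseApplication`, push options into `self.cfg` in `load_config()`, and return the app from `load()`. A minimal standalone sketch of that same pattern, with a placeholder WSGI app and placeholder options rather than LightRAG's real config:

```python
from gunicorn.app.base import BaseApplication


class StandaloneApp(BaseApplication):
    """Minimal sketch of Gunicorn's programmatic-embedding pattern."""

    def __init__(self, app, options=None):
        self.options = options or {}
        self.application = app
        super().__init__()

    def load_config(self):
        # Copy only options Gunicorn actually recognizes into its config.
        for key, value in self.options.items():
            if key in self.cfg.settings and value is not None:
                self.cfg.set(key, value)

    def load(self):
        return self.application


def app(environ, start_response):  # placeholder WSGI app
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [b"ok"]


if __name__ == "__main__":
    StandaloneApp(app, {"bind": "127.0.0.1:8000", "workers": 2}).run()
```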