Linting and formatting
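This commit applies lint fixes to two LightRAG example scripts that drive the LlamaIndex wrapper: one calls OpenAI directly, the other appears to route through a LiteLLM proxy. The changes are mechanical: the long wrapper import is split into a parenthesized multi-line form, single-quoted strings become double-quoted, a second blank line is added before top-level definitions, and over-long print(rag.query(...)) calls are wrapped. The pattern is consistent with the defaults of an autoformatter such as Black, though the commit itself does not name the tool.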
@@ -1,6 +1,9 @@
 import os
 from lightrag import LightRAG, QueryParam
-from lightrag.wrapper.llama_index_impl import llama_index_complete_if_cache, llama_index_embed
+from lightrag.wrapper.llama_index_impl import (
+    llama_index_complete_if_cache,
+    llama_index_embed,
+)
 from lightrag.utils import EmbeddingFunc
 from llama_index.llms.openai import OpenAI
 from llama_index.embeddings.openai import OpenAIEmbedding
@@ -25,20 +28,21 @@ OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", "your-api-key-here")
 if not os.path.exists(WORKING_DIR):
     os.mkdir(WORKING_DIR)
 
+
 # Initialize LLM function
 async def llm_model_func(prompt, system_prompt=None, history_messages=[], **kwargs):
     try:
         # Initialize OpenAI if not in kwargs
-        if 'llm_instance' not in kwargs:
+        if "llm_instance" not in kwargs:
             llm_instance = OpenAI(
                 model=LLM_MODEL,
                 api_key=OPENAI_API_KEY,
                 temperature=0.7,
             )
-            kwargs['llm_instance'] = llm_instance
+            kwargs["llm_instance"] = llm_instance
 
         response = await llama_index_complete_if_cache(
-            kwargs['llm_instance'],
+            kwargs["llm_instance"],
             prompt,
             system_prompt=system_prompt,
             history_messages=history_messages,
@@ -49,6 +53,7 @@ async def llm_model_func(prompt, system_prompt=None, history_messages=[], **kwargs):
         print(f"LLM request failed: {str(e)}")
         raise
 
+
 # Initialize embedding function
 async def embedding_func(texts):
     try:
@@ -61,6 +66,7 @@ async def embedding_func(texts):
         print(f"Embedding failed: {str(e)}")
         raise
 
+
 # Get embedding dimension
 async def get_embedding_dim():
     test_text = ["This is a test sentence."]
@@ -69,6 +75,7 @@ async def get_embedding_dim():
     print(f"embedding_dim={embedding_dim}")
     return embedding_dim
 
+
 # Initialize RAG instance
 rag = LightRAG(
     working_dir=WORKING_DIR,
@@ -86,13 +93,21 @@ with open("./book.txt", "r", encoding="utf-8") as f:
 
 # Test different query modes
 print("\nNaive Search:")
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="naive")))
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+)
 
 print("\nLocal Search:")
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="local")))
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+)
 
 print("\nGlobal Search:")
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="global")))
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+)
 
 print("\nHybrid Search:")
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid")))
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+)
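To see the result in context, here is a minimal sketch of how the OpenAI-backed llm_model_func reads after this commit. It is assembled from the hunks above; the close of the llama_index_complete_if_cache call, the return, the except header, and the constant values are elided by the diff, so those pieces are assumptions and are marked as such in the comments.

# Sketch only: reconstructed from the diff hunks above, not verbatim repository code.
import os

from lightrag.wrapper.llama_index_impl import llama_index_complete_if_cache
from llama_index.llms.openai import OpenAI

LLM_MODEL = "gpt-4o-mini"  # assumed value; the diff never shows this constant
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", "your-api-key-here")


async def llm_model_func(prompt, system_prompt=None, history_messages=[], **kwargs):
    try:
        # Build the OpenAI client once and cache it in kwargs so that
        # repeated calls reuse the same instance.
        if "llm_instance" not in kwargs:
            llm_instance = OpenAI(
                model=LLM_MODEL,
                api_key=OPENAI_API_KEY,
                temperature=0.7,
            )
            kwargs["llm_instance"] = llm_instance

        response = await llama_index_complete_if_cache(
            kwargs["llm_instance"],
            prompt,
            system_prompt=system_prompt,
            history_messages=history_messages,
        )  # assumed close of the call; the diff elides these lines
        return response  # assumed; the diff elides the return
    except Exception as e:  # assumed except header; the diff shows only its body
        print(f"LLM request failed: {str(e)}")
        raise

The second file in the commit, the LiteLLM variant, receives the same treatment: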
@@ -1,6 +1,9 @@
 import os
 from lightrag import LightRAG, QueryParam
-from lightrag.wrapper.llama_index_impl import llama_index_complete_if_cache, llama_index_embed
+from lightrag.wrapper.llama_index_impl import (
+    llama_index_complete_if_cache,
+    llama_index_embed,
+)
 from lightrag.utils import EmbeddingFunc
 from llama_index.llms.litellm import LiteLLM
 from llama_index.embeddings.litellm import LiteLLMEmbedding
@@ -27,21 +30,22 @@ LITELLM_KEY = os.environ.get("LITELLM_KEY", "sk-1234")
 if not os.path.exists(WORKING_DIR):
     os.mkdir(WORKING_DIR)
 
+
 # Initialize LLM function
 async def llm_model_func(prompt, system_prompt=None, history_messages=[], **kwargs):
     try:
         # Initialize LiteLLM if not in kwargs
-        if 'llm_instance' not in kwargs:
+        if "llm_instance" not in kwargs:
             llm_instance = LiteLLM(
                 model=f"openai/{LLM_MODEL}",  # Format: "provider/model_name"
                 api_base=LITELLM_URL,
                 api_key=LITELLM_KEY,
                 temperature=0.7,
             )
-            kwargs['llm_instance'] = llm_instance
+            kwargs["llm_instance"] = llm_instance
 
         response = await llama_index_complete_if_cache(
-            kwargs['llm_instance'],
+            kwargs["llm_instance"],
             prompt,
             system_prompt=system_prompt,
             history_messages=history_messages,
@@ -52,6 +56,7 @@ async def llm_model_func(prompt, system_prompt=None, history_messages=[], **kwargs):
         print(f"LLM request failed: {str(e)}")
         raise
 
+
 # Initialize embedding function
 async def embedding_func(texts):
     try:
@@ -65,6 +70,7 @@ async def embedding_func(texts):
         print(f"Embedding failed: {str(e)}")
         raise
 
+
 # Get embedding dimension
 async def get_embedding_dim():
     test_text = ["This is a test sentence."]
@@ -73,6 +79,7 @@ async def get_embedding_dim():
     print(f"embedding_dim={embedding_dim}")
     return embedding_dim
 
+
 # Initialize RAG instance
 rag = LightRAG(
     working_dir=WORKING_DIR,
@@ -90,13 +97,21 @@ with open("./book.txt", "r", encoding="utf-8") as f:
 
 # Test different query modes
 print("\nNaive Search:")
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="naive")))
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+)
 
 print("\nLocal Search:")
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="local")))
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+)
 
 print("\nGlobal Search:")
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="global")))
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+)
 
 print("\nHybrid Search:")
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid")))
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+)
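The only substantive difference between the two scripts is how the LLM client is built: the second routes every request through a LiteLLM proxy using "provider/model_name" naming. A minimal sketch of just that construction, under stated assumptions: LLM_MODEL and the LITELLM_URL default are hypothetical values the diff never shows, while the LITELLM_KEY default comes from the hunk header above.

# Sketch only: constants marked "assumed" are illustrative, not from the diff.
import os

from llama_index.llms.litellm import LiteLLM

LLM_MODEL = "gpt-4o-mini"  # assumed; the diff never shows this constant's value
LITELLM_URL = os.environ.get("LITELLM_URL", "http://localhost:4000")  # assumed default
LITELLM_KEY = os.environ.get("LITELLM_KEY", "sk-1234")  # default taken from the hunk header

# LiteLLM addresses models as "provider/model_name"; the proxy at api_base
# forwards the request to the matching backend with the supplied key.
llm_instance = LiteLLM(
    model=f"openai/{LLM_MODEL}",
    api_base=LITELLM_URL,
    api_key=LITELLM_KEY,
    temperature=0.7,
)

Everything downstream of the client construction, including the four wrapped query-mode calls at the bottom of each file, is identical between the two scripts.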