diff --git a/examples/lightrag_api_llamaindex_direct_demo_simplified.py b/examples/lightrag_api_llamaindex_direct_demo_simplified.py
index 50dfec96..a1781842 100644
--- a/examples/lightrag_api_llamaindex_direct_demo_simplified.py
+++ b/examples/lightrag_api_llamaindex_direct_demo_simplified.py
@@ -1,6 +1,9 @@
 import os
 from lightrag import LightRAG, QueryParam
-from lightrag.wrapper.llama_index_impl import llama_index_complete_if_cache, llama_index_embed
+from lightrag.wrapper.llama_index_impl import (
+    llama_index_complete_if_cache,
+    llama_index_embed,
+)
 from lightrag.utils import EmbeddingFunc
 from llama_index.llms.openai import OpenAI
 from llama_index.embeddings.openai import OpenAIEmbedding
@@ -25,20 +28,21 @@ OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", "your-api-key-here")
 if not os.path.exists(WORKING_DIR):
     os.mkdir(WORKING_DIR)
 
+
 # Initialize LLM function
 async def llm_model_func(prompt, system_prompt=None, history_messages=[], **kwargs):
     try:
         # Initialize OpenAI if not in kwargs
-        if 'llm_instance' not in kwargs:
+        if "llm_instance" not in kwargs:
             llm_instance = OpenAI(
                 model=LLM_MODEL,
                 api_key=OPENAI_API_KEY,
                 temperature=0.7,
             )
-            kwargs['llm_instance'] = llm_instance
+            kwargs["llm_instance"] = llm_instance
 
         response = await llama_index_complete_if_cache(
-            kwargs['llm_instance'],
+            kwargs["llm_instance"],
             prompt,
             system_prompt=system_prompt,
             history_messages=history_messages,
@@ -49,6 +53,7 @@ async def llm_model_func(prompt, system_prompt=None, history_messages=[], **kwar
         print(f"LLM request failed: {str(e)}")
         raise
 
+
 # Initialize embedding function
 async def embedding_func(texts):
     try:
@@ -61,6 +66,7 @@ async def embedding_func(texts):
         print(f"Embedding failed: {str(e)}")
         raise
 
+
 # Get embedding dimension
 async def get_embedding_dim():
     test_text = ["This is a test sentence."]
@@ -69,6 +75,7 @@ async def get_embedding_dim():
     print(f"embedding_dim={embedding_dim}")
     return embedding_dim
 
+
 # Initialize RAG instance
 rag = LightRAG(
     working_dir=WORKING_DIR,
@@ -86,13 +93,21 @@ with open("./book.txt", "r", encoding="utf-8") as f:
 
 # Test different query modes
 print("\nNaive Search:")
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="naive")))
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+)
 
 print("\nLocal Search:")
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="local")))
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+)
 
 print("\nGlobal Search:")
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="global")))
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+)
 
 print("\nHybrid Search:")
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid")))
\ No newline at end of file
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+)
diff --git a/examples/lightrag_api_llamaindex_litellm_demo_simplified.py b/examples/lightrag_api_llamaindex_litellm_demo_simplified.py
index 11bdeba8..a1ab90db 100644
--- a/examples/lightrag_api_llamaindex_litellm_demo_simplified.py
+++ b/examples/lightrag_api_llamaindex_litellm_demo_simplified.py
@@ -1,6 +1,9 @@
 import os
 from lightrag import LightRAG, QueryParam
-from lightrag.wrapper.llama_index_impl import llama_index_complete_if_cache, llama_index_embed
+from lightrag.wrapper.llama_index_impl import (
+    llama_index_complete_if_cache,
+    llama_index_embed,
+)
 from lightrag.utils import EmbeddingFunc
 from llama_index.llms.litellm import LiteLLM
 from llama_index.embeddings.litellm import LiteLLMEmbedding
@@ -27,21 +30,22 @@ LITELLM_KEY = os.environ.get("LITELLM_KEY", "sk-1234")
 if not os.path.exists(WORKING_DIR):
     os.mkdir(WORKING_DIR)
 
+
 # Initialize LLM function
 async def llm_model_func(prompt, system_prompt=None, history_messages=[], **kwargs):
     try:
         # Initialize LiteLLM if not in kwargs
-        if 'llm_instance' not in kwargs:
+        if "llm_instance" not in kwargs:
             llm_instance = LiteLLM(
                 model=f"openai/{LLM_MODEL}",  # Format: "provider/model_name"
                 api_base=LITELLM_URL,
                 api_key=LITELLM_KEY,
                 temperature=0.7,
             )
-            kwargs['llm_instance'] = llm_instance
+            kwargs["llm_instance"] = llm_instance
 
         response = await llama_index_complete_if_cache(
-            kwargs['llm_instance'],
+            kwargs["llm_instance"],
             prompt,
             system_prompt=system_prompt,
             history_messages=history_messages,
@@ -52,6 +56,7 @@ async def llm_model_func(prompt, system_prompt=None, history_messages=[], **kwar
        print(f"LLM request failed: {str(e)}")
         raise
 
+
 # Initialize embedding function
 async def embedding_func(texts):
     try:
@@ -65,6 +70,7 @@ async def embedding_func(texts):
         print(f"Embedding failed: {str(e)}")
         raise
 
+
 # Get embedding dimension
 async def get_embedding_dim():
     test_text = ["This is a test sentence."]
@@ -73,6 +79,7 @@ async def get_embedding_dim():
     print(f"embedding_dim={embedding_dim}")
     return embedding_dim
 
+
 # Initialize RAG instance
 rag = LightRAG(
     working_dir=WORKING_DIR,
@@ -90,13 +97,21 @@ with open("./book.txt", "r", encoding="utf-8") as f:
 
 # Test different query modes
 print("\nNaive Search:")
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="naive")))
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+)
 
 print("\nLocal Search:")
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="local")))
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+)
 
 print("\nGlobal Search:")
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="global")))
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+)
 
 print("\nHybrid Search:")
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid")))
\ No newline at end of file
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+)