diff --git a/examples/lightrag_api_llamaindex_direct_demo_simplified.py b/examples/lightrag_llamaindex_direct_demo.py
similarity index 95%
rename from examples/lightrag_api_llamaindex_direct_demo_simplified.py
rename to examples/lightrag_llamaindex_direct_demo.py
index a1781842..5db158ce 100644
--- a/examples/lightrag_api_llamaindex_direct_demo_simplified.py
+++ b/examples/lightrag_llamaindex_direct_demo.py
@@ -1,6 +1,6 @@
 import os
 from lightrag import LightRAG, QueryParam
-from lightrag.wrapper.llama_index_impl import (
+from lightrag.llm.llama_index_impl import (
     llama_index_complete_if_cache,
     llama_index_embed,
 )
@@ -10,14 +10,13 @@ from llama_index.embeddings.openai import OpenAIEmbedding
 import asyncio
 
 # Configure working directory
-DEFAULT_RAG_DIR = "index_default"
-WORKING_DIR = os.environ.get("RAG_DIR", f"{DEFAULT_RAG_DIR}")
+WORKING_DIR = "./index_default"
 print(f"WORKING_DIR: {WORKING_DIR}")
 
 # Model configuration
 LLM_MODEL = os.environ.get("LLM_MODEL", "gpt-4")
 print(f"LLM_MODEL: {LLM_MODEL}")
-EMBEDDING_MODEL = os.environ.get("EMBEDDING_MODEL", "text-embedding-3-small")
+EMBEDDING_MODEL = os.environ.get("EMBEDDING_MODEL", "text-embedding-3-large")
 print(f"EMBEDDING_MODEL: {EMBEDDING_MODEL}")
 EMBEDDING_MAX_TOKEN_SIZE = int(os.environ.get("EMBEDDING_MAX_TOKEN_SIZE", 8192))
 print(f"EMBEDDING_MAX_TOKEN_SIZE: {EMBEDDING_MAX_TOKEN_SIZE}")
@@ -26,6 +25,7 @@ print(f"EMBEDDING_MAX_TOKEN_SIZE: {EMBEDDING_MAX_TOKEN_SIZE}")
 OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", "your-api-key-here")
 
 if not os.path.exists(WORKING_DIR):
+    print(f"Creating working directory: {WORKING_DIR}")
     os.mkdir(WORKING_DIR)
 
 
diff --git a/examples/lightrag_api_llamaindex_litellm_demo_simplified.py b/examples/lightrag_llamaindex_litellm_demo.py
similarity index 92%
rename from examples/lightrag_api_llamaindex_litellm_demo_simplified.py
rename to examples/lightrag_llamaindex_litellm_demo.py
index a1ab90db..3511ecf3 100644
--- a/examples/lightrag_api_llamaindex_litellm_demo_simplified.py
+++ b/examples/lightrag_llamaindex_litellm_demo.py
@@ -1,6 +1,6 @@
 import os
 from lightrag import LightRAG, QueryParam
-from lightrag.wrapper.llama_index_impl import (
+from lightrag.llm.llama_index_impl import (
     llama_index_complete_if_cache,
     llama_index_embed,
 )
@@ -10,14 +10,13 @@ from llama_index.embeddings.litellm import LiteLLMEmbedding
 import asyncio
 
 # Configure working directory
-DEFAULT_RAG_DIR = "index_default"
-WORKING_DIR = os.environ.get("RAG_DIR", f"{DEFAULT_RAG_DIR}")
+WORKING_DIR = "./index_default"
 print(f"WORKING_DIR: {WORKING_DIR}")
 
 # Model configuration
-LLM_MODEL = os.environ.get("LLM_MODEL", "gpt-4o")
+LLM_MODEL = os.environ.get("LLM_MODEL", "gpt-4")
 print(f"LLM_MODEL: {LLM_MODEL}")
-EMBEDDING_MODEL = os.environ.get("EMBEDDING_MODEL", "embedding-model")
+EMBEDDING_MODEL = os.environ.get("EMBEDDING_MODEL", "text-embedding-3-large")
 print(f"EMBEDDING_MODEL: {EMBEDDING_MODEL}")
 EMBEDDING_MAX_TOKEN_SIZE = int(os.environ.get("EMBEDDING_MAX_TOKEN_SIZE", 8192))
 print(f"EMBEDDING_MAX_TOKEN_SIZE: {EMBEDDING_MAX_TOKEN_SIZE}")
diff --git a/lightrag/wrapper/Readme.md b/lightrag/llm/Readme.md
similarity index 100%
rename from lightrag/wrapper/Readme.md
rename to lightrag/llm/Readme.md
diff --git a/lightrag/wrapper/llama_index_impl.py b/lightrag/llm/llama_index_impl.py
similarity index 100%
rename from lightrag/wrapper/llama_index_impl.py
rename to lightrag/llm/llama_index_impl.py
diff --git a/lightrag/wrapper/__init__.py b/lightrag/wrapper/__init__.py
deleted file mode 100644
index e69de29b..00000000