Moved the LlamaIndex implementation back to the `llm` directory, as per

https://github.com/HKUDS/LightRAG/pull/864#issuecomment-2669705946

- Created two new example scripts demonstrating LightRAG integration with LlamaIndex:
  - `lightrag_llamaindex_direct_demo.py`: Direct OpenAI integration
  - `lightrag_llamaindex_litellm_demo.py`: LiteLLM proxy integration
- Both examples showcase different search modes (naive, local, global, hybrid)
- Both include configuration for the working directory, models, and API settings
- Both demonstrate text insertion and querying using LightRAG with LlamaIndex (see the sketch after this list)
- Removed the `wrapper` directory and all references to it
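
For reference, the pattern both demo scripts follow looks roughly like the sketch below. This is a minimal, illustrative reconstruction rather than either script verbatim: the wrapper function names, the `gpt-4` / `text-embedding-3-large` defaults, and the 3072 embedding dimension are assumptions, and the `llama_index_complete_if_cache` / `llama_index_embed` call signatures are inferred from how the examples use them.

```python
import os

from lightrag import LightRAG, QueryParam
from lightrag.llm.llama_index_impl import (
    llama_index_complete_if_cache,
    llama_index_embed,
)
from lightrag.utils import EmbeddingFunc
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding

WORKING_DIR = "./index_default"
if not os.path.exists(WORKING_DIR):
    print(f"Creating working directory: {WORKING_DIR}")
    os.mkdir(WORKING_DIR)


async def llm_model_func(prompt, system_prompt=None, history_messages=[], **kwargs):
    # Hand the prompt to a LlamaIndex LLM through LightRAG's cache-aware helper.
    llm = OpenAI(model="gpt-4", api_key=os.environ["OPENAI_API_KEY"])
    return await llama_index_complete_if_cache(
        llm,
        prompt,
        system_prompt=system_prompt,
        history_messages=history_messages,
        **kwargs,
    )


async def embedding_func(texts):
    # Run a batch of texts through a LlamaIndex embedding model.
    embed_model = OpenAIEmbedding(model="text-embedding-3-large")
    return await llama_index_embed(texts, embed_model=embed_model)


rag = LightRAG(
    working_dir=WORKING_DIR,
    llm_model_func=llm_model_func,
    embedding_func=EmbeddingFunc(
        embedding_dim=3072,  # dimension of text-embedding-3-large (assumed here)
        max_token_size=8192,
        func=embedding_func,
    ),
)

rag.insert("LightRAG builds a knowledge graph and vector index over inserted text.")

# Query the same question in each of the four search modes.
for mode in ["naive", "local", "global", "hybrid"]:
    print(f"\n=== {mode} ===")
    print(rag.query("What does LightRAG do?", param=QueryParam(mode=mode)))
```

Run with `OPENAI_API_KEY` set; the script builds its index under `./index_default` and prints one answer per mode.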
Author: Pankaj Kaushal
Date:   2025-02-20 10:22:26 +01:00
parent 277070e03b
commit 173a806b9a
5 changed files with 8 additions and 9 deletions

lightrag_llamaindex_direct_demo.py

@@ -1,6 +1,6 @@
 import os
 from lightrag import LightRAG, QueryParam
-from lightrag.wrapper.llama_index_impl import (
+from lightrag.llm.llama_index_impl import (
     llama_index_complete_if_cache,
     llama_index_embed,
 )
@@ -10,14 +10,13 @@ from llama_index.embeddings.openai import OpenAIEmbedding
 import asyncio

 # Configure working directory
-DEFAULT_RAG_DIR = "index_default"
-WORKING_DIR = os.environ.get("RAG_DIR", f"{DEFAULT_RAG_DIR}")
+WORKING_DIR = "./index_default"
 print(f"WORKING_DIR: {WORKING_DIR}")

 # Model configuration
 LLM_MODEL = os.environ.get("LLM_MODEL", "gpt-4")
 print(f"LLM_MODEL: {LLM_MODEL}")
-EMBEDDING_MODEL = os.environ.get("EMBEDDING_MODEL", "text-embedding-3-small")
+EMBEDDING_MODEL = os.environ.get("EMBEDDING_MODEL", "text-embedding-3-large")
 print(f"EMBEDDING_MODEL: {EMBEDDING_MODEL}")
 EMBEDDING_MAX_TOKEN_SIZE = int(os.environ.get("EMBEDDING_MAX_TOKEN_SIZE", 8192))
 print(f"EMBEDDING_MAX_TOKEN_SIZE: {EMBEDDING_MAX_TOKEN_SIZE}")
@@ -26,6 +25,7 @@ print(f"EMBEDDING_MAX_TOKEN_SIZE: {EMBEDDING_MAX_TOKEN_SIZE}")
 OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", "your-api-key-here")

 if not os.path.exists(WORKING_DIR):
     print(f"Creating working directory: {WORKING_DIR}")
     os.mkdir(WORKING_DIR)

lightrag_llamaindex_litellm_demo.py

@@ -1,6 +1,6 @@
 import os
 from lightrag import LightRAG, QueryParam
-from lightrag.wrapper.llama_index_impl import (
+from lightrag.llm.llama_index_impl import (
     llama_index_complete_if_cache,
     llama_index_embed,
 )
@@ -10,14 +10,13 @@ from llama_index.embeddings.litellm import LiteLLMEmbedding
 import asyncio

 # Configure working directory
-DEFAULT_RAG_DIR = "index_default"
-WORKING_DIR = os.environ.get("RAG_DIR", f"{DEFAULT_RAG_DIR}")
+WORKING_DIR = "./index_default"
 print(f"WORKING_DIR: {WORKING_DIR}")

 # Model configuration
-LLM_MODEL = os.environ.get("LLM_MODEL", "gpt-4o")
+LLM_MODEL = os.environ.get("LLM_MODEL", "gpt-4")
 print(f"LLM_MODEL: {LLM_MODEL}")
-EMBEDDING_MODEL = os.environ.get("EMBEDDING_MODEL", "embedding-model")
+EMBEDDING_MODEL = os.environ.get("EMBEDDING_MODEL", "text-embedding-3-large")
 print(f"EMBEDDING_MODEL: {EMBEDDING_MODEL}")
 EMBEDDING_MAX_TOKEN_SIZE = int(os.environ.get("EMBEDDING_MAX_TOKEN_SIZE", 8192))
 print(f"EMBEDDING_MAX_TOKEN_SIZE: {EMBEDDING_MAX_TOKEN_SIZE}")