Moved back to llm dir as per https://github.com/HKUDS/LightRAG/pull/864#issuecomment-2669705946

- Created two new example scripts demonstrating LightRAG integration with LlamaIndex:
  - `lightrag_llamaindex_direct_demo.py`: direct OpenAI integration
  - `lightrag_llamaindex_litellm_demo.py`: LiteLLM proxy integration
- Both examples showcase different search modes (naive, local, global, hybrid)
- Both include configuration for the working directory, models, and API settings
- Both demonstrate text insertion and querying using LightRAG with LlamaIndex
- Removed the wrapper directory and references to it
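For orientation, here is a minimal sketch of the query loop the two demos share. The question text and print formatting are illustrative assumptions, not the scripts' verbatim contents; `rag` is a configured LightRAG instance, wired up as in the per-file sketches after each diff below.

```python
from lightrag import LightRAG, QueryParam

def run_all_modes(rag: LightRAG, question: str) -> None:
    # The four retrieval modes the demos exercise: "naive" is plain chunk
    # retrieval, "local" and "global" query the entity/relation graph at
    # different scopes, and "hybrid" combines local and global results.
    for mode in ["naive", "local", "global", "hybrid"]:
        result = rag.query(question, param=QueryParam(mode=mode))
        print(f"--- {mode} ---\n{result}")

# Text is inserted before querying; the file name here is illustrative.
with open("./book.txt", encoding="utf-8") as f:
    rag.insert(f.read())
run_all_modes(rag, "What are the top themes in this text?")
```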
lightrag_llamaindex_direct_demo.py

@@ -1,6 +1,6 @@
 import os
 from lightrag import LightRAG, QueryParam
-from lightrag.wrapper.llama_index_impl import (
+from lightrag.llm.llama_index_impl import (
     llama_index_complete_if_cache,
     llama_index_embed,
 )
@@ -10,14 +10,13 @@ from llama_index.embeddings.openai import OpenAIEmbedding
 import asyncio

 # Configure working directory
-DEFAULT_RAG_DIR = "index_default"
-WORKING_DIR = os.environ.get("RAG_DIR", f"{DEFAULT_RAG_DIR}")
+WORKING_DIR = "./index_default"
 print(f"WORKING_DIR: {WORKING_DIR}")

 # Model configuration
 LLM_MODEL = os.environ.get("LLM_MODEL", "gpt-4")
 print(f"LLM_MODEL: {LLM_MODEL}")
-EMBEDDING_MODEL = os.environ.get("EMBEDDING_MODEL", "text-embedding-3-small")
+EMBEDDING_MODEL = os.environ.get("EMBEDDING_MODEL", "text-embedding-3-large")
 print(f"EMBEDDING_MODEL: {EMBEDDING_MODEL}")
 EMBEDDING_MAX_TOKEN_SIZE = int(os.environ.get("EMBEDDING_MAX_TOKEN_SIZE", 8192))
 print(f"EMBEDDING_MAX_TOKEN_SIZE: {EMBEDDING_MAX_TOKEN_SIZE}")
@@ -26,6 +25,7 @@ print(f"EMBEDDING_MAX_TOKEN_SIZE: {EMBEDDING_MAX_TOKEN_SIZE}")
 OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", "your-api-key-here")

 if not os.path.exists(WORKING_DIR):
+    print(f"Creating working directory: {WORKING_DIR}")
     os.mkdir(WORKING_DIR)

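For context, a hedged sketch of how the direct demo plugs LlamaIndex's OpenAI classes into LightRAG through the relocated module. The signatures of `llama_index_complete_if_cache` and `llama_index_embed` are assumptions inferred from their names, so verify them against `lightrag/llm/llama_index_impl.py`; the config constants mirror the new side of the diff above.

```python
import os

from lightrag import LightRAG
from lightrag.llm.llama_index_impl import (
    llama_index_complete_if_cache,
    llama_index_embed,
)
from lightrag.utils import EmbeddingFunc
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding

# Configuration as set by this commit.
WORKING_DIR = "./index_default"
LLM_MODEL = os.environ.get("LLM_MODEL", "gpt-4")
EMBEDDING_MODEL = os.environ.get("EMBEDDING_MODEL", "text-embedding-3-large")
EMBEDDING_MAX_TOKEN_SIZE = int(os.environ.get("EMBEDDING_MAX_TOKEN_SIZE", 8192))
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", "your-api-key-here")

async def llm_model_func(prompt, system_prompt=None, history_messages=None, **kwargs):
    # Assumed signature: the helper wraps a LlamaIndex LLM and handles
    # LightRAG's completion cache; check llama_index_impl.py for the real one.
    llm = OpenAI(model=LLM_MODEL, api_key=OPENAI_API_KEY)
    return await llama_index_complete_if_cache(
        llm,
        prompt,
        system_prompt=system_prompt,
        history_messages=history_messages or [],
        **kwargs,
    )

async def embedding_func(texts):
    # Assumed signature: the helper embeds a batch of texts with a
    # LlamaIndex embedding model and returns the vectors.
    embed_model = OpenAIEmbedding(model=EMBEDDING_MODEL, api_key=OPENAI_API_KEY)
    return await llama_index_embed(texts, embed_model=embed_model)

rag = LightRAG(
    working_dir=WORKING_DIR,
    llm_model_func=llm_model_func,
    embedding_func=EmbeddingFunc(
        embedding_dim=3072,  # text-embedding-3-large vectors; adjust per model
        max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
        func=embedding_func,
    ),
)
```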
lightrag_llamaindex_litellm_demo.py

@@ -1,6 +1,6 @@
 import os
 from lightrag import LightRAG, QueryParam
-from lightrag.wrapper.llama_index_impl import (
+from lightrag.llm.llama_index_impl import (
     llama_index_complete_if_cache,
     llama_index_embed,
 )
@@ -10,14 +10,13 @@ from llama_index.embeddings.litellm import LiteLLMEmbedding
 import asyncio

 # Configure working directory
-DEFAULT_RAG_DIR = "index_default"
-WORKING_DIR = os.environ.get("RAG_DIR", f"{DEFAULT_RAG_DIR}")
+WORKING_DIR = "./index_default"
 print(f"WORKING_DIR: {WORKING_DIR}")

 # Model configuration
-LLM_MODEL = os.environ.get("LLM_MODEL", "gpt-4o")
+LLM_MODEL = os.environ.get("LLM_MODEL", "gpt-4")
 print(f"LLM_MODEL: {LLM_MODEL}")
-EMBEDDING_MODEL = os.environ.get("EMBEDDING_MODEL", "embedding-model")
+EMBEDDING_MODEL = os.environ.get("EMBEDDING_MODEL", "text-embedding-3-large")
 print(f"EMBEDDING_MODEL: {EMBEDDING_MODEL}")
 EMBEDDING_MAX_TOKEN_SIZE = int(os.environ.get("EMBEDDING_MAX_TOKEN_SIZE", 8192))
 print(f"EMBEDDING_MAX_TOKEN_SIZE: {EMBEDDING_MAX_TOKEN_SIZE}")
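The LiteLLM variant differs only in how the LlamaIndex model objects are constructed: both point at a LiteLLM proxy instead of api.openai.com. A sketch under the same signature assumptions and configuration constants as the direct sketch above; `LITELLM_URL` and `LITELLM_KEY` are hypothetical placeholder names for the proxy endpoint and key.

```python
from llama_index.llms.litellm import LiteLLM
from llama_index.embeddings.litellm import LiteLLMEmbedding

LITELLM_URL = "http://localhost:4000"  # hypothetical proxy endpoint
LITELLM_KEY = "sk-your-proxy-key"      # hypothetical proxy key

async def llm_model_func(prompt, system_prompt=None, history_messages=None, **kwargs):
    # Route completions through the LiteLLM proxy rather than OpenAI directly.
    llm = LiteLLM(model=LLM_MODEL, api_base=LITELLM_URL, api_key=LITELLM_KEY)
    return await llama_index_complete_if_cache(
        llm,
        prompt,
        system_prompt=system_prompt,
        history_messages=history_messages or [],
        **kwargs,
    )

async def embedding_func(texts):
    # LiteLLMEmbedding takes the proxy's base URL and key as well.
    embed_model = LiteLLMEmbedding(
        model_name=EMBEDDING_MODEL, api_base=LITELLM_URL, api_key=LITELLM_KEY
    )
    return await llama_index_embed(texts, embed_model=embed_model)
```

The LightRAG construction itself is then identical to the direct demo sketch.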