Merge remote-tracking branch 'upstream/main'

README.md

@@ -26,7 +26,7 @@ This repository hosts the code of LightRAG. The structure of this code is based
</div>

## 🎉 News

- [x] [2025.01.06]🎯📢You can now [use PostgreSQL for Storage](#using-postgresql-for-storage).
- [x] [2024.12.31]🎯📢LightRAG now supports [deletion by document ID](https://github.com/HKUDS/LightRAG?tab=readme-ov-file#delete).
- [x] [2024.11.25]🎯📢LightRAG now supports seamless integration of [custom knowledge graphs](https://github.com/HKUDS/LightRAG?tab=readme-ov-file#insert-custom-kg), empowering users to enhance the system with their own domain expertise.
- [x] [2024.11.19]🎯📢A comprehensive guide to LightRAG is now available on [LearnOpenCV](https://learnopencv.com/lightrag). Many thanks to the blog author.

@@ -361,6 +361,11 @@ see test_neo4j.py for a working example.

For production-level scenarios you will most likely want to leverage an enterprise solution. PostgreSQL can provide a one-stop solution for you: KV store, vector database (pgvector), and graph database (Apache AGE).

* PostgreSQL is lightweight: the whole binary distribution, including all necessary plugins, can be zipped to about 40 MB. See the [Windows Release](https://github.com/ShanGor/apache-age-windows/releases/tag/PG17%2Fv1.5.0-rc0); it is easy to install for Linux/Mac.
* How to start? See [examples/lightrag_zhipu_postgres_demo.py](https://github.com/HKUDS/LightRAG/blob/main/examples/lightrag_zhipu_postgres_demo.py).
* Create an index for AGE (change `dickens` below to your graph name if necessary):

```sql
SET search_path = ag_catalog, "$user", public;
CREATE INDEX idx_entity ON dickens."Entity" USING gin (agtype_access_operator(properties, '"node_id"'));
```
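As a minimal sketch of what that demo wires up, the PostgreSQL backends are selected by name when constructing `LightRAG` (storage class names below follow the demo; `my_llm_func` and `my_embedding_func` stand in for your own model functions):

```python
# Minimal sketch, following examples/lightrag_zhipu_postgres_demo.py: select the
# PostgreSQL-backed storage implementations by name when constructing LightRAG.
# my_llm_func / my_embedding_func are placeholders for your own model functions.
from lightrag import LightRAG

rag = LightRAG(
    working_dir="./rag_storage",
    llm_model_func=my_llm_func,
    embedding_func=my_embedding_func,
    kv_storage="PGKVStorage",          # key-value store on PostgreSQL
    vector_storage="PGVectorStorage",  # vectors via pgvector
    graph_storage="PGGraphStorage",    # knowledge graph via Apache AGE
)
```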

### Insert Custom KG

@@ -912,12 +917,14 @@ pip install -e ".[api]"

### Prerequisites

Before running the server, ensure you have the corresponding backend service running for both the LLM and the embedding model.

The new API allows you to mix different bindings for the LLM and the embeddings. For example, you can use Ollama for the embedding and OpenAI for the LLM. A quick reachability check is sketched below.
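Before launching, it can help to confirm that each backend you plan to bind actually responds. A small sketch (Ollama's standard `/api/tags` listing endpoint is assumed; whether the LoLLMs root URL answers depends on your install):

```python
# Quick reachability probe for local LLM/embedding backends (helper script,
# not part of LightRAG). Adjust the URLs to match your hosts/ports.
import requests

backends = {
    "ollama": "http://localhost:11434/api/tags",
    "lollms": "http://localhost:9600",
}
for name, url in backends.items():
    try:
        r = requests.get(url, timeout=5)
        print(f"{name}: reachable (HTTP {r.status_code})")
    except requests.RequestException as e:
        print(f"{name}: NOT reachable ({e})")
```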

#### For LoLLMs Server
- LoLLMs must be running and accessible
- Default connection: http://localhost:9600
- Configure using --llm-binding-host and/or --embedding-binding-host if running on a different host/port

#### For Ollama Server
- Ollama must be running and accessible

@@ -953,113 +960,96 @@ The output of the last command will give you the endpoint and the key for the Op

The LightRAG server supports the following configuration options:

#### LightRAG Server Options

| Parameter | Default | Description |
|-----------|---------|-------------|
| --host | 0.0.0.0 | Server host |
| --port | 9621 | Server port |
| --llm-binding | ollama | LLM binding to be used. Supported: lollms, ollama, openai, azure_openai |
| --llm-binding-host | (dynamic) | LLM server host URL. Defaults based on binding: http://localhost:11434 (ollama), http://localhost:9600 (lollms), https://api.openai.com/v1 (openai) |
| --llm-model | mistral-nemo:latest | LLM model name |
| --embedding-binding | ollama | Embedding binding to be used. Supported: lollms, ollama, openai, azure_openai |
| --embedding-binding-host | (dynamic) | Embedding server host URL. Defaults based on binding: http://localhost:11434 (ollama), http://localhost:9600 (lollms), https://api.openai.com/v1 (openai) |
| --embedding-model | bge-m3:latest | Embedding model name |
| --working-dir | ./rag_storage | Working directory for RAG storage |
| --input-dir | ./inputs | Directory containing input documents |
| --max-async | 4 | Maximum async operations |
| --max-tokens | 32768 | Maximum token size |
| --embedding-dim | 1024 | Embedding dimensions |
| --max-embed-tokens | 8192 | Maximum embedding token size |
| --timeout | None | Timeout in seconds (useful when using slow AI). Use None for infinite timeout |
| --log-level | INFO | Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL) |
| --key | None | API key for authentication. Protects the lightrag server against unauthorized access |
| --ssl | False | Enable HTTPS |
| --ssl-certfile | None | Path to SSL certificate file (required if --ssl is enabled) |
| --ssl-keyfile | None | Path to SSL private key file (required if --ssl is enabled) |

To protect the server with an authentication key, you can also set the `LIGHTRAG_API_KEY` environment variable.
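Once the server is running, the `/health` endpoint (see the server source further down) reports the active binding configuration, which is a quick way to confirm your options took effect. When a key is configured it is expected in the `X-API-Key` header, matching the server's `APIKeyHeader` dependency:

```python
# Query /health; the X-API-Key header name matches the server's APIKeyHeader
# dependency. Assumes the default host/port.
import requests

resp = requests.get(
    "http://localhost:9621/health",
    headers={"X-API-Key": "my-key"},  # omit if no key is configured
    timeout=10,
)
resp.raise_for_status()
print(resp.json()["configuration"])
```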

### Example Usage

#### Running a LightRAG server with the default local Ollama server as LLM and embedding backend

Ollama is the default binding for both the LLM and the embedding model, so by default you can run lightrag-server with no parameters and the defaults will be used. Make sure Ollama is installed and running, and that the default models are already pulled.

```bash
# Run lightrag with ollama, mistral-nemo:latest for llm, and bge-m3:latest for embedding
lightrag-server

# Using specific models (ensure they are installed in your ollama instance)
lightrag-server --llm-model adrienbrault/nous-hermes2theta-llama3-8b:f16 --embedding-model nomic-embed-text --embedding-dim 1024

# Using an authentication key
lightrag-server --key my-key

# Using lollms for llm and ollama for embedding
lightrag-server --llm-binding lollms
```

#### Running a LightRAG server with the default local LoLLMs server as LLM and embedding backend

```bash
# Run lightrag with lollms for both llm and embedding: mistral-nemo:latest for llm, bge-m3:latest for embedding
lightrag-server --llm-binding lollms --embedding-binding lollms

# Using specific models (ensure they are installed in your lollms instance)
lightrag-server --llm-binding lollms --llm-model adrienbrault/nous-hermes2theta-llama3-8b:f16 --embedding-binding lollms --embedding-model nomic-embed-text --embedding-dim 1024

# Using an authentication key
lightrag-server --key my-key

# Using lollms for llm and openai for embedding
lightrag-server --llm-binding lollms --embedding-binding openai --embedding-model text-embedding-3-small
```

#### Running a LightRAG server with OpenAI as LLM and embedding backend

```bash
# Run lightrag with openai for both llm and embedding: gpt-4o-mini for llm, text-embedding-3-small for embedding
lightrag-server --llm-binding openai --llm-model gpt-4o-mini --embedding-binding openai --embedding-model text-embedding-3-small

# Using an authentication key
lightrag-server --llm-binding openai --llm-model gpt-4o-mini --embedding-binding openai --embedding-model text-embedding-3-small --key my-key

# Using lollms for llm and openai for embedding
lightrag-server --llm-binding lollms --embedding-binding openai --embedding-model text-embedding-3-small
```

#### Running a LightRAG server with Azure OpenAI as LLM and embedding backend

```bash
# Using azure_openai for the llm (gpt-4o-mini) and openai for the embedding (text-embedding-3-small)
lightrag-server --llm-binding azure_openai --llm-model gpt-4o-mini --embedding-binding openai --embedding-model text-embedding-3-small

# Using an authentication key
lightrag-server --llm-binding azure_openai --llm-model gpt-4o-mini --embedding-binding azure_openai --embedding-model text-embedding-3-small --key my-key

# Using lollms for llm and azure_openai for embedding
lightrag-server --llm-binding lollms --embedding-binding azure_openai --embedding-model text-embedding-3-small
```
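Note that the azure_openai binding does not take its endpoint from `--llm-binding-host`: the Azure helpers load their connection settings from the environment (a `.env` file works, as in the server sources below). The variable names appear in the Azure server source on this page; the values here are placeholders:

```
AZURE_OPENAI_API_VERSION=<api-version>
AZURE_OPENAI_DEPLOYMENT=<llm-deployment-name>
AZURE_OPENAI_API_KEY=<your-key>
AZURE_OPENAI_ENDPOINT=https://<your-resource>.openai.azure.com
AZURE_EMBEDDING_DEPLOYMENT=<embedding-deployment-name>
AZURE_EMBEDDING_API_VERSION=<api-version>
```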

**Important Notes:**
- For LoLLMs: Make sure the specified models are installed in your LoLLMs instance

@@ -1069,10 +1059,7 @@ azure-openai-lightrag-server --model gpt-4o --port 8080 --working-dir ./custom_r

For help on the server options, use the --help flag:

```bash
lightrag-server --help
```

Note: If you don't need the API functionality, you can install the base package without API support using:

@@ -1092,7 +1079,7 @@ Query the RAG system with options for different search modes.

```bash
curl -X POST "http://localhost:9621/query" \
    -H "Content-Type: application/json" \
    -d '{"query": "Your question here", "mode": "hybrid"}'
```
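The `mode` field accepts `naive`, `local`, `global`, or `hybrid`, matching the `SearchMode` enum in the server sources below. The same call from Python (field names mirror the server's `QueryRequest` model):

```python
# POST /query from Python; request fields mirror the server's QueryRequest
# model (query, mode, only_need_context). Assumes the default host/port.
import requests

payload = {"query": "Your question here", "mode": "hybrid", "only_need_context": False}
resp = requests.post("http://localhost:9621/query", json=payload, timeout=60)
resp.raise_for_status()
print(resp.json()["response"])
```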

#### POST /query/stream
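In the Azure variant shown further down this page, `/query/stream` wraps each chunk as a newline-delimited JSON object `{"data": chunk}` via its `stream_generator` (the older Ollama variant at the end of the page streams raw text instead). A minimal client sketch for the NDJSON form:

```python
# Consume POST /query/stream: each line is a JSON object {"data": "..."}.
# Assumes the default host/port and no API key.
import json
import requests

with requests.post(
    "http://localhost:9621/query/stream",
    json={"query": "Your question here", "mode": "hybrid"},
    stream=True,
    timeout=300,
) as resp:
    resp.raise_for_status()
    for line in resp.iter_lines():
        if line:
            print(json.loads(line)["data"], end="", flush=True)
```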

@@ -1,532 +0,0 @@

```python
from fastapi import FastAPI, HTTPException, File, UploadFile, Form
from pydantic import BaseModel
import asyncio
import logging
import argparse
from lightrag import LightRAG, QueryParam
from lightrag.llm import (
    azure_openai_complete_if_cache,
    azure_openai_embedding,
)
from lightrag.utils import EmbeddingFunc
from typing import Optional, List
from enum import Enum
from pathlib import Path
import shutil
import aiofiles
from ascii_colors import trace_exception
import os
from dotenv import load_dotenv
import inspect
import json
from fastapi.responses import StreamingResponse

from fastapi import Depends, Security
from fastapi.security import APIKeyHeader
from fastapi.middleware.cors import CORSMiddleware

from starlette.status import HTTP_403_FORBIDDEN

load_dotenv()

AZURE_OPENAI_API_VERSION = os.getenv("AZURE_OPENAI_API_VERSION")
AZURE_OPENAI_DEPLOYMENT = os.getenv("AZURE_OPENAI_DEPLOYMENT")
AZURE_OPENAI_API_KEY = os.getenv("AZURE_OPENAI_API_KEY")
AZURE_OPENAI_ENDPOINT = os.getenv("AZURE_OPENAI_ENDPOINT")

AZURE_EMBEDDING_DEPLOYMENT = os.getenv("AZURE_EMBEDDING_DEPLOYMENT")
AZURE_EMBEDDING_API_VERSION = os.getenv("AZURE_EMBEDDING_API_VERSION")


def parse_args():
    parser = argparse.ArgumentParser(
        description="LightRAG FastAPI Server with OpenAI integration"
    )

    # Server configuration
    parser.add_argument(
        "--host", default="0.0.0.0", help="Server host (default: 0.0.0.0)"
    )
    parser.add_argument(
        "--port", type=int, default=9621, help="Server port (default: 9621)"
    )

    # Directory configuration
    parser.add_argument(
        "--working-dir",
        default="./rag_storage",
        help="Working directory for RAG storage (default: ./rag_storage)",
    )
    parser.add_argument(
        "--input-dir",
        default="./inputs",
        help="Directory containing input documents (default: ./inputs)",
    )

    # Model configuration
    parser.add_argument(
        "--model", default="gpt-4o", help="OpenAI model name (default: gpt-4o)"
    )
    parser.add_argument(
        "--embedding-model",
        default="text-embedding-3-large",
        help="OpenAI embedding model (default: text-embedding-3-large)",
    )

    # RAG configuration
    parser.add_argument(
        "--max-tokens",
        type=int,
        default=32768,
        help="Maximum token size (default: 32768)",
    )
    parser.add_argument(
        "--max-embed-tokens",
        type=int,
        default=8192,
        help="Maximum embedding token size (default: 8192)",
    )
    parser.add_argument(
        "--enable-cache",
        default=True,
        help="Enable response cache (default: True)",
    )
    # Logging configuration
    parser.add_argument(
        "--log-level",
        default="INFO",
        choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
        help="Logging level (default: INFO)",
    )

    parser.add_argument(
        "--key",
        type=str,
        help="API key for authentication. This protects lightrag server against unauthorized access",
        default=None,
    )

    return parser.parse_args()


class DocumentManager:
    """Handles document operations and tracking"""

    def __init__(self, input_dir: str, supported_extensions: tuple = (".txt", ".md")):
        self.input_dir = Path(input_dir)
        self.supported_extensions = supported_extensions
        self.indexed_files = set()

        # Create input directory if it doesn't exist
        self.input_dir.mkdir(parents=True, exist_ok=True)

    def scan_directory(self) -> List[Path]:
        """Scan input directory for new files"""
        new_files = []
        for ext in self.supported_extensions:
            for file_path in self.input_dir.rglob(f"*{ext}"):
                if file_path not in self.indexed_files:
                    new_files.append(file_path)
        return new_files

    def mark_as_indexed(self, file_path: Path):
        """Mark a file as indexed"""
        self.indexed_files.add(file_path)

    def is_supported_file(self, filename: str) -> bool:
        """Check if file type is supported"""
        return any(filename.lower().endswith(ext) for ext in self.supported_extensions)


# Pydantic models
class SearchMode(str, Enum):
    naive = "naive"
    local = "local"
    global_ = "global"
    hybrid = "hybrid"


class QueryRequest(BaseModel):
    query: str
    mode: SearchMode = SearchMode.hybrid
    only_need_context: bool = False
    # stream: bool = False


class QueryResponse(BaseModel):
    response: str


class InsertTextRequest(BaseModel):
    text: str
    description: Optional[str] = None


class InsertResponse(BaseModel):
    status: str
    message: str
    document_count: int


def get_api_key_dependency(api_key: Optional[str]):
    if not api_key:
        # If no API key is configured, return a dummy dependency that always succeeds
        async def no_auth():
            return None

        return no_auth

    # If API key is configured, use proper authentication
    api_key_header = APIKeyHeader(name="X-API-Key", auto_error=False)

    async def api_key_auth(api_key_header_value: str | None = Security(api_key_header)):
        if not api_key_header_value:
            raise HTTPException(
                status_code=HTTP_403_FORBIDDEN, detail="API Key required"
            )
        if api_key_header_value != api_key:
            raise HTTPException(
                status_code=HTTP_403_FORBIDDEN, detail="Invalid API Key"
            )
        return api_key_header_value

    return api_key_auth


async def get_embedding_dim(embedding_model: str) -> int:
    """Get embedding dimensions for the specified model"""
    test_text = ["This is a test sentence."]
    embedding = await azure_openai_embedding(test_text, model=embedding_model)
    return embedding.shape[1]


def create_app(args):
    # Setup logging
    logging.basicConfig(
        format="%(levelname)s:%(message)s", level=getattr(logging, args.log_level)
    )

    # Check if API key is provided either through env var or args
    api_key = os.getenv("LIGHTRAG_API_KEY") or args.key

    # Initialize FastAPI
    app = FastAPI(
        title="LightRAG API",
        description="API for querying text using LightRAG with separate storage and input directories"
        + "(With authentication)"
        if api_key
        else "",
        version="1.0.0",
        openapi_tags=[{"name": "api"}],
    )

    # Add CORS middleware
    app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )

    # Create the optional API key dependency
    optional_api_key = get_api_key_dependency(api_key)

    # Create working directory if it doesn't exist
    Path(args.working_dir).mkdir(parents=True, exist_ok=True)

    # Initialize document manager
    doc_manager = DocumentManager(args.input_dir)

    # Get embedding dimensions
    embedding_dim = asyncio.run(get_embedding_dim(args.embedding_model))

    async def async_openai_complete(
        prompt, system_prompt=None, history_messages=[], **kwargs
    ):
        """Async wrapper for OpenAI completion"""
        kwargs.pop("keyword_extraction", None)

        return await azure_openai_complete_if_cache(
            args.model,
            prompt,
            system_prompt=system_prompt,
            history_messages=history_messages,
            base_url=AZURE_OPENAI_ENDPOINT,
            api_key=AZURE_OPENAI_API_KEY,
            api_version=AZURE_OPENAI_API_VERSION,
            **kwargs,
        )

    # Initialize RAG with OpenAI configuration
    rag = LightRAG(
        enable_llm_cache=args.enable_cache,
        working_dir=args.working_dir,
        llm_model_func=async_openai_complete,
        llm_model_name=args.model,
        llm_model_max_token_size=args.max_tokens,
        embedding_func=EmbeddingFunc(
            embedding_dim=embedding_dim,
            max_token_size=args.max_embed_tokens,
            func=lambda texts: azure_openai_embedding(
                texts, model=args.embedding_model
            ),
        ),
    )

    @app.on_event("startup")
    async def startup_event():
        """Index all files in input directory during startup"""
        try:
            new_files = doc_manager.scan_directory()
            for file_path in new_files:
                try:
                    # Use async file reading
                    async with aiofiles.open(file_path, "r", encoding="utf-8") as f:
                        content = await f.read()
                        # Use the async version of insert directly
                        await rag.ainsert(content)
                        doc_manager.mark_as_indexed(file_path)
                        logging.info(f"Indexed file: {file_path}")
                except Exception as e:
                    trace_exception(e)
                    logging.error(f"Error indexing file {file_path}: {str(e)}")

            logging.info(f"Indexed {len(new_files)} documents from {args.input_dir}")

        except Exception as e:
            logging.error(f"Error during startup indexing: {str(e)}")

    @app.post("/documents/scan", dependencies=[Depends(optional_api_key)])
    async def scan_for_new_documents():
        """Manually trigger scanning for new documents"""
        try:
            new_files = doc_manager.scan_directory()
            indexed_count = 0

            for file_path in new_files:
                try:
                    with open(file_path, "r", encoding="utf-8") as f:
                        content = f.read()
                        await rag.ainsert(content)
                        doc_manager.mark_as_indexed(file_path)
                        indexed_count += 1
                except Exception as e:
                    logging.error(f"Error indexing file {file_path}: {str(e)}")

            return {
                "status": "success",
                "indexed_count": indexed_count,
                "total_documents": len(doc_manager.indexed_files),
            }
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))

    @app.post("/resetcache", dependencies=[Depends(optional_api_key)])
    async def reset_cache():
        """Manually reset cache"""
        try:
            cachefile = args.working_dir + "/kv_store_llm_response_cache.json"
            if os.path.exists(cachefile):
                with open(cachefile, "w") as f:
                    f.write("{}")
            return {"status": "success"}
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))

    @app.post("/documents/upload", dependencies=[Depends(optional_api_key)])
    async def upload_to_input_dir(file: UploadFile = File(...)):
        """Upload a file to the input directory"""
        try:
            if not doc_manager.is_supported_file(file.filename):
                raise HTTPException(
                    status_code=400,
                    detail=f"Unsupported file type. Supported types: {doc_manager.supported_extensions}",
                )

            file_path = doc_manager.input_dir / file.filename
            with open(file_path, "wb") as buffer:
                shutil.copyfileobj(file.file, buffer)

            # Immediately index the uploaded file
            with open(file_path, "r", encoding="utf-8") as f:
                content = f.read()
                await rag.ainsert(content)
                doc_manager.mark_as_indexed(file_path)

            return {
                "status": "success",
                "message": f"File uploaded and indexed: {file.filename}",
                "total_documents": len(doc_manager.indexed_files),
            }
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))

    @app.post(
        "/query", response_model=QueryResponse, dependencies=[Depends(optional_api_key)]
    )
    async def query_text(request: QueryRequest):
        try:
            response = await rag.aquery(
                request.query,
                param=QueryParam(
                    mode=request.mode,
                    stream=False,
                    only_need_context=request.only_need_context,
                ),
            )
            return QueryResponse(response=response)
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))

    @app.post("/query/stream", dependencies=[Depends(optional_api_key)])
    async def query_text_stream(request: QueryRequest):
        try:
            response = await rag.aquery(
                request.query,
                param=QueryParam(
                    mode=request.mode,
                    stream=True,
                    only_need_context=request.only_need_context,
                ),
            )
            if inspect.isasyncgen(response):

                async def stream_generator():
                    async for chunk in response:
                        yield json.dumps({"data": chunk}) + "\n"

                return StreamingResponse(
                    stream_generator(), media_type="application/json"
                )
            else:
                return QueryResponse(response=response)

        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))

    @app.post(
        "/documents/text",
        response_model=InsertResponse,
        dependencies=[Depends(optional_api_key)],
    )
    async def insert_text(request: InsertTextRequest):
        try:
            await rag.ainsert(request.text)
            return InsertResponse(
                status="success",
                message="Text successfully inserted",
                document_count=1,
            )
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))

    @app.post(
        "/documents/file",
        response_model=InsertResponse,
        dependencies=[Depends(optional_api_key)],
    )
    async def insert_file(file: UploadFile = File(...), description: str = Form(None)):
        try:
            content = await file.read()

            if file.filename.endswith((".txt", ".md")):
                text = content.decode("utf-8")
                rag.insert(text)
            else:
                raise HTTPException(
                    status_code=400,
                    detail="Unsupported file type. Only .txt and .md files are supported",
                )

            return InsertResponse(
                status="success",
                message=f"File '{file.filename}' successfully inserted",
                document_count=1,
            )
        except UnicodeDecodeError:
            raise HTTPException(status_code=400, detail="File encoding not supported")
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))

    @app.post(
        "/documents/batch",
        response_model=InsertResponse,
        dependencies=[Depends(optional_api_key)],
    )
    async def insert_batch(files: List[UploadFile] = File(...)):
        try:
            inserted_count = 0
            failed_files = []

            for file in files:
                try:
                    content = await file.read()
                    if file.filename.endswith((".txt", ".md")):
                        text = content.decode("utf-8")
                        rag.insert(text)
                        inserted_count += 1
                    else:
                        failed_files.append(f"{file.filename} (unsupported type)")
                except Exception as e:
                    failed_files.append(f"{file.filename} ({str(e)})")

            status_message = f"Successfully inserted {inserted_count} documents"
            if failed_files:
                status_message += f". Failed files: {', '.join(failed_files)}"

            return InsertResponse(
                status="success" if inserted_count > 0 else "partial_success",
                message=status_message,
                document_count=len(files),
            )
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))

    @app.delete(
        "/documents",
        response_model=InsertResponse,
        dependencies=[Depends(optional_api_key)],
    )
    async def clear_documents():
        try:
            rag.text_chunks = []
            rag.entities_vdb = None
            rag.relationships_vdb = None
            return InsertResponse(
                status="success",
                message="All documents cleared successfully",
                document_count=0,
            )
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))

    @app.get("/health", dependencies=[Depends(optional_api_key)])
    async def get_status():
        """Get current system status"""
        return {
            "status": "healthy",
            "working_directory": str(args.working_dir),
            "input_directory": str(args.input_dir),
            "indexed_files": len(doc_manager.indexed_files),
            "configuration": {
                "model": args.model,
                "embedding_model": args.embedding_model,
                "max_tokens": args.max_tokens,
                "embedding_dim": embedding_dim,
            },
        }

    return app


def main():
    args = parse_args()
    import uvicorn

    app = create_app(args)
    uvicorn.run(app, host=args.host, port=args.port)


if __name__ == "__main__":
    main()
```

@@ -4,6 +4,10 @@ import logging

```python
import argparse
from lightrag import LightRAG, QueryParam
from lightrag.llm import lollms_model_complete, lollms_embed
from lightrag.llm import ollama_model_complete, ollama_embed
from lightrag.llm import openai_complete_if_cache, openai_embedding
from lightrag.llm import azure_openai_complete_if_cache, azure_openai_embedding

from lightrag.utils import EmbeddingFunc
from typing import Optional, List
from enum import Enum
```

@@ -20,11 +24,39 @@ from fastapi.middleware.cors import CORSMiddleware

```python
from starlette.status import HTTP_403_FORBIDDEN


def get_default_host(binding_type: str) -> str:
    default_hosts = {
        "ollama": "http://localhost:11434",
        "lollms": "http://localhost:9600",
        "azure_openai": "https://api.openai.com/v1",
        "openai": "https://api.openai.com/v1",
    }
    return default_hosts.get(
        binding_type, "http://localhost:11434"
    )  # fallback to ollama if unknown


def parse_args():
    parser = argparse.ArgumentParser(
        description="LightRAG FastAPI Server with separate working and input directories"
    )

    # Start with the bindings, so their values can drive the host defaults below
    parser.add_argument(
        "--llm-binding",
        default="ollama",
        help="LLM binding to be used. Supported: lollms, ollama, openai, azure_openai (default: ollama)",
    )
    parser.add_argument(
        "--embedding-binding",
        default="ollama",
        help="Embedding binding to be used. Supported: lollms, ollama, openai, azure_openai (default: ollama)",
    )

    # Parse just these arguments first
    temp_args, _ = parser.parse_known_args()

    # Add remaining arguments with dynamic defaults for hosts
    # Server configuration
    parser.add_argument(
        "--host", default="0.0.0.0", help="Server host (default: 0.0.0.0)"
    )
```

@@ -45,23 +77,45 @@ def parse_args():

```python
        help="Directory containing input documents (default: ./inputs)",
    )

    # LLM model configuration
    default_llm_host = get_default_host(temp_args.llm_binding)
    parser.add_argument(
        "--llm-binding-host",
        default=default_llm_host,
        help=f"llm server host URL (default: {default_llm_host})",
    )

    parser.add_argument(
        "--llm-model",
        default="mistral-nemo:latest",
        help="LLM model name (default: mistral-nemo:latest)",
    )

    # Embedding model configuration
    default_embedding_host = get_default_host(temp_args.embedding_binding)
    parser.add_argument(
        "--embedding-binding-host",
        default=default_embedding_host,
        help=f"embedding server host URL (default: {default_embedding_host})",
    )

    parser.add_argument(
        "--embedding-model",
        default="bge-m3:latest",
        help="Embedding model name (default: bge-m3:latest)",
    )

    def timeout_type(value):
        if value is None or value == "None":
            return None
        return int(value)

    parser.add_argument(
        "--timeout",
        default=None,
        type=timeout_type,
        help="Timeout in seconds (useful when using slow AI). Use None for infinite timeout",
    )
    # RAG configuration
    parser.add_argument(
        "--max-async", type=int, default=4, help="Maximum async operations (default: 4)"
    )
```

@@ -100,6 +154,20 @@ def parse_args():

```python
        default=None,
    )

    # Optional https parameters
    parser.add_argument(
        "--ssl", action="store_true", help="Enable HTTPS (default: False)"
    )
    parser.add_argument(
        "--ssl-certfile",
        default=None,
        help="Path to SSL certificate file (required if --ssl is enabled)",
    )
    parser.add_argument(
        "--ssl-keyfile",
        default=None,
        help="Path to SSL private key file (required if --ssl is enabled)",
    )
    return parser.parse_args()
```

@@ -188,6 +256,24 @@ def get_api_key_dependency(api_key: Optional[str]):

```python
def create_app(args):
    # Verify that bindings are correctly set up
    if args.llm_binding not in ["lollms", "ollama", "openai", "azure_openai"]:
        raise Exception("llm binding not supported")

    if args.embedding_binding not in ["lollms", "ollama", "openai", "azure_openai"]:
        raise Exception("embedding binding not supported")

    # Add SSL validation
    if args.ssl:
        if not args.ssl_certfile or not args.ssl_keyfile:
            raise Exception(
                "SSL certificate and key files must be provided when SSL is enabled"
            )
        if not os.path.exists(args.ssl_certfile):
            raise Exception(f"SSL certificate file not found: {args.ssl_certfile}")
        if not os.path.exists(args.ssl_keyfile):
            raise Exception(f"SSL key file not found: {args.ssl_keyfile}")

    # Setup logging
    logging.basicConfig(
        format="%(levelname)s:%(message)s", level=getattr(logging, args.log_level)
    )
```

@@ -203,7 +289,7 @@ def create_app(args):

```python
        + "(With authentication)"
        if api_key
        else "",
        version="1.0.1",
        openapi_tags=[{"name": "api"}],
    )
```

@@ -228,19 +314,44 @@ def create_app(args):

```python
    # Initialize RAG
    rag = LightRAG(
        working_dir=args.working_dir,
        llm_model_func=lollms_model_complete
        if args.llm_binding == "lollms"
        else ollama_model_complete
        if args.llm_binding == "ollama"
        else azure_openai_complete_if_cache
        if args.llm_binding == "azure_openai"
        else openai_complete_if_cache,
        llm_model_name=args.llm_model,
        llm_model_max_async=args.max_async,
        llm_model_max_token_size=args.max_tokens,
        llm_model_kwargs={
            "host": args.llm_binding_host,
            "timeout": args.timeout,
            "options": {"num_ctx": args.max_tokens},
        },
        embedding_func=EmbeddingFunc(
            embedding_dim=args.embedding_dim,
            max_token_size=args.max_embed_tokens,
            # Select the embedding function based on the embedding binding
            func=lambda texts: lollms_embed(
                texts,
                embed_model=args.embedding_model,
                host=args.embedding_binding_host,
            )
            if args.embedding_binding == "lollms"
            else ollama_embed(
                texts,
                embed_model=args.embedding_model,
                host=args.embedding_binding_host,
            )
            if args.embedding_binding == "ollama"
            else azure_openai_embedding(
                texts,
                model=args.embedding_model,  # no host is used for openai
            )
            if args.embedding_binding == "azure_openai"
            else openai_embedding(
                texts,
                model=args.embedding_model,  # no host is used for openai
            ),
        ),
    )
```

@@ -470,10 +581,15 @@ def create_app(args):

```python
            "input_directory": str(args.input_dir),
            "indexed_files": len(doc_manager.indexed_files),
            "configuration": {
                # LLM configuration: binding / host address (if applicable) / model (if applicable)
                "llm_binding": args.llm_binding,
                "llm_binding_host": args.llm_binding_host,
                "llm_model": args.llm_model,
                # Embedding configuration: binding / host address (if applicable) / model (if applicable)
                "embedding_binding": args.embedding_binding,
                "embedding_binding_host": args.embedding_binding_host,
                "embedding_model": args.embedding_model,
                "max_tokens": args.max_tokens,
            },
        }
```

@@ -485,7 +601,19 @@ def main():

```python
    import uvicorn

    app = create_app(args)
    uvicorn_config = {
        "app": app,
        "host": args.host,
        "port": args.port,
    }
    if args.ssl:
        uvicorn_config.update(
            {
                "ssl_certfile": args.ssl_certfile,
                "ssl_keyfile": args.ssl_keyfile,
            }
        )
    uvicorn.run(**uvicorn_config)


if __name__ == "__main__":
```

@@ -1,491 +0,0 @@

```python
from fastapi import FastAPI, HTTPException, File, UploadFile, Form
from pydantic import BaseModel
import logging
import argparse
from lightrag import LightRAG, QueryParam
from lightrag.llm import ollama_model_complete, ollama_embed
from lightrag.utils import EmbeddingFunc
from typing import Optional, List
from enum import Enum
from pathlib import Path
import shutil
import aiofiles
from ascii_colors import trace_exception
import os

from fastapi import Depends, Security
from fastapi.security import APIKeyHeader
from fastapi.middleware.cors import CORSMiddleware

from starlette.status import HTTP_403_FORBIDDEN


def parse_args():
    parser = argparse.ArgumentParser(
        description="LightRAG FastAPI Server with separate working and input directories"
    )

    # Server configuration
    parser.add_argument(
        "--host", default="0.0.0.0", help="Server host (default: 0.0.0.0)"
    )
    parser.add_argument(
        "--port", type=int, default=9621, help="Server port (default: 9621)"
    )

    # Directory configuration
    parser.add_argument(
        "--working-dir",
        default="./rag_storage",
        help="Working directory for RAG storage (default: ./rag_storage)",
    )
    parser.add_argument(
        "--input-dir",
        default="./inputs",
        help="Directory containing input documents (default: ./inputs)",
    )

    # Model configuration
    parser.add_argument(
        "--model",
        default="mistral-nemo:latest",
        help="LLM model name (default: mistral-nemo:latest)",
    )
    parser.add_argument(
        "--embedding-model",
        default="bge-m3:latest",
        help="Embedding model name (default: bge-m3:latest)",
    )
    parser.add_argument(
        "--ollama-host",
        default="http://localhost:11434",
        help="Ollama host URL (default: http://localhost:11434)",
    )

    # RAG configuration
    parser.add_argument(
        "--max-async", type=int, default=4, help="Maximum async operations (default: 4)"
    )
    parser.add_argument(
        "--max-tokens",
        type=int,
        default=32768,
        help="Maximum token size (default: 32768)",
    )
    parser.add_argument(
        "--embedding-dim",
        type=int,
        default=1024,
        help="Embedding dimensions (default: 1024)",
    )
    parser.add_argument(
        "--max-embed-tokens",
        type=int,
        default=8192,
        help="Maximum embedding token size (default: 8192)",
    )

    # Logging configuration
    parser.add_argument(
        "--log-level",
        default="INFO",
        choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
        help="Logging level (default: INFO)",
    )
    parser.add_argument(
        "--key",
        type=str,
        help="API key for authentication. This protects lightrag server against unauthorized access",
        default=None,
    )

    return parser.parse_args()


class DocumentManager:
    """Handles document operations and tracking"""

    def __init__(self, input_dir: str, supported_extensions: tuple = (".txt", ".md")):
        self.input_dir = Path(input_dir)
        self.supported_extensions = supported_extensions
        self.indexed_files = set()

        # Create input directory if it doesn't exist
        self.input_dir.mkdir(parents=True, exist_ok=True)

    def scan_directory(self) -> List[Path]:
        """Scan input directory for new files"""
        new_files = []
        for ext in self.supported_extensions:
            for file_path in self.input_dir.rglob(f"*{ext}"):
                if file_path not in self.indexed_files:
                    new_files.append(file_path)
        return new_files

    def mark_as_indexed(self, file_path: Path):
        """Mark a file as indexed"""
        self.indexed_files.add(file_path)

    def is_supported_file(self, filename: str) -> bool:
        """Check if file type is supported"""
        return any(filename.lower().endswith(ext) for ext in self.supported_extensions)


# Pydantic models
class SearchMode(str, Enum):
    naive = "naive"
    local = "local"
    global_ = "global"
    hybrid = "hybrid"


class QueryRequest(BaseModel):
    query: str
    mode: SearchMode = SearchMode.hybrid
    stream: bool = False
    only_need_context: bool = False


class QueryResponse(BaseModel):
    response: str


class InsertTextRequest(BaseModel):
    text: str
    description: Optional[str] = None


class InsertResponse(BaseModel):
    status: str
    message: str
    document_count: int


def get_api_key_dependency(api_key: Optional[str]):
    if not api_key:
        # If no API key is configured, return a dummy dependency that always succeeds
        async def no_auth():
            return None

        return no_auth

    # If API key is configured, use proper authentication
    api_key_header = APIKeyHeader(name="X-API-Key", auto_error=False)

    async def api_key_auth(api_key_header_value: str | None = Security(api_key_header)):
        if not api_key_header_value:
            raise HTTPException(
                status_code=HTTP_403_FORBIDDEN, detail="API Key required"
            )
        if api_key_header_value != api_key:
            raise HTTPException(
                status_code=HTTP_403_FORBIDDEN, detail="Invalid API Key"
            )
        return api_key_header_value

    return api_key_auth


def create_app(args):
    # Setup logging
    logging.basicConfig(
        format="%(levelname)s:%(message)s", level=getattr(logging, args.log_level)
    )

    # Check if API key is provided either through env var or args
    api_key = os.getenv("LIGHTRAG_API_KEY") or args.key

    # Initialize FastAPI
    app = FastAPI(
        title="LightRAG API",
        description="API for querying text using LightRAG with separate storage and input directories"
        + "(With authentication)"
        if api_key
        else "",
        version="1.0.0",
        openapi_tags=[{"name": "api"}],
    )

    # Add CORS middleware
    app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )

    # Create the optional API key dependency
    optional_api_key = get_api_key_dependency(api_key)

    # Create working directory if it doesn't exist
    Path(args.working_dir).mkdir(parents=True, exist_ok=True)

    # Initialize document manager
    doc_manager = DocumentManager(args.input_dir)

    # Initialize RAG
    rag = LightRAG(
        working_dir=args.working_dir,
        llm_model_func=ollama_model_complete,
        llm_model_name=args.model,
        llm_model_max_async=args.max_async,
        llm_model_max_token_size=args.max_tokens,
        llm_model_kwargs={
            "host": args.ollama_host,
            "options": {"num_ctx": args.max_tokens},
        },
        embedding_func=EmbeddingFunc(
            embedding_dim=args.embedding_dim,
            max_token_size=args.max_embed_tokens,
            func=lambda texts: ollama_embed(
                texts, embed_model=args.embedding_model, host=args.ollama_host
            ),
        ),
    )

    @app.on_event("startup")
    async def startup_event():
        """Index all files in input directory during startup"""
        try:
            new_files = doc_manager.scan_directory()
            for file_path in new_files:
                try:
                    # Use async file reading
                    async with aiofiles.open(file_path, "r", encoding="utf-8") as f:
                        content = await f.read()
                        # Use the async version of insert directly
                        await rag.ainsert(content)
                        doc_manager.mark_as_indexed(file_path)
                        logging.info(f"Indexed file: {file_path}")
                except Exception as e:
                    trace_exception(e)
                    logging.error(f"Error indexing file {file_path}: {str(e)}")

            logging.info(f"Indexed {len(new_files)} documents from {args.input_dir}")

        except Exception as e:
            logging.error(f"Error during startup indexing: {str(e)}")

    @app.post("/documents/scan", dependencies=[Depends(optional_api_key)])
    async def scan_for_new_documents():
        """Manually trigger scanning for new documents"""
        try:
            new_files = doc_manager.scan_directory()
            indexed_count = 0

            for file_path in new_files:
                try:
                    with open(file_path, "r", encoding="utf-8") as f:
                        content = f.read()
                        await rag.ainsert(content)
                        doc_manager.mark_as_indexed(file_path)
                        indexed_count += 1
                except Exception as e:
                    logging.error(f"Error indexing file {file_path}: {str(e)}")

            return {
                "status": "success",
                "indexed_count": indexed_count,
                "total_documents": len(doc_manager.indexed_files),
            }
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))

    @app.post("/documents/upload", dependencies=[Depends(optional_api_key)])
    async def upload_to_input_dir(file: UploadFile = File(...)):
        """Upload a file to the input directory"""
        try:
            if not doc_manager.is_supported_file(file.filename):
                raise HTTPException(
                    status_code=400,
                    detail=f"Unsupported file type. Supported types: {doc_manager.supported_extensions}",
                )

            file_path = doc_manager.input_dir / file.filename
            with open(file_path, "wb") as buffer:
                shutil.copyfileobj(file.file, buffer)

            # Immediately index the uploaded file
            with open(file_path, "r", encoding="utf-8") as f:
                content = f.read()
                await rag.ainsert(content)
                doc_manager.mark_as_indexed(file_path)

            return {
                "status": "success",
                "message": f"File uploaded and indexed: {file.filename}",
                "total_documents": len(doc_manager.indexed_files),
            }
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))

    @app.post(
        "/query", response_model=QueryResponse, dependencies=[Depends(optional_api_key)]
    )
    async def query_text(request: QueryRequest):
        try:
            response = await rag.aquery(
                request.query,
                param=QueryParam(
                    mode=request.mode,
                    stream=request.stream,
                    only_need_context=request.only_need_context,
                ),
            )

            if request.stream:
                result = ""
                async for chunk in response:
                    result += chunk
                return QueryResponse(response=result)
            else:
                return QueryResponse(response=response)
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))

    @app.post("/query/stream", dependencies=[Depends(optional_api_key)])
    async def query_text_stream(request: QueryRequest):
        try:
            response = rag.query(
                request.query,
                param=QueryParam(
                    mode=request.mode,
                    stream=True,
                    only_need_context=request.only_need_context,
                ),
            )

            async def stream_generator():
                async for chunk in response:
                    yield chunk

            return stream_generator()
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))

    @app.post(
        "/documents/text",
        response_model=InsertResponse,
```
|
|
||||||
dependencies=[Depends(optional_api_key)],
|
|
||||||
)
|
|
||||||
async def insert_text(request: InsertTextRequest):
|
|
||||||
try:
|
|
||||||
await rag.ainsert(request.text)
|
|
||||||
return InsertResponse(
|
|
||||||
status="success",
|
|
||||||
message="Text successfully inserted",
|
|
||||||
document_count=1,
|
|
||||||
)
|
|
||||||
except Exception as e:
|
|
||||||
raise HTTPException(status_code=500, detail=str(e))
|
|
||||||
|
|
||||||
@app.post(
|
|
||||||
"/documents/file",
|
|
||||||
response_model=InsertResponse,
|
|
||||||
dependencies=[Depends(optional_api_key)],
|
|
||||||
)
|
|
||||||
async def insert_file(file: UploadFile = File(...), description: str = Form(None)):
|
|
||||||
try:
|
|
||||||
content = await file.read()
|
|
||||||
|
|
||||||
if file.filename.endswith((".txt", ".md")):
|
|
||||||
text = content.decode("utf-8")
|
|
||||||
await rag.ainsert(text)
|
|
||||||
else:
|
|
||||||
raise HTTPException(
|
|
||||||
status_code=400,
|
|
||||||
detail="Unsupported file type. Only .txt and .md files are supported",
|
|
||||||
)
|
|
||||||
|
|
||||||
return InsertResponse(
|
|
||||||
status="success",
|
|
||||||
message=f"File '{file.filename}' successfully inserted",
|
|
||||||
document_count=1,
|
|
||||||
)
|
|
||||||
except UnicodeDecodeError:
|
|
||||||
raise HTTPException(status_code=400, detail="File encoding not supported")
|
|
||||||
except Exception as e:
|
|
||||||
raise HTTPException(status_code=500, detail=str(e))
|
|
||||||
|
|
||||||
@app.post(
|
|
||||||
"/documents/batch",
|
|
||||||
response_model=InsertResponse,
|
|
||||||
dependencies=[Depends(optional_api_key)],
|
|
||||||
)
|
|
||||||
async def insert_batch(files: List[UploadFile] = File(...)):
|
|
||||||
try:
|
|
||||||
inserted_count = 0
|
|
||||||
failed_files = []
|
|
||||||
|
|
||||||
for file in files:
|
|
||||||
try:
|
|
||||||
content = await file.read()
|
|
||||||
if file.filename.endswith((".txt", ".md")):
|
|
||||||
text = content.decode("utf-8")
|
|
||||||
await rag.ainsert(text)
|
|
||||||
inserted_count += 1
|
|
||||||
else:
|
|
||||||
failed_files.append(f"{file.filename} (unsupported type)")
|
|
||||||
except Exception as e:
|
|
||||||
failed_files.append(f"{file.filename} ({str(e)})")
|
|
||||||
|
|
||||||
status_message = f"Successfully inserted {inserted_count} documents"
|
|
||||||
if failed_files:
|
|
||||||
status_message += f". Failed files: {', '.join(failed_files)}"
|
|
||||||
|
|
||||||
return InsertResponse(
|
|
||||||
status="success" if inserted_count > 0 else "partial_success",
|
|
||||||
message=status_message,
|
|
||||||
document_count=len(files),
|
|
||||||
)
|
|
||||||
except Exception as e:
|
|
||||||
raise HTTPException(status_code=500, detail=str(e))
|
|
||||||
|
|
||||||
@app.delete(
|
|
||||||
"/documents",
|
|
||||||
response_model=InsertResponse,
|
|
||||||
dependencies=[Depends(optional_api_key)],
|
|
||||||
)
|
|
||||||
async def clear_documents():
|
|
||||||
try:
|
|
||||||
rag.text_chunks = []
|
|
||||||
rag.entities_vdb = None
|
|
||||||
rag.relationships_vdb = None
|
|
||||||
return InsertResponse(
|
|
||||||
status="success",
|
|
||||||
message="All documents cleared successfully",
|
|
||||||
document_count=0,
|
|
||||||
)
|
|
||||||
except Exception as e:
|
|
||||||
raise HTTPException(status_code=500, detail=str(e))
|
|
||||||
|
|
||||||
@app.get("/health", dependencies=[Depends(optional_api_key)])
|
|
||||||
async def get_status():
|
|
||||||
"""Get current system status"""
|
|
||||||
return {
|
|
||||||
"status": "healthy",
|
|
||||||
"working_directory": str(args.working_dir),
|
|
||||||
"input_directory": str(args.input_dir),
|
|
||||||
"indexed_files": len(doc_manager.indexed_files),
|
|
||||||
"configuration": {
|
|
||||||
"model": args.model,
|
|
||||||
"embedding_model": args.embedding_model,
|
|
||||||
"max_tokens": args.max_tokens,
|
|
||||||
"ollama_host": args.ollama_host,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
return app
|
|
||||||
|
|
||||||
|
|
||||||
def main():
|
|
||||||
args = parse_args()
|
|
||||||
import uvicorn
|
|
||||||
|
|
||||||
app = create_app(args)
|
|
||||||
uvicorn.run(app, host=args.host, port=args.port)
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
main()
|
|
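For reference, a minimal client sketch for the server above. This is an illustration, not part of the commit: it assumes the server runs on the default port (9621, per these servers' `parse_args`), that an API key was configured via `--key` or `LIGHTRAG_API_KEY`, and that the `requests` package is installed.

```python
import requests

# QueryRequest fields: query, mode (naive/local/global/hybrid), stream, only_need_context
resp = requests.post(
    "http://localhost:9621/query",
    json={"query": "What are the top themes?", "mode": "hybrid", "stream": False},
    headers={"X-API-Key": "your-secret-key"},  # header name defined by APIKeyHeader above
    timeout=60,
)
resp.raise_for_status()
print(resp.json()["response"])  # QueryResponse carries a single "response" field
```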
@@ -1,506 +0,0 @@
from fastapi import FastAPI, HTTPException, File, UploadFile, Form
from pydantic import BaseModel
import asyncio
import logging
import argparse
from lightrag import LightRAG, QueryParam
from lightrag.llm import openai_complete_if_cache, openai_embedding
from lightrag.utils import EmbeddingFunc
from typing import Optional, List
from enum import Enum
from pathlib import Path
import shutil
import aiofiles
from ascii_colors import trace_exception
import nest_asyncio

import os

from fastapi import Depends, Security
from fastapi.security import APIKeyHeader
from fastapi.middleware.cors import CORSMiddleware

from starlette.status import HTTP_403_FORBIDDEN

# Apply nest_asyncio to solve event loop issues
nest_asyncio.apply()


def parse_args():
    parser = argparse.ArgumentParser(
        description="LightRAG FastAPI Server with OpenAI integration"
    )

    # Server configuration
    parser.add_argument(
        "--host", default="0.0.0.0", help="Server host (default: 0.0.0.0)"
    )
    parser.add_argument(
        "--port", type=int, default=9621, help="Server port (default: 9621)"
    )

    # Directory configuration
    parser.add_argument(
        "--working-dir",
        default="./rag_storage",
        help="Working directory for RAG storage (default: ./rag_storage)",
    )
    parser.add_argument(
        "--input-dir",
        default="./inputs",
        help="Directory containing input documents (default: ./inputs)",
    )

    # Model configuration
    parser.add_argument(
        "--model", default="gpt-4", help="OpenAI model name (default: gpt-4)"
    )
    parser.add_argument(
        "--embedding-model",
        default="text-embedding-3-large",
        help="OpenAI embedding model (default: text-embedding-3-large)",
    )

    # RAG configuration
    parser.add_argument(
        "--max-tokens",
        type=int,
        default=32768,
        help="Maximum token size (default: 32768)",
    )
    parser.add_argument(
        "--max-embed-tokens",
        type=int,
        default=8192,
        help="Maximum embedding token size (default: 8192)",
    )

    # Logging configuration
    parser.add_argument(
        "--log-level",
        default="INFO",
        choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"],
        help="Logging level (default: INFO)",
    )

    parser.add_argument(
        "--key",
        type=str,
        help="API key for authentication. This protects lightrag server against unauthorized access",
        default=None,
    )

    return parser.parse_args()


class DocumentManager:
    """Handles document operations and tracking"""

    def __init__(self, input_dir: str, supported_extensions: tuple = (".txt", ".md")):
        self.input_dir = Path(input_dir)
        self.supported_extensions = supported_extensions
        self.indexed_files = set()

        # Create input directory if it doesn't exist
        self.input_dir.mkdir(parents=True, exist_ok=True)

    def scan_directory(self) -> List[Path]:
        """Scan input directory for new files"""
        new_files = []
        for ext in self.supported_extensions:
            for file_path in self.input_dir.rglob(f"*{ext}"):
                if file_path not in self.indexed_files:
                    new_files.append(file_path)
        return new_files

    def mark_as_indexed(self, file_path: Path):
        """Mark a file as indexed"""
        self.indexed_files.add(file_path)

    def is_supported_file(self, filename: str) -> bool:
        """Check if file type is supported"""
        return any(filename.lower().endswith(ext) for ext in self.supported_extensions)


# Pydantic models
class SearchMode(str, Enum):
    naive = "naive"
    local = "local"
    global_ = "global"
    hybrid = "hybrid"


class QueryRequest(BaseModel):
    query: str
    mode: SearchMode = SearchMode.hybrid
    stream: bool = False
    only_need_context: bool = False


class QueryResponse(BaseModel):
    response: str


class InsertTextRequest(BaseModel):
    text: str
    description: Optional[str] = None


class InsertResponse(BaseModel):
    status: str
    message: str
    document_count: int


def get_api_key_dependency(api_key: Optional[str]):
    if not api_key:
        # If no API key is configured, return a dummy dependency that always succeeds
        async def no_auth():
            return None

        return no_auth

    # If API key is configured, use proper authentication
    api_key_header = APIKeyHeader(name="X-API-Key", auto_error=False)

    async def api_key_auth(api_key_header_value: str | None = Security(api_key_header)):
        if not api_key_header_value:
            raise HTTPException(
                status_code=HTTP_403_FORBIDDEN, detail="API Key required"
            )
        if api_key_header_value != api_key:
            raise HTTPException(
                status_code=HTTP_403_FORBIDDEN, detail="Invalid API Key"
            )
        return api_key_header_value

    return api_key_auth


async def get_embedding_dim(embedding_model: str) -> int:
    """Get embedding dimensions for the specified model"""
    test_text = ["This is a test sentence."]
    embedding = await openai_embedding(test_text, model=embedding_model)
    return embedding.shape[1]


def create_app(args):
    # Setup logging
    logging.basicConfig(
        format="%(levelname)s:%(message)s", level=getattr(logging, args.log_level)
    )

    # Check if API key is provided either through env var or args
    api_key = os.getenv("LIGHTRAG_API_KEY") or args.key

    # Initialize FastAPI
    app = FastAPI(
        title="LightRAG API",
        description="API for querying text using LightRAG with separate storage and input directories"
        + "(With authentication)"
        if api_key
        else "",
        version="1.0.0",
        openapi_tags=[{"name": "api"}],
    )

    # Add CORS middleware
    app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )

    # Create the optional API key dependency
    optional_api_key = get_api_key_dependency(api_key)

    # Add CORS middleware
    app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )

    # Create working directory if it doesn't exist
    Path(args.working_dir).mkdir(parents=True, exist_ok=True)

    # Initialize document manager
    doc_manager = DocumentManager(args.input_dir)

    # Get embedding dimensions
    embedding_dim = asyncio.run(get_embedding_dim(args.embedding_model))

    async def async_openai_complete(
        prompt, system_prompt=None, history_messages=[], **kwargs
    ):
        """Async wrapper for OpenAI completion"""
        return await openai_complete_if_cache(
            args.model,
            prompt,
            system_prompt=system_prompt,
            history_messages=history_messages,
            **kwargs,
        )

    # Initialize RAG with OpenAI configuration
    rag = LightRAG(
        working_dir=args.working_dir,
        llm_model_func=async_openai_complete,
        llm_model_name=args.model,
        llm_model_max_token_size=args.max_tokens,
        embedding_func=EmbeddingFunc(
            embedding_dim=embedding_dim,
            max_token_size=args.max_embed_tokens,
            func=lambda texts: openai_embedding(texts, model=args.embedding_model),
        ),
    )

    @app.on_event("startup")
    async def startup_event():
        """Index all files in input directory during startup"""
        try:
            new_files = doc_manager.scan_directory()
            for file_path in new_files:
                try:
                    # Use async file reading
                    async with aiofiles.open(file_path, "r", encoding="utf-8") as f:
                        content = await f.read()
                        # Use the async version of insert directly
                        await rag.ainsert(content)
                        doc_manager.mark_as_indexed(file_path)
                        logging.info(f"Indexed file: {file_path}")
                except Exception as e:
                    trace_exception(e)
                    logging.error(f"Error indexing file {file_path}: {str(e)}")

            logging.info(f"Indexed {len(new_files)} documents from {args.input_dir}")

        except Exception as e:
            logging.error(f"Error during startup indexing: {str(e)}")

    @app.post("/documents/scan", dependencies=[Depends(optional_api_key)])
    async def scan_for_new_documents():
        """Manually trigger scanning for new documents"""
        try:
            new_files = doc_manager.scan_directory()
            indexed_count = 0

            for file_path in new_files:
                try:
                    with open(file_path, "r", encoding="utf-8") as f:
                        content = f.read()
                        rag.insert(content)
                        doc_manager.mark_as_indexed(file_path)
                        indexed_count += 1
                except Exception as e:
                    logging.error(f"Error indexing file {file_path}: {str(e)}")

            return {
                "status": "success",
                "indexed_count": indexed_count,
                "total_documents": len(doc_manager.indexed_files),
            }
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))

    @app.post("/documents/upload", dependencies=[Depends(optional_api_key)])
    async def upload_to_input_dir(file: UploadFile = File(...)):
        """Upload a file to the input directory"""
        try:
            if not doc_manager.is_supported_file(file.filename):
                raise HTTPException(
                    status_code=400,
                    detail=f"Unsupported file type. Supported types: {doc_manager.supported_extensions}",
                )

            file_path = doc_manager.input_dir / file.filename
            with open(file_path, "wb") as buffer:
                shutil.copyfileobj(file.file, buffer)

            # Immediately index the uploaded file
            with open(file_path, "r", encoding="utf-8") as f:
                content = f.read()
                rag.insert(content)
                doc_manager.mark_as_indexed(file_path)

            return {
                "status": "success",
                "message": f"File uploaded and indexed: {file.filename}",
                "total_documents": len(doc_manager.indexed_files),
            }
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))

    @app.post(
        "/query", response_model=QueryResponse, dependencies=[Depends(optional_api_key)]
    )
    async def query_text(request: QueryRequest):
        try:
            response = await rag.aquery(
                request.query,
                param=QueryParam(
                    mode=request.mode,
                    stream=request.stream,
                    only_need_context=request.only_need_context,
                ),
            )

            if request.stream:
                result = ""
                async for chunk in response:
                    result += chunk
                return QueryResponse(response=result)
            else:
                return QueryResponse(response=response)
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))

    @app.post("/query/stream", dependencies=[Depends(optional_api_key)])
    async def query_text_stream(request: QueryRequest):
        try:
            response = rag.query(
                request.query,
                param=QueryParam(
                    mode=request.mode,
                    stream=True,
                    only_need_context=request.only_need_context,
                ),
            )

            async def stream_generator():
                async for chunk in response:
                    yield chunk

            return stream_generator()
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))

    @app.post(
        "/documents/text",
        response_model=InsertResponse,
        dependencies=[Depends(optional_api_key)],
    )
    async def insert_text(request: InsertTextRequest):
        try:
            rag.insert(request.text)
            return InsertResponse(
                status="success",
                message="Text successfully inserted",
                document_count=1,
            )
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))

    @app.post(
        "/documents/file",
        response_model=InsertResponse,
        dependencies=[Depends(optional_api_key)],
    )
    async def insert_file(file: UploadFile = File(...), description: str = Form(None)):
        try:
            content = await file.read()

            if file.filename.endswith((".txt", ".md")):
                text = content.decode("utf-8")
                rag.insert(text)
            else:
                raise HTTPException(
                    status_code=400,
                    detail="Unsupported file type. Only .txt and .md files are supported",
                )

            return InsertResponse(
                status="success",
                message=f"File '{file.filename}' successfully inserted",
                document_count=1,
            )
        except UnicodeDecodeError:
            raise HTTPException(status_code=400, detail="File encoding not supported")
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))

    @app.post(
        "/documents/batch",
        response_model=InsertResponse,
        dependencies=[Depends(optional_api_key)],
    )
    async def insert_batch(files: List[UploadFile] = File(...)):
        try:
            inserted_count = 0
            failed_files = []

            for file in files:
                try:
                    content = await file.read()
                    if file.filename.endswith((".txt", ".md")):
                        text = content.decode("utf-8")
                        rag.insert(text)
                        inserted_count += 1
                    else:
                        failed_files.append(f"{file.filename} (unsupported type)")
                except Exception as e:
                    failed_files.append(f"{file.filename} ({str(e)})")

            status_message = f"Successfully inserted {inserted_count} documents"
            if failed_files:
                status_message += f". Failed files: {', '.join(failed_files)}"

            return InsertResponse(
                status="success" if inserted_count > 0 else "partial_success",
                message=status_message,
                document_count=len(files),
            )
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))

    @app.delete(
        "/documents",
        response_model=InsertResponse,
        dependencies=[Depends(optional_api_key)],
    )
    async def clear_documents():
        try:
            rag.text_chunks = []
            rag.entities_vdb = None
            rag.relationships_vdb = None
            return InsertResponse(
                status="success",
                message="All documents cleared successfully",
                document_count=0,
            )
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))

    @app.get("/health", dependencies=[Depends(optional_api_key)])
    async def get_status():
        """Get current system status"""
        return {
            "status": "healthy",
            "working_directory": str(args.working_dir),
            "input_directory": str(args.input_dir),
            "indexed_files": len(doc_manager.indexed_files),
            "configuration": {
                "model": args.model,
                "embedding_model": args.embedding_model,
                "max_tokens": args.max_tokens,
                "embedding_dim": embedding_dim,
            },
        }

    return app


def main():
    args = parse_args()
    import uvicorn

    app = create_app(args)
    uvicorn.run(app, host=args.host, port=args.port)


if __name__ == "__main__":
    main()
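One behavioral difference between the deleted OpenAI server above and the Ollama server earlier in this diff is how route handlers index documents: the old file called the synchronous `rag.insert(...)` from inside `async` handlers, which blocks the event loop, while the replacement awaits the async variant. A side-by-side sketch of the two calls as they appear in this commit:

```python
# Deleted OpenAI server: synchronous insert inside an async handler (blocking)
rag.insert(request.text)

# New servers: awaited async insert (non-blocking)
await rag.ainsert(request.text)
```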
@@ -130,6 +130,7 @@ class PostgreSQLDB:
        data: Union[list, dict] = None,
        for_age: bool = False,
        graph_name: str = None,
+        upsert: bool = False,
    ):
        try:
            async with self.pool.acquire() as connection:
@@ -140,6 +141,11 @@ class PostgreSQLDB:
                    await connection.execute(sql)
                else:
                    await connection.execute(sql, *data.values())
+        except asyncpg.exceptions.UniqueViolationError as e:
+            if upsert:
+                print("Key value duplicate, but upsert succeeded.")
+            else:
+                logger.error(f"Upsert error: {e}")
        except Exception as e:
            logger.error(f"PostgreSQL database error: {e}")
            print(sql)
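With the new `upsert` flag, a duplicate-key violation is treated as a successful upsert instead of being logged as an error. A hypothetical call site, for illustration only — the SQL and table name are invented, and `db` is assumed to be a `PostgreSQLDB` instance:

```python
# Assumed usage sketch — duplicate keys are tolerated when upsert=True.
await db.execute(
    "INSERT INTO some_kv_table (id, value) VALUES ($1, $2)",  # illustrative SQL
    data={"id": "doc-1", "value": "chunk text"},
    upsert=True,
)
```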
@@ -568,10 +574,10 @@ class PGGraphStorage(BaseGraphStorage):

            if dtype == "vertex":
                vertex = json.loads(v)
-                field = json.loads(v).get("properties")
+                field = vertex.get("properties")
                if not field:
                    field = {}
-                field["label"] = PGGraphStorage._decode_graph_label(vertex["label"])
+                field["label"] = PGGraphStorage._decode_graph_label(field["node_id"])
                d[k] = field
            # convert edge from id-label->id by replacing id with node information
            # we only do this if the vertex was also returned in the query
@@ -666,73 +672,8 @@ class PGGraphStorage(BaseGraphStorage):
        # otherwise return the value stripping out some common special chars
        return field.replace("(", "_").replace(")", "")

-    @staticmethod
-    def _wrap_query(query: str, graph_name: str, **params: str) -> str:
-        """
-        Convert a cypher query to an Apache Age compatible
-        sql query by wrapping the cypher query in ag_catalog.cypher,
-        casting results to agtype and building a select statement
-
-        Args:
-            query (str): a valid cypher query
-            graph_name (str): the name of the graph to query
-            params (dict): parameters for the query
-
-        Returns:
-            str: an equivalent pgsql query
-        """
-
-        # pgsql template
-        template = """SELECT {projection} FROM ag_catalog.cypher('{graph_name}', $$
-        {query}
-        $$) AS ({fields})"""
-
-        # if there are any returned fields they must be added to the pgsql query
-        if "return" in query.lower():
-            # parse return statement to identify returned fields
-            fields = (
-                query.lower()
-                .split("return")[-1]
-                .split("distinct")[-1]
-                .split("order by")[0]
-                .split("skip")[0]
-                .split("limit")[0]
-                .split(",")
-            )
-
-            # raise exception if RETURN * is found as we can't resolve the fields
-            if "*" in [x.strip() for x in fields]:
-                raise ValueError(
-                    "AGE graph does not support 'RETURN *'"
-                    + " statements in Cypher queries"
-                )
-
-            # get pgsql formatted field names
-            fields = [
-                PGGraphStorage._get_col_name(field, idx)
-                for idx, field in enumerate(fields)
-            ]
-
-            # build resulting pgsql relation
-            fields_str = ", ".join(
-                [field.split(".")[-1] + " agtype" for field in fields]
-            )
-
-        # if no return statement we still need to return a single field of type agtype
-        else:
-            fields_str = "a agtype"
-
-        select_str = "*"
-
-        return template.format(
-            graph_name=graph_name,
-            query=query.format(**params),
-            fields=fields_str,
-            projection=select_str,
-        )
-
    async def _query(
-        self, query: str, readonly=True, upsert_edge=False, **params: str
+        self, query: str, readonly: bool = True, upsert: bool = False
    ) -> List[Dict[str, Any]]:
        """
        Query the graph by taking a cypher query, converting it to an
@@ -746,7 +687,7 @@ class PGGraphStorage(BaseGraphStorage):
            List[Dict[str, Any]]: a list of dictionaries containing the result set
        """
        # convert cypher query to pgsql/age query
-        wrapped_query = self._wrap_query(query, self.graph_name, **params)
+        wrapped_query = query

        # execute the query, rolling back on an error
        try:
@@ -758,22 +699,16 @@ class PGGraphStorage(BaseGraphStorage):
                    graph_name=self.graph_name,
                )
            else:
-                # for upserting edge, need to run the SQL twice, otherwise cannot update the properties. (First time it will try to create the edge, second time is MERGING)
-                # It is a bug of AGE as of 2025-01-03, hope it can be resolved in the future.
-                if upsert_edge:
-                    data = await self.db.execute(
-                        f"{wrapped_query};{wrapped_query};",
-                        for_age=True,
-                        graph_name=self.graph_name,
-                    )
-                else:
-                    data = await self.db.execute(
-                        wrapped_query, for_age=True, graph_name=self.graph_name
-                    )
+                data = await self.db.execute(
+                    wrapped_query,
+                    for_age=True,
+                    graph_name=self.graph_name,
+                    upsert=upsert,
+                )
        except Exception as e:
            raise PGGraphQueryException(
                {
-                    "message": f"Error executing graph query: {query.format(**params)}",
+                    "message": f"Error executing graph query: {query}",
                    "wrapped": wrapped_query,
                    "detail": str(e),
                }
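After this change `_query` no longer builds the wrapper itself: callers are expected to pass the complete `ag_catalog.cypher(...)` SQL, as the rewritten methods in the following hunks do. A sketch of the query shape now expected, mirroring `has_node` below (the graph name and node id are illustrative, and `storage` is assumed to be a `PGGraphStorage` instance):

```python
# Sketch only — the caller builds the full AGE wrapper before calling _query().
graph_name = "dickens"   # assumed graph name for illustration
node_id = "SOME_ENTITY"  # assumed to be pre-encoded via _encode_graph_label()
query = """SELECT * FROM cypher('%s', $$
    MATCH (n:Entity {node_id: "%s"})
    RETURN count(n) > 0 AS node_exists
$$) AS (node_exists bool)""" % (graph_name, node_id)
result = await storage._query(query)
```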
@@ -788,77 +723,85 @@ class PGGraphStorage(BaseGraphStorage):
        return result

    async def has_node(self, node_id: str) -> bool:
-        entity_name_label = node_id.strip('"')
+        entity_name_label = PGGraphStorage._encode_graph_label(node_id.strip('"'))

-        query = """MATCH (n:`{label}`) RETURN count(n) > 0 AS node_exists"""
-        params = {"label": PGGraphStorage._encode_graph_label(entity_name_label)}
-        single_result = (await self._query(query, **params))[0]
+        query = """SELECT * FROM cypher('%s', $$
+            MATCH (n:Entity {node_id: "%s"})
+            RETURN count(n) > 0 AS node_exists
+        $$) AS (node_exists bool)""" % (self.graph_name, entity_name_label)
+
+        single_result = (await self._query(query))[0]
        logger.debug(
            "{%s}:query:{%s}:result:{%s}",
            inspect.currentframe().f_code.co_name,
-            query.format(**params),
+            query,
            single_result["node_exists"],
        )

        return single_result["node_exists"]

    async def has_edge(self, source_node_id: str, target_node_id: str) -> bool:
-        entity_name_label_source = source_node_id.strip('"')
-        entity_name_label_target = target_node_id.strip('"')
+        src_label = PGGraphStorage._encode_graph_label(source_node_id.strip('"'))
+        tgt_label = PGGraphStorage._encode_graph_label(target_node_id.strip('"'))

-        query = """MATCH (a:`{src_label}`)-[r]-(b:`{tgt_label}`)
-        RETURN COUNT(r) > 0 AS edge_exists"""
-        params = {
-            "src_label": PGGraphStorage._encode_graph_label(entity_name_label_source),
-            "tgt_label": PGGraphStorage._encode_graph_label(entity_name_label_target),
-        }
-        single_result = (await self._query(query, **params))[0]
+        query = """SELECT * FROM cypher('%s', $$
+            MATCH (a:Entity {node_id: "%s"})-[r]-(b:Entity {node_id: "%s"})
+            RETURN COUNT(r) > 0 AS edge_exists
+        $$) AS (edge_exists bool)""" % (
+            self.graph_name,
+            src_label,
+            tgt_label,
+        )
+
+        single_result = (await self._query(query))[0]
        logger.debug(
            "{%s}:query:{%s}:result:{%s}",
            inspect.currentframe().f_code.co_name,
-            query.format(**params),
+            query,
            single_result["edge_exists"],
        )
        return single_result["edge_exists"]

    async def get_node(self, node_id: str) -> Union[dict, None]:
-        entity_name_label = node_id.strip('"')
-        query = """MATCH (n:`{label}`) RETURN n"""
-        params = {"label": PGGraphStorage._encode_graph_label(entity_name_label)}
-        record = await self._query(query, **params)
+        label = PGGraphStorage._encode_graph_label(node_id.strip('"'))
+        query = """SELECT * FROM cypher('%s', $$
+            MATCH (n:Entity {node_id: "%s"})
+            RETURN n
+        $$) AS (n agtype)""" % (self.graph_name, label)
+        record = await self._query(query)
        if record:
            node = record[0]
            node_dict = node["n"]
            logger.debug(
                "{%s}: query: {%s}, result: {%s}",
                inspect.currentframe().f_code.co_name,
-                query.format(**params),
+                query,
                node_dict,
            )
            return node_dict
        return None

    async def node_degree(self, node_id: str) -> int:
-        entity_name_label = node_id.strip('"')
+        label = PGGraphStorage._encode_graph_label(node_id.strip('"'))

-        query = """MATCH (n:`{label}`)-[]->(x) RETURN count(x) AS total_edge_count"""
-        params = {"label": PGGraphStorage._encode_graph_label(entity_name_label)}
-        record = (await self._query(query, **params))[0]
+        query = """SELECT * FROM cypher('%s', $$
+            MATCH (n:Entity {node_id: "%s"})-[]->(x)
+            RETURN count(x) AS total_edge_count
+        $$) AS (total_edge_count integer)""" % (self.graph_name, label)
+        record = (await self._query(query))[0]
        if record:
            edge_count = int(record["total_edge_count"])
            logger.debug(
                "{%s}:query:{%s}:result:{%s}",
                inspect.currentframe().f_code.co_name,
-                query.format(**params),
+                query,
                edge_count,
            )
            return edge_count

    async def edge_degree(self, src_id: str, tgt_id: str) -> int:
-        entity_name_label_source = src_id.strip('"')
-        entity_name_label_target = tgt_id.strip('"')
-        src_degree = await self.node_degree(entity_name_label_source)
-        trg_degree = await self.node_degree(entity_name_label_target)
+        src_degree = await self.node_degree(src_id)
+        trg_degree = await self.node_degree(tgt_id)

        # Convert None to 0 for addition
        src_degree = 0 if src_degree is None else src_degree
@@ -885,23 +828,25 @@ class PGGraphStorage(BaseGraphStorage):
        Returns:
            list: List of all relationships/edges found
        """
-        entity_name_label_source = source_node_id.strip('"')
-        entity_name_label_target = target_node_id.strip('"')
+        src_label = PGGraphStorage._encode_graph_label(source_node_id.strip('"'))
+        tgt_label = PGGraphStorage._encode_graph_label(target_node_id.strip('"'))

-        query = """MATCH (a:`{src_label}`)-[r]->(b:`{tgt_label}`)
-        RETURN properties(r) as edge_properties
-        LIMIT 1"""
-        params = {
-            "src_label": PGGraphStorage._encode_graph_label(entity_name_label_source),
-            "tgt_label": PGGraphStorage._encode_graph_label(entity_name_label_target),
-        }
-        record = await self._query(query, **params)
+        query = """SELECT * FROM cypher('%s', $$
+            MATCH (a:Entity {node_id: "%s"})-[r]->(b:Entity {node_id: "%s"})
+            RETURN properties(r) as edge_properties
+            LIMIT 1
+        $$) AS (edge_properties agtype)""" % (
+            self.graph_name,
+            src_label,
+            tgt_label,
+        )
+        record = await self._query(query)
        if record and record[0] and record[0]["edge_properties"]:
            result = record[0]["edge_properties"]
            logger.debug(
                "{%s}:query:{%s}:result:{%s}",
                inspect.currentframe().f_code.co_name,
-                query.format(**params),
+                query,
                result,
            )
            return result
@@ -911,24 +856,31 @@ class PGGraphStorage(BaseGraphStorage):
        Retrieves all edges (relationships) for a particular node identified by its label.
        :return: List of dictionaries containing edge information
        """
-        node_label = source_node_id.strip('"')
+        label = PGGraphStorage._encode_graph_label(source_node_id.strip('"'))

-        query = """MATCH (n:`{label}`)
-        OPTIONAL MATCH (n)-[r]-(connected)
-        RETURN n, r, connected"""
-        params = {"label": PGGraphStorage._encode_graph_label(node_label)}
-        results = await self._query(query, **params)
+        query = """SELECT * FROM cypher('%s', $$
+            MATCH (n:Entity {node_id: "%s"})
+            OPTIONAL MATCH (n)-[r]-(connected)
+            RETURN n, r, connected
+        $$) AS (n agtype, r agtype, connected agtype)""" % (
+            self.graph_name,
+            label,
+        )
+
+        results = await self._query(query)
        edges = []
        for record in results:
            source_node = record["n"] if record["n"] else None
            connected_node = record["connected"] if record["connected"] else None

            source_label = (
-                source_node["label"] if source_node and source_node["label"] else None
+                source_node["node_id"]
+                if source_node and source_node["node_id"]
+                else None
            )
            target_label = (
-                connected_node["label"]
-                if connected_node and connected_node["label"]
+                connected_node["node_id"]
+                if connected_node and connected_node["node_id"]
                else None
            )
@@ -950,17 +902,21 @@ class PGGraphStorage(BaseGraphStorage):
            node_id: The unique identifier for the node (used as label)
            node_data: Dictionary of node properties
        """
-        label = node_id.strip('"')
+        label = PGGraphStorage._encode_graph_label(node_id.strip('"'))
        properties = node_data

-        query = """MERGE (n:`{label}`)
-        SET n += {properties}"""
-        params = {
-            "label": PGGraphStorage._encode_graph_label(label),
-            "properties": PGGraphStorage._format_properties(properties),
-        }
+        query = """SELECT * FROM cypher('%s', $$
+            MERGE (n:Entity {node_id: "%s"})
+            SET n += %s
+            RETURN n
+        $$) AS (n agtype)""" % (
+            self.graph_name,
+            label,
+            PGGraphStorage._format_properties(properties),
+        )

        try:
-            await self._query(query, readonly=False, **params)
+            await self._query(query, readonly=False, upsert=True)
            logger.debug(
                "Upserted node with label '{%s}' and properties: {%s}",
                label,
@@ -986,28 +942,30 @@ class PGGraphStorage(BaseGraphStorage):
            target_node_id (str): Label of the target node (used as identifier)
            edge_data (dict): Dictionary of properties to set on the edge
        """
-        source_node_label = source_node_id.strip('"')
-        target_node_label = target_node_id.strip('"')
+        src_label = PGGraphStorage._encode_graph_label(source_node_id.strip('"'))
+        tgt_label = PGGraphStorage._encode_graph_label(target_node_id.strip('"'))
        edge_properties = edge_data

-        query = """MATCH (source:`{src_label}`)
-        WITH source
-        MATCH (target:`{tgt_label}`)
-        MERGE (source)-[r:DIRECTED]->(target)
-        SET r += {properties}
-        RETURN r"""
-        params = {
-            "src_label": PGGraphStorage._encode_graph_label(source_node_label),
-            "tgt_label": PGGraphStorage._encode_graph_label(target_node_label),
-            "properties": PGGraphStorage._format_properties(edge_properties),
-        }
+        query = """SELECT * FROM cypher('%s', $$
+            MATCH (source:Entity {node_id: "%s"})
+            WITH source
+            MATCH (target:Entity {node_id: "%s"})
+            MERGE (source)-[r:DIRECTED]->(target)
+            SET r += %s
+            RETURN r
+        $$) AS (r agtype)""" % (
+            self.graph_name,
+            src_label,
+            tgt_label,
+            PGGraphStorage._format_properties(edge_properties),
+        )
        # logger.info(f"-- inserting edge after formatted: {params}")
        try:
-            await self._query(query, readonly=False, upsert_edge=True, **params)
+            await self._query(query, readonly=False, upsert=True)
            logger.debug(
                "Upserted edge from '{%s}' to '{%s}' with properties: {%s}",
-                source_node_label,
-                target_node_label,
+                src_label,
+                tgt_label,
                edge_properties,
            )
        except Exception as e:
@@ -406,8 +406,9 @@ async def lollms_model_if_cache(
    full_prompt += prompt

    request_data["prompt"] = full_prompt
+    timeout = aiohttp.ClientTimeout(total=kwargs.get("timeout", None))

-    async with aiohttp.ClientSession() as session:
+    async with aiohttp.ClientSession(timeout=timeout) as session:
        if stream:

            async def inner():
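The lollms change above threads an optional timeout from the caller's kwargs into the aiohttp session; a total of `None` means no timeout, which preserves the old behavior. A hedged usage sketch — this assumes `lollms_model_if_cache` takes the model name and prompt positionally and forwards extra kwargs as shown in the hunk:

```python
# Hypothetical call; "timeout" is read from kwargs and becomes aiohttp.ClientTimeout(total=...).
response = await lollms_model_if_cache(
    "mistral-nemo:latest",                # model name, illustrative
    "Summarize the uploaded documents.",  # prompt
    timeout=120,                          # seconds before the HTTP request is aborted
)
```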
setup.py
@@ -100,10 +100,7 @@ setuptools.setup(
    },
    entry_points={
        "console_scripts": [
-            "lollms-lightrag-server=lightrag.api.lollms_lightrag_server:main [api]",
-            "ollama-lightrag-server=lightrag.api.ollama_lightrag_server:main [api]",
-            "openai-lightrag-server=lightrag.api.openai_lightrag_server:main [api]",
-            "azure-openai-lightrag-server=lightrag.api.azure_openai_lightrag_server:main [api]",
+            "lightrag-server=lightrag.api.lightrag_server:main [api]",
        ],
    },
)