Merge branch 'HKUDS:main' into main

This commit is contained in:
Saifeddine ALOUI
2025-03-21 14:20:51 +01:00
committed by GitHub
62 changed files with 2851 additions and 820 deletions

3
.gitattributes vendored
View File

@@ -1 +1,2 @@
lightrag/api/webui/** -diff lightrag/api/webui/** binary
lightrag/api/webui/** linguist-generated

View File

@@ -45,6 +45,7 @@ This repository hosts the code of LightRAG. The structure of this code is based
🎉 News 🎉 News
</summary> </summary>
- [X] [2025.03.18]🎯📢LightRAG now supports citation functionality.
- [X] [2025.02.05]🎯📢Our team has released [VideoRAG](https://github.com/HKUDS/VideoRAG) understanding extremely long-context videos. - [X] [2025.02.05]🎯📢Our team has released [VideoRAG](https://github.com/HKUDS/VideoRAG) understanding extremely long-context videos.
- [X] [2025.01.13]🎯📢Our team has released [MiniRAG](https://github.com/HKUDS/MiniRAG) making RAG simpler with small models. - [X] [2025.01.13]🎯📢Our team has released [MiniRAG](https://github.com/HKUDS/MiniRAG) making RAG simpler with small models.
- [X] [2025.01.06]🎯📢You can now [use PostgreSQL for Storage](#using-postgresql-for-storage). - [X] [2025.01.06]🎯📢You can now [use PostgreSQL for Storage](#using-postgresql-for-storage).
@@ -673,6 +674,22 @@ rag.insert(text_content.decode('utf-8'))
</details> </details>
<details>
<summary><b>Citation Functionality</b></summary>
By providing file paths, the system ensures that sources can be traced back to their original documents.
```python
# Define documents and their file paths
documents = ["Document content 1", "Document content 2"]
file_paths = ["path/to/doc1.txt", "path/to/doc2.txt"]
# Insert documents with file paths
rag.insert(documents, file_paths=file_paths)
```
</details>
## Storage ## Storage
<details> <details>

View File

@@ -73,6 +73,8 @@ LLM_BINDING_HOST=http://localhost:11434
### Embedding Configuration (Use valid host. For local services installed with docker, you can use host.docker.internal) ### Embedding Configuration (Use valid host. For local services installed with docker, you can use host.docker.internal)
EMBEDDING_MODEL=bge-m3:latest EMBEDDING_MODEL=bge-m3:latest
EMBEDDING_DIM=1024 EMBEDDING_DIM=1024
EMBEDDING_BATCH_NUM=32
EMBEDDING_FUNC_MAX_ASYNC=16
# EMBEDDING_BINDING_API_KEY=your_api_key # EMBEDDING_BINDING_API_KEY=your_api_key
### ollama example ### ollama example
EMBEDDING_BINDING=ollama EMBEDDING_BINDING=ollama
@@ -151,9 +153,9 @@ QDRANT_URL=http://localhost:16333
### Redis ### Redis
REDIS_URI=redis://localhost:6379 REDIS_URI=redis://localhost:6379
# For jwt auth ### For JWTt Auth
AUTH_USERNAME=admin # login name AUTH_USERNAME=admin # login name
AUTH_PASSWORD=admin123 # password AUTH_PASSWORD=admin123 # password
TOKEN_SECRET=your-key # JWT key TOKEN_SECRET=your-key-for-LightRAG-API-Server # JWT key
TOKEN_EXPIRE_HOURS=4 # expire duration TOKEN_EXPIRE_HOURS=4 # expire duration
WHITELIST_PATHS=/login,/health # white list WHITELIST_PATHS=/login,/health # white list

View File

@@ -1,5 +1,5 @@
from .lightrag import LightRAG as LightRAG, QueryParam as QueryParam from .lightrag import LightRAG as LightRAG, QueryParam as QueryParam
__version__ = "1.2.6" __version__ = "1.2.7"
__author__ = "Zirui Guo" __author__ = "Zirui Guo"
__url__ = "https://github.com/HKUDS/LightRAG" __url__ = "https://github.com/HKUDS/LightRAG"

View File

@@ -3,11 +3,16 @@ from datetime import datetime, timedelta
import jwt import jwt
from fastapi import HTTPException, status from fastapi import HTTPException, status
from pydantic import BaseModel from pydantic import BaseModel
from dotenv import load_dotenv
load_dotenv()
class TokenPayload(BaseModel): class TokenPayload(BaseModel):
sub: str sub: str # Username
exp: datetime exp: datetime # Expiration time
role: str = "user" # User role, default is regular user
metadata: dict = {} # Additional metadata
class AuthHandler: class AuthHandler:
@@ -15,13 +20,60 @@ class AuthHandler:
self.secret = os.getenv("TOKEN_SECRET", "4f85ds4f56dsf46") self.secret = os.getenv("TOKEN_SECRET", "4f85ds4f56dsf46")
self.algorithm = "HS256" self.algorithm = "HS256"
self.expire_hours = int(os.getenv("TOKEN_EXPIRE_HOURS", 4)) self.expire_hours = int(os.getenv("TOKEN_EXPIRE_HOURS", 4))
self.guest_expire_hours = int(
os.getenv("GUEST_TOKEN_EXPIRE_HOURS", 2)
) # Guest token default expiration time
def create_token(
self,
username: str,
role: str = "user",
custom_expire_hours: int = None,
metadata: dict = None,
) -> str:
"""
Create JWT token
Args:
username: Username
role: User role, default is "user", guest is "guest"
custom_expire_hours: Custom expiration time (hours), if None use default value
metadata: Additional metadata
Returns:
str: Encoded JWT token
"""
# Choose default expiration time based on role
if custom_expire_hours is None:
if role == "guest":
expire_hours = self.guest_expire_hours
else:
expire_hours = self.expire_hours
else:
expire_hours = custom_expire_hours
expire = datetime.utcnow() + timedelta(hours=expire_hours)
# Create payload
payload = TokenPayload(
sub=username, exp=expire, role=role, metadata=metadata or {}
)
def create_token(self, username: str) -> str:
expire = datetime.utcnow() + timedelta(hours=self.expire_hours)
payload = TokenPayload(sub=username, exp=expire)
return jwt.encode(payload.dict(), self.secret, algorithm=self.algorithm) return jwt.encode(payload.dict(), self.secret, algorithm=self.algorithm)
def validate_token(self, token: str) -> str: def validate_token(self, token: str) -> dict:
"""
Validate JWT token
Args:
token: JWT token
Returns:
dict: Dictionary containing user information
Raises:
HTTPException: If token is invalid or expired
"""
try: try:
payload = jwt.decode(token, self.secret, algorithms=[self.algorithm]) payload = jwt.decode(token, self.secret, algorithms=[self.algorithm])
expire_timestamp = payload["exp"] expire_timestamp = payload["exp"]
@@ -31,7 +83,14 @@ class AuthHandler:
raise HTTPException( raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED, detail="Token expired" status_code=status.HTTP_401_UNAUTHORIZED, detail="Token expired"
) )
return payload["sub"]
# Return complete payload instead of just username
return {
"username": payload["sub"],
"role": payload.get("role", "user"),
"metadata": payload.get("metadata", {}),
"exp": expire_time,
}
except jwt.PyJWTError: except jwt.PyJWTError:
raise HTTPException( raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid token" status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid token"

View File

@@ -29,7 +29,9 @@ preload_app = True
worker_class = "uvicorn.workers.UvicornWorker" worker_class = "uvicorn.workers.UvicornWorker"
# Other Gunicorn configurations # Other Gunicorn configurations
timeout = int(os.getenv("TIMEOUT", 150)) # Default 150s to match run_with_gunicorn.py timeout = int(
os.getenv("TIMEOUT", 150 * 2)
) # Default 150s *2 to match run_with_gunicorn.py
keepalive = int(os.getenv("KEEPALIVE", 5)) # Default 5s keepalive = int(os.getenv("KEEPALIVE", 5)) # Default 5s
# Logging configuration # Logging configuration

View File

@@ -10,6 +10,7 @@ import logging.config
import uvicorn import uvicorn
import pipmaster as pm import pipmaster as pm
from fastapi.staticfiles import StaticFiles from fastapi.staticfiles import StaticFiles
from fastapi.responses import RedirectResponse
from pathlib import Path from pathlib import Path
import configparser import configparser
from ascii_colors import ASCIIColors from ascii_colors import ASCIIColors
@@ -48,7 +49,7 @@ from .auth import auth_handler
# Load environment variables # Load environment variables
# Updated to use the .env that is inside the current folder # Updated to use the .env that is inside the current folder
# This update allows the user to put a different.env file for each lightrag folder # This update allows the user to put a different.env file for each lightrag folder
load_dotenv(".env", override=True) load_dotenv()
# Initialize config parser # Initialize config parser
config = configparser.ConfigParser() config = configparser.ConfigParser()
@@ -341,25 +342,62 @@ def create_app(args):
ollama_api = OllamaAPI(rag, top_k=args.top_k) ollama_api = OllamaAPI(rag, top_k=args.top_k)
app.include_router(ollama_api.router, prefix="/api") app.include_router(ollama_api.router, prefix="/api")
@app.post("/login") @app.get("/")
async def redirect_to_webui():
"""Redirect root path to /webui"""
return RedirectResponse(url="/webui")
@app.get("/auth-status", dependencies=[Depends(optional_api_key)])
async def get_auth_status():
"""Get authentication status and guest token if auth is not configured"""
username = os.getenv("AUTH_USERNAME")
password = os.getenv("AUTH_PASSWORD")
if not (username and password):
# Authentication not configured, return guest token
guest_token = auth_handler.create_token(
username="guest", role="guest", metadata={"auth_mode": "disabled"}
)
return {
"auth_configured": False,
"access_token": guest_token,
"token_type": "bearer",
"auth_mode": "disabled",
"message": "Authentication is disabled. Using guest access.",
}
return {"auth_configured": True, "auth_mode": "enabled"}
@app.post("/login", dependencies=[Depends(optional_api_key)])
async def login(form_data: OAuth2PasswordRequestForm = Depends()): async def login(form_data: OAuth2PasswordRequestForm = Depends()):
username = os.getenv("AUTH_USERNAME") username = os.getenv("AUTH_USERNAME")
password = os.getenv("AUTH_PASSWORD") password = os.getenv("AUTH_PASSWORD")
if not (username and password): if not (username and password):
raise HTTPException( # Authentication not configured, return guest token
status_code=status.HTTP_501_NOT_IMPLEMENTED, guest_token = auth_handler.create_token(
detail="Authentication not configured", username="guest", role="guest", metadata={"auth_mode": "disabled"}
) )
return {
"access_token": guest_token,
"token_type": "bearer",
"auth_mode": "disabled",
"message": "Authentication is disabled. Using guest access.",
}
if form_data.username != username or form_data.password != password: if form_data.username != username or form_data.password != password:
raise HTTPException( raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED, detail="Incorrect credentials" status_code=status.HTTP_401_UNAUTHORIZED, detail="Incorrect credentials"
) )
# Regular user login
user_token = auth_handler.create_token(
username=username, role="user", metadata={"auth_mode": "enabled"}
)
return { return {
"access_token": auth_handler.create_token(username), "access_token": user_token,
"token_type": "bearer", "token_type": "bearer",
"auth_mode": "enabled",
} }
@app.get("/health", dependencies=[Depends(optional_api_key)]) @app.get("/health", dependencies=[Depends(optional_api_key)])

View File

@@ -405,7 +405,7 @@ async def pipeline_index_file(rag: LightRAG, file_path: Path):
async def pipeline_index_files(rag: LightRAG, file_paths: List[Path]): async def pipeline_index_files(rag: LightRAG, file_paths: List[Path]):
"""Index multiple files concurrently """Index multiple files sequentially to avoid high CPU load
Args: Args:
rag: LightRAG instance rag: LightRAG instance
@@ -416,12 +416,12 @@ async def pipeline_index_files(rag: LightRAG, file_paths: List[Path]):
try: try:
enqueued = False enqueued = False
if len(file_paths) == 1: # Process files sequentially
enqueued = await pipeline_enqueue_file(rag, file_paths[0]) for file_path in file_paths:
else: if await pipeline_enqueue_file(rag, file_path):
tasks = [pipeline_enqueue_file(rag, path) for path in file_paths] enqueued = True
enqueued = any(await asyncio.gather(*tasks))
# Process the queue only if at least one file was successfully enqueued
if enqueued: if enqueued:
await rag.apipeline_process_enqueue_documents() await rag.apipeline_process_enqueue_documents()
except Exception as e: except Exception as e:
@@ -472,14 +472,34 @@ async def run_scanning_process(rag: LightRAG, doc_manager: DocumentManager):
total_files = len(new_files) total_files = len(new_files)
logger.info(f"Found {total_files} new files to index.") logger.info(f"Found {total_files} new files to index.")
for idx, file_path in enumerate(new_files): if not new_files:
try: return
await pipeline_index_file(rag, file_path)
except Exception as e: # Get MAX_PARALLEL_INSERT from global_args
logger.error(f"Error indexing file {file_path}: {str(e)}") max_parallel = global_args["max_parallel_insert"]
# Calculate batch size as 2 * MAX_PARALLEL_INSERT
batch_size = 2 * max_parallel
# Process files in batches
for i in range(0, total_files, batch_size):
batch_files = new_files[i : i + batch_size]
batch_num = i // batch_size + 1
total_batches = (total_files + batch_size - 1) // batch_size
logger.info(
f"Processing batch {batch_num}/{total_batches} with {len(batch_files)} files"
)
await pipeline_index_files(rag, batch_files)
# Log progress
processed = min(i + batch_size, total_files)
logger.info(
f"Processed {processed}/{total_files} files ({processed/total_files*100:.1f}%)"
)
except Exception as e: except Exception as e:
logger.error(f"Error during scanning process: {str(e)}") logger.error(f"Error during scanning process: {str(e)}")
logger.error(traceback.format_exc())
def create_document_routes( def create_document_routes(

View File

@@ -13,7 +13,7 @@ from dotenv import load_dotenv
# Updated to use the .env that is inside the current folder # Updated to use the .env that is inside the current folder
# This update allows the user to put a different.env file for each lightrag folder # This update allows the user to put a different.env file for each lightrag folder
load_dotenv(".env") load_dotenv()
def check_and_install_dependencies(): def check_and_install_dependencies():
@@ -140,7 +140,7 @@ def main():
# Timeout configuration prioritizes command line arguments # Timeout configuration prioritizes command line arguments
gunicorn_config.timeout = ( gunicorn_config.timeout = (
args.timeout if args.timeout else int(os.getenv("TIMEOUT", 150)) args.timeout if args.timeout * 2 else int(os.getenv("TIMEOUT", 150 * 2))
) )
# Keepalive configuration # Keepalive configuration

View File

@@ -9,14 +9,14 @@ import sys
import logging import logging
from ascii_colors import ASCIIColors from ascii_colors import ASCIIColors
from lightrag.api import __api_version__ from lightrag.api import __api_version__
from fastapi import HTTPException, Security, Depends, Request from fastapi import HTTPException, Security, Depends, Request, status
from dotenv import load_dotenv from dotenv import load_dotenv
from fastapi.security import APIKeyHeader, OAuth2PasswordBearer from fastapi.security import APIKeyHeader, OAuth2PasswordBearer
from starlette.status import HTTP_403_FORBIDDEN from starlette.status import HTTP_403_FORBIDDEN
from .auth import auth_handler from .auth import auth_handler
# Load environment variables # Load environment variables
load_dotenv(override=True) load_dotenv()
global_args = {"main_args": None} global_args = {"main_args": None}
@@ -35,19 +35,46 @@ ollama_server_infos = OllamaServerInfos()
def get_auth_dependency(): def get_auth_dependency():
whitelist = os.getenv("WHITELIST_PATHS", "").split(",") # Set default whitelist paths
whitelist = os.getenv("WHITELIST_PATHS", "/login,/health").split(",")
async def dependency( async def dependency(
request: Request, request: Request,
token: str = Depends(OAuth2PasswordBearer(tokenUrl="login", auto_error=False)), token: str = Depends(OAuth2PasswordBearer(tokenUrl="login", auto_error=False)),
): ):
# Check if authentication is configured
auth_configured = bool(
os.getenv("AUTH_USERNAME") and os.getenv("AUTH_PASSWORD")
)
# If authentication is not configured, skip all validation
if not auth_configured:
return
# For configured auth, allow whitelist paths without token
if request.url.path in whitelist: if request.url.path in whitelist:
return return
if not (os.getenv("AUTH_USERNAME") and os.getenv("AUTH_PASSWORD")): # Require token for all other paths when auth is configured
return if not token:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED, detail="Token required"
)
auth_handler.validate_token(token) try:
token_info = auth_handler.validate_token(token)
# Reject guest tokens when authentication is configured
if token_info.get("role") == "guest":
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Authentication required. Guest access not allowed when authentication is configured.",
)
except Exception:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid token"
)
return
return dependency return dependency
@@ -338,6 +365,9 @@ def parse_args(is_uvicorn_mode: bool = False) -> argparse.Namespace:
"LIGHTRAG_VECTOR_STORAGE", DefaultRAGStorageConfig.VECTOR_STORAGE "LIGHTRAG_VECTOR_STORAGE", DefaultRAGStorageConfig.VECTOR_STORAGE
) )
# Get MAX_PARALLEL_INSERT from environment
global_args["max_parallel_insert"] = get_env_value("MAX_PARALLEL_INSERT", 2, int)
# Handle openai-ollama special case # Handle openai-ollama special case
if args.llm_binding == "openai-ollama": if args.llm_binding == "openai-ollama":
args.llm_binding = "openai" args.llm_binding = "openai"
@@ -414,8 +444,8 @@ def display_splash_screen(args: argparse.Namespace) -> None:
ASCIIColors.yellow(f"{args.log_level}") ASCIIColors.yellow(f"{args.log_level}")
ASCIIColors.white(" ├─ Verbose Debug: ", end="") ASCIIColors.white(" ├─ Verbose Debug: ", end="")
ASCIIColors.yellow(f"{args.verbose}") ASCIIColors.yellow(f"{args.verbose}")
ASCIIColors.white(" ├─ Timeout: ", end="") ASCIIColors.white(" ├─ History Turns: ", end="")
ASCIIColors.yellow(f"{args.timeout if args.timeout else 'None (infinite)'}") ASCIIColors.yellow(f"{args.history_turns}")
ASCIIColors.white(" └─ API Key: ", end="") ASCIIColors.white(" └─ API Key: ", end="")
ASCIIColors.yellow("Set" if args.key else "Not Set") ASCIIColors.yellow("Set" if args.key else "Not Set")
@@ -432,8 +462,10 @@ def display_splash_screen(args: argparse.Namespace) -> None:
ASCIIColors.yellow(f"{args.llm_binding}") ASCIIColors.yellow(f"{args.llm_binding}")
ASCIIColors.white(" ├─ Host: ", end="") ASCIIColors.white(" ├─ Host: ", end="")
ASCIIColors.yellow(f"{args.llm_binding_host}") ASCIIColors.yellow(f"{args.llm_binding_host}")
ASCIIColors.white(" ─ Model: ", end="") ASCIIColors.white(" ─ Model: ", end="")
ASCIIColors.yellow(f"{args.llm_model}") ASCIIColors.yellow(f"{args.llm_model}")
ASCIIColors.white(" └─ Timeout: ", end="")
ASCIIColors.yellow(f"{args.timeout if args.timeout else 'None (infinite)'}")
# Embedding Configuration # Embedding Configuration
ASCIIColors.magenta("\n📊 Embedding Configuration:") ASCIIColors.magenta("\n📊 Embedding Configuration:")
@@ -448,8 +480,10 @@ def display_splash_screen(args: argparse.Namespace) -> None:
# RAG Configuration # RAG Configuration
ASCIIColors.magenta("\n⚙️ RAG Configuration:") ASCIIColors.magenta("\n⚙️ RAG Configuration:")
ASCIIColors.white(" ├─ Max Async Operations: ", end="") ASCIIColors.white(" ├─ Max Async for LLM: ", end="")
ASCIIColors.yellow(f"{args.max_async}") ASCIIColors.yellow(f"{args.max_async}")
ASCIIColors.white(" ├─ Max Parallel Insert: ", end="")
ASCIIColors.yellow(f"{global_args['max_parallel_insert']}")
ASCIIColors.white(" ├─ Max Tokens: ", end="") ASCIIColors.white(" ├─ Max Tokens: ", end="")
ASCIIColors.yellow(f"{args.max_tokens}") ASCIIColors.yellow(f"{args.max_tokens}")
ASCIIColors.white(" ├─ Max Embed Tokens: ", end="") ASCIIColors.white(" ├─ Max Embed Tokens: ", end="")
@@ -458,8 +492,6 @@ def display_splash_screen(args: argparse.Namespace) -> None:
ASCIIColors.yellow(f"{args.chunk_size}") ASCIIColors.yellow(f"{args.chunk_size}")
ASCIIColors.white(" ├─ Chunk Overlap Size: ", end="") ASCIIColors.white(" ├─ Chunk Overlap Size: ", end="")
ASCIIColors.yellow(f"{args.chunk_overlap_size}") ASCIIColors.yellow(f"{args.chunk_overlap_size}")
ASCIIColors.white(" ├─ History Turns: ", end="")
ASCIIColors.yellow(f"{args.history_turns}")
ASCIIColors.white(" ├─ Cosine Threshold: ", end="") ASCIIColors.white(" ├─ Cosine Threshold: ", end="")
ASCIIColors.yellow(f"{args.cosine_threshold}") ASCIIColors.yellow(f"{args.cosine_threshold}")
ASCIIColors.white(" ├─ Top-K: ", end="") ASCIIColors.white(" ├─ Top-K: ", end="")

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -5,11 +5,11 @@
<meta http-equiv="Cache-Control" content="no-cache, no-store, must-revalidate" /> <meta http-equiv="Cache-Control" content="no-cache, no-store, must-revalidate" />
<meta http-equiv="Pragma" content="no-cache" /> <meta http-equiv="Pragma" content="no-cache" />
<meta http-equiv="Expires" content="0" /> <meta http-equiv="Expires" content="0" />
<link rel="icon" type="image/svg+xml" href="./logo.png" /> <link rel="icon" type="image/svg+xml" href="logo.png" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Lightrag</title> <title>Lightrag</title>
<script type="module" crossorigin src="./assets/index-DwcJE583.js"></script> <script type="module" crossorigin src="/webui/assets/index-4I5HV9Fr.js"></script>
<link rel="stylesheet" crossorigin href="./assets/index-BV5s8k-a.css"> <link rel="stylesheet" crossorigin href="/webui/assets/index-BSOt8Nur.css">
</head> </head>
<body> <body>
<div id="root"></div> <div id="root"></div>

View File

@@ -257,6 +257,8 @@ class DocProcessingStatus:
"""First 100 chars of document content, used for preview""" """First 100 chars of document content, used for preview"""
content_length: int content_length: int
"""Total length of document""" """Total length of document"""
file_path: str
"""File path of the document"""
status: DocStatus status: DocStatus
"""Current processing status""" """Current processing status"""
created_at: str created_at: str

View File

@@ -87,6 +87,9 @@ class JsonDocStatusStorage(DocStatusStorage):
# If content is missing, use content_summary as content # If content is missing, use content_summary as content
if "content" not in data and "content_summary" in data: if "content" not in data and "content_summary" in data:
data["content"] = data["content_summary"] data["content"] = data["content_summary"]
# If file_path is not in data, use document id as file path
if "file_path" not in data:
data["file_path"] = "no-file-path"
result[k] = DocProcessingStatus(**data) result[k] = DocProcessingStatus(**data)
except KeyError as e: except KeyError as e:
logger.error(f"Missing required field for document {k}: {e}") logger.error(f"Missing required field for document {k}: {e}")

View File

@@ -373,6 +373,9 @@ class NetworkXStorage(BaseGraphStorage):
# Add edges to result # Add edges to result
for edge in subgraph.edges(): for edge in subgraph.edges():
source, target = edge source, target = edge
# Esure unique edge_id for undirect graph
if source > target:
source, target = target, source
edge_id = f"{source}-{target}" edge_id = f"{source}-{target}"
if edge_id in seen_edges: if edge_id in seen_edges:
continue continue

View File

@@ -423,6 +423,7 @@ class PGVectorStorage(BaseVectorStorage):
"full_doc_id": item["full_doc_id"], "full_doc_id": item["full_doc_id"],
"content": item["content"], "content": item["content"],
"content_vector": json.dumps(item["__vector__"].tolist()), "content_vector": json.dumps(item["__vector__"].tolist()),
"file_path": item["file_path"],
} }
except Exception as e: except Exception as e:
logger.error(f"Error to prepare upsert,\nsql: {e}\nitem: {item}") logger.error(f"Error to prepare upsert,\nsql: {e}\nitem: {item}")
@@ -445,6 +446,7 @@ class PGVectorStorage(BaseVectorStorage):
"content": item["content"], "content": item["content"],
"content_vector": json.dumps(item["__vector__"].tolist()), "content_vector": json.dumps(item["__vector__"].tolist()),
"chunk_ids": chunk_ids, "chunk_ids": chunk_ids,
"file_path": item["file_path"],
# TODO: add document_id # TODO: add document_id
} }
return upsert_sql, data return upsert_sql, data
@@ -465,6 +467,7 @@ class PGVectorStorage(BaseVectorStorage):
"content": item["content"], "content": item["content"],
"content_vector": json.dumps(item["__vector__"].tolist()), "content_vector": json.dumps(item["__vector__"].tolist()),
"chunk_ids": chunk_ids, "chunk_ids": chunk_ids,
"file_path": item["file_path"],
# TODO: add document_id # TODO: add document_id
} }
return upsert_sql, data return upsert_sql, data
@@ -732,7 +735,7 @@ class PGDocStatusStorage(DocStatusStorage):
if result is None or result == []: if result is None or result == []:
return None return None
else: else:
return DocProcessingStatus( return dict(
content=result[0]["content"], content=result[0]["content"],
content_length=result[0]["content_length"], content_length=result[0]["content_length"],
content_summary=result[0]["content_summary"], content_summary=result[0]["content_summary"],
@@ -740,11 +743,34 @@ class PGDocStatusStorage(DocStatusStorage):
chunks_count=result[0]["chunks_count"], chunks_count=result[0]["chunks_count"],
created_at=result[0]["created_at"], created_at=result[0]["created_at"],
updated_at=result[0]["updated_at"], updated_at=result[0]["updated_at"],
file_path=result[0]["file_path"],
) )
async def get_by_ids(self, ids: list[str]) -> list[dict[str, Any]]: async def get_by_ids(self, ids: list[str]) -> list[dict[str, Any]]:
"""Get doc_chunks data by id""" """Get doc_chunks data by multiple IDs."""
raise NotImplementedError if not ids:
return []
sql = "SELECT * FROM LIGHTRAG_DOC_STATUS WHERE workspace=$1 AND id = ANY($2)"
params = {"workspace": self.db.workspace, "ids": ids}
results = await self.db.query(sql, params, True)
if not results:
return []
return [
{
"content": row["content"],
"content_length": row["content_length"],
"content_summary": row["content_summary"],
"status": row["status"],
"chunks_count": row["chunks_count"],
"created_at": row["created_at"],
"updated_at": row["updated_at"],
"file_path": row["file_path"],
}
for row in results
]
async def get_status_counts(self) -> dict[str, int]: async def get_status_counts(self) -> dict[str, int]:
"""Get counts of documents in each status""" """Get counts of documents in each status"""
@@ -774,6 +800,7 @@ class PGDocStatusStorage(DocStatusStorage):
created_at=element["created_at"], created_at=element["created_at"],
updated_at=element["updated_at"], updated_at=element["updated_at"],
chunks_count=element["chunks_count"], chunks_count=element["chunks_count"],
file_path=element["file_path"],
) )
for element in result for element in result
} }
@@ -793,14 +820,15 @@ class PGDocStatusStorage(DocStatusStorage):
if not data: if not data:
return return
sql = """insert into LIGHTRAG_DOC_STATUS(workspace,id,content,content_summary,content_length,chunks_count,status) sql = """insert into LIGHTRAG_DOC_STATUS(workspace,id,content,content_summary,content_length,chunks_count,status,file_path)
values($1,$2,$3,$4,$5,$6,$7) values($1,$2,$3,$4,$5,$6,$7,$8)
on conflict(id,workspace) do update set on conflict(id,workspace) do update set
content = EXCLUDED.content, content = EXCLUDED.content,
content_summary = EXCLUDED.content_summary, content_summary = EXCLUDED.content_summary,
content_length = EXCLUDED.content_length, content_length = EXCLUDED.content_length,
chunks_count = EXCLUDED.chunks_count, chunks_count = EXCLUDED.chunks_count,
status = EXCLUDED.status, status = EXCLUDED.status,
file_path = EXCLUDED.file_path,
updated_at = CURRENT_TIMESTAMP""" updated_at = CURRENT_TIMESTAMP"""
for k, v in data.items(): for k, v in data.items():
# chunks_count is optional # chunks_count is optional
@@ -814,6 +842,7 @@ class PGDocStatusStorage(DocStatusStorage):
"content_length": v["content_length"], "content_length": v["content_length"],
"chunks_count": v["chunks_count"] if "chunks_count" in v else -1, "chunks_count": v["chunks_count"] if "chunks_count" in v else -1,
"status": v["status"], "status": v["status"],
"file_path": v["file_path"],
}, },
) )
@@ -1058,7 +1087,6 @@ class PGGraphStorage(BaseGraphStorage):
Args: Args:
query (str): a cypher query to be executed query (str): a cypher query to be executed
params (dict): parameters for the query
Returns: Returns:
list[dict[str, Any]]: a list of dictionaries containing the result set list[dict[str, Any]]: a list of dictionaries containing the result set
@@ -1549,6 +1577,7 @@ TABLES = {
tokens INTEGER, tokens INTEGER,
content TEXT, content TEXT,
content_vector VECTOR, content_vector VECTOR,
file_path VARCHAR(256),
create_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP, create_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
update_time TIMESTAMP, update_time TIMESTAMP,
CONSTRAINT LIGHTRAG_DOC_CHUNKS_PK PRIMARY KEY (workspace, id) CONSTRAINT LIGHTRAG_DOC_CHUNKS_PK PRIMARY KEY (workspace, id)
@@ -1563,7 +1592,8 @@ TABLES = {
content_vector VECTOR, content_vector VECTOR,
create_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP, create_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
update_time TIMESTAMP, update_time TIMESTAMP,
chunk_id TEXT NULL, chunk_ids VARCHAR(255)[] NULL,
file_path TEXT NULL,
CONSTRAINT LIGHTRAG_VDB_ENTITY_PK PRIMARY KEY (workspace, id) CONSTRAINT LIGHTRAG_VDB_ENTITY_PK PRIMARY KEY (workspace, id)
)""" )"""
}, },
@@ -1577,7 +1607,8 @@ TABLES = {
content_vector VECTOR, content_vector VECTOR,
create_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP, create_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
update_time TIMESTAMP, update_time TIMESTAMP,
chunk_id TEXT NULL, chunk_ids VARCHAR(255)[] NULL,
file_path TEXT NULL,
CONSTRAINT LIGHTRAG_VDB_RELATION_PK PRIMARY KEY (workspace, id) CONSTRAINT LIGHTRAG_VDB_RELATION_PK PRIMARY KEY (workspace, id)
)""" )"""
}, },
@@ -1602,6 +1633,7 @@ TABLES = {
content_length int4 NULL, content_length int4 NULL,
chunks_count int4 NULL, chunks_count int4 NULL,
status varchar(64) NULL, status varchar(64) NULL,
file_path TEXT NULL,
created_at timestamp DEFAULT CURRENT_TIMESTAMP NULL, created_at timestamp DEFAULT CURRENT_TIMESTAMP NULL,
updated_at timestamp DEFAULT CURRENT_TIMESTAMP NULL, updated_at timestamp DEFAULT CURRENT_TIMESTAMP NULL,
CONSTRAINT LIGHTRAG_DOC_STATUS_PK PRIMARY KEY (workspace, id) CONSTRAINT LIGHTRAG_DOC_STATUS_PK PRIMARY KEY (workspace, id)
@@ -1650,35 +1682,38 @@ SQL_TEMPLATES = {
update_time = CURRENT_TIMESTAMP update_time = CURRENT_TIMESTAMP
""", """,
"upsert_chunk": """INSERT INTO LIGHTRAG_DOC_CHUNKS (workspace, id, tokens, "upsert_chunk": """INSERT INTO LIGHTRAG_DOC_CHUNKS (workspace, id, tokens,
chunk_order_index, full_doc_id, content, content_vector) chunk_order_index, full_doc_id, content, content_vector, file_path)
VALUES ($1, $2, $3, $4, $5, $6, $7) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
ON CONFLICT (workspace,id) DO UPDATE ON CONFLICT (workspace,id) DO UPDATE
SET tokens=EXCLUDED.tokens, SET tokens=EXCLUDED.tokens,
chunk_order_index=EXCLUDED.chunk_order_index, chunk_order_index=EXCLUDED.chunk_order_index,
full_doc_id=EXCLUDED.full_doc_id, full_doc_id=EXCLUDED.full_doc_id,
content = EXCLUDED.content, content = EXCLUDED.content,
content_vector=EXCLUDED.content_vector, content_vector=EXCLUDED.content_vector,
file_path=EXCLUDED.file_path,
update_time = CURRENT_TIMESTAMP update_time = CURRENT_TIMESTAMP
""", """,
"upsert_entity": """INSERT INTO LIGHTRAG_VDB_ENTITY (workspace, id, entity_name, content, "upsert_entity": """INSERT INTO LIGHTRAG_VDB_ENTITY (workspace, id, entity_name, content,
content_vector, chunk_ids) content_vector, chunk_ids, file_path)
VALUES ($1, $2, $3, $4, $5, $6::varchar[]) VALUES ($1, $2, $3, $4, $5, $6::varchar[], $7)
ON CONFLICT (workspace,id) DO UPDATE ON CONFLICT (workspace,id) DO UPDATE
SET entity_name=EXCLUDED.entity_name, SET entity_name=EXCLUDED.entity_name,
content=EXCLUDED.content, content=EXCLUDED.content,
content_vector=EXCLUDED.content_vector, content_vector=EXCLUDED.content_vector,
chunk_ids=EXCLUDED.chunk_ids, chunk_ids=EXCLUDED.chunk_ids,
file_path=EXCLUDED.file_path,
update_time=CURRENT_TIMESTAMP update_time=CURRENT_TIMESTAMP
""", """,
"upsert_relationship": """INSERT INTO LIGHTRAG_VDB_RELATION (workspace, id, source_id, "upsert_relationship": """INSERT INTO LIGHTRAG_VDB_RELATION (workspace, id, source_id,
target_id, content, content_vector, chunk_ids) target_id, content, content_vector, chunk_ids, file_path)
VALUES ($1, $2, $3, $4, $5, $6, $7::varchar[]) VALUES ($1, $2, $3, $4, $5, $6, $7::varchar[], $8)
ON CONFLICT (workspace,id) DO UPDATE ON CONFLICT (workspace,id) DO UPDATE
SET source_id=EXCLUDED.source_id, SET source_id=EXCLUDED.source_id,
target_id=EXCLUDED.target_id, target_id=EXCLUDED.target_id,
content=EXCLUDED.content, content=EXCLUDED.content,
content_vector=EXCLUDED.content_vector, content_vector=EXCLUDED.content_vector,
chunk_ids=EXCLUDED.chunk_ids, chunk_ids=EXCLUDED.chunk_ids,
file_path=EXCLUDED.file_path,
update_time = CURRENT_TIMESTAMP update_time = CURRENT_TIMESTAMP
""", """,
# SQL for VectorStorage # SQL for VectorStorage

View File

@@ -41,6 +41,9 @@ _pipeline_status_lock: Optional[LockType] = None
_graph_db_lock: Optional[LockType] = None _graph_db_lock: Optional[LockType] = None
_data_init_lock: Optional[LockType] = None _data_init_lock: Optional[LockType] = None
# async locks for coroutine synchronization in multiprocess mode
_async_locks: Optional[Dict[str, asyncio.Lock]] = None
class UnifiedLock(Generic[T]): class UnifiedLock(Generic[T]):
"""Provide a unified lock interface type for asyncio.Lock and multiprocessing.Lock""" """Provide a unified lock interface type for asyncio.Lock and multiprocessing.Lock"""
@@ -51,12 +54,14 @@ class UnifiedLock(Generic[T]):
is_async: bool, is_async: bool,
name: str = "unnamed", name: str = "unnamed",
enable_logging: bool = True, enable_logging: bool = True,
async_lock: Optional[asyncio.Lock] = None,
): ):
self._lock = lock self._lock = lock
self._is_async = is_async self._is_async = is_async
self._pid = os.getpid() # for debug only self._pid = os.getpid() # for debug only
self._name = name # for debug only self._name = name # for debug only
self._enable_logging = enable_logging # for debug only self._enable_logging = enable_logging # for debug only
self._async_lock = async_lock # auxiliary lock for coroutine synchronization
async def __aenter__(self) -> "UnifiedLock[T]": async def __aenter__(self) -> "UnifiedLock[T]":
try: try:
@@ -64,16 +69,39 @@ class UnifiedLock(Generic[T]):
f"== Lock == Process {self._pid}: Acquiring lock '{self._name}' (async={self._is_async})", f"== Lock == Process {self._pid}: Acquiring lock '{self._name}' (async={self._is_async})",
enable_output=self._enable_logging, enable_output=self._enable_logging,
) )
# If in multiprocess mode and async lock exists, acquire it first
if not self._is_async and self._async_lock is not None:
direct_log(
f"== Lock == Process {self._pid}: Acquiring async lock for '{self._name}'",
enable_output=self._enable_logging,
)
await self._async_lock.acquire()
direct_log(
f"== Lock == Process {self._pid}: Async lock for '{self._name}' acquired",
enable_output=self._enable_logging,
)
# Then acquire the main lock
if self._is_async: if self._is_async:
await self._lock.acquire() await self._lock.acquire()
else: else:
self._lock.acquire() self._lock.acquire()
direct_log( direct_log(
f"== Lock == Process {self._pid}: Lock '{self._name}' acquired (async={self._is_async})", f"== Lock == Process {self._pid}: Lock '{self._name}' acquired (async={self._is_async})",
enable_output=self._enable_logging, enable_output=self._enable_logging,
) )
return self return self
except Exception as e: except Exception as e:
# If main lock acquisition fails, release the async lock if it was acquired
if (
not self._is_async
and self._async_lock is not None
and self._async_lock.locked()
):
self._async_lock.release()
direct_log( direct_log(
f"== Lock == Process {self._pid}: Failed to acquire lock '{self._name}': {e}", f"== Lock == Process {self._pid}: Failed to acquire lock '{self._name}': {e}",
level="ERROR", level="ERROR",
@@ -82,15 +110,29 @@ class UnifiedLock(Generic[T]):
raise raise
async def __aexit__(self, exc_type, exc_val, exc_tb): async def __aexit__(self, exc_type, exc_val, exc_tb):
main_lock_released = False
try: try:
direct_log( direct_log(
f"== Lock == Process {self._pid}: Releasing lock '{self._name}' (async={self._is_async})", f"== Lock == Process {self._pid}: Releasing lock '{self._name}' (async={self._is_async})",
enable_output=self._enable_logging, enable_output=self._enable_logging,
) )
# Release main lock first
if self._is_async: if self._is_async:
self._lock.release() self._lock.release()
else: else:
self._lock.release() self._lock.release()
main_lock_released = True
# Then release async lock if in multiprocess mode
if not self._is_async and self._async_lock is not None:
direct_log(
f"== Lock == Process {self._pid}: Releasing async lock for '{self._name}'",
enable_output=self._enable_logging,
)
self._async_lock.release()
direct_log( direct_log(
f"== Lock == Process {self._pid}: Lock '{self._name}' released (async={self._is_async})", f"== Lock == Process {self._pid}: Lock '{self._name}' released (async={self._is_async})",
enable_output=self._enable_logging, enable_output=self._enable_logging,
@@ -101,6 +143,31 @@ class UnifiedLock(Generic[T]):
level="ERROR", level="ERROR",
enable_output=self._enable_logging, enable_output=self._enable_logging,
) )
# If main lock release failed but async lock hasn't been released, try to release it
if (
not main_lock_released
and not self._is_async
and self._async_lock is not None
):
try:
direct_log(
f"== Lock == Process {self._pid}: Attempting to release async lock after main lock failure",
level="WARNING",
enable_output=self._enable_logging,
)
self._async_lock.release()
direct_log(
f"== Lock == Process {self._pid}: Successfully released async lock after main lock failure",
enable_output=self._enable_logging,
)
except Exception as inner_e:
direct_log(
f"== Lock == Process {self._pid}: Failed to release async lock after main lock failure: {inner_e}",
level="ERROR",
enable_output=self._enable_logging,
)
raise raise
def __enter__(self) -> "UnifiedLock[T]": def __enter__(self) -> "UnifiedLock[T]":
@@ -151,51 +218,61 @@ class UnifiedLock(Generic[T]):
def get_internal_lock(enable_logging: bool = False) -> UnifiedLock: def get_internal_lock(enable_logging: bool = False) -> UnifiedLock:
"""return unified storage lock for data consistency""" """return unified storage lock for data consistency"""
async_lock = _async_locks.get("internal_lock") if is_multiprocess else None
return UnifiedLock( return UnifiedLock(
lock=_internal_lock, lock=_internal_lock,
is_async=not is_multiprocess, is_async=not is_multiprocess,
name="internal_lock", name="internal_lock",
enable_logging=enable_logging, enable_logging=enable_logging,
async_lock=async_lock,
) )
def get_storage_lock(enable_logging: bool = False) -> UnifiedLock: def get_storage_lock(enable_logging: bool = False) -> UnifiedLock:
"""return unified storage lock for data consistency""" """return unified storage lock for data consistency"""
async_lock = _async_locks.get("storage_lock") if is_multiprocess else None
return UnifiedLock( return UnifiedLock(
lock=_storage_lock, lock=_storage_lock,
is_async=not is_multiprocess, is_async=not is_multiprocess,
name="storage_lock", name="storage_lock",
enable_logging=enable_logging, enable_logging=enable_logging,
async_lock=async_lock,
) )
def get_pipeline_status_lock(enable_logging: bool = False) -> UnifiedLock: def get_pipeline_status_lock(enable_logging: bool = False) -> UnifiedLock:
"""return unified storage lock for data consistency""" """return unified storage lock for data consistency"""
async_lock = _async_locks.get("pipeline_status_lock") if is_multiprocess else None
return UnifiedLock( return UnifiedLock(
lock=_pipeline_status_lock, lock=_pipeline_status_lock,
is_async=not is_multiprocess, is_async=not is_multiprocess,
name="pipeline_status_lock", name="pipeline_status_lock",
enable_logging=enable_logging, enable_logging=enable_logging,
async_lock=async_lock,
) )
def get_graph_db_lock(enable_logging: bool = False) -> UnifiedLock: def get_graph_db_lock(enable_logging: bool = False) -> UnifiedLock:
"""return unified graph database lock for ensuring atomic operations""" """return unified graph database lock for ensuring atomic operations"""
async_lock = _async_locks.get("graph_db_lock") if is_multiprocess else None
return UnifiedLock( return UnifiedLock(
lock=_graph_db_lock, lock=_graph_db_lock,
is_async=not is_multiprocess, is_async=not is_multiprocess,
name="graph_db_lock", name="graph_db_lock",
enable_logging=enable_logging, enable_logging=enable_logging,
async_lock=async_lock,
) )
def get_data_init_lock(enable_logging: bool = False) -> UnifiedLock: def get_data_init_lock(enable_logging: bool = False) -> UnifiedLock:
"""return unified data initialization lock for ensuring atomic data initialization""" """return unified data initialization lock for ensuring atomic data initialization"""
async_lock = _async_locks.get("data_init_lock") if is_multiprocess else None
return UnifiedLock( return UnifiedLock(
lock=_data_init_lock, lock=_data_init_lock,
is_async=not is_multiprocess, is_async=not is_multiprocess,
name="data_init_lock", name="data_init_lock",
enable_logging=enable_logging, enable_logging=enable_logging,
async_lock=async_lock,
) )
@@ -229,7 +306,8 @@ def initialize_share_data(workers: int = 1):
_shared_dicts, \ _shared_dicts, \
_init_flags, \ _init_flags, \
_initialized, \ _initialized, \
_update_flags _update_flags, \
_async_locks
# Check if already initialized # Check if already initialized
if _initialized: if _initialized:
@@ -251,6 +329,16 @@ def initialize_share_data(workers: int = 1):
_shared_dicts = _manager.dict() _shared_dicts = _manager.dict()
_init_flags = _manager.dict() _init_flags = _manager.dict()
_update_flags = _manager.dict() _update_flags = _manager.dict()
# Initialize async locks for multiprocess mode
_async_locks = {
"internal_lock": asyncio.Lock(),
"storage_lock": asyncio.Lock(),
"pipeline_status_lock": asyncio.Lock(),
"graph_db_lock": asyncio.Lock(),
"data_init_lock": asyncio.Lock(),
}
direct_log( direct_log(
f"Process {os.getpid()} Shared-Data created for Multiple Process (workers={workers})" f"Process {os.getpid()} Shared-Data created for Multiple Process (workers={workers})"
) )
@@ -264,6 +352,7 @@ def initialize_share_data(workers: int = 1):
_shared_dicts = {} _shared_dicts = {}
_init_flags = {} _init_flags = {}
_update_flags = {} _update_flags = {}
_async_locks = None # No need for async locks in single process mode
direct_log(f"Process {os.getpid()} Shared-Data created for Single Process") direct_log(f"Process {os.getpid()} Shared-Data created for Single Process")
# Mark as initialized # Mark as initialized
@@ -458,7 +547,8 @@ def finalize_share_data():
_shared_dicts, \ _shared_dicts, \
_init_flags, \ _init_flags, \
_initialized, \ _initialized, \
_update_flags _update_flags, \
_async_locks
# Check if already initialized # Check if already initialized
if not _initialized: if not _initialized:
@@ -523,5 +613,6 @@ def finalize_share_data():
_graph_db_lock = None _graph_db_lock = None
_data_init_lock = None _data_init_lock = None
_update_flags = None _update_flags = None
_async_locks = None
direct_log(f"Process {os.getpid()} storage data finalization complete") direct_log(f"Process {os.getpid()} storage data finalization complete")

View File

@@ -183,10 +183,10 @@ class LightRAG:
embedding_func: EmbeddingFunc | None = field(default=None) embedding_func: EmbeddingFunc | None = field(default=None)
"""Function for computing text embeddings. Must be set before use.""" """Function for computing text embeddings. Must be set before use."""
embedding_batch_num: int = field(default=32) embedding_batch_num: int = field(default=int(os.getenv("EMBEDDING_BATCH_NUM", 32)))
"""Batch size for embedding computations.""" """Batch size for embedding computations."""
embedding_func_max_async: int = field(default=16) embedding_func_max_async: int = field(default=int(os.getenv("EMBEDDING_FUNC_MAX_ASYNC", 16)))
"""Maximum number of concurrent embedding function calls.""" """Maximum number of concurrent embedding function calls."""
embedding_cache_config: dict[str, Any] = field( embedding_cache_config: dict[str, Any] = field(
@@ -389,20 +389,21 @@ class LightRAG:
self.namespace_prefix, NameSpace.VECTOR_STORE_ENTITIES self.namespace_prefix, NameSpace.VECTOR_STORE_ENTITIES
), ),
embedding_func=self.embedding_func, embedding_func=self.embedding_func,
meta_fields={"entity_name", "source_id", "content"}, meta_fields={"entity_name", "source_id", "content", "file_path"},
) )
self.relationships_vdb: BaseVectorStorage = self.vector_db_storage_cls( # type: ignore self.relationships_vdb: BaseVectorStorage = self.vector_db_storage_cls( # type: ignore
namespace=make_namespace( namespace=make_namespace(
self.namespace_prefix, NameSpace.VECTOR_STORE_RELATIONSHIPS self.namespace_prefix, NameSpace.VECTOR_STORE_RELATIONSHIPS
), ),
embedding_func=self.embedding_func, embedding_func=self.embedding_func,
meta_fields={"src_id", "tgt_id", "source_id", "content"}, meta_fields={"src_id", "tgt_id", "source_id", "content", "file_path"},
) )
self.chunks_vdb: BaseVectorStorage = self.vector_db_storage_cls( # type: ignore self.chunks_vdb: BaseVectorStorage = self.vector_db_storage_cls( # type: ignore
namespace=make_namespace( namespace=make_namespace(
self.namespace_prefix, NameSpace.VECTOR_STORE_CHUNKS self.namespace_prefix, NameSpace.VECTOR_STORE_CHUNKS
), ),
embedding_func=self.embedding_func, embedding_func=self.embedding_func,
meta_fields={"full_doc_id", "content", "file_path"},
) )
# Initialize document status storage # Initialize document status storage
@@ -547,6 +548,7 @@ class LightRAG:
split_by_character: str | None = None, split_by_character: str | None = None,
split_by_character_only: bool = False, split_by_character_only: bool = False,
ids: str | list[str] | None = None, ids: str | list[str] | None = None,
file_paths: str | list[str] | None = None,
) -> None: ) -> None:
"""Sync Insert documents with checkpoint support """Sync Insert documents with checkpoint support
@@ -557,10 +559,13 @@ class LightRAG:
split_by_character_only: if split_by_character_only is True, split the string by character only, when split_by_character_only: if split_by_character_only is True, split the string by character only, when
split_by_character is None, this parameter is ignored. split_by_character is None, this parameter is ignored.
ids: single string of the document ID or list of unique document IDs, if not provided, MD5 hash IDs will be generated ids: single string of the document ID or list of unique document IDs, if not provided, MD5 hash IDs will be generated
file_paths: single string of the file path or list of file paths, used for citation
""" """
loop = always_get_an_event_loop() loop = always_get_an_event_loop()
loop.run_until_complete( loop.run_until_complete(
self.ainsert(input, split_by_character, split_by_character_only, ids) self.ainsert(
input, split_by_character, split_by_character_only, ids, file_paths
)
) )
async def ainsert( async def ainsert(
@@ -569,6 +574,7 @@ class LightRAG:
split_by_character: str | None = None, split_by_character: str | None = None,
split_by_character_only: bool = False, split_by_character_only: bool = False,
ids: str | list[str] | None = None, ids: str | list[str] | None = None,
file_paths: str | list[str] | None = None,
) -> None: ) -> None:
"""Async Insert documents with checkpoint support """Async Insert documents with checkpoint support
@@ -579,8 +585,9 @@ class LightRAG:
split_by_character_only: if split_by_character_only is True, split the string by character only, when split_by_character_only: if split_by_character_only is True, split the string by character only, when
split_by_character is None, this parameter is ignored. split_by_character is None, this parameter is ignored.
ids: list of unique document IDs, if not provided, MD5 hash IDs will be generated ids: list of unique document IDs, if not provided, MD5 hash IDs will be generated
file_paths: list of file paths corresponding to each document, used for citation
""" """
await self.apipeline_enqueue_documents(input, ids) await self.apipeline_enqueue_documents(input, ids, file_paths)
await self.apipeline_process_enqueue_documents( await self.apipeline_process_enqueue_documents(
split_by_character, split_by_character_only split_by_character, split_by_character_only
) )
@@ -654,7 +661,10 @@ class LightRAG:
await self._insert_done() await self._insert_done()
async def apipeline_enqueue_documents( async def apipeline_enqueue_documents(
self, input: str | list[str], ids: list[str] | None = None self,
input: str | list[str],
ids: list[str] | None = None,
file_paths: str | list[str] | None = None,
) -> None: ) -> None:
""" """
Pipeline for Processing Documents Pipeline for Processing Documents
@@ -664,11 +674,30 @@ class LightRAG:
3. Generate document initial status 3. Generate document initial status
4. Filter out already processed documents 4. Filter out already processed documents
5. Enqueue document in status 5. Enqueue document in status
Args:
input: Single document string or list of document strings
ids: list of unique document IDs, if not provided, MD5 hash IDs will be generated
file_paths: list of file paths corresponding to each document, used for citation
""" """
if isinstance(input, str): if isinstance(input, str):
input = [input] input = [input]
if isinstance(ids, str): if isinstance(ids, str):
ids = [ids] ids = [ids]
if isinstance(file_paths, str):
file_paths = [file_paths]
# If file_paths is provided, ensure it matches the number of documents
if file_paths is not None:
if isinstance(file_paths, str):
file_paths = [file_paths]
if len(file_paths) != len(input):
raise ValueError(
"Number of file paths must match the number of documents"
)
else:
# If no file paths provided, use placeholder
file_paths = ["unknown_source"] * len(input)
# 1. Validate ids if provided or generate MD5 hash IDs # 1. Validate ids if provided or generate MD5 hash IDs
if ids is not None: if ids is not None:
@@ -681,32 +710,59 @@ class LightRAG:
raise ValueError("IDs must be unique") raise ValueError("IDs must be unique")
# Generate contents dict of IDs provided by user and documents # Generate contents dict of IDs provided by user and documents
contents = {id_: doc for id_, doc in zip(ids, input)} contents = {
id_: {"content": doc, "file_path": path}
for id_, doc, path in zip(ids, input, file_paths)
}
else: else:
# Clean input text and remove duplicates # Clean input text and remove duplicates
input = list(set(clean_text(doc) for doc in input)) cleaned_input = [
# Generate contents dict of MD5 hash IDs and documents (clean_text(doc), path) for doc, path in zip(input, file_paths)
contents = {compute_mdhash_id(doc, prefix="doc-"): doc for doc in input} ]
unique_content_with_paths = {}
# Keep track of unique content and their paths
for content, path in cleaned_input:
if content not in unique_content_with_paths:
unique_content_with_paths[content] = path
# Generate contents dict of MD5 hash IDs and documents with paths
contents = {
compute_mdhash_id(content, prefix="doc-"): {
"content": content,
"file_path": path,
}
for content, path in unique_content_with_paths.items()
}
# 2. Remove duplicate contents # 2. Remove duplicate contents
unique_contents = { unique_contents = {}
id_: content for id_, content_data in contents.items():
for content, id_ in { content = content_data["content"]
content: id_ for id_, content in contents.items() file_path = content_data["file_path"]
}.items() if content not in unique_contents:
unique_contents[content] = (id_, file_path)
# Reconstruct contents with unique content
contents = {
id_: {"content": content, "file_path": file_path}
for content, (id_, file_path) in unique_contents.items()
} }
# 3. Generate document initial status # 3. Generate document initial status
new_docs: dict[str, Any] = { new_docs: dict[str, Any] = {
id_: { id_: {
"content": content,
"content_summary": get_content_summary(content),
"content_length": len(content),
"status": DocStatus.PENDING, "status": DocStatus.PENDING,
"content": content_data["content"],
"content_summary": get_content_summary(content_data["content"]),
"content_length": len(content_data["content"]),
"created_at": datetime.now().isoformat(), "created_at": datetime.now().isoformat(),
"updated_at": datetime.now().isoformat(), "updated_at": datetime.now().isoformat(),
"file_path": content_data[
"file_path"
], # Store file path in document status
} }
for id_, content in unique_contents.items() for id_, content_data in contents.items()
} }
# 4. Filter out already processed documents # 4. Filter out already processed documents
@@ -841,11 +897,15 @@ class LightRAG:
) -> None: ) -> None:
"""Process single document""" """Process single document"""
try: try:
# Get file path from status document
file_path = getattr(status_doc, "file_path", "unknown_source")
# Generate chunks from document # Generate chunks from document
chunks: dict[str, Any] = { chunks: dict[str, Any] = {
compute_mdhash_id(dp["content"], prefix="chunk-"): { compute_mdhash_id(dp["content"], prefix="chunk-"): {
**dp, **dp,
"full_doc_id": doc_id, "full_doc_id": doc_id,
"file_path": file_path, # Add file path to each chunk
} }
for dp in self.chunking_func( for dp in self.chunking_func(
status_doc.content, status_doc.content,
@@ -856,6 +916,7 @@ class LightRAG:
self.tiktoken_model_name, self.tiktoken_model_name,
) )
} }
# Process document (text chunks and full docs) in parallel # Process document (text chunks and full docs) in parallel
# Create tasks with references for potential cancellation # Create tasks with references for potential cancellation
doc_status_task = asyncio.create_task( doc_status_task = asyncio.create_task(
@@ -863,11 +924,13 @@ class LightRAG:
{ {
doc_id: { doc_id: {
"status": DocStatus.PROCESSING, "status": DocStatus.PROCESSING,
"updated_at": datetime.now().isoformat(), "chunks_count": len(chunks),
"content": status_doc.content, "content": status_doc.content,
"content_summary": status_doc.content_summary, "content_summary": status_doc.content_summary,
"content_length": status_doc.content_length, "content_length": status_doc.content_length,
"created_at": status_doc.created_at, "created_at": status_doc.created_at,
"updated_at": datetime.now().isoformat(),
"file_path": file_path,
} }
} }
) )
@@ -906,6 +969,7 @@ class LightRAG:
"content_length": status_doc.content_length, "content_length": status_doc.content_length,
"created_at": status_doc.created_at, "created_at": status_doc.created_at,
"updated_at": datetime.now().isoformat(), "updated_at": datetime.now().isoformat(),
"file_path": file_path,
} }
} }
) )
@@ -937,6 +1001,7 @@ class LightRAG:
"content_length": status_doc.content_length, "content_length": status_doc.content_length,
"created_at": status_doc.created_at, "created_at": status_doc.created_at,
"updated_at": datetime.now().isoformat(), "updated_at": datetime.now().isoformat(),
"file_path": file_path,
} }
} }
) )
@@ -1063,7 +1128,10 @@ class LightRAG:
loop.run_until_complete(self.ainsert_custom_kg(custom_kg, full_doc_id)) loop.run_until_complete(self.ainsert_custom_kg(custom_kg, full_doc_id))
async def ainsert_custom_kg( async def ainsert_custom_kg(
self, custom_kg: dict[str, Any], full_doc_id: str = None self,
custom_kg: dict[str, Any],
full_doc_id: str = None,
file_path: str = "custom_kg",
) -> None: ) -> None:
update_storage = False update_storage = False
try: try:
@@ -1093,6 +1161,7 @@ class LightRAG:
"full_doc_id": full_doc_id "full_doc_id": full_doc_id
if full_doc_id is not None if full_doc_id is not None
else source_id, else source_id,
"file_path": file_path, # Add file path
"status": DocStatus.PROCESSED, "status": DocStatus.PROCESSED,
} }
all_chunks_data[chunk_id] = chunk_entry all_chunks_data[chunk_id] = chunk_entry
@@ -1197,6 +1266,7 @@ class LightRAG:
"source_id": dp["source_id"], "source_id": dp["source_id"],
"description": dp["description"], "description": dp["description"],
"entity_type": dp["entity_type"], "entity_type": dp["entity_type"],
"file_path": file_path, # Add file path
} }
for dp in all_entities_data for dp in all_entities_data
} }
@@ -1212,6 +1282,7 @@ class LightRAG:
"keywords": dp["keywords"], "keywords": dp["keywords"],
"description": dp["description"], "description": dp["description"],
"weight": dp["weight"], "weight": dp["weight"],
"file_path": file_path, # Add file path
} }
for dp in all_relationships_data for dp in all_relationships_data
} }
@@ -1473,8 +1544,7 @@ class LightRAG:
""" """
try: try:
# 1. Get the document status and related data # 1. Get the document status and related data
doc_status = await self.doc_status.get_by_id(doc_id) if not await self.doc_status.get_by_id(doc_id):
if not doc_status:
logger.warning(f"Document {doc_id} not found") logger.warning(f"Document {doc_id} not found")
return return
@@ -1877,6 +1947,8 @@ class LightRAG:
# 2. Update entity information in the graph # 2. Update entity information in the graph
new_node_data = {**node_data, **updated_data} new_node_data = {**node_data, **updated_data}
new_node_data["entity_id"] = new_entity_name
if "entity_name" in new_node_data: if "entity_name" in new_node_data:
del new_node_data[ del new_node_data[
"entity_name" "entity_name"
@@ -1893,7 +1965,7 @@ class LightRAG:
# Store relationships that need to be updated # Store relationships that need to be updated
relations_to_update = [] relations_to_update = []
relations_to_delete = []
# Get all edges related to the original entity # Get all edges related to the original entity
edges = await self.chunk_entity_relation_graph.get_node_edges( edges = await self.chunk_entity_relation_graph.get_node_edges(
entity_name entity_name
@@ -1905,6 +1977,12 @@ class LightRAG:
source, target source, target
) )
if edge_data: if edge_data:
relations_to_delete.append(
compute_mdhash_id(source + target, prefix="rel-")
)
relations_to_delete.append(
compute_mdhash_id(target + source, prefix="rel-")
)
if source == entity_name: if source == entity_name:
await self.chunk_entity_relation_graph.upsert_edge( await self.chunk_entity_relation_graph.upsert_edge(
new_entity_name, target, edge_data new_entity_name, target, edge_data
@@ -1930,6 +2008,12 @@ class LightRAG:
f"Deleted old entity '{entity_name}' and its vector embedding from database" f"Deleted old entity '{entity_name}' and its vector embedding from database"
) )
# Delete old relation records from vector database
await self.relationships_vdb.delete(relations_to_delete)
logger.info(
f"Deleted {len(relations_to_delete)} relation records for entity '{entity_name}' from vector database"
)
# Update relationship vector representations # Update relationship vector representations
for src, tgt, edge_data in relations_to_update: for src, tgt, edge_data in relations_to_update:
description = edge_data.get("description", "") description = edge_data.get("description", "")
@@ -2220,7 +2304,6 @@ class LightRAG:
"""Synchronously create a new entity. """Synchronously create a new entity.
Creates a new entity in the knowledge graph and adds it to the vector database. Creates a new entity in the knowledge graph and adds it to the vector database.
Args: Args:
entity_name: Name of the new entity entity_name: Name of the new entity
entity_data: Dictionary containing entity attributes, e.g. {"description": "description", "entity_type": "type"} entity_data: Dictionary containing entity attributes, e.g. {"description": "description", "entity_type": "type"}
@@ -2429,39 +2512,21 @@ class LightRAG:
# 4. Get all relationships of the source entities # 4. Get all relationships of the source entities
all_relations = [] all_relations = []
for entity_name in source_entities: for entity_name in source_entities:
# Get all relationships where this entity is the source # Get all relationships of the source entities
outgoing_edges = await self.chunk_entity_relation_graph.get_node_edges( edges = await self.chunk_entity_relation_graph.get_node_edges(
entity_name entity_name
) )
if outgoing_edges: if edges:
for src, tgt in outgoing_edges: for src, tgt in edges:
# Ensure src is the current entity # Ensure src is the current entity
if src == entity_name: if src == entity_name:
edge_data = await self.chunk_entity_relation_graph.get_edge( edge_data = await self.chunk_entity_relation_graph.get_edge(
src, tgt src, tgt
) )
all_relations.append(("outgoing", src, tgt, edge_data)) all_relations.append((src, tgt, edge_data))
# Get all relationships where this entity is the target
incoming_edges = []
all_labels = await self.chunk_entity_relation_graph.get_all_labels()
for label in all_labels:
if label == entity_name:
continue
node_edges = await self.chunk_entity_relation_graph.get_node_edges(
label
)
for src, tgt in node_edges or []:
if tgt == entity_name:
incoming_edges.append((src, tgt))
for src, tgt in incoming_edges:
edge_data = await self.chunk_entity_relation_graph.get_edge(
src, tgt
)
all_relations.append(("incoming", src, tgt, edge_data))
# 5. Create or update the target entity # 5. Create or update the target entity
merged_entity_data["entity_id"] = target_entity
if not target_exists: if not target_exists:
await self.chunk_entity_relation_graph.upsert_node( await self.chunk_entity_relation_graph.upsert_node(
target_entity, merged_entity_data target_entity, merged_entity_data
@@ -2475,8 +2540,11 @@ class LightRAG:
# 6. Recreate all relationships, pointing to the target entity # 6. Recreate all relationships, pointing to the target entity
relation_updates = {} # Track relationships that need to be merged relation_updates = {} # Track relationships that need to be merged
relations_to_delete = []
for rel_type, src, tgt, edge_data in all_relations: for src, tgt, edge_data in all_relations:
relations_to_delete.append(compute_mdhash_id(src + tgt, prefix="rel-"))
relations_to_delete.append(compute_mdhash_id(tgt + src, prefix="rel-"))
new_src = target_entity if src in source_entities else src new_src = target_entity if src in source_entities else src
new_tgt = target_entity if tgt in source_entities else tgt new_tgt = target_entity if tgt in source_entities else tgt
@@ -2521,6 +2589,12 @@ class LightRAG:
f"Created or updated relationship: {rel_data['src']} -> {rel_data['tgt']}" f"Created or updated relationship: {rel_data['src']} -> {rel_data['tgt']}"
) )
# Delete relationships records from vector database
await self.relationships_vdb.delete(relations_to_delete)
logger.info(
f"Deleted {len(relations_to_delete)} relation records for entity '{entity_name}' from vector database"
)
# 7. Update entity vector representation # 7. Update entity vector representation
description = merged_entity_data.get("description", "") description = merged_entity_data.get("description", "")
source_id = merged_entity_data.get("source_id", "") source_id = merged_entity_data.get("source_id", "")
@@ -2583,19 +2657,6 @@ class LightRAG:
entity_id = compute_mdhash_id(entity_name, prefix="ent-") entity_id = compute_mdhash_id(entity_name, prefix="ent-")
await self.entities_vdb.delete([entity_id]) await self.entities_vdb.delete([entity_id])
# Also ensure any relationships specific to this entity are deleted from vector DB
# This is a safety check, as these should have been transformed to the target entity already
entity_relation_prefix = compute_mdhash_id(entity_name, prefix="rel-")
relations_with_entity = await self.relationships_vdb.search_by_prefix(
entity_relation_prefix
)
if relations_with_entity:
relation_ids = [r["id"] for r in relations_with_entity]
await self.relationships_vdb.delete(relation_ids)
logger.info(
f"Deleted {len(relation_ids)} relation records for entity '{entity_name}' from vector database"
)
logger.info( logger.info(
f"Deleted source entity '{entity_name}' and its vector embedding from database" f"Deleted source entity '{entity_name}' and its vector embedding from database"
) )

View File

@@ -138,16 +138,31 @@ async def hf_model_complete(
async def hf_embed(texts: list[str], tokenizer, embed_model) -> np.ndarray: async def hf_embed(texts: list[str], tokenizer, embed_model) -> np.ndarray:
device = next(embed_model.parameters()).device # Detect the appropriate device
if torch.cuda.is_available():
device = next(embed_model.parameters()).device # Use CUDA if available
elif torch.backends.mps.is_available():
device = torch.device("mps") # Use MPS for Apple Silicon
else:
device = torch.device("cpu") # Fallback to CPU
# Move the model to the detected device
embed_model = embed_model.to(device)
# Tokenize the input texts and move them to the same device
encoded_texts = tokenizer( encoded_texts = tokenizer(
texts, return_tensors="pt", padding=True, truncation=True texts, return_tensors="pt", padding=True, truncation=True
).to(device) ).to(device)
# Perform inference
with torch.no_grad(): with torch.no_grad():
outputs = embed_model( outputs = embed_model(
input_ids=encoded_texts["input_ids"], input_ids=encoded_texts["input_ids"],
attention_mask=encoded_texts["attention_mask"], attention_mask=encoded_texts["attention_mask"],
) )
embeddings = outputs.last_hidden_state.mean(dim=1) embeddings = outputs.last_hidden_state.mean(dim=1)
# Convert embeddings to NumPy
if embeddings.dtype == torch.bfloat16: if embeddings.dtype == torch.bfloat16:
return embeddings.detach().to(torch.float32).cpu().numpy() return embeddings.detach().to(torch.float32).cpu().numpy()
else: else:

View File

@@ -138,6 +138,7 @@ async def _handle_entity_relation_summary(
async def _handle_single_entity_extraction( async def _handle_single_entity_extraction(
record_attributes: list[str], record_attributes: list[str],
chunk_key: str, chunk_key: str,
file_path: str = "unknown_source",
): ):
if len(record_attributes) < 4 or record_attributes[0] != '"entity"': if len(record_attributes) < 4 or record_attributes[0] != '"entity"':
return None return None
@@ -171,13 +172,14 @@ async def _handle_single_entity_extraction(
entity_type=entity_type, entity_type=entity_type,
description=entity_description, description=entity_description,
source_id=chunk_key, source_id=chunk_key,
metadata={"created_at": time.time()}, file_path=file_path,
) )
async def _handle_single_relationship_extraction( async def _handle_single_relationship_extraction(
record_attributes: list[str], record_attributes: list[str],
chunk_key: str, chunk_key: str,
file_path: str = "unknown_source",
): ):
if len(record_attributes) < 5 or record_attributes[0] != '"relationship"': if len(record_attributes) < 5 or record_attributes[0] != '"relationship"':
return None return None
@@ -199,7 +201,7 @@ async def _handle_single_relationship_extraction(
description=edge_description, description=edge_description,
keywords=edge_keywords, keywords=edge_keywords,
source_id=edge_source_id, source_id=edge_source_id,
metadata={"created_at": time.time()}, file_path=file_path,
) )
@@ -213,6 +215,7 @@ async def _merge_nodes_then_upsert(
already_entity_types = [] already_entity_types = []
already_source_ids = [] already_source_ids = []
already_description = [] already_description = []
already_file_paths = []
already_node = await knowledge_graph_inst.get_node(entity_name) already_node = await knowledge_graph_inst.get_node(entity_name)
if already_node is not None: if already_node is not None:
@@ -220,6 +223,9 @@ async def _merge_nodes_then_upsert(
already_source_ids.extend( already_source_ids.extend(
split_string_by_multi_markers(already_node["source_id"], [GRAPH_FIELD_SEP]) split_string_by_multi_markers(already_node["source_id"], [GRAPH_FIELD_SEP])
) )
already_file_paths.extend(
split_string_by_multi_markers(already_node["file_path"], [GRAPH_FIELD_SEP])
)
already_description.append(already_node["description"]) already_description.append(already_node["description"])
entity_type = sorted( entity_type = sorted(
@@ -235,6 +241,11 @@ async def _merge_nodes_then_upsert(
source_id = GRAPH_FIELD_SEP.join( source_id = GRAPH_FIELD_SEP.join(
set([dp["source_id"] for dp in nodes_data] + already_source_ids) set([dp["source_id"] for dp in nodes_data] + already_source_ids)
) )
file_path = GRAPH_FIELD_SEP.join(
set([dp["file_path"] for dp in nodes_data] + already_file_paths)
)
logger.debug(f"file_path: {file_path}")
description = await _handle_entity_relation_summary( description = await _handle_entity_relation_summary(
entity_name, description, global_config entity_name, description, global_config
) )
@@ -243,6 +254,7 @@ async def _merge_nodes_then_upsert(
entity_type=entity_type, entity_type=entity_type,
description=description, description=description,
source_id=source_id, source_id=source_id,
file_path=file_path,
) )
await knowledge_graph_inst.upsert_node( await knowledge_graph_inst.upsert_node(
entity_name, entity_name,
@@ -263,6 +275,7 @@ async def _merge_edges_then_upsert(
already_source_ids = [] already_source_ids = []
already_description = [] already_description = []
already_keywords = [] already_keywords = []
already_file_paths = []
if await knowledge_graph_inst.has_edge(src_id, tgt_id): if await knowledge_graph_inst.has_edge(src_id, tgt_id):
already_edge = await knowledge_graph_inst.get_edge(src_id, tgt_id) already_edge = await knowledge_graph_inst.get_edge(src_id, tgt_id)
@@ -279,6 +292,14 @@ async def _merge_edges_then_upsert(
) )
) )
# Get file_path with empty string default if missing or None
if already_edge.get("file_path") is not None:
already_file_paths.extend(
split_string_by_multi_markers(
already_edge["file_path"], [GRAPH_FIELD_SEP]
)
)
# Get description with empty string default if missing or None # Get description with empty string default if missing or None
if already_edge.get("description") is not None: if already_edge.get("description") is not None:
already_description.append(already_edge["description"]) already_description.append(already_edge["description"])
@@ -315,6 +336,12 @@ async def _merge_edges_then_upsert(
+ already_source_ids + already_source_ids
) )
) )
file_path = GRAPH_FIELD_SEP.join(
set(
[dp["file_path"] for dp in edges_data if dp.get("file_path")]
+ already_file_paths
)
)
for need_insert_id in [src_id, tgt_id]: for need_insert_id in [src_id, tgt_id]:
if not (await knowledge_graph_inst.has_node(need_insert_id)): if not (await knowledge_graph_inst.has_node(need_insert_id)):
@@ -325,6 +352,7 @@ async def _merge_edges_then_upsert(
"source_id": source_id, "source_id": source_id,
"description": description, "description": description,
"entity_type": "UNKNOWN", "entity_type": "UNKNOWN",
"file_path": file_path,
}, },
) )
description = await _handle_entity_relation_summary( description = await _handle_entity_relation_summary(
@@ -338,6 +366,7 @@ async def _merge_edges_then_upsert(
description=description, description=description,
keywords=keywords, keywords=keywords,
source_id=source_id, source_id=source_id,
file_path=file_path,
), ),
) )
@@ -347,6 +376,7 @@ async def _merge_edges_then_upsert(
description=description, description=description,
keywords=keywords, keywords=keywords,
source_id=source_id, source_id=source_id,
file_path=file_path,
) )
return edge_data return edge_data
@@ -456,11 +486,14 @@ async def extract_entities(
else: else:
return await use_llm_func(input_text) return await use_llm_func(input_text)
async def _process_extraction_result(result: str, chunk_key: str): async def _process_extraction_result(
result: str, chunk_key: str, file_path: str = "unknown_source"
):
"""Process a single extraction result (either initial or gleaning) """Process a single extraction result (either initial or gleaning)
Args: Args:
result (str): The extraction result to process result (str): The extraction result to process
chunk_key (str): The chunk key for source tracking chunk_key (str): The chunk key for source tracking
file_path (str): The file path for citation
Returns: Returns:
tuple: (nodes_dict, edges_dict) containing the extracted entities and relationships tuple: (nodes_dict, edges_dict) containing the extracted entities and relationships
""" """
@@ -482,14 +515,14 @@ async def extract_entities(
) )
if_entities = await _handle_single_entity_extraction( if_entities = await _handle_single_entity_extraction(
record_attributes, chunk_key record_attributes, chunk_key, file_path
) )
if if_entities is not None: if if_entities is not None:
maybe_nodes[if_entities["entity_name"]].append(if_entities) maybe_nodes[if_entities["entity_name"]].append(if_entities)
continue continue
if_relation = await _handle_single_relationship_extraction( if_relation = await _handle_single_relationship_extraction(
record_attributes, chunk_key record_attributes, chunk_key, file_path
) )
if if_relation is not None: if if_relation is not None:
maybe_edges[(if_relation["src_id"], if_relation["tgt_id"])].append( maybe_edges[(if_relation["src_id"], if_relation["tgt_id"])].append(
@@ -508,6 +541,8 @@ async def extract_entities(
chunk_key = chunk_key_dp[0] chunk_key = chunk_key_dp[0]
chunk_dp = chunk_key_dp[1] chunk_dp = chunk_key_dp[1]
content = chunk_dp["content"] content = chunk_dp["content"]
# Get file path from chunk data or use default
file_path = chunk_dp.get("file_path", "unknown_source")
# Get initial extraction # Get initial extraction
hint_prompt = entity_extract_prompt.format( hint_prompt = entity_extract_prompt.format(
@@ -517,9 +552,9 @@ async def extract_entities(
final_result = await _user_llm_func_with_cache(hint_prompt) final_result = await _user_llm_func_with_cache(hint_prompt)
history = pack_user_ass_to_openai_messages(hint_prompt, final_result) history = pack_user_ass_to_openai_messages(hint_prompt, final_result)
# Process initial extraction # Process initial extraction with file path
maybe_nodes, maybe_edges = await _process_extraction_result( maybe_nodes, maybe_edges = await _process_extraction_result(
final_result, chunk_key final_result, chunk_key, file_path
) )
# Process additional gleaning results # Process additional gleaning results
@@ -530,9 +565,9 @@ async def extract_entities(
history += pack_user_ass_to_openai_messages(continue_prompt, glean_result) history += pack_user_ass_to_openai_messages(continue_prompt, glean_result)
# Process gleaning result separately # Process gleaning result separately with file path
glean_nodes, glean_edges = await _process_extraction_result( glean_nodes, glean_edges = await _process_extraction_result(
glean_result, chunk_key glean_result, chunk_key, file_path
) )
# Merge results # Merge results
@@ -637,9 +672,7 @@ async def extract_entities(
"entity_type": dp["entity_type"], "entity_type": dp["entity_type"],
"content": f"{dp['entity_name']}\n{dp['description']}", "content": f"{dp['entity_name']}\n{dp['description']}",
"source_id": dp["source_id"], "source_id": dp["source_id"],
"metadata": { "file_path": dp.get("file_path", "unknown_source"),
"created_at": dp.get("metadata", {}).get("created_at", time.time())
},
} }
for dp in all_entities_data for dp in all_entities_data
} }
@@ -653,9 +686,7 @@ async def extract_entities(
"keywords": dp["keywords"], "keywords": dp["keywords"],
"content": f"{dp['src_id']}\t{dp['tgt_id']}\n{dp['keywords']}\n{dp['description']}", "content": f"{dp['src_id']}\t{dp['tgt_id']}\n{dp['keywords']}\n{dp['description']}",
"source_id": dp["source_id"], "source_id": dp["source_id"],
"metadata": { "file_path": dp.get("file_path", "unknown_source"),
"created_at": dp.get("metadata", {}).get("created_at", time.time())
},
} }
for dp in all_relationships_data for dp in all_relationships_data
} }
@@ -1232,12 +1263,17 @@ async def _get_node_data(
"description", "description",
"rank", "rank",
"created_at", "created_at",
"file_path",
] ]
] ]
for i, n in enumerate(node_datas): for i, n in enumerate(node_datas):
created_at = n.get("created_at", "UNKNOWN") created_at = n.get("created_at", "UNKNOWN")
if isinstance(created_at, (int, float)): if isinstance(created_at, (int, float)):
created_at = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(created_at)) created_at = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(created_at))
# Get file path from node data
file_path = n.get("file_path", "unknown_source")
entites_section_list.append( entites_section_list.append(
[ [
i, i,
@@ -1246,6 +1282,7 @@ async def _get_node_data(
n.get("description", "UNKNOWN"), n.get("description", "UNKNOWN"),
n["rank"], n["rank"],
created_at, created_at,
file_path,
] ]
) )
entities_context = list_of_list_to_csv(entites_section_list) entities_context = list_of_list_to_csv(entites_section_list)
@@ -1260,6 +1297,7 @@ async def _get_node_data(
"weight", "weight",
"rank", "rank",
"created_at", "created_at",
"file_path",
] ]
] ]
for i, e in enumerate(use_relations): for i, e in enumerate(use_relations):
@@ -1267,6 +1305,10 @@ async def _get_node_data(
# Convert timestamp to readable format # Convert timestamp to readable format
if isinstance(created_at, (int, float)): if isinstance(created_at, (int, float)):
created_at = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(created_at)) created_at = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(created_at))
# Get file path from edge data
file_path = e.get("file_path", "unknown_source")
relations_section_list.append( relations_section_list.append(
[ [
i, i,
@@ -1277,6 +1319,7 @@ async def _get_node_data(
e["weight"], e["weight"],
e["rank"], e["rank"],
created_at, created_at,
file_path,
] ]
) )
relations_context = list_of_list_to_csv(relations_section_list) relations_context = list_of_list_to_csv(relations_section_list)
@@ -1492,6 +1535,7 @@ async def _get_edge_data(
"weight", "weight",
"rank", "rank",
"created_at", "created_at",
"file_path",
] ]
] ]
for i, e in enumerate(edge_datas): for i, e in enumerate(edge_datas):
@@ -1499,6 +1543,10 @@ async def _get_edge_data(
# Convert timestamp to readable format # Convert timestamp to readable format
if isinstance(created_at, (int, float)): if isinstance(created_at, (int, float)):
created_at = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(created_at)) created_at = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(created_at))
# Get file path from edge data
file_path = e.get("file_path", "unknown_source")
relations_section_list.append( relations_section_list.append(
[ [
i, i,
@@ -1509,16 +1557,23 @@ async def _get_edge_data(
e["weight"], e["weight"],
e["rank"], e["rank"],
created_at, created_at,
file_path,
] ]
) )
relations_context = list_of_list_to_csv(relations_section_list) relations_context = list_of_list_to_csv(relations_section_list)
entites_section_list = [["id", "entity", "type", "description", "rank"]] entites_section_list = [
["id", "entity", "type", "description", "rank", "created_at", "file_path"]
]
for i, n in enumerate(use_entities): for i, n in enumerate(use_entities):
created_at = e.get("created_at", "Unknown") created_at = n.get("created_at", "Unknown")
# Convert timestamp to readable format # Convert timestamp to readable format
if isinstance(created_at, (int, float)): if isinstance(created_at, (int, float)):
created_at = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(created_at)) created_at = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(created_at))
# Get file path from node data
file_path = n.get("file_path", "unknown_source")
entites_section_list.append( entites_section_list.append(
[ [
i, i,
@@ -1527,6 +1582,7 @@ async def _get_edge_data(
n.get("description", "UNKNOWN"), n.get("description", "UNKNOWN"),
n["rank"], n["rank"],
created_at, created_at,
file_path,
] ]
) )
entities_context = list_of_list_to_csv(entites_section_list) entities_context = list_of_list_to_csv(entites_section_list)
@@ -1882,13 +1938,14 @@ async def kg_query_with_keywords(
len_of_prompts = len(encode_string_by_tiktoken(query + sys_prompt)) len_of_prompts = len(encode_string_by_tiktoken(query + sys_prompt))
logger.debug(f"[kg_query_with_keywords]Prompt Tokens: {len_of_prompts}") logger.debug(f"[kg_query_with_keywords]Prompt Tokens: {len_of_prompts}")
# 6. Generate response
response = await use_model_func( response = await use_model_func(
query, query,
system_prompt=sys_prompt, system_prompt=sys_prompt,
stream=query_param.stream, stream=query_param.stream,
) )
# 清理响应内容 # Clean up response content
if isinstance(response, str) and len(response) > len(sys_prompt): if isinstance(response, str) and len(response) > len(sys_prompt):
response = ( response = (
response.replace(sys_prompt, "") response.replace(sys_prompt, "")

View File

@@ -61,7 +61,7 @@ Text:
``` ```
while Alex clenched his jaw, the buzz of frustration dull against the backdrop of Taylor's authoritarian certainty. It was this competitive undercurrent that kept him alert, the sense that his and Jordan's shared commitment to discovery was an unspoken rebellion against Cruz's narrowing vision of control and order. while Alex clenched his jaw, the buzz of frustration dull against the backdrop of Taylor's authoritarian certainty. It was this competitive undercurrent that kept him alert, the sense that his and Jordan's shared commitment to discovery was an unspoken rebellion against Cruz's narrowing vision of control and order.
Then Taylor did something unexpected. They paused beside Jordan and, for a moment, observed the device with something akin to reverence. If this tech can be understood..." Taylor said, their voice quieter, "It could change the game for us. For all of us. Then Taylor did something unexpected. They paused beside Jordan and, for a moment, observed the device with something akin to reverence. "If this tech can be understood..." Taylor said, their voice quieter, "It could change the game for us. For all of us."
The underlying dismissal earlier seemed to falter, replaced by a glimpse of reluctant respect for the gravity of what lay in their hands. Jordan looked up, and for a fleeting heartbeat, their eyes locked with Taylor's, a wordless clash of wills softening into an uneasy truce. The underlying dismissal earlier seemed to falter, replaced by a glimpse of reluctant respect for the gravity of what lay in their hands. Jordan looked up, and for a fleeting heartbeat, their eyes locked with Taylor's, a wordless clash of wills softening into an uneasy truce.
@@ -92,7 +92,7 @@ Among the hardest hit, Nexon Technologies saw its stock plummet by 7.8% after re
Meanwhile, commodity markets reflected a mixed sentiment. Gold futures rose by 1.5%, reaching $2,080 per ounce, as investors sought safe-haven assets. Crude oil prices continued their rally, climbing to $87.60 per barrel, supported by supply constraints and strong demand. Meanwhile, commodity markets reflected a mixed sentiment. Gold futures rose by 1.5%, reaching $2,080 per ounce, as investors sought safe-haven assets. Crude oil prices continued their rally, climbing to $87.60 per barrel, supported by supply constraints and strong demand.
Financial experts are closely watching the Federal Reserves next move, as speculation grows over potential rate hikes. The upcoming policy announcement is expected to influence investor confidence and overall market stability. Financial experts are closely watching the Federal Reserve's next move, as speculation grows over potential rate hikes. The upcoming policy announcement is expected to influence investor confidence and overall market stability.
``` ```
Output: Output:
@@ -222,6 +222,7 @@ When handling relationships with timestamps:
- Use markdown formatting with appropriate section headings - Use markdown formatting with appropriate section headings
- Please respond in the same language as the user's question. - Please respond in the same language as the user's question.
- Ensure the response maintains continuity with the conversation history. - Ensure the response maintains continuity with the conversation history.
- List up to 5 most important reference sources at the end under "References" section. Clearly indicating whether each source is from Knowledge Graph (KG) or Vector Data (DC), and include the file path if available, in the following format: [KG/DC] Source content (File: file_path)
- If you don't know the answer, just say so. - If you don't know the answer, just say so.
- Do not make anything up. Do not include information not provided by the Knowledge Base.""" - Do not make anything up. Do not include information not provided by the Knowledge Base."""
@@ -319,6 +320,7 @@ When handling content with timestamps:
- Use markdown formatting with appropriate section headings - Use markdown formatting with appropriate section headings
- Please respond in the same language as the user's question. - Please respond in the same language as the user's question.
- Ensure the response maintains continuity with the conversation history. - Ensure the response maintains continuity with the conversation history.
- List up to 5 most important reference sources at the end under "References" section. Clearly indicating whether each source is from Knowledge Graph (KG) or Vector Data (DC), and include the file path if available, in the following format: [KG/DC] Source content (File: file_path)
- If you don't know the answer, just say so. - If you don't know the answer, just say so.
- Do not include information not provided by the Document Chunks.""" - Do not include information not provided by the Document Chunks."""
@@ -378,8 +380,8 @@ When handling information with timestamps:
- Use markdown formatting with appropriate section headings - Use markdown formatting with appropriate section headings
- Please respond in the same language as the user's question. - Please respond in the same language as the user's question.
- Ensure the response maintains continuity with the conversation history. - Ensure the response maintains continuity with the conversation history.
- Organize answer in sesctions focusing on one main point or aspect of the answer - Organize answer in sections focusing on one main point or aspect of the answer
- Use clear and descriptive section titles that reflect the content - Use clear and descriptive section titles that reflect the content
- List up to 5 most important reference sources at the end under "References" sesction. Clearly indicating whether each source is from Knowledge Graph (KG) or Vector Data (DC), in the following format: [KG/DC] Source content - List up to 5 most important reference sources at the end under "References" section. Clearly indicating whether each source is from Knowledge Graph (KG) or Vector Data (DC), and include the file path if available, in the following format: [KG/DC] Source content (File: file_path)
- If you don't know the answer, just say so. Do not make anything up. - If you don't know the answer, just say so. Do not make anything up.
- Do not include information not provided by the Data Sources.""" - Do not include information not provided by the Data Sources."""

View File

@@ -109,15 +109,17 @@ def setup_logger(
logger_name: str, logger_name: str,
level: str = "INFO", level: str = "INFO",
add_filter: bool = False, add_filter: bool = False,
log_file_path: str = None, log_file_path: str | None = None,
enable_file_logging: bool = True,
): ):
"""Set up a logger with console and file handlers """Set up a logger with console and optionally file handlers
Args: Args:
logger_name: Name of the logger to set up logger_name: Name of the logger to set up
level: Log level (DEBUG, INFO, WARNING, ERROR, CRITICAL) level: Log level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
add_filter: Whether to add LightragPathFilter to the logger add_filter: Whether to add LightragPathFilter to the logger
log_file_path: Path to the log file. If None, will use current directory/lightrag.log log_file_path: Path to the log file. If None and file logging is enabled, defaults to lightrag.log in LOG_DIR or cwd
enable_file_logging: Whether to enable logging to a file (defaults to True)
""" """
# Configure formatters # Configure formatters
detailed_formatter = logging.Formatter( detailed_formatter = logging.Formatter(
@@ -125,18 +127,6 @@ def setup_logger(
) )
simple_formatter = logging.Formatter("%(levelname)s: %(message)s") simple_formatter = logging.Formatter("%(levelname)s: %(message)s")
# Get log file path
if log_file_path is None:
log_dir = os.getenv("LOG_DIR", os.getcwd())
log_file_path = os.path.abspath(os.path.join(log_dir, "lightrag.log"))
# Ensure log directory exists
os.makedirs(os.path.dirname(log_file_path), exist_ok=True)
# Get log file max size and backup count from environment variables
log_max_bytes = int(os.getenv("LOG_MAX_BYTES", 10485760)) # Default 10MB
log_backup_count = int(os.getenv("LOG_BACKUP_COUNT", 5)) # Default 5 backups
logger_instance = logging.getLogger(logger_name) logger_instance = logging.getLogger(logger_name)
logger_instance.setLevel(level) logger_instance.setLevel(level)
logger_instance.handlers = [] # Clear existing handlers logger_instance.handlers = [] # Clear existing handlers
@@ -148,6 +138,21 @@ def setup_logger(
console_handler.setLevel(level) console_handler.setLevel(level)
logger_instance.addHandler(console_handler) logger_instance.addHandler(console_handler)
# Add file handler by default unless explicitly disabled
if enable_file_logging:
# Get log file path
if log_file_path is None:
log_dir = os.getenv("LOG_DIR", os.getcwd())
log_file_path = os.path.abspath(os.path.join(log_dir, "lightrag.log"))
# Ensure log directory exists
os.makedirs(os.path.dirname(log_file_path), exist_ok=True)
# Get log file max size and backup count from environment variables
log_max_bytes = int(os.getenv("LOG_MAX_BYTES", 10485760)) # Default 10MB
log_backup_count = int(os.getenv("LOG_BACKUP_COUNT", 5)) # Default 5 backups
try:
# Add file handler # Add file handler
file_handler = logging.handlers.RotatingFileHandler( file_handler = logging.handlers.RotatingFileHandler(
filename=log_file_path, filename=log_file_path,
@@ -158,6 +163,9 @@ def setup_logger(
file_handler.setFormatter(detailed_formatter) file_handler.setFormatter(detailed_formatter)
file_handler.setLevel(level) file_handler.setLevel(level)
logger_instance.addHandler(file_handler) logger_instance.addHandler(file_handler)
except PermissionError as e:
logger.warning(f"Could not create log file at {log_file_path}: {str(e)}")
logger.warning("Continuing with console logging only")
# Add path filter if requested # Add path filter if requested
if add_filter: if add_filter:

View File

@@ -40,9 +40,11 @@
"react": "^19.0.0", "react": "^19.0.0",
"react-dom": "^19.0.0", "react-dom": "^19.0.0",
"react-dropzone": "^14.3.6", "react-dropzone": "^14.3.6",
"react-error-boundary": "^5.0.0",
"react-i18next": "^15.4.1", "react-i18next": "^15.4.1",
"react-markdown": "^9.1.0", "react-markdown": "^9.1.0",
"react-number-format": "^5.4.3", "react-number-format": "^5.4.3",
"react-router-dom": "^7.3.0",
"react-syntax-highlighter": "^15.6.1", "react-syntax-highlighter": "^15.6.1",
"rehype-react": "^8.0.0", "rehype-react": "^8.0.0",
"remark-gfm": "^4.0.1", "remark-gfm": "^4.0.1",
@@ -418,6 +420,8 @@
"@types/bun": ["@types/bun@1.2.3", "", { "dependencies": { "bun-types": "1.2.3" } }, "sha512-054h79ipETRfjtsCW9qJK8Ipof67Pw9bodFWmkfkaUaRiIQ1dIV2VTlheshlBx3mpKr0KeK8VqnMMCtgN9rQtw=="], "@types/bun": ["@types/bun@1.2.3", "", { "dependencies": { "bun-types": "1.2.3" } }, "sha512-054h79ipETRfjtsCW9qJK8Ipof67Pw9bodFWmkfkaUaRiIQ1dIV2VTlheshlBx3mpKr0KeK8VqnMMCtgN9rQtw=="],
"@types/cookie": ["@types/cookie@0.6.0", "https://registry.npmmirror.com/@types/cookie/-/cookie-0.6.0.tgz", {}, "sha512-4Kh9a6B2bQciAhf7FSuMRRkUWecJgJu9nPnx3yzpsfXX/c50REIqpHY4C82bXP90qrLtXtkDxTZosYO3UpOwlA=="],
"@types/debug": ["@types/debug@4.1.12", "", { "dependencies": { "@types/ms": "*" } }, "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ=="], "@types/debug": ["@types/debug@4.1.12", "", { "dependencies": { "@types/ms": "*" } }, "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ=="],
"@types/estree": ["@types/estree@1.0.6", "", {}, "sha512-AYnb1nQyY49te+VRAVgmzfcgjYS91mY5P0TKUDCLEM+gNnA+3T6rWITXRLYCpahpqSQbN5cE+gHpnPyXjHWxcw=="], "@types/estree": ["@types/estree@1.0.6", "", {}, "sha512-AYnb1nQyY49te+VRAVgmzfcgjYS91mY5P0TKUDCLEM+gNnA+3T6rWITXRLYCpahpqSQbN5cE+gHpnPyXjHWxcw=="],
@@ -566,6 +570,8 @@
"convert-source-map": ["convert-source-map@1.9.0", "", {}, "sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A=="], "convert-source-map": ["convert-source-map@1.9.0", "", {}, "sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A=="],
"cookie": ["cookie@1.0.2", "https://registry.npmmirror.com/cookie/-/cookie-1.0.2.tgz", {}, "sha512-9Kr/j4O16ISv8zBBhJoi4bXOYNTkFLOqSL3UDB0njXxCXNezjeyVrJyGOWtgfs/q2km1gwBcfH8q1yEGoMYunA=="],
"cosmiconfig": ["cosmiconfig@7.1.0", "", { "dependencies": { "@types/parse-json": "^4.0.0", "import-fresh": "^3.2.1", "parse-json": "^5.0.0", "path-type": "^4.0.0", "yaml": "^1.10.0" } }, "sha512-AdmX6xUzdNASswsFtmwSt7Vj8po9IuqXm0UXz7QKPuEUmPB4XyjGfaAr2PSuELMwkRMVH1EpIkX5bTZGRB3eCA=="], "cosmiconfig": ["cosmiconfig@7.1.0", "", { "dependencies": { "@types/parse-json": "^4.0.0", "import-fresh": "^3.2.1", "parse-json": "^5.0.0", "path-type": "^4.0.0", "yaml": "^1.10.0" } }, "sha512-AdmX6xUzdNASswsFtmwSt7Vj8po9IuqXm0UXz7QKPuEUmPB4XyjGfaAr2PSuELMwkRMVH1EpIkX5bTZGRB3eCA=="],
"cross-spawn": ["cross-spawn@7.0.6", "", { "dependencies": { "path-key": "^3.1.0", "shebang-command": "^2.0.0", "which": "^2.0.1" } }, "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA=="], "cross-spawn": ["cross-spawn@7.0.6", "", { "dependencies": { "path-key": "^3.1.0", "shebang-command": "^2.0.0", "which": "^2.0.1" } }, "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA=="],
@@ -1102,6 +1108,8 @@
"react-dropzone": ["react-dropzone@14.3.6", "", { "dependencies": { "attr-accept": "^2.2.4", "file-selector": "^2.1.0", "prop-types": "^15.8.1" }, "peerDependencies": { "react": ">= 16.8 || 18.0.0" } }, "sha512-U792j+x0rcwH/U/Slv/OBNU/LGFYbDLHKKiJoPhNaOianayZevCt4Y5S0CraPssH/6/wT6xhKDfzdXUgCBS0HQ=="], "react-dropzone": ["react-dropzone@14.3.6", "", { "dependencies": { "attr-accept": "^2.2.4", "file-selector": "^2.1.0", "prop-types": "^15.8.1" }, "peerDependencies": { "react": ">= 16.8 || 18.0.0" } }, "sha512-U792j+x0rcwH/U/Slv/OBNU/LGFYbDLHKKiJoPhNaOianayZevCt4Y5S0CraPssH/6/wT6xhKDfzdXUgCBS0HQ=="],
"react-error-boundary": ["react-error-boundary@5.0.0", "", { "dependencies": { "@babel/runtime": "^7.12.5" }, "peerDependencies": { "react": ">=16.13.1" } }, "sha512-tnjAxG+IkpLephNcePNA7v6F/QpWLH8He65+DmedchDwg162JZqx4NmbXj0mlAYVVEd81OW7aFhmbsScYfiAFQ=="],
"react-i18next": ["react-i18next@15.4.1", "", { "dependencies": { "@babel/runtime": "^7.25.0", "html-parse-stringify": "^3.0.1" }, "peerDependencies": { "i18next": ">= 23.2.3", "react": ">= 16.8.0" } }, "sha512-ahGab+IaSgZmNPYXdV1n+OYky95TGpFwnKRflX/16dY04DsYYKHtVLjeny7sBSCREEcoMbAgSkFiGLF5g5Oofw=="], "react-i18next": ["react-i18next@15.4.1", "", { "dependencies": { "@babel/runtime": "^7.25.0", "html-parse-stringify": "^3.0.1" }, "peerDependencies": { "i18next": ">= 23.2.3", "react": ">= 16.8.0" } }, "sha512-ahGab+IaSgZmNPYXdV1n+OYky95TGpFwnKRflX/16dY04DsYYKHtVLjeny7sBSCREEcoMbAgSkFiGLF5g5Oofw=="],
"react-is": ["react-is@16.13.1", "", {}, "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ=="], "react-is": ["react-is@16.13.1", "", {}, "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ=="],
@@ -1114,6 +1122,10 @@
"react-remove-scroll-bar": ["react-remove-scroll-bar@2.3.8", "", { "dependencies": { "react-style-singleton": "^2.2.2", "tslib": "^2.0.0" }, "peerDependencies": { "@types/react": "*", "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" }, "optionalPeers": ["@types/react"] }, "sha512-9r+yi9+mgU33AKcj6IbT9oRCO78WriSj6t/cF8DWBZJ9aOGPOTEDvdUDz1FwKim7QXWwmHqtdHnRJfhAxEG46Q=="], "react-remove-scroll-bar": ["react-remove-scroll-bar@2.3.8", "", { "dependencies": { "react-style-singleton": "^2.2.2", "tslib": "^2.0.0" }, "peerDependencies": { "@types/react": "*", "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" }, "optionalPeers": ["@types/react"] }, "sha512-9r+yi9+mgU33AKcj6IbT9oRCO78WriSj6t/cF8DWBZJ9aOGPOTEDvdUDz1FwKim7QXWwmHqtdHnRJfhAxEG46Q=="],
"react-router": ["react-router@7.3.0", "https://registry.npmmirror.com/react-router/-/react-router-7.3.0.tgz", { "dependencies": { "@types/cookie": "^0.6.0", "cookie": "^1.0.1", "set-cookie-parser": "^2.6.0", "turbo-stream": "2.4.0" }, "peerDependencies": { "react": ">=18", "react-dom": ">=18" }, "optionalPeers": ["react-dom"] }, "sha512-466f2W7HIWaNXTKM5nHTqNxLrHTyXybm7R0eBlVSt0k/u55tTCDO194OIx/NrYD4TS5SXKTNekXfT37kMKUjgw=="],
"react-router-dom": ["react-router-dom@7.3.0", "https://registry.npmmirror.com/react-router-dom/-/react-router-dom-7.3.0.tgz", { "dependencies": { "react-router": "7.3.0" }, "peerDependencies": { "react": ">=18", "react-dom": ">=18" } }, "sha512-z7Q5FTiHGgQfEurX/FBinkOXhWREJIAB2RiU24lvcBa82PxUpwqvs/PAXb9lJyPjTs2jrl6UkLvCZVGJPeNuuQ=="],
"react-select": ["react-select@5.10.0", "", { "dependencies": { "@babel/runtime": "^7.12.0", "@emotion/cache": "^11.4.0", "@emotion/react": "^11.8.1", "@floating-ui/dom": "^1.0.1", "@types/react-transition-group": "^4.4.0", "memoize-one": "^6.0.0", "prop-types": "^15.6.0", "react-transition-group": "^4.3.0", "use-isomorphic-layout-effect": "^1.2.0" }, "peerDependencies": { "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" } }, "sha512-k96gw+i6N3ExgDwPIg0lUPmexl1ygPe6u5BdQFNBhkpbwroIgCNXdubtIzHfThYXYYTubwOBafoMnn7ruEP1xA=="], "react-select": ["react-select@5.10.0", "", { "dependencies": { "@babel/runtime": "^7.12.0", "@emotion/cache": "^11.4.0", "@emotion/react": "^11.8.1", "@floating-ui/dom": "^1.0.1", "@types/react-transition-group": "^4.4.0", "memoize-one": "^6.0.0", "prop-types": "^15.6.0", "react-transition-group": "^4.3.0", "use-isomorphic-layout-effect": "^1.2.0" }, "peerDependencies": { "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0", "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" } }, "sha512-k96gw+i6N3ExgDwPIg0lUPmexl1ygPe6u5BdQFNBhkpbwroIgCNXdubtIzHfThYXYYTubwOBafoMnn7ruEP1xA=="],
"react-style-singleton": ["react-style-singleton@2.2.3", "", { "dependencies": { "get-nonce": "^1.0.0", "tslib": "^2.0.0" }, "peerDependencies": { "@types/react": "*", "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-b6jSvxvVnyptAiLjbkWLE/lOnR4lfTtDAl+eUC7RZy+QQWc6wRzIV2CE6xBuMmDxc2qIihtDCZD5NPOFl7fRBQ=="], "react-style-singleton": ["react-style-singleton@2.2.3", "", { "dependencies": { "get-nonce": "^1.0.0", "tslib": "^2.0.0" }, "peerDependencies": { "@types/react": "*", "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-b6jSvxvVnyptAiLjbkWLE/lOnR4lfTtDAl+eUC7RZy+QQWc6wRzIV2CE6xBuMmDxc2qIihtDCZD5NPOFl7fRBQ=="],
@@ -1164,6 +1176,8 @@
"semver": ["semver@6.3.1", "", { "bin": { "semver": "bin/semver.js" } }, "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA=="], "semver": ["semver@6.3.1", "", { "bin": { "semver": "bin/semver.js" } }, "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA=="],
"set-cookie-parser": ["set-cookie-parser@2.7.1", "https://registry.npmmirror.com/set-cookie-parser/-/set-cookie-parser-2.7.1.tgz", {}, "sha512-IOc8uWeOZgnb3ptbCURJWNjWUPcO3ZnTTdzsurqERrP6nPyv+paC55vJM0LpOlT2ne+Ix+9+CRG1MNLlyZ4GjQ=="],
"set-function-length": ["set-function-length@1.2.2", "", { "dependencies": { "define-data-property": "^1.1.4", "es-errors": "^1.3.0", "function-bind": "^1.1.2", "get-intrinsic": "^1.2.4", "gopd": "^1.0.1", "has-property-descriptors": "^1.0.2" } }, "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg=="], "set-function-length": ["set-function-length@1.2.2", "", { "dependencies": { "define-data-property": "^1.1.4", "es-errors": "^1.3.0", "function-bind": "^1.1.2", "get-intrinsic": "^1.2.4", "gopd": "^1.0.1", "has-property-descriptors": "^1.0.2" } }, "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg=="],
"set-function-name": ["set-function-name@2.0.2", "", { "dependencies": { "define-data-property": "^1.1.4", "es-errors": "^1.3.0", "functions-have-names": "^1.2.3", "has-property-descriptors": "^1.0.2" } }, "sha512-7PGFlmtwsEADb0WYyvCMa1t+yke6daIG4Wirafur5kcf+MhUnPms1UeR0CKQdTZD81yESwMHbtn+TR+dMviakQ=="], "set-function-name": ["set-function-name@2.0.2", "", { "dependencies": { "define-data-property": "^1.1.4", "es-errors": "^1.3.0", "functions-have-names": "^1.2.3", "has-property-descriptors": "^1.0.2" } }, "sha512-7PGFlmtwsEADb0WYyvCMa1t+yke6daIG4Wirafur5kcf+MhUnPms1UeR0CKQdTZD81yESwMHbtn+TR+dMviakQ=="],
@@ -1234,6 +1248,8 @@
"tslib": ["tslib@2.8.1", "", {}, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="], "tslib": ["tslib@2.8.1", "", {}, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="],
"turbo-stream": ["turbo-stream@2.4.0", "https://registry.npmmirror.com/turbo-stream/-/turbo-stream-2.4.0.tgz", {}, "sha512-FHncC10WpBd2eOmGwpmQsWLDoK4cqsA/UT/GqNoaKOQnT8uzhtCbg3EoUDMvqpOSAI0S26mr0rkjzbOO6S3v1g=="],
"type-check": ["type-check@0.4.0", "", { "dependencies": { "prelude-ls": "^1.2.1" } }, "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew=="], "type-check": ["type-check@0.4.0", "", { "dependencies": { "prelude-ls": "^1.2.1" } }, "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew=="],
"typed-array-buffer": ["typed-array-buffer@1.0.3", "", { "dependencies": { "call-bound": "^1.0.3", "es-errors": "^1.3.0", "is-typed-array": "^1.1.14" } }, "sha512-nAYYwfY3qnzX30IkA6AQZjVbtK6duGontcQm1WSG1MD94YLqK0515GNApXkoxKOWMusVssAHWLh9SeaoefYFGw=="], "typed-array-buffer": ["typed-array-buffer@1.0.3", "", { "dependencies": { "call-bound": "^1.0.3", "es-errors": "^1.3.0", "is-typed-array": "^1.1.14" } }, "sha512-nAYYwfY3qnzX30IkA6AQZjVbtK6duGontcQm1WSG1MD94YLqK0515GNApXkoxKOWMusVssAHWLh9SeaoefYFGw=="],

View File

@@ -0,0 +1,2 @@
# Development environment configuration
VITE_BACKEND_URL=/api

View File

@@ -0,0 +1,3 @@
VITE_BACKEND_URL=http://localhost:9621
VITE_API_PROXY=true
VITE_API_ENDPOINTS=/,/api,/documents,/graphs,/graph,/health,/query,/docs,/openapi.json,/login,/auth-status

View File

@@ -5,7 +5,7 @@
<meta http-equiv="Cache-Control" content="no-cache, no-store, must-revalidate" /> <meta http-equiv="Cache-Control" content="no-cache, no-store, must-revalidate" />
<meta http-equiv="Pragma" content="no-cache" /> <meta http-equiv="Pragma" content="no-cache" />
<meta http-equiv="Expires" content="0" /> <meta http-equiv="Expires" content="0" />
<link rel="icon" type="image/svg+xml" href="/logo.png" /> <link rel="icon" type="image/svg+xml" href="logo.png" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Lightrag</title> <title>Lightrag</title>
</head> </head>

View File

@@ -49,9 +49,11 @@
"react": "^19.0.0", "react": "^19.0.0",
"react-dom": "^19.0.0", "react-dom": "^19.0.0",
"react-dropzone": "^14.3.6", "react-dropzone": "^14.3.6",
"react-error-boundary": "^5.0.0",
"react-i18next": "^15.4.1", "react-i18next": "^15.4.1",
"react-markdown": "^9.1.0", "react-markdown": "^9.1.0",
"react-number-format": "^5.4.3", "react-number-format": "^5.4.3",
"react-router-dom": "^7.3.0",
"react-syntax-highlighter": "^15.6.1", "react-syntax-highlighter": "^15.6.1",
"rehype-react": "^8.0.0", "rehype-react": "^8.0.0",
"remark-gfm": "^4.0.1", "remark-gfm": "^4.0.1",

View File

@@ -8,7 +8,6 @@ import { healthCheckInterval } from '@/lib/constants'
import { useBackendState } from '@/stores/state' import { useBackendState } from '@/stores/state'
import { useSettingsStore } from '@/stores/settings' import { useSettingsStore } from '@/stores/settings'
import { useEffect } from 'react' import { useEffect } from 'react'
import { Toaster } from 'sonner'
import SiteHeader from '@/features/SiteHeader' import SiteHeader from '@/features/SiteHeader'
import { InvalidApiKeyError, RequireApiKeError } from '@/api/lightrag' import { InvalidApiKeyError, RequireApiKeError } from '@/api/lightrag'
@@ -27,8 +26,6 @@ function App() {
// Health check // Health check
useEffect(() => { useEffect(() => {
if (!enableHealthCheck) return
// Check immediately // Check immediately
useBackendState.getState().check() useBackendState.getState().check()
@@ -56,24 +53,24 @@ function App() {
return ( return (
<ThemeProvider> <ThemeProvider>
<TabVisibilityProvider> <TabVisibilityProvider>
<main className="flex h-screen w-screen overflow-x-hidden"> <main className="flex h-screen w-screen overflow-hidden">
<Tabs <Tabs
defaultValue={currentTab} defaultValue={currentTab}
className="!m-0 flex grow flex-col !p-0" className="!m-0 flex grow flex-col !p-0 overflow-hidden"
onValueChange={handleTabChange} onValueChange={handleTabChange}
> >
<SiteHeader /> <SiteHeader />
<div className="relative grow"> <div className="relative grow">
<TabsContent value="documents" className="absolute top-0 right-0 bottom-0 left-0"> <TabsContent value="documents" className="absolute top-0 right-0 bottom-0 left-0 overflow-auto">
<DocumentManager /> <DocumentManager />
</TabsContent> </TabsContent>
<TabsContent value="knowledge-graph" className="absolute top-0 right-0 bottom-0 left-0"> <TabsContent value="knowledge-graph" className="absolute top-0 right-0 bottom-0 left-0 overflow-hidden">
<GraphViewer /> <GraphViewer />
</TabsContent> </TabsContent>
<TabsContent value="retrieval" className="absolute top-0 right-0 bottom-0 left-0"> <TabsContent value="retrieval" className="absolute top-0 right-0 bottom-0 left-0 overflow-hidden">
<RetrievalTesting /> <RetrievalTesting />
</TabsContent> </TabsContent>
<TabsContent value="api" className="absolute top-0 right-0 bottom-0 left-0"> <TabsContent value="api" className="absolute top-0 right-0 bottom-0 left-0 overflow-hidden">
<ApiSite /> <ApiSite />
</TabsContent> </TabsContent>
</div> </div>
@@ -81,7 +78,6 @@ function App() {
{enableHealthCheck && <StatusIndicator />} {enableHealthCheck && <StatusIndicator />}
{message !== null && !apiKeyInvalid && <MessageAlert />} {message !== null && !apiKeyInvalid && <MessageAlert />}
{apiKeyInvalid && <ApiKeyAlert />} {apiKeyInvalid && <ApiKeyAlert />}
<Toaster />
</main> </main>
</TabVisibilityProvider> </TabVisibilityProvider>
</ThemeProvider> </ThemeProvider>

View File

@@ -0,0 +1,190 @@
import { HashRouter as Router, Routes, Route, useNavigate } from 'react-router-dom'
import { useEffect, useState } from 'react'
import { useAuthStore } from '@/stores/state'
import { navigationService } from '@/services/navigation'
import { getAuthStatus } from '@/api/lightrag'
import { toast } from 'sonner'
import { Toaster } from 'sonner'
import App from './App'
import LoginPage from '@/features/LoginPage'
import ThemeProvider from '@/components/ThemeProvider'
interface ProtectedRouteProps {
children: React.ReactNode
}
const ProtectedRoute = ({ children }: ProtectedRouteProps) => {
const { isAuthenticated } = useAuthStore()
const [isChecking, setIsChecking] = useState(true)
const navigate = useNavigate()
// Set navigate function for navigation service
useEffect(() => {
navigationService.setNavigate(navigate)
}, [navigate])
useEffect(() => {
let isMounted = true; // Flag to prevent state updates after unmount
// This effect will run when the component mounts
// and will check if authentication is required
const checkAuthStatus = async () => {
try {
// Skip check if already authenticated
if (isAuthenticated) {
if (isMounted) setIsChecking(false);
return;
}
const status = await getAuthStatus()
// Only proceed if component is still mounted
if (!isMounted) return;
if (!status.auth_configured && status.access_token) {
// If auth is not configured, use the guest token
useAuthStore.getState().login(status.access_token, true)
if (status.message) {
toast.info(status.message)
}
}
} catch (error) {
console.error('Failed to check auth status:', error)
} finally {
// Only update state if component is still mounted
if (isMounted) {
setIsChecking(false)
}
}
}
// Execute immediately
checkAuthStatus()
// Cleanup function to prevent state updates after unmount
return () => {
isMounted = false;
}
}, [isAuthenticated])
// Handle navigation when authentication status changes
useEffect(() => {
if (!isChecking && !isAuthenticated) {
const currentPath = window.location.hash.slice(1); // Remove the '#' from hash
const isLoginPage = currentPath === '/login';
if (!isLoginPage) {
// Use navigation service for redirection
console.log('Not authenticated, redirecting to login');
navigationService.navigateToLogin();
}
}
}, [isChecking, isAuthenticated]);
// Show nothing while checking auth status or when not authenticated on login page
if (isChecking || (!isAuthenticated && window.location.hash.slice(1) === '/login')) {
return null;
}
// Show children only when authenticated
if (!isAuthenticated) {
return null;
}
return <>{children}</>;
}
const AppContent = () => {
const [initializing, setInitializing] = useState(true)
const { isAuthenticated } = useAuthStore()
const navigate = useNavigate()
// Set navigate function for navigation service
useEffect(() => {
navigationService.setNavigate(navigate)
}, [navigate])
// Check token validity and auth configuration on app initialization
useEffect(() => {
let isMounted = true; // Flag to prevent state updates after unmount
const checkAuth = async () => {
try {
const token = localStorage.getItem('LIGHTRAG-API-TOKEN')
// If we have a token, we're already authenticated
if (token && isAuthenticated) {
if (isMounted) setInitializing(false);
return;
}
// If no token or not authenticated, check if auth is configured
const status = await getAuthStatus()
// Only proceed if component is still mounted
if (!isMounted) return;
if (!status.auth_configured && status.access_token) {
// If auth is not configured, use the guest token
useAuthStore.getState().login(status.access_token, true)
if (status.message) {
toast.info(status.message)
}
} else if (!token) {
// Only logout if we don't have a token
useAuthStore.getState().logout()
}
} catch (error) {
console.error('Auth initialization error:', error)
if (isMounted && !isAuthenticated) {
useAuthStore.getState().logout()
}
} finally {
// Only update state if component is still mounted
if (isMounted) {
setInitializing(false)
}
}
}
// Execute immediately
checkAuth()
// Cleanup function to prevent state updates after unmount
return () => {
isMounted = false;
}
}, [isAuthenticated])
// Show nothing while initializing
if (initializing) {
return null
}
return (
<Routes>
<Route path="/login" element={<LoginPage />} />
<Route
path="/*"
element={
<ProtectedRoute>
<App />
</ProtectedRoute>
}
/>
</Routes>
)
}
const AppRouter = () => {
return (
<ThemeProvider>
<Router>
<AppContent />
<Toaster position="bottom-center" />
</Router>
</ThemeProvider>
)
}
export default AppRouter

View File

@@ -2,6 +2,7 @@ import axios, { AxiosError } from 'axios'
import { backendBaseUrl } from '@/lib/constants' import { backendBaseUrl } from '@/lib/constants'
import { errorMessage } from '@/lib/utils' import { errorMessage } from '@/lib/utils'
import { useSettingsStore } from '@/stores/settings' import { useSettingsStore } from '@/stores/settings'
import { navigationService } from '@/services/navigation'
// Types // Types
export type LightragNodeType = { export type LightragNodeType = {
@@ -125,6 +126,21 @@ export type DocsStatusesResponse = {
statuses: Record<DocStatus, DocStatusResponse[]> statuses: Record<DocStatus, DocStatusResponse[]>
} }
export type AuthStatusResponse = {
auth_configured: boolean
access_token?: string
token_type?: string
auth_mode?: 'enabled' | 'disabled'
message?: string
}
export type LoginResponse = {
access_token: string
token_type: string
auth_mode?: 'enabled' | 'disabled' // Authentication mode identifier
message?: string // Optional message
}
export const InvalidApiKeyError = 'Invalid API Key' export const InvalidApiKeyError = 'Invalid API Key'
export const RequireApiKeError = 'API Key required' export const RequireApiKeError = 'API Key required'
@@ -136,9 +152,15 @@ const axiosInstance = axios.create({
} }
}) })
// Interceptoradd api key // Interceptor: add api key and check authentication
axiosInstance.interceptors.request.use((config) => { axiosInstance.interceptors.request.use((config) => {
const apiKey = useSettingsStore.getState().apiKey const apiKey = useSettingsStore.getState().apiKey
const token = localStorage.getItem('LIGHTRAG-API-TOKEN');
// Always include token if it exists, regardless of path
if (token) {
config.headers['Authorization'] = `Bearer ${token}`
}
if (apiKey) { if (apiKey) {
config.headers['X-API-Key'] = apiKey config.headers['X-API-Key'] = apiKey
} }
@@ -150,6 +172,16 @@ axiosInstance.interceptors.response.use(
(response) => response, (response) => response,
(error: AxiosError) => { (error: AxiosError) => {
if (error.response) { if (error.response) {
if (error.response?.status === 401) {
// For login API, throw error directly
if (error.config?.url?.includes('/login')) {
throw error;
}
// For other APIs, navigate to login page
navigationService.navigateToLogin();
// Return a never-resolving promise to prevent further execution
return new Promise(() => {});
}
throw new Error( throw new Error(
`${error.response.status} ${error.response.statusText}\n${JSON.stringify( `${error.response.status} ${error.response.statusText}\n${JSON.stringify(
error.response.data error.response.data
@@ -324,3 +356,74 @@ export const clearDocuments = async (): Promise<DocActionResponse> => {
const response = await axiosInstance.delete('/documents') const response = await axiosInstance.delete('/documents')
return response.data return response.data
} }
export const getAuthStatus = async (): Promise<AuthStatusResponse> => {
try {
// Add a timeout to the request to prevent hanging
const response = await axiosInstance.get('/auth-status', {
timeout: 5000, // 5 second timeout
headers: {
'Accept': 'application/json' // Explicitly request JSON
}
});
// Check if response is HTML (which indicates a redirect or wrong endpoint)
const contentType = response.headers['content-type'] || '';
if (contentType.includes('text/html')) {
console.warn('Received HTML response instead of JSON for auth-status endpoint');
return {
auth_configured: true,
auth_mode: 'enabled'
};
}
// Strict validation of the response data
if (response.data &&
typeof response.data === 'object' &&
'auth_configured' in response.data &&
typeof response.data.auth_configured === 'boolean') {
// For unconfigured auth, ensure we have an access token
if (!response.data.auth_configured) {
if (response.data.access_token && typeof response.data.access_token === 'string') {
return response.data;
} else {
console.warn('Auth not configured but no valid access token provided');
}
} else {
// For configured auth, just return the data
return response.data;
}
}
// If response data is invalid but we got a response, log it
console.warn('Received invalid auth status response:', response.data);
// Default to auth configured if response is invalid
return {
auth_configured: true,
auth_mode: 'enabled'
};
} catch (error) {
// If the request fails, assume authentication is configured
console.error('Failed to get auth status:', errorMessage(error));
return {
auth_configured: true,
auth_mode: 'enabled'
};
}
}
export const loginToServer = async (username: string, password: string): Promise<LoginResponse> => {
const formData = new FormData();
formData.append('username', username);
formData.append('password', password);
const response = await axiosInstance.post('/login', formData, {
headers: {
'Content-Type': 'multipart/form-data'
}
});
return response.data;
}

View File

@@ -5,8 +5,13 @@ import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from '@
import { useSettingsStore } from '@/stores/settings' import { useSettingsStore } from '@/stores/settings'
import { PaletteIcon } from 'lucide-react' import { PaletteIcon } from 'lucide-react'
import { useTranslation } from 'react-i18next' import { useTranslation } from 'react-i18next'
import { cn } from '@/lib/utils'
export default function AppSettings() { interface AppSettingsProps {
className?: string
}
export default function AppSettings({ className }: AppSettingsProps) {
const [opened, setOpened] = useState<boolean>(false) const [opened, setOpened] = useState<boolean>(false)
const { t } = useTranslation() const { t } = useTranslation()
@@ -27,7 +32,7 @@ export default function AppSettings() {
return ( return (
<Popover open={opened} onOpenChange={setOpened}> <Popover open={opened} onOpenChange={setOpened}>
<PopoverTrigger asChild> <PopoverTrigger asChild>
<Button variant="outline" size="icon" className="h-9 w-9"> <Button variant="ghost" size="icon" className={cn('h-9 w-9', className)}>
<PaletteIcon className="h-5 w-5" /> <PaletteIcon className="h-5 w-5" />
</Button> </Button>
</PopoverTrigger> </PopoverTrigger>

View File

@@ -0,0 +1,49 @@
import Button from '@/components/ui/Button'
import { useCallback } from 'react'
import { controlButtonVariant } from '@/lib/constants'
import { useTranslation } from 'react-i18next'
import { useSettingsStore } from '@/stores/settings'
/**
* Component that toggles the language between English and Chinese.
*/
export default function LanguageToggle() {
const { i18n } = useTranslation()
const currentLanguage = i18n.language
const setLanguage = useSettingsStore.use.setLanguage()
const setEnglish = useCallback(() => {
i18n.changeLanguage('en')
setLanguage('en')
}, [i18n, setLanguage])
const setChinese = useCallback(() => {
i18n.changeLanguage('zh')
setLanguage('zh')
}, [i18n, setLanguage])
if (currentLanguage === 'zh') {
return (
<Button
onClick={setEnglish}
variant={controlButtonVariant}
tooltip="Switch to English"
size="icon"
side="bottom"
>
</Button>
)
}
return (
<Button
onClick={setChinese}
variant={controlButtonVariant}
tooltip="切换到中文"
size="icon"
side="bottom"
>
EN
</Button>
)
}

View File

@@ -13,23 +13,37 @@ const FocusOnNode = ({ node, move }: { node: string | null; move?: boolean }) =>
* When the selected item changes, highlighted the node and center the camera on it. * When the selected item changes, highlighted the node and center the camera on it.
*/ */
useEffect(() => { useEffect(() => {
const graph = sigma.getGraph();
if (move) { if (move) {
if (node) { if (node && graph.hasNode(node)) {
sigma.getGraph().setNodeAttribute(node, 'highlighted', true) try {
gotoNode(node) graph.setNodeAttribute(node, 'highlighted', true);
gotoNode(node);
} catch (error) {
console.error('Error focusing on node:', error);
}
} else { } else {
// If no node is selected but move is true, reset to default view // If no node is selected but move is true, reset to default view
sigma.setCustomBBox(null) sigma.setCustomBBox(null);
sigma.getCamera().animate({ x: 0.5, y: 0.5, ratio: 1 }, { duration: 0 }) sigma.getCamera().animate({ x: 0.5, y: 0.5, ratio: 1 }, { duration: 0 });
}
useGraphStore.getState().setMoveToSelectedNode(false);
} else if (node && graph.hasNode(node)) {
try {
graph.setNodeAttribute(node, 'highlighted', true);
} catch (error) {
console.error('Error highlighting node:', error);
} }
useGraphStore.getState().setMoveToSelectedNode(false)
} else if (node) {
sigma.getGraph().setNodeAttribute(node, 'highlighted', true)
} }
return () => { return () => {
if (node) { if (node && graph.hasNode(node)) {
sigma.getGraph().setNodeAttribute(node, 'highlighted', false) try {
graph.setNodeAttribute(node, 'highlighted', false);
} catch (error) {
console.error('Error cleaning up node highlight:', error);
}
} }
} }
}, [node, move, sigma, gotoNode]) }, [node, move, sigma, gotoNode])

View File

@@ -1,5 +1,5 @@
import { useLoadGraph, useRegisterEvents, useSetSettings, useSigma } from '@react-sigma/core' import { useRegisterEvents, useSetSettings, useSigma } from '@react-sigma/core'
import Graph from 'graphology' import { AbstractGraph } from 'graphology-types'
// import { useLayoutCircular } from '@react-sigma/layout-circular' // import { useLayoutCircular } from '@react-sigma/layout-circular'
import { useLayoutForceAtlas2 } from '@react-sigma/layout-forceatlas2' import { useLayoutForceAtlas2 } from '@react-sigma/layout-forceatlas2'
import { useEffect } from 'react' import { useEffect } from 'react'
@@ -25,7 +25,6 @@ const GraphControl = ({ disableHoverEffect }: { disableHoverEffect?: boolean })
const sigma = useSigma<NodeType, EdgeType>() const sigma = useSigma<NodeType, EdgeType>()
const registerEvents = useRegisterEvents<NodeType, EdgeType>() const registerEvents = useRegisterEvents<NodeType, EdgeType>()
const setSettings = useSetSettings<NodeType, EdgeType>() const setSettings = useSetSettings<NodeType, EdgeType>()
const loadGraph = useLoadGraph<NodeType, EdgeType>()
const maxIterations = useSettingsStore.use.graphLayoutMaxIterations() const maxIterations = useSettingsStore.use.graphLayoutMaxIterations()
const { assign: assignLayout } = useLayoutForceAtlas2({ const { assign: assignLayout } = useLayoutForceAtlas2({
@@ -45,14 +44,42 @@ const GraphControl = ({ disableHoverEffect }: { disableHoverEffect?: boolean })
/** /**
* When component mount or maxIterations changes * When component mount or maxIterations changes
* => load the graph and apply layout * => ensure graph reference and apply layout
*/ */
useEffect(() => { useEffect(() => {
if (sigmaGraph) { if (sigmaGraph && sigma) {
loadGraph(sigmaGraph as unknown as Graph<NodeType, EdgeType>) // Ensure sigma binding to sigmaGraph
assignLayout() try {
if (typeof sigma.setGraph === 'function') {
sigma.setGraph(sigmaGraph as unknown as AbstractGraph<NodeType, EdgeType>);
console.log('Binding graph to sigma instance');
} else {
(sigma as any).graph = sigmaGraph;
console.warn('Simgma missing setGraph function, set graph property directly');
} }
}, [assignLayout, loadGraph, sigmaGraph, maxIterations]) } catch (error) {
console.error('Error setting graph on sigma instance:', error);
}
assignLayout();
console.log('Initial layout applied to graph');
}
}, [sigma, sigmaGraph, assignLayout, maxIterations])
/**
* Ensure the sigma instance is set in the store
* This provides a backup in case the instance wasn't set in GraphViewer
*/
useEffect(() => {
if (sigma) {
// Double-check that the store has the sigma instance
const currentInstance = useGraphStore.getState().sigmaInstance;
if (!currentInstance) {
console.log('Setting sigma instance from GraphControl');
useGraphStore.getState().setSigmaInstance(sigma);
}
}
}, [sigma]);
/** /**
* When component mount * When component mount
@@ -138,14 +165,18 @@ const GraphControl = ({ disableHoverEffect }: { disableHoverEffect?: boolean })
const _focusedNode = focusedNode || selectedNode const _focusedNode = focusedNode || selectedNode
const _focusedEdge = focusedEdge || selectedEdge const _focusedEdge = focusedEdge || selectedEdge
if (_focusedNode) { if (_focusedNode && graph.hasNode(_focusedNode)) {
try {
if (node === _focusedNode || graph.neighbors(_focusedNode).includes(node)) { if (node === _focusedNode || graph.neighbors(_focusedNode).includes(node)) {
newData.highlighted = true newData.highlighted = true
if (node === selectedNode) { if (node === selectedNode) {
newData.borderColor = Constants.nodeBorderColorSelected newData.borderColor = Constants.nodeBorderColorSelected
} }
} }
} else if (_focusedEdge) { } catch (error) {
console.error('Error in nodeReducer:', error);
}
} else if (_focusedEdge && graph.hasEdge(_focusedEdge)) {
if (graph.extremities(_focusedEdge).includes(node)) { if (graph.extremities(_focusedEdge).includes(node)) {
newData.highlighted = true newData.highlighted = true
newData.size = 3 newData.size = 3
@@ -173,7 +204,8 @@ const GraphControl = ({ disableHoverEffect }: { disableHoverEffect?: boolean })
if (!disableHoverEffect) { if (!disableHoverEffect) {
const _focusedNode = focusedNode || selectedNode const _focusedNode = focusedNode || selectedNode
if (_focusedNode) { if (_focusedNode && graph.hasNode(_focusedNode)) {
try {
if (hideUnselectedEdges) { if (hideUnselectedEdges) {
if (!graph.extremities(edge).includes(_focusedNode)) { if (!graph.extremities(edge).includes(_focusedNode)) {
newData.hidden = true newData.hidden = true
@@ -183,11 +215,17 @@ const GraphControl = ({ disableHoverEffect }: { disableHoverEffect?: boolean })
newData.color = Constants.edgeColorHighlighted newData.color = Constants.edgeColorHighlighted
} }
} }
} catch (error) {
console.error('Error in edgeReducer:', error);
}
} else { } else {
if (focusedEdge || selectedEdge) { const _selectedEdge = selectedEdge && graph.hasEdge(selectedEdge) ? selectedEdge : null;
if (edge === selectedEdge) { const _focusedEdge = focusedEdge && graph.hasEdge(focusedEdge) ? focusedEdge : null;
if (_selectedEdge || _focusedEdge) {
if (edge === _selectedEdge) {
newData.color = Constants.edgeColorSelected newData.color = Constants.edgeColorSelected
} else if (edge === focusedEdge) { } else if (edge === _focusedEdge) {
newData.color = Constants.edgeColorHighlighted newData.color = Constants.edgeColorHighlighted
} else if (hideUnselectedEdges) { } else if (hideUnselectedEdges) {
newData.hidden = true newData.hidden = true

View File

@@ -2,20 +2,23 @@ import { useCallback, useEffect, useRef } from 'react'
import { AsyncSelect } from '@/components/ui/AsyncSelect' import { AsyncSelect } from '@/components/ui/AsyncSelect'
import { useSettingsStore } from '@/stores/settings' import { useSettingsStore } from '@/stores/settings'
import { useGraphStore } from '@/stores/graph' import { useGraphStore } from '@/stores/graph'
import { labelListLimit } from '@/lib/constants' import { labelListLimit, controlButtonVariant } from '@/lib/constants'
import MiniSearch from 'minisearch' import MiniSearch from 'minisearch'
import { useTranslation } from 'react-i18next' import { useTranslation } from 'react-i18next'
import { RefreshCw } from 'lucide-react'
import Button from '@/components/ui/Button'
const GraphLabels = () => { const GraphLabels = () => {
const { t } = useTranslation() const { t } = useTranslation()
const label = useSettingsStore.use.queryLabel() const label = useSettingsStore.use.queryLabel()
const allDatabaseLabels = useGraphStore.use.allDatabaseLabels() const allDatabaseLabels = useGraphStore.use.allDatabaseLabels()
const rawGraph = useGraphStore.use.rawGraph()
const labelsLoadedRef = useRef(false) const labelsLoadedRef = useRef(false)
// Track if a fetch is in progress to prevent multiple simultaneous fetches // Track if a fetch is in progress to prevent multiple simultaneous fetches
const fetchInProgressRef = useRef(false) const fetchInProgressRef = useRef(false)
// Fetch labels once on component mount, using global flag to prevent duplicates // Fetch labels and trigger initial data load
useEffect(() => { useEffect(() => {
// Check if we've already attempted to fetch labels in this session // Check if we've already attempted to fetch labels in this session
const labelsFetchAttempted = useGraphStore.getState().labelsFetchAttempted const labelsFetchAttempted = useGraphStore.getState().labelsFetchAttempted
@@ -26,8 +29,6 @@ const GraphLabels = () => {
// Set global flag to indicate we've attempted to fetch in this session // Set global flag to indicate we've attempted to fetch in this session
useGraphStore.getState().setLabelsFetchAttempted(true) useGraphStore.getState().setLabelsFetchAttempted(true)
console.log('Fetching graph labels (once per session)...')
useGraphStore.getState().fetchAllDatabaseLabels() useGraphStore.getState().fetchAllDatabaseLabels()
.then(() => { .then(() => {
labelsLoadedRef.current = true labelsLoadedRef.current = true
@@ -42,6 +43,14 @@ const GraphLabels = () => {
} }
}, []) // Empty dependency array ensures this only runs once on mount }, []) // Empty dependency array ensures this only runs once on mount
// Trigger data load when labels are loaded
useEffect(() => {
if (labelsLoadedRef.current) {
// Reset the fetch attempted flag to force a new data fetch
useGraphStore.getState().setGraphDataFetchAttempted(false)
}
}, [label])
const getSearchEngine = useCallback(() => { const getSearchEngine = useCallback(() => {
// Create search engine // Create search engine
const searchEngine = new MiniSearch({ const searchEngine = new MiniSearch({
@@ -83,7 +92,41 @@ const GraphLabels = () => {
[getSearchEngine] [getSearchEngine]
) )
const handleRefresh = useCallback(() => {
// Reset labels fetch status to allow fetching labels again
useGraphStore.getState().setLabelsFetchAttempted(false)
// Reset graph data fetch status directly, not depending on allDatabaseLabels changes
useGraphStore.getState().setGraphDataFetchAttempted(false)
// Fetch all labels again
useGraphStore.getState().fetchAllDatabaseLabels()
.then(() => {
// Trigger a graph data reload by changing the query label back and forth
const currentLabel = useSettingsStore.getState().queryLabel
useSettingsStore.getState().setQueryLabel('')
setTimeout(() => {
useSettingsStore.getState().setQueryLabel(currentLabel)
}, 0)
})
.catch((error) => {
console.error('Failed to refresh labels:', error)
})
}, [])
return ( return (
<div className="flex items-center">
{rawGraph && (
<Button
size="icon"
variant={controlButtonVariant}
onClick={handleRefresh}
tooltip={t('graphPanel.graphLabels.refreshTooltip')}
className="mr-1"
>
<RefreshCw className="h-4 w-4" />
</Button>
)}
<AsyncSelect<string> <AsyncSelect<string>
className="ml-2" className="ml-2"
triggerClassName="max-h-8" triggerClassName="max-h-8"
@@ -105,30 +148,17 @@ const GraphLabels = () => {
newLabel = '*' newLabel = '*'
} }
// Reset the fetch attempted flag to force a new data fetch // Handle reselecting the same label
useGraphStore.getState().setGraphDataFetchAttempted(false)
// Clear current graph data to ensure complete reload when label changes
if (newLabel !== currentLabel) {
const graphStore = useGraphStore.getState();
graphStore.clearSelection();
// Reset the graph state but preserve the instance
if (graphStore.sigmaGraph) {
const nodes = Array.from(graphStore.sigmaGraph.nodes());
nodes.forEach(node => graphStore.sigmaGraph?.dropNode(node));
}
}
if (newLabel === currentLabel && newLabel !== '*') { if (newLabel === currentLabel && newLabel !== '*') {
// reselect the same itme means qery all newLabel = '*'
useSettingsStore.getState().setQueryLabel('*')
} else {
useSettingsStore.getState().setQueryLabel(newLabel)
} }
// Update the label, which will trigger the useEffect to handle data loading
useSettingsStore.getState().setQueryLabel(newLabel)
}} }}
clearable={false} // Prevent clearing value on reselect clearable={false} // Prevent clearing value on reselect
/> />
</div>
) )
} }

View File

@@ -1,4 +1,4 @@
import { FC, useCallback, useEffect, useMemo } from 'react' import { FC, useCallback, useEffect } from 'react'
import { import {
EdgeById, EdgeById,
NodeById, NodeById,
@@ -11,28 +11,34 @@ import { useGraphStore } from '@/stores/graph'
import MiniSearch from 'minisearch' import MiniSearch from 'minisearch'
import { useTranslation } from 'react-i18next' import { useTranslation } from 'react-i18next'
interface OptionItem { // Message item identifier for search results
export const messageId = '__message_item'
// Search result option item interface
export interface OptionItem {
id: string id: string
type: 'nodes' | 'edges' | 'message' type: 'nodes' | 'edges' | 'message'
message?: string message?: string
} }
const NodeOption = ({ id }: { id: string }) => {
const graph = useGraphStore.use.sigmaGraph()
if (!graph?.hasNode(id)) {
return null
}
return <NodeById id={id} />
}
function OptionComponent(item: OptionItem) { function OptionComponent(item: OptionItem) {
return ( return (
<div> <div>
{item.type === 'nodes' && <NodeById id={item.id} />} {item.type === 'nodes' && <NodeOption id={item.id} />}
{item.type === 'edges' && <EdgeById id={item.id} />} {item.type === 'edges' && <EdgeById id={item.id} />}
{item.type === 'message' && <div>{item.message}</div>} {item.type === 'message' && <div>{item.message}</div>}
</div> </div>
) )
} }
const messageId = '__message_item'
// Reset this cache when graph changes to ensure fresh search results
const lastGraph: any = {
graph: null,
searchEngine: null
}
/** /**
* Component thats display the search input. * Component thats display the search input.
@@ -48,25 +54,24 @@ export const GraphSearchInput = ({
}) => { }) => {
const { t } = useTranslation() const { t } = useTranslation()
const graph = useGraphStore.use.sigmaGraph() const graph = useGraphStore.use.sigmaGraph()
const searchEngine = useGraphStore.use.searchEngine()
// Force reset the cache when graph changes // Reset search engine when graph changes
useEffect(() => { useEffect(() => {
if (graph) { if (graph) {
// Reset cache to ensure fresh search results with new graph data useGraphStore.getState().resetSearchEngine()
lastGraph.graph = null;
lastGraph.searchEngine = null;
} }
}, [graph]); }, [graph]);
const searchEngine = useMemo(() => { // Create search engine when needed
if (lastGraph.graph == graph) { useEffect(() => {
return lastGraph.searchEngine // Skip if no graph, empty graph, or search engine already exists
if (!graph || graph.nodes().length === 0 || searchEngine) {
return
} }
if (!graph || graph.nodes().length == 0) return
lastGraph.graph = graph // Create new search engine
const newSearchEngine = new MiniSearch({
const searchEngine = new MiniSearch({
idField: 'id', idField: 'id',
fields: ['label'], fields: ['label'],
searchOptions: { searchOptions: {
@@ -78,16 +83,16 @@ export const GraphSearchInput = ({
} }
}) })
// Add documents // Add nodes to search engine
const documents = graph.nodes().map((id: string) => ({ const documents = graph.nodes().map((id: string) => ({
id: id, id: id,
label: graph.getNodeAttribute(id, 'label') label: graph.getNodeAttribute(id, 'label')
})) }))
searchEngine.addAll(documents) newSearchEngine.addAll(documents)
lastGraph.searchEngine = searchEngine // Update search engine in store
return searchEngine useGraphStore.getState().setSearchEngine(newSearchEngine)
}, [graph]) }, [graph, searchEngine])
/** /**
* Loading the options while the user is typing. * Loading the options while the user is typing.
@@ -95,19 +100,32 @@ export const GraphSearchInput = ({
const loadOptions = useCallback( const loadOptions = useCallback(
async (query?: string): Promise<OptionItem[]> => { async (query?: string): Promise<OptionItem[]> => {
if (onFocus) onFocus(null) if (onFocus) onFocus(null)
if (!graph || !searchEngine) return []
// If no query, return first searchResultLimit nodes // Safety checks to prevent crashes
if (!graph || !searchEngine) {
return []
}
// Verify graph has nodes before proceeding
if (graph.nodes().length === 0) {
return []
}
// If no query, return some nodes for user to select
if (!query) { if (!query) {
const nodeIds = graph.nodes().slice(0, searchResultLimit) const nodeIds = graph.nodes()
.filter(id => graph.hasNode(id))
.slice(0, searchResultLimit)
return nodeIds.map(id => ({ return nodeIds.map(id => ({
id, id,
type: 'nodes' type: 'nodes'
})) }))
} }
// If has query, search nodes // If has query, search nodes and verify they still exist
const result: OptionItem[] = searchEngine.search(query).map((r: { id: string }) => ({ const result: OptionItem[] = searchEngine.search(query)
.filter((r: { id: string }) => graph.hasNode(r.id))
.map((r: { id: string }) => ({
id: r.id, id: r.id,
type: 'nodes' type: 'nodes'
})) }))

View File

@@ -7,7 +7,7 @@ import { useLayoutForce, useWorkerLayoutForce } from '@react-sigma/layout-force'
import { useLayoutForceAtlas2, useWorkerLayoutForceAtlas2 } from '@react-sigma/layout-forceatlas2' import { useLayoutForceAtlas2, useWorkerLayoutForceAtlas2 } from '@react-sigma/layout-forceatlas2'
import { useLayoutNoverlap, useWorkerLayoutNoverlap } from '@react-sigma/layout-noverlap' import { useLayoutNoverlap, useWorkerLayoutNoverlap } from '@react-sigma/layout-noverlap'
import { useLayoutRandom } from '@react-sigma/layout-random' import { useLayoutRandom } from '@react-sigma/layout-random'
import { useCallback, useMemo, useState, useEffect } from 'react' import { useCallback, useMemo, useState, useEffect, useRef } from 'react'
import Button from '@/components/ui/Button' import Button from '@/components/ui/Button'
import { Popover, PopoverContent, PopoverTrigger } from '@/components/ui/Popover' import { Popover, PopoverContent, PopoverTrigger } from '@/components/ui/Popover'
@@ -26,43 +26,161 @@ type LayoutName =
| 'Force Directed' | 'Force Directed'
| 'Force Atlas' | 'Force Atlas'
const WorkerLayoutControl = ({ layout, autoRunFor }: WorkerLayoutControlProps) => { // Extend WorkerLayoutControlProps to include mainLayout
interface ExtendedWorkerLayoutControlProps extends WorkerLayoutControlProps {
mainLayout: LayoutHook;
}
const WorkerLayoutControl = ({ layout, autoRunFor, mainLayout }: ExtendedWorkerLayoutControlProps) => {
const sigma = useSigma() const sigma = useSigma()
const { stop, start, isRunning } = layout // Use local state to track animation running status
const [isRunning, setIsRunning] = useState(false)
// Timer reference for animation
const animationTimerRef = useRef<number | null>(null)
const { t } = useTranslation() const { t } = useTranslation()
// Function to update node positions using the layout algorithm
const updatePositions = useCallback(() => {
if (!sigma) return
try {
const graph = sigma.getGraph()
if (!graph || graph.order === 0) return
// Use mainLayout to get positions, similar to refreshLayout function
// console.log('Getting positions from mainLayout')
const positions = mainLayout.positions()
// Animate nodes to new positions
// console.log('Updating node positions with layout algorithm')
animateNodes(graph, positions, { duration: 300 }) // Reduced duration for more frequent updates
} catch (error) {
console.error('Error updating positions:', error)
// Stop animation if there's an error
if (animationTimerRef.current) {
window.clearInterval(animationTimerRef.current)
animationTimerRef.current = null
setIsRunning(false)
}
}
}, [sigma, mainLayout])
// Improved click handler that uses our own animation timer
const handleClick = useCallback(() => {
if (isRunning) {
// Stop the animation
console.log('Stopping layout animation')
if (animationTimerRef.current) {
window.clearInterval(animationTimerRef.current)
animationTimerRef.current = null
}
// Try to kill the layout algorithm if it's running
try {
if (typeof layout.kill === 'function') {
layout.kill()
console.log('Layout algorithm killed')
} else if (typeof layout.stop === 'function') {
layout.stop()
console.log('Layout algorithm stopped')
}
} catch (error) {
console.error('Error stopping layout algorithm:', error)
}
setIsRunning(false)
} else {
// Start the animation
console.log('Starting layout animation')
// Initial position update
updatePositions()
// Set up interval for continuous updates
animationTimerRef.current = window.setInterval(() => {
updatePositions()
}, 200) // Reduced interval to create overlapping animations for smoother transitions
setIsRunning(true)
// Set a timeout to automatically stop the animation after 3 seconds
setTimeout(() => {
if (animationTimerRef.current) {
console.log('Auto-stopping layout animation after 3 seconds')
window.clearInterval(animationTimerRef.current)
animationTimerRef.current = null
setIsRunning(false)
// Try to stop the layout algorithm
try {
if (typeof layout.kill === 'function') {
layout.kill()
} else if (typeof layout.stop === 'function') {
layout.stop()
}
} catch (error) {
console.error('Error stopping layout algorithm:', error)
}
}
}, 3000)
}
}, [isRunning, layout, updatePositions])
/** /**
* Init component when Sigma or component settings change. * Init component when Sigma or component settings change.
*/ */
useEffect(() => { useEffect(() => {
if (!sigma) { if (!sigma) {
console.log('No sigma instance available')
return return
} }
// we run the algo // Auto-run if specified
let timeout: number | null = null let timeout: number | null = null
if (autoRunFor !== undefined && autoRunFor > -1 && sigma.getGraph().order > 0) { if (autoRunFor !== undefined && autoRunFor > -1 && sigma.getGraph().order > 0) {
start() console.log('Auto-starting layout animation')
// set a timeout to stop it
timeout = // Initial position update
autoRunFor > 0 updatePositions()
? window.setTimeout(() => { stop() }, autoRunFor) // prettier-ignore
: null // Set up interval for continuous updates
animationTimerRef.current = window.setInterval(() => {
updatePositions()
}, 200) // Reduced interval to create overlapping animations for smoother transitions
setIsRunning(true)
// Set a timeout to stop it if autoRunFor > 0
if (autoRunFor > 0) {
timeout = window.setTimeout(() => {
console.log('Auto-stopping layout animation after timeout')
if (animationTimerRef.current) {
window.clearInterval(animationTimerRef.current)
animationTimerRef.current = null
}
setIsRunning(false)
}, autoRunFor)
}
} }
//cleaning // Cleanup function
return () => { return () => {
stop() // console.log('Cleaning up WorkerLayoutControl')
if (animationTimerRef.current) {
window.clearInterval(animationTimerRef.current)
animationTimerRef.current = null
}
if (timeout) { if (timeout) {
clearTimeout(timeout) window.clearTimeout(timeout)
} }
setIsRunning(false)
} }
}, [autoRunFor, start, stop, sigma]) }, [autoRunFor, sigma, updatePositions])
return ( return (
<Button <Button
size="icon" size="icon"
onClick={() => (isRunning ? stop() : start())} onClick={handleClick}
tooltip={isRunning ? t('graphPanel.sideBar.layoutsControl.stopAnimation') : t('graphPanel.sideBar.layoutsControl.startAnimation')} tooltip={isRunning ? t('graphPanel.sideBar.layoutsControl.stopAnimation') : t('graphPanel.sideBar.layoutsControl.startAnimation')}
variant={controlButtonVariant} variant={controlButtonVariant}
> >
@@ -85,8 +203,27 @@ const LayoutsControl = () => {
const layoutCircular = useLayoutCircular() const layoutCircular = useLayoutCircular()
const layoutCirclepack = useLayoutCirclepack() const layoutCirclepack = useLayoutCirclepack()
const layoutRandom = useLayoutRandom() const layoutRandom = useLayoutRandom()
const layoutNoverlap = useLayoutNoverlap({ settings: { margin: 1 } }) const layoutNoverlap = useLayoutNoverlap({
const layoutForce = useLayoutForce({ maxIterations: maxIterations }) maxIterations: maxIterations,
settings: {
margin: 5,
expansion: 1.1,
gridSize: 1,
ratio: 1,
speed: 3,
}
})
// Add parameters for Force Directed layout to improve convergence
const layoutForce = useLayoutForce({
maxIterations: maxIterations,
settings: {
attraction: 0.0003, // Lower attraction force to reduce oscillation
repulsion: 0.05, // Lower repulsion force to reduce oscillation
gravity: 0.01, // Increase gravity to make nodes converge to center faster
inertia: 0.4, // Lower inertia to add damping effect
maxMove: 100 // Limit maximum movement per step to prevent large jumps
}
})
const layoutForceAtlas2 = useLayoutForceAtlas2({ iterations: maxIterations }) const layoutForceAtlas2 = useLayoutForceAtlas2({ iterations: maxIterations })
const workerNoverlap = useWorkerLayoutNoverlap() const workerNoverlap = useWorkerLayoutNoverlap()
const workerForce = useWorkerLayoutForce() const workerForce = useWorkerLayoutForce()
@@ -130,10 +267,23 @@ const LayoutsControl = () => {
const runLayout = useCallback( const runLayout = useCallback(
(newLayout: LayoutName) => { (newLayout: LayoutName) => {
console.debug(newLayout) console.debug('Running layout:', newLayout)
const { positions } = layouts[newLayout].layout const { positions } = layouts[newLayout].layout
animateNodes(sigma.getGraph(), positions(), { duration: 500 })
try {
const graph = sigma.getGraph()
if (!graph) {
console.error('No graph available')
return
}
const pos = positions()
console.log('Positions calculated, animating nodes')
animateNodes(graph, pos, { duration: 400 })
setLayout(newLayout) setLayout(newLayout)
} catch (error) {
console.error('Error running layout:', error)
}
}, },
[layouts, sigma] [layouts, sigma]
) )
@@ -142,7 +292,10 @@ const LayoutsControl = () => {
<> <>
<div> <div>
{layouts[layout] && 'worker' in layouts[layout] && ( {layouts[layout] && 'worker' in layouts[layout] && (
<WorkerLayoutControl layout={layouts[layout].worker!} /> <WorkerLayoutControl
layout={layouts[layout].worker!}
mainLayout={layouts[layout].layout}
/>
)} )}
</div> </div>
<div> <div>

View File

@@ -1,8 +1,10 @@
import { useEffect, useState } from 'react' import { useEffect, useState } from 'react'
import { useGraphStore, RawNodeType, RawEdgeType } from '@/stores/graph' import { useGraphStore, RawNodeType, RawEdgeType } from '@/stores/graph'
import Text from '@/components/ui/Text' import Text from '@/components/ui/Text'
import Button from '@/components/ui/Button'
import useLightragGraph from '@/hooks/useLightragGraph' import useLightragGraph from '@/hooks/useLightragGraph'
import { useTranslation } from 'react-i18next' import { useTranslation } from 'react-i18next'
import { GitBranchPlus, Scissors } from 'lucide-react'
/** /**
* Component that view properties of elements in graph. * Component that view properties of elements in graph.
@@ -88,11 +90,26 @@ const refineNodeProperties = (node: RawNodeType): NodeType => {
const relationships = [] const relationships = []
if (state.sigmaGraph && state.rawGraph) { if (state.sigmaGraph && state.rawGraph) {
for (const edgeId of state.sigmaGraph.edges(node.id)) { try {
if (!state.sigmaGraph.hasNode(node.id)) {
return {
...node,
relationships: []
}
}
const edges = state.sigmaGraph.edges(node.id)
for (const edgeId of edges) {
if (!state.sigmaGraph.hasEdge(edgeId)) continue;
const edge = state.rawGraph.getEdge(edgeId, true) const edge = state.rawGraph.getEdge(edgeId, true)
if (edge) { if (edge) {
const isTarget = node.id === edge.source const isTarget = node.id === edge.source
const neighbourId = isTarget ? edge.target : edge.source const neighbourId = isTarget ? edge.target : edge.source
if (!state.sigmaGraph.hasNode(neighbourId)) continue;
const neighbour = state.rawGraph.getNode(neighbourId) const neighbour = state.rawGraph.getNode(neighbourId)
if (neighbour) { if (neighbour) {
relationships.push({ relationships.push({
@@ -103,7 +120,11 @@ const refineNodeProperties = (node: RawNodeType): NodeType => {
} }
} }
} }
} catch (error) {
console.error('Error refining node properties:', error)
} }
}
return { return {
...node, ...node,
relationships relationships
@@ -112,8 +133,31 @@ const refineNodeProperties = (node: RawNodeType): NodeType => {
const refineEdgeProperties = (edge: RawEdgeType): EdgeType => { const refineEdgeProperties = (edge: RawEdgeType): EdgeType => {
const state = useGraphStore.getState() const state = useGraphStore.getState()
const sourceNode = state.rawGraph?.getNode(edge.source) let sourceNode: RawNodeType | undefined = undefined
const targetNode = state.rawGraph?.getNode(edge.target) let targetNode: RawNodeType | undefined = undefined
if (state.sigmaGraph && state.rawGraph) {
try {
if (!state.sigmaGraph.hasEdge(edge.id)) {
return {
...edge,
sourceNode: undefined,
targetNode: undefined
}
}
if (state.sigmaGraph.hasNode(edge.source)) {
sourceNode = state.rawGraph.getNode(edge.source)
}
if (state.sigmaGraph.hasNode(edge.target)) {
targetNode = state.rawGraph.getNode(edge.target)
}
} catch (error) {
console.error('Error refining edge properties:', error)
}
}
return { return {
...edge, ...edge,
sourceNode, sourceNode,
@@ -157,9 +201,40 @@ const PropertyRow = ({
const NodePropertiesView = ({ node }: { node: NodeType }) => { const NodePropertiesView = ({ node }: { node: NodeType }) => {
const { t } = useTranslation() const { t } = useTranslation()
const handleExpandNode = () => {
useGraphStore.getState().triggerNodeExpand(node.id)
}
const handlePruneNode = () => {
useGraphStore.getState().triggerNodePrune(node.id)
}
return ( return (
<div className="flex flex-col gap-2"> <div className="flex flex-col gap-2">
<label className="text-md pl-1 font-bold tracking-wide text-sky-300">{t('graphPanel.propertiesView.node.title')}</label> <div className="flex justify-between items-center">
<label className="text-md pl-1 font-bold tracking-wide text-blue-700">{t('graphPanel.propertiesView.node.title')}</label>
<div className="flex gap-3">
<Button
size="icon"
variant="ghost"
className="h-7 w-7 border border-gray-400 hover:bg-gray-200 dark:border-gray-600 dark:hover:bg-gray-700"
onClick={handleExpandNode}
tooltip={t('graphPanel.propertiesView.node.expandNode')}
>
<GitBranchPlus className="h-4 w-4 text-gray-700 dark:text-gray-300" />
</Button>
<Button
size="icon"
variant="ghost"
className="h-7 w-7 border border-gray-400 hover:bg-gray-200 dark:border-gray-600 dark:hover:bg-gray-700"
onClick={handlePruneNode}
tooltip={t('graphPanel.propertiesView.node.pruneNode')}
>
<Scissors className="h-4 w-4 text-gray-900 dark:text-gray-300" />
</Button>
</div>
</div>
<div className="bg-primary/5 max-h-96 overflow-auto rounded p-1"> <div className="bg-primary/5 max-h-96 overflow-auto rounded p-1">
<PropertyRow name={t('graphPanel.propertiesView.node.id')} value={node.id} /> <PropertyRow name={t('graphPanel.propertiesView.node.id')} value={node.id} />
<PropertyRow <PropertyRow
@@ -171,7 +246,7 @@ const NodePropertiesView = ({ node }: { node: NodeType }) => {
/> />
<PropertyRow name={t('graphPanel.propertiesView.node.degree')} value={node.degree} /> <PropertyRow name={t('graphPanel.propertiesView.node.degree')} value={node.degree} />
</div> </div>
<label className="text-md pl-1 font-bold tracking-wide text-yellow-400/90">{t('graphPanel.propertiesView.node.properties')}</label> <label className="text-md pl-1 font-bold tracking-wide text-amber-700">{t('graphPanel.propertiesView.node.properties')}</label>
<div className="bg-primary/5 max-h-96 overflow-auto rounded p-1"> <div className="bg-primary/5 max-h-96 overflow-auto rounded p-1">
{Object.keys(node.properties) {Object.keys(node.properties)
.sort() .sort()
@@ -181,7 +256,7 @@ const NodePropertiesView = ({ node }: { node: NodeType }) => {
</div> </div>
{node.relationships.length > 0 && ( {node.relationships.length > 0 && (
<> <>
<label className="text-md pl-1 font-bold tracking-wide text-teal-600/90"> <label className="text-md pl-1 font-bold tracking-wide text-emerald-700">
{t('graphPanel.propertiesView.node.relationships')} {t('graphPanel.propertiesView.node.relationships')}
</label> </label>
<div className="bg-primary/5 max-h-96 overflow-auto rounded p-1"> <div className="bg-primary/5 max-h-96 overflow-auto rounded p-1">
@@ -208,7 +283,7 @@ const EdgePropertiesView = ({ edge }: { edge: EdgeType }) => {
const { t } = useTranslation() const { t } = useTranslation()
return ( return (
<div className="flex flex-col gap-2"> <div className="flex flex-col gap-2">
<label className="text-md pl-1 font-bold tracking-wide text-teal-600">{t('graphPanel.propertiesView.edge.title')}</label> <label className="text-md pl-1 font-bold tracking-wide text-violet-700">{t('graphPanel.propertiesView.edge.title')}</label>
<div className="bg-primary/5 max-h-96 overflow-auto rounded p-1"> <div className="bg-primary/5 max-h-96 overflow-auto rounded p-1">
<PropertyRow name={t('graphPanel.propertiesView.edge.id')} value={edge.id} /> <PropertyRow name={t('graphPanel.propertiesView.edge.id')} value={edge.id} />
{edge.type && <PropertyRow name={t('graphPanel.propertiesView.edge.type')} value={edge.type} />} {edge.type && <PropertyRow name={t('graphPanel.propertiesView.edge.type')} value={edge.type} />}
@@ -227,7 +302,7 @@ const EdgePropertiesView = ({ edge }: { edge: EdgeType }) => {
}} }}
/> />
</div> </div>
<label className="text-md pl-1 font-bold tracking-wide text-yellow-400/90">{t('graphPanel.propertiesView.edge.properties')}</label> <label className="text-md pl-1 font-bold tracking-wide text-amber-700">{t('graphPanel.propertiesView.edge.properties')}</label>
<div className="bg-primary/5 max-h-96 overflow-auto rounded p-1"> <div className="bg-primary/5 max-h-96 overflow-auto rounded p-1">
{Object.keys(edge.properties) {Object.keys(edge.properties)
.sort() .sort()

View File

@@ -1,4 +1,4 @@
import { useState, useCallback, useEffect } from 'react' import { useState, useCallback} from 'react'
import { Popover, PopoverContent, PopoverTrigger } from '@/components/ui/Popover' import { Popover, PopoverContent, PopoverTrigger } from '@/components/ui/Popover'
import Checkbox from '@/components/ui/Checkbox' import Checkbox from '@/components/ui/Checkbox'
import Button from '@/components/ui/Button' import Button from '@/components/ui/Button'
@@ -7,10 +7,8 @@ import Input from '@/components/ui/Input'
import { controlButtonVariant } from '@/lib/constants' import { controlButtonVariant } from '@/lib/constants'
import { useSettingsStore } from '@/stores/settings' import { useSettingsStore } from '@/stores/settings'
import { useBackendState } from '@/stores/state'
import { useGraphStore } from '@/stores/graph'
import { SettingsIcon, RefreshCwIcon } from 'lucide-react' import { SettingsIcon } from 'lucide-react'
import { useTranslation } from 'react-i18next'; import { useTranslation } from 'react-i18next';
/** /**
@@ -114,8 +112,6 @@ const LabeledNumberInput = ({
*/ */
export default function Settings() { export default function Settings() {
const [opened, setOpened] = useState<boolean>(false) const [opened, setOpened] = useState<boolean>(false)
const [tempApiKey, setTempApiKey] = useState<string>('')
const refreshLayout = useGraphStore.use.refreshLayout()
const showPropertyPanel = useSettingsStore.use.showPropertyPanel() const showPropertyPanel = useSettingsStore.use.showPropertyPanel()
const showNodeSearchBar = useSettingsStore.use.showNodeSearchBar() const showNodeSearchBar = useSettingsStore.use.showNodeSearchBar()
@@ -129,11 +125,6 @@ export default function Settings() {
const graphLayoutMaxIterations = useSettingsStore.use.graphLayoutMaxIterations() const graphLayoutMaxIterations = useSettingsStore.use.graphLayoutMaxIterations()
const enableHealthCheck = useSettingsStore.use.enableHealthCheck() const enableHealthCheck = useSettingsStore.use.enableHealthCheck()
const apiKey = useSettingsStore.use.apiKey()
useEffect(() => {
setTempApiKey(apiKey || '')
}, [apiKey, opened])
const setEnableNodeDrag = useCallback( const setEnableNodeDrag = useCallback(
() => useSettingsStore.setState((pre) => ({ enableNodeDrag: !pre.enableNodeDrag })), () => useSettingsStore.setState((pre) => ({ enableNodeDrag: !pre.enableNodeDrag })),
@@ -182,11 +173,22 @@ export default function Settings() {
const setGraphQueryMaxDepth = useCallback((depth: number) => { const setGraphQueryMaxDepth = useCallback((depth: number) => {
if (depth < 1) return if (depth < 1) return
useSettingsStore.setState({ graphQueryMaxDepth: depth }) useSettingsStore.setState({ graphQueryMaxDepth: depth })
const currentLabel = useSettingsStore.getState().queryLabel
useSettingsStore.getState().setQueryLabel('')
setTimeout(() => {
useSettingsStore.getState().setQueryLabel(currentLabel)
}, 300)
}, []) }, [])
const setGraphMinDegree = useCallback((degree: number) => { const setGraphMinDegree = useCallback((degree: number) => {
if (degree < 0) return if (degree < 0) return
useSettingsStore.setState({ graphMinDegree: degree }) useSettingsStore.setState({ graphMinDegree: degree })
const currentLabel = useSettingsStore.getState().queryLabel
useSettingsStore.getState().setQueryLabel('')
setTimeout(() => {
useSettingsStore.getState().setQueryLabel(currentLabel)
}, 300)
}, []) }, [])
const setGraphLayoutMaxIterations = useCallback((iterations: number) => { const setGraphLayoutMaxIterations = useCallback((iterations: number) => {
@@ -194,34 +196,19 @@ export default function Settings() {
useSettingsStore.setState({ graphLayoutMaxIterations: iterations }) useSettingsStore.setState({ graphLayoutMaxIterations: iterations })
}, []) }, [])
const setApiKey = useCallback(async () => {
useSettingsStore.setState({ apiKey: tempApiKey || null })
await useBackendState.getState().check()
setOpened(false)
}, [tempApiKey])
const handleTempApiKeyChange = useCallback(
(e: React.ChangeEvent<HTMLInputElement>) => {
setTempApiKey(e.target.value)
},
[setTempApiKey]
)
const { t } = useTranslation(); const { t } = useTranslation();
const saveSettings = () => setOpened(false);
return ( return (
<> <>
<Button
variant={controlButtonVariant}
tooltip={t('graphPanel.sideBar.settings.refreshLayout')}
size="icon"
onClick={refreshLayout}
>
<RefreshCwIcon />
</Button>
<Popover open={opened} onOpenChange={setOpened}> <Popover open={opened} onOpenChange={setOpened}>
<PopoverTrigger asChild> <PopoverTrigger asChild>
<Button variant={controlButtonVariant} tooltip={t('graphPanel.sideBar.settings.settings')} size="icon"> <Button
variant={controlButtonVariant}
tooltip={t('graphPanel.sideBar.settings.settings')}
size="icon"
>
<SettingsIcon /> <SettingsIcon />
</Button> </Button>
</PopoverTrigger> </PopoverTrigger>
@@ -303,30 +290,15 @@ export default function Settings() {
onEditFinished={setGraphLayoutMaxIterations} onEditFinished={setGraphLayoutMaxIterations}
/> />
<Separator /> <Separator />
<div className="flex flex-col gap-2">
<label className="text-sm font-medium">{t('graphPanel.sideBar.settings.apiKey')}</label>
<form className="flex h-6 gap-2" onSubmit={(e) => e.preventDefault()}>
<div className="w-0 flex-1">
<Input
type="password"
value={tempApiKey}
onChange={handleTempApiKeyChange}
placeholder={t('graphPanel.sideBar.settings.enterYourAPIkey')}
className="max-h-full w-full min-w-0"
autoComplete="off"
/>
</div>
<Button <Button
onClick={setApiKey} onClick={saveSettings}
variant="outline" variant="outline"
size="sm" size="sm"
className="max-h-full shrink-0" className="ml-auto px-4"
> >
{t('graphPanel.sideBar.settings.save')} {t('graphPanel.sideBar.settings.save')}
</Button> </Button>
</form>
</div>
</div> </div>
</PopoverContent> </PopoverContent>
</Popover> </Popover>

View File

@@ -11,7 +11,7 @@ const SettingsDisplay = () => {
const graphMinDegree = useSettingsStore.use.graphMinDegree() const graphMinDegree = useSettingsStore.use.graphMinDegree()
return ( return (
<div className="absolute bottom-2 left-[calc(2rem+2.5rem)] flex items-center gap-2 text-xs text-gray-400"> <div className="absolute bottom-4 left-[calc(1rem+2.5rem)] flex items-center gap-2 text-xs text-gray-400">
<div>{t('graphPanel.sideBar.settings.depth')}: {graphQueryMaxDepth}</div> <div>{t('graphPanel.sideBar.settings.depth')}: {graphQueryMaxDepth}</div>
<div>{t('graphPanel.sideBar.settings.degree')}: {graphMinDegree}</div> <div>{t('graphPanel.sideBar.settings.degree')}: {graphMinDegree}</div>
</div> </div>

View File

@@ -1,37 +1,107 @@
import { useCamera } from '@react-sigma/core' import { useCamera, useSigma } from '@react-sigma/core'
import { useCallback } from 'react' import { useCallback } from 'react'
import Button from '@/components/ui/Button' import Button from '@/components/ui/Button'
import { ZoomInIcon, ZoomOutIcon, FullscreenIcon } from 'lucide-react' import { ZoomInIcon, ZoomOutIcon, FullscreenIcon, RotateCwIcon, RotateCcwIcon } from 'lucide-react'
import { controlButtonVariant } from '@/lib/constants' import { controlButtonVariant } from '@/lib/constants'
import { useTranslation } from "react-i18next"; import { useTranslation } from 'react-i18next';
/** /**
* Component that provides zoom controls for the graph viewer. * Component that provides zoom controls for the graph viewer.
*/ */
const ZoomControl = () => { const ZoomControl = () => {
const { zoomIn, zoomOut, reset } = useCamera({ duration: 200, factor: 1.5 }) const { zoomIn, zoomOut, reset } = useCamera({ duration: 200, factor: 1.5 })
const sigma = useSigma()
const { t } = useTranslation(); const { t } = useTranslation();
const handleZoomIn = useCallback(() => zoomIn(), [zoomIn]) const handleZoomIn = useCallback(() => zoomIn(), [zoomIn])
const handleZoomOut = useCallback(() => zoomOut(), [zoomOut]) const handleZoomOut = useCallback(() => zoomOut(), [zoomOut])
const handleResetZoom = useCallback(() => reset(), [reset]) const handleResetZoom = useCallback(() => {
if (!sigma) return
try {
// First clear any custom bounding box and refresh
sigma.setCustomBBox(null)
sigma.refresh()
// Get graph after refresh
const graph = sigma.getGraph()
// Check if graph has nodes before accessing them
if (!graph?.order || graph.nodes().length === 0) {
// Use reset() for empty graph case
reset()
return
}
sigma.getCamera().animate(
{ x: 0.5, y: 0.5, ratio: 1.1 },
{ duration: 1000 }
)
} catch (error) {
console.error('Error resetting zoom:', error)
// Use reset() as fallback on error
reset()
}
}, [sigma, reset])
const handleRotate = useCallback(() => {
if (!sigma) return
const camera = sigma.getCamera()
const currentAngle = camera.angle
const newAngle = currentAngle + Math.PI / 8
camera.animate(
{ angle: newAngle },
{ duration: 200 }
)
}, [sigma])
const handleRotateCounterClockwise = useCallback(() => {
if (!sigma) return
const camera = sigma.getCamera()
const currentAngle = camera.angle
const newAngle = currentAngle - Math.PI / 8
camera.animate(
{ angle: newAngle },
{ duration: 200 }
)
}, [sigma])
return ( return (
<> <>
<Button variant={controlButtonVariant} onClick={handleZoomIn} tooltip={t("graphPanel.sideBar.zoomControl.zoomIn")} size="icon"> <Button
<ZoomInIcon /> variant={controlButtonVariant}
onClick={handleRotateCounterClockwise}
tooltip={t('graphPanel.sideBar.zoomControl.rotateCameraCounterClockwise')}
size="icon"
>
<RotateCcwIcon />
</Button> </Button>
<Button variant={controlButtonVariant} onClick={handleZoomOut} tooltip={t("graphPanel.sideBar.zoomControl.zoomOut")} size="icon"> <Button
<ZoomOutIcon /> variant={controlButtonVariant}
onClick={handleRotate}
tooltip={t('graphPanel.sideBar.zoomControl.rotateCamera')}
size="icon"
>
<RotateCwIcon />
</Button> </Button>
<Button <Button
variant={controlButtonVariant} variant={controlButtonVariant}
onClick={handleResetZoom} onClick={handleResetZoom}
tooltip={t("graphPanel.sideBar.zoomControl.resetZoom")} tooltip={t('graphPanel.sideBar.zoomControl.resetZoom')}
size="icon" size="icon"
> >
<FullscreenIcon /> <FullscreenIcon />
</Button> </Button>
<Button variant={controlButtonVariant} onClick={handleZoomIn} tooltip={t('graphPanel.sideBar.zoomControl.zoomIn')} size="icon">
<ZoomInIcon />
</Button>
<Button variant={controlButtonVariant} onClick={handleZoomOut} tooltip={t('graphPanel.sideBar.zoomControl.zoomOut')} size="icon">
<ZoomOutIcon />
</Button>
</> </>
) )
} }

View File

@@ -11,7 +11,6 @@ const PopoverContent = React.forwardRef<
React.ComponentRef<typeof PopoverPrimitive.Content>, React.ComponentRef<typeof PopoverPrimitive.Content>,
React.ComponentPropsWithoutRef<typeof PopoverPrimitive.Content> React.ComponentPropsWithoutRef<typeof PopoverPrimitive.Content>
>(({ className, align = 'center', sideOffset = 4, ...props }, ref) => ( >(({ className, align = 'center', sideOffset = 4, ...props }, ref) => (
<PopoverPrimitive.Portal>
<PopoverPrimitive.Content <PopoverPrimitive.Content
ref={ref} ref={ref}
align={align} align={align}
@@ -22,7 +21,6 @@ const PopoverContent = React.forwardRef<
)} )}
{...props} {...props}
/> />
</PopoverPrimitive.Portal>
)) ))
PopoverContent.displayName = PopoverPrimitive.Content.displayName PopoverContent.displayName = PopoverPrimitive.Content.displayName

View File

@@ -38,7 +38,7 @@ const TooltipContent = React.forwardRef<
side={side} side={side}
align={align} align={align}
className={cn( className={cn(
'bg-popover text-popover-foreground animate-in fade-in-0 zoom-in-95 data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=closed]:zoom-out-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2 max-h-[60vh] overflow-y-auto whitespace-pre-wrap break-words rounded-md border px-3 py-2 text-sm shadow-md', 'bg-popover text-popover-foreground animate-in fade-in-0 zoom-in-95 data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=closed]:zoom-out-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2 max-h-[60vh] overflow-y-auto whitespace-pre-wrap break-words rounded-md border px-3 py-2 text-sm shadow-md z-60',
className className
)} )}
{...props} {...props}

View File

@@ -15,16 +15,22 @@ export const TabVisibilityProvider: React.FC<TabVisibilityProviderProps> = ({ ch
// Get current tab from settings store // Get current tab from settings store
const currentTab = useSettingsStore.use.currentTab(); const currentTab = useSettingsStore.use.currentTab();
// Initialize visibility state with current tab as visible // Initialize visibility state with all tabs visible
const [visibleTabs, setVisibleTabs] = useState<Record<string, boolean>>(() => ({ const [visibleTabs, setVisibleTabs] = useState<Record<string, boolean>>(() => ({
[currentTab]: true 'documents': true,
'knowledge-graph': true,
'retrieval': true,
'api': true
})); }));
// Update visibility when current tab changes // Keep all tabs visible because we use CSS to control TAB visibility instead of React
useEffect(() => { useEffect(() => {
setVisibleTabs((prev) => ({ setVisibleTabs((prev) => ({
...prev, ...prev,
[currentTab]: true 'documents': true,
'knowledge-graph': true,
'retrieval': true,
'api': true
})); }));
}, [currentTab]); }, [currentTab]);

View File

@@ -1,6 +1,6 @@
import { useState, useEffect, useCallback, useRef } from 'react' import { useState, useEffect, useCallback } from 'react'
import { useTranslation } from 'react-i18next' import { useTranslation } from 'react-i18next'
import { useTabVisibility } from '@/contexts/useTabVisibility' import { useSettingsStore } from '@/stores/settings'
import Button from '@/components/ui/Button' import Button from '@/components/ui/Button'
import { import {
Table, Table,
@@ -27,9 +27,7 @@ export default function DocumentManager() {
const { t } = useTranslation() const { t } = useTranslation()
const health = useBackendState.use.health() const health = useBackendState.use.health()
const [docs, setDocs] = useState<DocsStatusesResponse | null>(null) const [docs, setDocs] = useState<DocsStatusesResponse | null>(null)
const { isTabVisible } = useTabVisibility() const currentTab = useSettingsStore.use.currentTab()
const isDocumentsTabVisible = isTabVisible('documents')
const initialLoadRef = useRef(false)
const fetchDocuments = useCallback(async () => { const fetchDocuments = useCallback(async () => {
try { try {
@@ -45,7 +43,6 @@ export default function DocumentManager() {
} else { } else {
setDocs(null) setDocs(null)
} }
// console.log(docs)
} else { } else {
setDocs(null) setDocs(null)
} }
@@ -54,13 +51,12 @@ export default function DocumentManager() {
} }
}, [setDocs, t]) }, [setDocs, t])
// Only fetch documents when the tab becomes visible for the first time // Fetch documents when the tab becomes visible
useEffect(() => { useEffect(() => {
if (isDocumentsTabVisible && !initialLoadRef.current) { if (currentTab === 'documents') {
fetchDocuments() fetchDocuments()
initialLoadRef.current = true
} }
}, [isDocumentsTabVisible, fetchDocuments]) }, [currentTab, fetchDocuments])
const scanDocuments = useCallback(async () => { const scanDocuments = useCallback(async () => {
try { try {
@@ -71,9 +67,9 @@ export default function DocumentManager() {
} }
}, [t]) }, [t])
// Only set up polling when the tab is visible and health is good // Set up polling when the documents tab is active and health is good
useEffect(() => { useEffect(() => {
if (!isDocumentsTabVisible || !health) { if (currentTab !== 'documents' || !health) {
return return
} }
@@ -86,7 +82,7 @@ export default function DocumentManager() {
}, 5000) }, 5000)
return () => clearInterval(interval) return () => clearInterval(interval)
}, [health, fetchDocuments, t, isDocumentsTabVisible]) }, [health, fetchDocuments, t, currentTab])
return ( return (
<Card className="!size-full !rounded-none !border-none"> <Card className="!size-full !rounded-none !border-none">

View File

@@ -1,5 +1,4 @@
import { useEffect, useState, useCallback, useMemo, useRef } from 'react' import { useEffect, useState, useCallback, useMemo, useRef } from 'react'
import { useTabVisibility } from '@/contexts/useTabVisibility'
// import { MiniMap } from '@react-sigma/minimap' // import { MiniMap } from '@react-sigma/minimap'
import { SigmaContainer, useRegisterEvents, useSigma } from '@react-sigma/core' import { SigmaContainer, useRegisterEvents, useSigma } from '@react-sigma/core'
import { Settings as SigmaSettings } from 'sigma/settings' import { Settings as SigmaSettings } from 'sigma/settings'
@@ -108,46 +107,46 @@ const GraphEvents = () => {
const GraphViewer = () => { const GraphViewer = () => {
const [sigmaSettings, setSigmaSettings] = useState(defaultSigmaSettings) const [sigmaSettings, setSigmaSettings] = useState(defaultSigmaSettings)
const sigmaRef = useRef<any>(null) const sigmaRef = useRef<any>(null)
const initAttemptedRef = useRef(false)
const selectedNode = useGraphStore.use.selectedNode() const selectedNode = useGraphStore.use.selectedNode()
const focusedNode = useGraphStore.use.focusedNode() const focusedNode = useGraphStore.use.focusedNode()
const moveToSelectedNode = useGraphStore.use.moveToSelectedNode() const moveToSelectedNode = useGraphStore.use.moveToSelectedNode()
const isFetching = useGraphStore.use.isFetching() const isFetching = useGraphStore.use.isFetching()
const shouldRender = useGraphStore.use.shouldRender() // Rendering control state
// Get tab visibility
const { isTabVisible } = useTabVisibility()
const isGraphTabVisible = isTabVisible('knowledge-graph')
const showPropertyPanel = useSettingsStore.use.showPropertyPanel() const showPropertyPanel = useSettingsStore.use.showPropertyPanel()
const showNodeSearchBar = useSettingsStore.use.showNodeSearchBar() const showNodeSearchBar = useSettingsStore.use.showNodeSearchBar()
const enableNodeDrag = useSettingsStore.use.enableNodeDrag() const enableNodeDrag = useSettingsStore.use.enableNodeDrag()
// Handle component mount/unmount and tab visibility
useEffect(() => {
// When component mounts or tab becomes visible
if (isGraphTabVisible && !shouldRender && !isFetching && !initAttemptedRef.current) {
// If tab is visible but graph is not rendering, try to enable rendering
useGraphStore.getState().setShouldRender(true)
initAttemptedRef.current = true
console.log('Graph viewer initialized')
}
// Cleanup function when component unmounts
return () => {
// Only log cleanup, don't actually clean up the WebGL context
// This allows the WebGL context to persist across tab switches
console.log('Graph viewer cleanup')
}
}, [isGraphTabVisible, shouldRender, isFetching])
// Initialize sigma settings once on component mount // Initialize sigma settings once on component mount
// All dynamic settings will be updated in GraphControl using useSetSettings // All dynamic settings will be updated in GraphControl using useSetSettings
useEffect(() => { useEffect(() => {
setSigmaSettings(defaultSigmaSettings) setSigmaSettings(defaultSigmaSettings)
console.log('Initialized sigma settings')
}, []) }, [])
// Clean up sigma instance when component unmounts
useEffect(() => {
return () => {
// Tabs are mounted twice in vite dev mode; this is a workaround
const sigma = useGraphStore.getState().sigmaInstance;
if (sigma) {
try {
// Destroy sigma and clear WebGL context
sigma.kill();
useGraphStore.getState().setSigmaInstance(null);
console.log('Cleared sigma instance on Graphviewer unmount');
} catch (error) {
console.error('Error cleaning up sigma instance:', error);
}
}
};
}, []);
// Note: There was a useLayoutEffect hook here to set up the sigma instance and graph data,
// but testing showed it wasn't executing or having any effect, while the backup mechanism
// in GraphControl was sufficient. This code was removed to simplify implementation
const onSearchFocus = useCallback((value: GraphSearchOption | null) => { const onSearchFocus = useCallback((value: GraphSearchOption | null) => {
if (value === null) useGraphStore.getState().setFocusedNode(null) if (value === null) useGraphStore.getState().setFocusedNode(null)
else if (value.type === 'nodes') useGraphStore.getState().setFocusedNode(value.id) else if (value.type === 'nodes') useGraphStore.getState().setFocusedNode(value.id)
@@ -167,12 +166,9 @@ const GraphViewer = () => {
[selectedNode] [selectedNode]
) )
// Since TabsContent now forces mounting of all tabs, we need to conditionally render // Always render SigmaContainer but control its visibility with CSS
// the SigmaContainer based on visibility to avoid unnecessary rendering
return ( return (
<div className="relative h-full w-full"> <div className="relative h-full w-full overflow-hidden">
{/* Only render the SigmaContainer when the tab is visible */}
{isGraphTabVisible ? (
<SigmaContainer <SigmaContainer
settings={sigmaSettings} settings={sigmaSettings}
className="!bg-background !size-full overflow-hidden" className="!bg-background !size-full overflow-hidden"
@@ -196,10 +192,10 @@ const GraphViewer = () => {
</div> </div>
<div className="bg-background/60 absolute bottom-2 left-2 flex flex-col rounded-xl border-2 backdrop-blur-lg"> <div className="bg-background/60 absolute bottom-2 left-2 flex flex-col rounded-xl border-2 backdrop-blur-lg">
<Settings />
<ZoomControl />
<LayoutsControl /> <LayoutsControl />
<ZoomControl />
<FullScreenControl /> <FullScreenControl />
<Settings />
{/* <ThemeToggle /> */} {/* <ThemeToggle /> */}
</div> </div>
@@ -215,14 +211,6 @@ const GraphViewer = () => {
<SettingsDisplay /> <SettingsDisplay />
</SigmaContainer> </SigmaContainer>
) : (
// Placeholder when tab is not visible
<div className="flex h-full w-full items-center justify-center">
<div className="text-center text-muted-foreground">
{/* Placeholder content */}
</div>
</div>
)}
{/* Loading overlay - shown when data is loading */} {/* Loading overlay - shown when data is loading */}
{isFetching && ( {isFetching && (

View File

@@ -0,0 +1,177 @@
import { useState, useEffect } from 'react'
import { useNavigate } from 'react-router-dom'
import { useAuthStore } from '@/stores/state'
import { loginToServer, getAuthStatus } from '@/api/lightrag'
import { toast } from 'sonner'
import { useTranslation } from 'react-i18next'
import { Card, CardContent, CardHeader } from '@/components/ui/Card'
import Input from '@/components/ui/Input'
import Button from '@/components/ui/Button'
import { ZapIcon } from 'lucide-react'
import AppSettings from '@/components/AppSettings'
/**
 * LoginPage — standalone sign-in screen for the LightRAG WebUI.
 *
 * Behavior visible in this component:
 * - On mount it calls getAuthStatus(); if the server reports that auth is
 *   not configured and supplies an access token, it logs in as guest and
 *   redirects to '/' without ever showing the form.
 * - Otherwise it renders a username/password form; submitting calls
 *   loginToServer() and stores the returned token via the auth store.
 * - Renders nothing (null) while the initial auth-status check is pending,
 *   to avoid flashing the form before the redirect decision is made.
 */
const LoginPage = () => {
  const navigate = useNavigate()
  const { login, isAuthenticated } = useAuthStore()
  const { t } = useTranslation()
  // Form and lifecycle state
  const [loading, setLoading] = useState(false)
  const [username, setUsername] = useState('')
  const [password, setPassword] = useState('')
  const [checkingAuth, setCheckingAuth] = useState(true)

  useEffect(() => {
    console.log('LoginPage mounted')
  }, []);

  // Check if authentication is configured, skip login if not
  useEffect(() => {
    let isMounted = true; // Flag to prevent state updates after unmount

    const checkAuthConfig = async () => {
      try {
        // If already authenticated, redirect to home
        if (isAuthenticated) {
          navigate('/')
          return
        }

        // Check auth status
        const status = await getAuthStatus()

        // Only proceed if component is still mounted
        if (!isMounted) return;

        if (!status.auth_configured && status.access_token) {
          // If auth is not configured, use the guest token and redirect
          login(status.access_token, true)
          if (status.message) {
            toast.info(status.message)
          }
          navigate('/')
          return // Exit early, no need to set checkingAuth to false
        }
      } catch (error) {
        // Best-effort check: on failure we fall through and show the form
        console.error('Failed to check auth configuration:', error)
      } finally {
        // Only update state if component is still mounted
        if (isMounted) {
          setCheckingAuth(false)
        }
      }
    }

    // Execute immediately
    checkAuthConfig()

    // Cleanup function to prevent state updates after unmount
    return () => {
      isMounted = false;
    }
  }, [isAuthenticated, login, navigate])

  // Don't render anything while checking auth
  if (checkingAuth) {
    return null
  }

  // Submit handler: validate fields, call the login API, persist the token
  // via the auth store, and navigate home; on failure clear any stale auth.
  const handleSubmit = async (e: React.FormEvent<HTMLFormElement>) => {
    e.preventDefault()
    if (!username || !password) {
      toast.error(t('login.errorEmptyFields'))
      return
    }
    try {
      setLoading(true)
      const response = await loginToServer(username, password)

      // Check authentication mode
      const isGuestMode = response.auth_mode === 'disabled'
      login(response.access_token, isGuestMode)

      if (isGuestMode) {
        // Show authentication disabled notification
        toast.info(response.message || t('login.authDisabled', 'Authentication is disabled. Using guest access.'))
      } else {
        toast.success(t('login.successMessage'))
      }

      // Navigate to home page after successful login
      navigate('/')
    } catch (error) {
      console.error('Login failed...', error)
      toast.error(t('login.errorInvalidCredentials'))

      // Clear any existing auth state
      useAuthStore.getState().logout()
      // Clear local storage
      // NOTE(review): assumes the token is persisted under this exact key
      // by the auth store — confirm against the store implementation.
      localStorage.removeItem('LIGHTRAG-API-TOKEN')
    } finally {
      setLoading(false)
    }
  }

  // Login form UI: settings shortcut in the corner, branded card with
  // username/password inputs and a submit button.
  return (
    <div className="flex h-screen w-screen items-center justify-center bg-gradient-to-br from-emerald-50 to-teal-100 dark:from-gray-900 dark:to-gray-800">
      <div className="absolute top-4 right-4 flex items-center gap-2">
        <AppSettings className="bg-white/30 dark:bg-gray-800/30 backdrop-blur-sm rounded-md" />
      </div>
      <Card className="w-full max-w-[480px] shadow-lg mx-4">
        <CardHeader className="flex items-center justify-center space-y-2 pb-8 pt-6">
          <div className="flex flex-col items-center space-y-4">
            <div className="flex items-center gap-3">
              <img src="logo.png" alt="LightRAG Logo" className="h-12 w-12" />
              <ZapIcon className="size-10 text-emerald-400" aria-hidden="true" />
            </div>
            <div className="text-center space-y-2">
              <h1 className="text-3xl font-bold tracking-tight">LightRAG</h1>
              <p className="text-muted-foreground text-sm">
                {t('login.description')}
              </p>
            </div>
          </div>
        </CardHeader>
        <CardContent className="px-8 pb-8">
          <form onSubmit={handleSubmit} className="space-y-6">
            <div className="flex items-center gap-4">
              <label htmlFor="username" className="text-sm font-medium w-16 shrink-0">
                {t('login.username')}
              </label>
              <Input
                id="username"
                placeholder={t('login.usernamePlaceholder')}
                value={username}
                onChange={(e) => setUsername(e.target.value)}
                required
                className="h-11 flex-1"
              />
            </div>
            <div className="flex items-center gap-4">
              <label htmlFor="password" className="text-sm font-medium w-16 shrink-0">
                {t('login.password')}
              </label>
              <Input
                id="password"
                type="password"
                placeholder={t('login.passwordPlaceholder')}
                value={password}
                onChange={(e) => setPassword(e.target.value)}
                required
                className="h-11 flex-1"
              />
            </div>
            <Button
              type="submit"
              className="w-full h-11 text-base font-medium mt-2"
              disabled={loading}
            >
              {loading ? t('login.loggingIn') : t('login.loginButton')}
            </Button>
          </form>
        </CardContent>
      </Card>
    </div>
  )
}
export default LoginPage

View File

@@ -112,7 +112,7 @@ export default function RetrievalTesting() {
}, [setMessages]) }, [setMessages])
return ( return (
<div className="flex size-full gap-2 px-2 pb-12"> <div className="flex size-full gap-2 px-2 pb-12 overflow-hidden">
<div className="flex grow flex-col gap-4"> <div className="flex grow flex-col gap-4">
<div className="relative grow"> <div className="relative grow">
<div className="bg-primary-foreground/60 absolute inset-0 flex flex-col overflow-auto rounded-lg border p-2"> <div className="bg-primary-foreground/60 absolute inset-0 flex flex-col overflow-auto rounded-lg border p-2">

View File

@@ -1,12 +1,13 @@
import Button from '@/components/ui/Button' import Button from '@/components/ui/Button'
import { SiteInfo } from '@/lib/constants' import { SiteInfo, webuiPrefix } from '@/lib/constants'
import AppSettings from '@/components/AppSettings' import AppSettings from '@/components/AppSettings'
import { TabsList, TabsTrigger } from '@/components/ui/Tabs' import { TabsList, TabsTrigger } from '@/components/ui/Tabs'
import { useSettingsStore } from '@/stores/settings' import { useSettingsStore } from '@/stores/settings'
import { useAuthStore } from '@/stores/state'
import { cn } from '@/lib/utils' import { cn } from '@/lib/utils'
import { useTranslation } from 'react-i18next' import { useTranslation } from 'react-i18next'
import { navigationService } from '@/services/navigation'
import { ZapIcon, GithubIcon } from 'lucide-react' import { ZapIcon, GithubIcon, LogOutIcon } from 'lucide-react'
interface NavigationTabProps { interface NavigationTabProps {
value: string value: string
@@ -54,9 +55,15 @@ function TabsNavigation() {
export default function SiteHeader() { export default function SiteHeader() {
const { t } = useTranslation() const { t } = useTranslation()
const { isGuestMode } = useAuthStore()
const handleLogout = () => {
navigationService.navigateToLogin();
}
return ( return (
<header className="border-border/40 bg-background/95 supports-[backdrop-filter]:bg-background/60 sticky top-0 z-50 flex h-10 w-full border-b px-4 backdrop-blur"> <header className="border-border/40 bg-background/95 supports-[backdrop-filter]:bg-background/60 sticky top-0 z-50 flex h-10 w-full border-b px-4 backdrop-blur">
<a href="/" className="mr-6 flex items-center gap-2"> <a href={webuiPrefix} className="mr-6 flex items-center gap-2">
<ZapIcon className="size-4 text-emerald-400" aria-hidden="true" /> <ZapIcon className="size-4 text-emerald-400" aria-hidden="true" />
{/* <img src='/logo.png' className="size-4" /> */} {/* <img src='/logo.png' className="size-4" /> */}
<span className="font-bold md:inline-block">{SiteInfo.name}</span> <span className="font-bold md:inline-block">{SiteInfo.name}</span>
@@ -64,6 +71,11 @@ export default function SiteHeader() {
<div className="flex h-10 flex-1 justify-center"> <div className="flex h-10 flex-1 justify-center">
<TabsNavigation /> <TabsNavigation />
{isGuestMode && (
<div className="ml-2 self-center px-2 py-1 text-xs bg-amber-100 text-amber-800 dark:bg-amber-900 dark:text-amber-200 rounded-md">
{t('login.guestMode', 'Guest Mode')}
</div>
)}
</div> </div>
<nav className="flex items-center"> <nav className="flex items-center">
@@ -74,6 +86,9 @@ export default function SiteHeader() {
</a> </a>
</Button> </Button>
<AppSettings /> <AppSettings />
<Button variant="ghost" size="icon" side="bottom" tooltip={t('header.logout')} onClick={handleLogout}>
<LogOutIcon className="size-4" aria-hidden="true" />
</Button>
</div> </div>
</nav> </nav>
</header> </header>

View File

@@ -1,12 +1,13 @@
import Graph, { DirectedGraph } from 'graphology' import Graph, { DirectedGraph } from 'graphology'
import { useCallback, useEffect, useRef } from 'react' import { useCallback, useEffect, useRef } from 'react'
import { useTranslation } from 'react-i18next'
import { randomColor, errorMessage } from '@/lib/utils' import { randomColor, errorMessage } from '@/lib/utils'
import * as Constants from '@/lib/constants' import * as Constants from '@/lib/constants'
import { useGraphStore, RawGraph } from '@/stores/graph' import { useGraphStore, RawGraph, RawNodeType, RawEdgeType } from '@/stores/graph'
import { toast } from 'sonner'
import { queryGraphs } from '@/api/lightrag' import { queryGraphs } from '@/api/lightrag'
import { useBackendState } from '@/stores/state' import { useBackendState } from '@/stores/state'
import { useSettingsStore } from '@/stores/settings' import { useSettingsStore } from '@/stores/settings'
import { useTabVisibility } from '@/contexts/useTabVisibility'
import seedrandom from 'seedrandom' import seedrandom from 'seedrandom'
@@ -139,7 +140,13 @@ const fetchGraph = async (label: string, maxDepth: number, minDegree: number) =>
// Create a new graph instance with the raw graph data // Create a new graph instance with the raw graph data
const createSigmaGraph = (rawGraph: RawGraph | null) => { const createSigmaGraph = (rawGraph: RawGraph | null) => {
// Always create a new graph instance // Skip graph creation if no data or empty nodes
if (!rawGraph || !rawGraph.nodes.length) {
console.log('No graph data available, skipping sigma graph creation');
return null;
}
// Create new graph instance
const graph = new DirectedGraph() const graph = new DirectedGraph()
// Add nodes from raw graph data // Add nodes from raw graph data
@@ -172,30 +179,20 @@ const createSigmaGraph = (rawGraph: RawGraph | null) => {
} }
const useLightrangeGraph = () => { const useLightrangeGraph = () => {
const { t } = useTranslation()
const queryLabel = useSettingsStore.use.queryLabel() const queryLabel = useSettingsStore.use.queryLabel()
const rawGraph = useGraphStore.use.rawGraph() const rawGraph = useGraphStore.use.rawGraph()
const sigmaGraph = useGraphStore.use.sigmaGraph() const sigmaGraph = useGraphStore.use.sigmaGraph()
const maxQueryDepth = useSettingsStore.use.graphQueryMaxDepth() const maxQueryDepth = useSettingsStore.use.graphQueryMaxDepth()
const minDegree = useSettingsStore.use.graphMinDegree() const minDegree = useSettingsStore.use.graphMinDegree()
const isFetching = useGraphStore.use.isFetching() const isFetching = useGraphStore.use.isFetching()
const nodeToExpand = useGraphStore.use.nodeToExpand()
// Get tab visibility const nodeToPrune = useGraphStore.use.nodeToPrune()
const { isTabVisible } = useTabVisibility()
const isGraphTabVisible = isTabVisible('knowledge-graph')
// Track previous parameters to detect actual changes
const prevParamsRef = useRef({ queryLabel, maxQueryDepth, minDegree })
// Use ref to track if data has been loaded and initial load // Use ref to track if data has been loaded and initial load
const dataLoadedRef = useRef(false) const dataLoadedRef = useRef(false)
const initialLoadRef = useRef(false) const initialLoadRef = useRef(false)
// Check if parameters have changed
const paramsChanged =
prevParamsRef.current.queryLabel !== queryLabel ||
prevParamsRef.current.maxQueryDepth !== maxQueryDepth ||
prevParamsRef.current.minDegree !== minDegree
const getNode = useCallback( const getNode = useCallback(
(nodeId: string) => { (nodeId: string) => {
return rawGraph?.getNode(nodeId) || null return rawGraph?.getNode(nodeId) || null
@@ -213,43 +210,33 @@ const useLightrangeGraph = () => {
// Track if a fetch is in progress to prevent multiple simultaneous fetches // Track if a fetch is in progress to prevent multiple simultaneous fetches
const fetchInProgressRef = useRef(false) const fetchInProgressRef = useRef(false)
// Data fetching logic - simplified but preserving TAB visibility check // Reset graph when query label is cleared
useEffect(() => { useEffect(() => {
// Skip if fetch is already in progress if (!queryLabel && (rawGraph !== null || sigmaGraph !== null)) {
if (fetchInProgressRef.current) {
return
}
// If there's no query label, reset the graph
if (!queryLabel) {
if (rawGraph !== null || sigmaGraph !== null) {
const state = useGraphStore.getState() const state = useGraphStore.getState()
state.reset() state.reset()
state.setGraphDataFetchAttempted(false) state.setGraphDataFetchAttempted(false)
state.setLabelsFetchAttempted(false) state.setLabelsFetchAttempted(false)
}
dataLoadedRef.current = false dataLoadedRef.current = false
initialLoadRef.current = false initialLoadRef.current = false
}
}, [queryLabel, rawGraph, sigmaGraph])
// Data fetching logic
useEffect(() => {
// Skip if fetch is already in progress or no query label
if (fetchInProgressRef.current || !queryLabel) {
return return
} }
// Check if parameters have changed // Only fetch data when graphDataFetchAttempted is false (avoids re-fetching on vite dev mode)
if (!isFetching && !fetchInProgressRef.current && if (!isFetching && !useGraphStore.getState().graphDataFetchAttempted) {
(paramsChanged || !useGraphStore.getState().graphDataFetchAttempted)) {
// Only fetch data if the Graph tab is visible
if (!isGraphTabVisible) {
console.log('Graph tab not visible, skipping data fetch');
return;
}
// Set flags // Set flags
fetchInProgressRef.current = true fetchInProgressRef.current = true
useGraphStore.getState().setGraphDataFetchAttempted(true) useGraphStore.getState().setGraphDataFetchAttempted(true)
const state = useGraphStore.getState() const state = useGraphStore.getState()
state.setIsFetching(true) state.setIsFetching(true)
state.setShouldRender(false) // Disable rendering during data loading
// Clear selection and highlighted nodes before fetching new graph // Clear selection and highlighted nodes before fetching new graph
state.clearSelection() state.clearSelection()
@@ -259,9 +246,6 @@ const useLightrangeGraph = () => {
}) })
} }
// Update parameter reference
prevParamsRef.current = { queryLabel, maxQueryDepth, minDegree }
console.log('Fetching graph data...') console.log('Fetching graph data...')
// Use a local copy of the parameters // Use a local copy of the parameters
@@ -284,8 +268,6 @@ const useLightrangeGraph = () => {
state.setSigmaGraph(newSigmaGraph) state.setSigmaGraph(newSigmaGraph)
state.setRawGraph(data) state.setRawGraph(data)
// No longer need to extract labels from graph data
// Update flags // Update flags
dataLoadedRef.current = true dataLoadedRef.current = true
initialLoadRef.current = true initialLoadRef.current = true
@@ -294,8 +276,6 @@ const useLightrangeGraph = () => {
// Reset camera view // Reset camera view
state.setMoveToSelectedNode(true) state.setMoveToSelectedNode(true)
// Enable rendering if the tab is visible
state.setShouldRender(isGraphTabVisible)
state.setIsFetching(false) state.setIsFetching(false)
}).catch((error) => { }).catch((error) => {
console.error('Error fetching graph data:', error) console.error('Error fetching graph data:', error)
@@ -303,29 +283,425 @@ const useLightrangeGraph = () => {
// Reset state on error // Reset state on error
const state = useGraphStore.getState() const state = useGraphStore.getState()
state.setIsFetching(false) state.setIsFetching(false)
state.setShouldRender(isGraphTabVisible)
dataLoadedRef.current = false dataLoadedRef.current = false
fetchInProgressRef.current = false fetchInProgressRef.current = false
state.setGraphDataFetchAttempted(false) state.setGraphDataFetchAttempted(false)
}) })
} }
}, [queryLabel, maxQueryDepth, minDegree, isFetching, paramsChanged, isGraphTabVisible, rawGraph, sigmaGraph]) }, [queryLabel, maxQueryDepth, minDegree, isFetching])
// Update rendering state and handle tab visibility changes // Handle node expansion
useEffect(() => { useEffect(() => {
// When tab becomes visible const handleNodeExpand = async (nodeId: string | null) => {
if (isGraphTabVisible) { if (!nodeId || !sigmaGraph || !rawGraph) return;
// If we have data, enable rendering
if (rawGraph) { try {
useGraphStore.getState().setShouldRender(true) // Get the node to expand
const nodeToExpand = rawGraph.getNode(nodeId);
if (!nodeToExpand) {
console.error('Node not found:', nodeId);
return;
} }
// We no longer reset the fetch attempted flag here to prevent continuous API calls // Get the label of the node to expand
} else { const label = nodeToExpand.labels[0];
// When tab becomes invisible, disable rendering if (!label) {
useGraphStore.getState().setShouldRender(false) console.error('Node has no label:', nodeId);
return;
} }
}, [isGraphTabVisible, rawGraph])
// Fetch the extended subgraph with depth 2
const extendedGraph = await queryGraphs(label, 2, 0);
if (!extendedGraph || !extendedGraph.nodes || !extendedGraph.edges) {
console.error('Failed to fetch extended graph');
return;
}
// Process nodes to add required properties for RawNodeType
const processedNodes: RawNodeType[] = [];
for (const node of extendedGraph.nodes) {
// Generate random color values
seedrandom(node.id, { global: true });
const color = randomColor();
// Create a properly typed RawNodeType
processedNodes.push({
id: node.id,
labels: node.labels,
properties: node.properties,
size: 10, // Default size, will be calculated later
x: Math.random(), // Random position, will be adjusted later
y: Math.random(), // Random position, will be adjusted later
color: color, // Random color
degree: 0 // Initial degree, will be calculated later
});
}
// Process edges to add required properties for RawEdgeType
const processedEdges: RawEdgeType[] = [];
for (const edge of extendedGraph.edges) {
// Create a properly typed RawEdgeType
processedEdges.push({
id: edge.id,
source: edge.source,
target: edge.target,
type: edge.type,
properties: edge.properties,
dynamicId: '' // Will be set when adding to sigma graph
});
}
// Store current node positions
const nodePositions: Record<string, {x: number, y: number}> = {};
sigmaGraph.forEachNode((node) => {
nodePositions[node] = {
x: sigmaGraph.getNodeAttribute(node, 'x'),
y: sigmaGraph.getNodeAttribute(node, 'y')
};
});
// Get existing node IDs
const existingNodeIds = new Set(sigmaGraph.nodes());
// Identify nodes and edges to keep
const nodesToAdd = new Set<string>();
const edgesToAdd = new Set<string>();
// Get degree range from existing graph for size calculations
const minDegree = 1;
let maxDegree = 0;
sigmaGraph.forEachNode(node => {
const degree = sigmaGraph.degree(node);
maxDegree = Math.max(maxDegree, degree);
});
// Calculate size formula parameters
const range = maxDegree - minDegree || 1; // Avoid division by zero
const scale = Constants.maxNodeSize - Constants.minNodeSize;
// First identify connectable nodes (nodes connected to the expanded node)
for (const node of processedNodes) {
// Skip if node already exists
if (existingNodeIds.has(node.id)) {
continue;
}
// Check if this node is connected to the selected node
const isConnected = processedEdges.some(
edge => (edge.source === nodeId && edge.target === node.id) ||
(edge.target === nodeId && edge.source === node.id)
);
if (isConnected) {
nodesToAdd.add(node.id);
}
}
// Calculate node degrees and track discarded edges in one pass
const nodeDegrees = new Map<string, number>();
const nodesWithDiscardedEdges = new Set<string>();
for (const edge of processedEdges) {
const sourceExists = existingNodeIds.has(edge.source) || nodesToAdd.has(edge.source);
const targetExists = existingNodeIds.has(edge.target) || nodesToAdd.has(edge.target);
if (sourceExists && targetExists) {
edgesToAdd.add(edge.id);
// Add degrees for valid edges
if (nodesToAdd.has(edge.source)) {
nodeDegrees.set(edge.source, (nodeDegrees.get(edge.source) || 0) + 1);
}
if (nodesToAdd.has(edge.target)) {
nodeDegrees.set(edge.target, (nodeDegrees.get(edge.target) || 0) + 1);
}
} else {
// Track discarded edges for both new and existing nodes
if (sigmaGraph.hasNode(edge.source)) {
nodesWithDiscardedEdges.add(edge.source);
} else if (nodesToAdd.has(edge.source)) {
nodesWithDiscardedEdges.add(edge.source);
nodeDegrees.set(edge.source, (nodeDegrees.get(edge.source) || 0) + 1); // +1 for discarded edge
}
if (sigmaGraph.hasNode(edge.target)) {
nodesWithDiscardedEdges.add(edge.target);
} else if (nodesToAdd.has(edge.target)) {
nodesWithDiscardedEdges.add(edge.target);
nodeDegrees.set(edge.target, (nodeDegrees.get(edge.target) || 0) + 1); // +1 for discarded edge
}
}
}
// Helper function to update node sizes
const updateNodeSizes = (
sigmaGraph: DirectedGraph,
nodesWithDiscardedEdges: Set<string>,
minDegree: number,
range: number,
scale: number
) => {
for (const nodeId of nodesWithDiscardedEdges) {
if (sigmaGraph.hasNode(nodeId)) {
let newDegree = sigmaGraph.degree(nodeId);
newDegree += 1; // Add +1 for discarded edges
const newSize = Math.round(
Constants.minNodeSize + scale * Math.pow((newDegree - minDegree) / range, 0.5)
);
const currentSize = sigmaGraph.getNodeAttribute(nodeId, 'size');
if (newSize > currentSize) {
sigmaGraph.setNodeAttribute(nodeId, 'size', newSize);
}
}
}
};
// If no new connectable nodes found, show toast and return
if (nodesToAdd.size === 0) {
updateNodeSizes(sigmaGraph, nodesWithDiscardedEdges, minDegree, range, scale);
toast.info(t('graphPanel.propertiesView.node.noNewNodes'));
return;
}
// Update maxDegree with new node degrees
for (const [, degree] of nodeDegrees.entries()) {
maxDegree = Math.max(maxDegree, degree);
}
// SAdd nodes and edges to the graph
// Calculate camera ratio and spread factor once before the loop
const cameraRatio = useGraphStore.getState().sigmaInstance?.getCamera().ratio || 1;
const spreadFactor = Math.max(
Math.sqrt(nodeToExpand.size) * 4, // Base on node size
Math.sqrt(nodesToAdd.size) * 3 // Scale with number of nodes
) / cameraRatio; // Adjust for zoom level
seedrandom(Date.now().toString(), { global: true });
const randomAngle = Math.random() * 2 * Math.PI
console.log('nodeSize:', nodeToExpand.size, 'nodesToAdd:', nodesToAdd.size);
console.log('cameraRatio:', Math.round(cameraRatio*100)/100, 'spreadFactor:', Math.round(spreadFactor*100)/100);
// Add new nodes
for (const nodeId of nodesToAdd) {
const newNode = processedNodes.find(n => n.id === nodeId)!;
const nodeDegree = nodeDegrees.get(nodeId) || 0;
// Calculate node size
const nodeSize = Math.round(
Constants.minNodeSize + scale * Math.pow((nodeDegree - minDegree) / range, 0.5)
);
// Calculate angle for polar coordinates
const angle = 2 * Math.PI * (Array.from(nodesToAdd).indexOf(nodeId) / nodesToAdd.size);
// Calculate final position
const x = nodePositions[nodeId]?.x ||
(nodePositions[nodeToExpand.id].x + Math.cos(randomAngle + angle) * spreadFactor);
const y = nodePositions[nodeId]?.y ||
(nodePositions[nodeToExpand.id].y + Math.sin(randomAngle + angle) * spreadFactor);
// Add the new node to the sigma graph with calculated position
sigmaGraph.addNode(nodeId, {
label: newNode.labels.join(', '),
color: newNode.color,
x: x,
y: y,
size: nodeSize,
borderColor: Constants.nodeBorderColor,
borderSize: 0.2
});
// Add the node to the raw graph
if (!rawGraph.getNode(nodeId)) {
// Update node properties
newNode.size = nodeSize;
newNode.x = x;
newNode.y = y;
newNode.degree = nodeDegree;
// Add to nodes array
rawGraph.nodes.push(newNode);
// Update nodeIdMap
rawGraph.nodeIdMap[nodeId] = rawGraph.nodes.length - 1;
}
}
// Add new edges
for (const edgeId of edgesToAdd) {
const newEdge = processedEdges.find(e => e.id === edgeId)!;
// Skip if edge already exists
if (sigmaGraph.hasEdge(newEdge.source, newEdge.target)) {
continue;
}
if (sigmaGraph.hasEdge(newEdge.target, newEdge.source)) {
continue;
}
// Add the edge to the sigma graph
newEdge.dynamicId = sigmaGraph.addDirectedEdge(newEdge.source, newEdge.target, {
label: newEdge.type || undefined
});
// Add the edge to the raw graph
if (!rawGraph.getEdge(newEdge.id, false)) {
// Add to edges array
rawGraph.edges.push(newEdge);
// Update edgeIdMap
rawGraph.edgeIdMap[newEdge.id] = rawGraph.edges.length - 1;
// Update dynamic edge map
rawGraph.edgeDynamicIdMap[newEdge.dynamicId] = rawGraph.edges.length - 1;
} else {
console.error('Edge already exists in rawGraph:', newEdge.id);
}
}
// Update the dynamic edge map and invalidate search cache
rawGraph.buildDynamicMap();
// Reset search engine to force rebuild
useGraphStore.getState().resetSearchEngine();
// Update sizes for all nodes with discarded edges
updateNodeSizes(sigmaGraph, nodesWithDiscardedEdges, minDegree, range, scale);
} catch (error) {
console.error('Error expanding node:', error);
}
};
// If there's a node to expand, handle it
if (nodeToExpand) {
handleNodeExpand(nodeToExpand);
// Reset the nodeToExpand state after handling
window.setTimeout(() => {
useGraphStore.getState().triggerNodeExpand(null);
}, 0);
}
}, [nodeToExpand, sigmaGraph, rawGraph, t]);
// Helper function to get all nodes that will be deleted
// Compute the full deletion set for a node: the node itself plus every node
// that would be left with zero edges once it is removed.
const getNodesThatWillBeDeleted = useCallback((nodeId: string, graph: DirectedGraph) => {
  const doomed = new Set<string>([nodeId]);
  // Scan every other node; one whose sole neighbor is the node being
  // deleted would become an orphan, so it is scheduled for deletion too.
  for (const candidate of graph.nodes()) {
    if (candidate === nodeId) continue;
    const adjacent = graph.neighbors(candidate);
    if (adjacent.length === 1 && adjacent[0] === nodeId) {
      doomed.add(candidate);
    }
  }
  return doomed;
}, []);
// Handle node pruning
// Handle node pruning: when nodeToPrune is set in the store, remove that node
// (and any nodes that would become isolated) from both the sigma graph and
// the rawGraph bookkeeping structures, keeping the id->index maps consistent.
useEffect(() => {
  const handleNodePrune = (nodeId: string | null) => {
    if (!nodeId || !sigmaGraph || !rawGraph) return;
    try {
      const state = useGraphStore.getState();
      // 1. Verify the node exists in the rendered graph
      if (!sigmaGraph.hasNode(nodeId)) {
        console.error('Node not found:', nodeId);
        return;
      }
      // 2. Collect the target node plus all nodes that would become orphans
      const nodesToDelete = getNodesThatWillBeDeleted(nodeId, sigmaGraph);
      // 3. Refuse to delete every node in the graph
      if (nodesToDelete.size === sigmaGraph.nodes().length) {
        toast.error(t('graphPanel.propertiesView.node.deleteAllNodesError'));
        return;
      }
      // 4. Clear the selection state — this closes the PropertiesView immediately
      state.clearSelection();
      // 5. Delete the nodes and their incident edges
      for (const nodeToDelete of nodesToDelete) {
        // Remove the node from the sigma graph (this will also remove connected edges)
        sigmaGraph.dropNode(nodeToDelete);
        // Remove the node from the raw graph
        const nodeIndex = rawGraph.nodeIdMap[nodeToDelete];
        if (nodeIndex !== undefined) {
          // Find all edges connected to this node
          const edgesToRemove = rawGraph.edges.filter(
            edge => edge.source === nodeToDelete || edge.target === nodeToDelete
          );
          // Remove edges from raw graph
          for (const edge of edgesToRemove) {
            // Look the index up fresh each iteration: earlier splices in this
            // loop shift indices, and the map is kept up to date below.
            const edgeIndex = rawGraph.edgeIdMap[edge.id];
            if (edgeIndex !== undefined) {
              // Remove from edges array
              rawGraph.edges.splice(edgeIndex, 1);
              // Shift the stored index of every edge after the removed one
              for (const [id, idx] of Object.entries(rawGraph.edgeIdMap)) {
                if (idx > edgeIndex) {
                  rawGraph.edgeIdMap[id] = idx - 1;
                }
              }
              // Remove from edgeIdMap
              delete rawGraph.edgeIdMap[edge.id];
              // Remove from edgeDynamicIdMap
              delete rawGraph.edgeDynamicIdMap[edge.dynamicId];
            }
          }
          // Remove node from nodes array
          rawGraph.nodes.splice(nodeIndex, 1);
          // Shift the stored index of every node after the removed one
          for (const [id, idx] of Object.entries(rawGraph.nodeIdMap)) {
            if (idx > nodeIndex) {
              rawGraph.nodeIdMap[id] = idx - 1;
            }
          }
          // Remove from nodeIdMap
          delete rawGraph.nodeIdMap[nodeToDelete];
        }
      }
      // Rebuild the dynamic edge map and invalidate search cache
      rawGraph.buildDynamicMap();
      // Reset search engine to force rebuild
      useGraphStore.getState().resetSearchEngine();
      // Show notification if we deleted more than just the selected node
      if (nodesToDelete.size > 1) {
        toast.info(t('graphPanel.propertiesView.node.nodesRemoved', { count: nodesToDelete.size }));
      }
    } catch (error) {
      console.error('Error pruning node:', error);
    }
  };
  // If there's a node to prune, handle it
  if (nodeToPrune) {
    handleNodePrune(nodeToPrune);
    // Reset the nodeToPrune state after handling; deferred via setTimeout so
    // the store update does not retrigger this effect synchronously.
    window.setTimeout(() => {
      useGraphStore.getState().triggerNodePrune(null);
    }, 0);
  }
}, [nodeToPrune, sigmaGraph, rawGraph, getNodesThatWillBeDeleted, t]);
const lightrageGraph = useCallback(() => { const lightrageGraph = useCallback(() => {
// If we already have a graph instance, return it // If we already have a graph instance, return it

View File

@@ -0,0 +1,35 @@
import i18n from "i18next";
import { initReactI18next } from "react-i18next";
import { useSettingsStore } from "./stores/settings";
import en from "./locales/en.json";
import zh from "./locales/zh.json";
// Read the persisted UI language from the zustand settings snapshot kept in
// localStorage under 'settings-storage'; fall back to English whenever the
// entry is absent or unreadable.
const getStoredLanguage = () => {
  const fallback = 'en';
  try {
    const raw = localStorage.getItem('settings-storage');
    if (!raw) return fallback;
    const persisted = JSON.parse(raw);
    return persisted.state?.language || fallback;
  } catch (e) {
    console.error('Failed to get stored language:', e);
    return fallback;
  }
};
// Initialize i18next for react-i18next with the bundled English and Chinese
// resource files; the starting language comes from the persisted settings.
i18n
  .use(initReactI18next)
  .init({
    resources: {
      en: { translation: en },
      zh: { translation: zh }
    },
    lng: getStoredLanguage(), // use the language persisted in settings storage
    fallbackLng: "en",
    interpolation: {
      escapeValue: false // React already escapes interpolated values
    }
  });

export default i18n;

View File

@@ -1,6 +1,7 @@
import { ButtonVariantType } from '@/components/ui/Button' import { ButtonVariantType } from '@/components/ui/Button'
export const backendBaseUrl = '' export const backendBaseUrl = ''
export const webuiPrefix = '/webui/'
export const controlButtonVariant: ButtonVariantType = 'ghost' export const controlButtonVariant: ButtonVariantType = 'ghost'

View File

@@ -12,11 +12,26 @@
"retrieval": "Retrieval", "retrieval": "Retrieval",
"api": "API", "api": "API",
"projectRepository": "Project Repository", "projectRepository": "Project Repository",
"logout": "Logout",
"themeToggle": { "themeToggle": {
"switchToLight": "Switch to light theme", "switchToLight": "Switch to light theme",
"switchToDark": "Switch to dark theme" "switchToDark": "Switch to dark theme"
} }
}, },
"login": {
"description": "Please enter your account and password to log in to the system",
"username": "Username",
"usernamePlaceholder": "Please input a username",
"password": "Password",
"passwordPlaceholder": "Please input a password",
"loginButton": "Login",
"loggingIn": "Logging in...",
"successMessage": "Login succeeded",
"errorEmptyFields": "Please enter your username and password",
"errorInvalidCredentials": "Login failed, please check username and password",
"authDisabled": "Authentication is disabled. Using login free mode.",
"guestMode": "Login Free"
},
"documentPanel": { "documentPanel": {
"clearDocuments": { "clearDocuments": {
"button": "Clear", "button": "Clear",
@@ -97,12 +112,14 @@
"zoomControl": { "zoomControl": {
"zoomIn": "Zoom In", "zoomIn": "Zoom In",
"zoomOut": "Zoom Out", "zoomOut": "Zoom Out",
"resetZoom": "Reset Zoom" "resetZoom": "Reset Zoom",
"rotateCamera": "Clockwise Rotate",
"rotateCameraCounterClockwise": "Counter-Clockwise Rotate"
}, },
"layoutsControl": { "layoutsControl": {
"startAnimation": "Start the layout animation", "startAnimation": "Continue layout animation",
"stopAnimation": "Stop the layout animation", "stopAnimation": "Stop layout animation",
"layoutGraph": "Layout Graph", "layoutGraph": "Layout Graph",
"layouts": { "layouts": {
"Circular": "Circular", "Circular": "Circular",
@@ -151,6 +168,11 @@
"degree": "Degree", "degree": "Degree",
"properties": "Properties", "properties": "Properties",
"relationships": "Relationships", "relationships": "Relationships",
"expandNode": "Expand Node",
"pruneNode": "Prune Node",
"deleteAllNodesError": "Refuse to delete all nodes in the graph",
"nodesRemoved": "{{count}} nodes removed, including orphan nodes",
"noNewNodes": "No expandable nodes found",
"propertyNames": { "propertyNames": {
"description": "Description", "description": "Description",
"entity_id": "Name", "entity_id": "Name",
@@ -177,7 +199,8 @@
"noLabels": "No labels found", "noLabels": "No labels found",
"label": "Label", "label": "Label",
"placeholder": "Search labels...", "placeholder": "Search labels...",
"andOthers": "And {count} others" "andOthers": "And {count} others",
"refreshTooltip": "Reload graph data"
} }
}, },
"retrievePanel": { "retrievePanel": {

View File

@@ -12,11 +12,26 @@
"retrieval": "检索", "retrieval": "检索",
"api": "API", "api": "API",
"projectRepository": "项目仓库", "projectRepository": "项目仓库",
"logout": "退出登录",
"themeToggle": { "themeToggle": {
"switchToLight": "切换到浅色主题", "switchToLight": "切换到浅色主题",
"switchToDark": "切换到深色主题" "switchToDark": "切换到深色主题"
} }
}, },
"login": {
"description": "请输入您的账号和密码登录系统",
"username": "用户名",
"usernamePlaceholder": "请输入用户名",
"password": "密码",
"passwordPlaceholder": "请输入密码",
"loginButton": "登录",
"loggingIn": "登录中...",
"successMessage": "登录成功",
"errorEmptyFields": "请输入您的用户名和密码",
"errorInvalidCredentials": "登录失败,请检查用户名和密码",
"authDisabled": "认证已禁用,使用无需登陆模式。",
"guestMode": "无需登陆"
},
"documentPanel": { "documentPanel": {
"clearDocuments": { "clearDocuments": {
"button": "清空", "button": "清空",
@@ -84,7 +99,7 @@
"hideUnselectedEdges": "隐藏未选中的边", "hideUnselectedEdges": "隐藏未选中的边",
"edgeEvents": "边事件", "edgeEvents": "边事件",
"maxQueryDepth": "最大查询深度", "maxQueryDepth": "最大查询深度",
"minDegree": "最小数", "minDegree": "最小邻边数",
"maxLayoutIterations": "最大布局迭代次数", "maxLayoutIterations": "最大布局迭代次数",
"depth": "深度", "depth": "深度",
"degree": "邻边", "degree": "邻边",
@@ -96,10 +111,12 @@
"zoomControl": { "zoomControl": {
"zoomIn": "放大", "zoomIn": "放大",
"zoomOut": "缩小", "zoomOut": "缩小",
"resetZoom": "重置缩放" "resetZoom": "重置缩放",
"rotateCamera": "顺时针旋转图形",
"rotateCameraCounterClockwise": "逆时针旋转图形"
}, },
"layoutsControl": { "layoutsControl": {
"startAnimation": "开始布局动画", "startAnimation": "继续布局动画",
"stopAnimation": "停止布局动画", "stopAnimation": "停止布局动画",
"layoutGraph": "图布局", "layoutGraph": "图布局",
"layouts": { "layouts": {
@@ -108,7 +125,7 @@
"Random": "随机", "Random": "随机",
"Noverlaps": "无重叠", "Noverlaps": "无重叠",
"Force Directed": "力导向", "Force Directed": "力导向",
"Force Atlas": "力图" "Force Atlas": "力图"
} }
}, },
"fullScreenControl": { "fullScreenControl": {
@@ -148,6 +165,11 @@
"degree": "度数", "degree": "度数",
"properties": "属性", "properties": "属性",
"relationships": "关系", "relationships": "关系",
"expandNode": "扩展节点",
"pruneNode": "修剪节点",
"deleteAllNodesError": "拒绝删除图中的所有节点",
"nodesRemoved": "已删除 {{count}} 个节点,包括孤立节点",
"noNewNodes": "没有发现可以扩展的节点",
"propertyNames": { "propertyNames": {
"description": "描述", "description": "描述",
"entity_id": "名称", "entity_id": "名称",
@@ -174,7 +196,8 @@
"noLabels": "未找到标签", "noLabels": "未找到标签",
"label": "标签", "label": "标签",
"placeholder": "搜索标签...", "placeholder": "搜索标签...",
"andOthers": "还有 {count} 个" "andOthers": "还有 {count} 个",
"refreshTooltip": "重新加载图形数据"
} }
}, },
"retrievePanel": { "retrievePanel": {

View File

@@ -1,5 +1,13 @@
import { StrictMode } from 'react'
import { createRoot } from 'react-dom/client' import { createRoot } from 'react-dom/client'
import './index.css' import './index.css'
import { Root } from '@/components/Root' import AppRouter from './AppRouter'
import './i18n';
createRoot(document.getElementById('root')!).render(<Root />)
createRoot(document.getElementById('root')!).render(
<StrictMode>
<AppRouter />
</StrictMode>
)

View File

@@ -0,0 +1,90 @@
import { NavigateFunction } from 'react-router-dom';
import { useAuthStore, useBackendState } from '@/stores/state';
import { useGraphStore } from '@/stores/graph';
import { useSettingsStore } from '@/stores/settings';
/**
 * Centralized navigation helper. Owns the react-router navigate function
 * (injected by the router layer) and resets application state around
 * login/logout transitions so stale graph/auth data never leaks across
 * sessions.
 */
class NavigationService {
  // Injected via setNavigate; null until the router has mounted.
  private navigate: NavigateFunction | null = null;

  setNavigate(navigate: NavigateFunction) {
    this.navigate = navigate;
  }

  /**
   * Reset all application state to ensure a clean environment.
   * This function should be called when:
   * 1. User logs out
   * 2. Authentication token expires
   * 3. Direct access to login page
   */
  resetAllApplicationState() {
    console.log('Resetting all application state...');

    // Reset graph state (capture the sigma renderer first — reset() clears
    // the store fields it lives in)
    const graphStore = useGraphStore.getState();
    const sigma = graphStore.sigmaInstance;
    graphStore.reset();
    graphStore.setGraphDataFetchAttempted(false);
    graphStore.setLabelsFetchAttempted(false);
    graphStore.setSigmaInstance(null);
    graphStore.setIsFetching(false); // Reset isFetching state to prevent data loading issues

    // Reset backend state
    useBackendState.getState().clear();

    // Reset retrieval history while preserving other user preferences
    useSettingsStore.getState().setRetrievalHistory([]);

    // Clear authentication state
    sessionStorage.clear();

    // Tear down the captured sigma renderer and its graph
    if (sigma) {
      sigma.getGraph().clear();
      sigma.kill();
      // NOTE(review): sigmaInstance was already set to null above — this
      // second call looks redundant; confirm before removing.
      useGraphStore.getState().setSigmaInstance(null);
    }
  }

  /**
   * Handle direct access to login page.
   * An empty document.referrer indicates the page was loaded directly
   * (address bar / bookmark) rather than via in-app navigation.
   * @returns true if it's a direct access, false if navigated from another page
   */
  handleDirectLoginAccess() {
    const isDirectAccess = !document.referrer;
    if (isDirectAccess) {
      this.resetAllApplicationState();
    }
    return isDirectAccess;
  }

  /**
   * Navigate to the login page, then reset application state and log out.
   * The reset is deferred with setTimeout so it runs after the route
   * change has been committed.
   */
  navigateToLogin() {
    if (!this.navigate) {
      console.error('Navigation function not set');
      return;
    }

    // First navigate to login page
    this.navigate('/login');

    // Then reset state after navigation
    setTimeout(() => {
      this.resetAllApplicationState();
      useAuthStore.getState().logout();
    }, 0);
  }

  /** Navigate to the application home route. */
  navigateToHome() {
    if (!this.navigate) {
      console.error('Navigation function not set');
      return;
    }
    this.navigate('/');
  }
}

// Shared singleton used throughout the app.
export const navigationService = new NavigationService();

View File

@@ -2,6 +2,7 @@ import { create } from 'zustand'
import { createSelectors } from '@/lib/utils' import { createSelectors } from '@/lib/utils'
import { DirectedGraph } from 'graphology' import { DirectedGraph } from 'graphology'
import { getGraphLabels } from '@/api/lightrag' import { getGraphLabels } from '@/api/lightrag'
import MiniSearch from 'minisearch'
export type RawNodeType = { export type RawNodeType = {
id: string id: string
@@ -66,17 +67,19 @@ interface GraphState {
rawGraph: RawGraph | null rawGraph: RawGraph | null
sigmaGraph: DirectedGraph | null sigmaGraph: DirectedGraph | null
sigmaInstance: any | null
allDatabaseLabels: string[] allDatabaseLabels: string[]
searchEngine: MiniSearch | null
moveToSelectedNode: boolean moveToSelectedNode: boolean
isFetching: boolean isFetching: boolean
shouldRender: boolean
// Global flags to track data fetching attempts // Global flags to track data fetching attempts
graphDataFetchAttempted: boolean graphDataFetchAttempted: boolean
labelsFetchAttempted: boolean labelsFetchAttempted: boolean
refreshLayout: () => void setSigmaInstance: (instance: any) => void
setSelectedNode: (nodeId: string | null, moveToSelectedNode?: boolean) => void setSelectedNode: (nodeId: string | null, moveToSelectedNode?: boolean) => void
setFocusedNode: (nodeId: string | null) => void setFocusedNode: (nodeId: string | null) => void
setSelectedEdge: (edgeId: string | null) => void setSelectedEdge: (edgeId: string | null) => void
@@ -91,14 +94,25 @@ interface GraphState {
setAllDatabaseLabels: (labels: string[]) => void setAllDatabaseLabels: (labels: string[]) => void
fetchAllDatabaseLabels: () => Promise<void> fetchAllDatabaseLabels: () => Promise<void>
setIsFetching: (isFetching: boolean) => void setIsFetching: (isFetching: boolean) => void
setShouldRender: (shouldRender: boolean) => void
// 搜索引擎方法
setSearchEngine: (engine: MiniSearch | null) => void
resetSearchEngine: () => void
// Methods to set global flags // Methods to set global flags
setGraphDataFetchAttempted: (attempted: boolean) => void setGraphDataFetchAttempted: (attempted: boolean) => void
setLabelsFetchAttempted: (attempted: boolean) => void setLabelsFetchAttempted: (attempted: boolean) => void
// Event trigger methods for node operations
triggerNodeExpand: (nodeId: string | null) => void
triggerNodePrune: (nodeId: string | null) => void
// Node operation state
nodeToExpand: string | null
nodeToPrune: string | null
} }
const useGraphStoreBase = create<GraphState>()((set, get) => ({ const useGraphStoreBase = create<GraphState>()((set) => ({
selectedNode: null, selectedNode: null,
focusedNode: null, focusedNode: null,
selectedEdge: null, selectedEdge: null,
@@ -106,7 +120,6 @@ const useGraphStoreBase = create<GraphState>()((set, get) => ({
moveToSelectedNode: false, moveToSelectedNode: false,
isFetching: false, isFetching: false,
shouldRender: false,
// Initialize global flags // Initialize global flags
graphDataFetchAttempted: false, graphDataFetchAttempted: false,
@@ -114,21 +127,13 @@ const useGraphStoreBase = create<GraphState>()((set, get) => ({
rawGraph: null, rawGraph: null,
sigmaGraph: null, sigmaGraph: null,
sigmaInstance: null,
allDatabaseLabels: ['*'], allDatabaseLabels: ['*'],
refreshLayout: () => { searchEngine: null,
const currentGraph = get().sigmaGraph;
if (currentGraph) {
get().clearSelection();
get().setSigmaGraph(null);
setTimeout(() => {
get().setSigmaGraph(currentGraph);
}, 10);
}
},
setIsFetching: (isFetching: boolean) => set({ isFetching }), setIsFetching: (isFetching: boolean) => set({ isFetching }),
setShouldRender: (shouldRender: boolean) => set({ shouldRender }),
setSelectedNode: (nodeId: string | null, moveToSelectedNode?: boolean) => setSelectedNode: (nodeId: string | null, moveToSelectedNode?: boolean) =>
set({ selectedNode: nodeId, moveToSelectedNode }), set({ selectedNode: nodeId, moveToSelectedNode }),
setFocusedNode: (nodeId: string | null) => set({ focusedNode: nodeId }), setFocusedNode: (nodeId: string | null) => set({ focusedNode: nodeId }),
@@ -142,24 +147,15 @@ const useGraphStoreBase = create<GraphState>()((set, get) => ({
focusedEdge: null focusedEdge: null
}), }),
reset: () => { reset: () => {
// Get the existing graph
const existingGraph = get().sigmaGraph;
// If we have an existing graph, clear it by removing all nodes
if (existingGraph) {
const nodes = Array.from(existingGraph.nodes());
nodes.forEach(node => existingGraph.dropNode(node));
}
set({ set({
selectedNode: null, selectedNode: null,
focusedNode: null, focusedNode: null,
selectedEdge: null, selectedEdge: null,
focusedEdge: null, focusedEdge: null,
rawGraph: null, rawGraph: null,
// Keep the existing graph instance but with cleared data sigmaGraph: null, // to avoid other components from acccessing graph objects
moveToSelectedNode: false, searchEngine: null,
shouldRender: false moveToSelectedNode: false
}); });
}, },
@@ -190,9 +186,23 @@ const useGraphStoreBase = create<GraphState>()((set, get) => ({
setMoveToSelectedNode: (moveToSelectedNode?: boolean) => set({ moveToSelectedNode }), setMoveToSelectedNode: (moveToSelectedNode?: boolean) => set({ moveToSelectedNode }),
setSigmaInstance: (instance: any) => set({ sigmaInstance: instance }),
setSearchEngine: (engine: MiniSearch | null) => set({ searchEngine: engine }),
resetSearchEngine: () => set({ searchEngine: null }),
// Methods to set global flags // Methods to set global flags
setGraphDataFetchAttempted: (attempted: boolean) => set({ graphDataFetchAttempted: attempted }), setGraphDataFetchAttempted: (attempted: boolean) => set({ graphDataFetchAttempted: attempted }),
setLabelsFetchAttempted: (attempted: boolean) => set({ labelsFetchAttempted: attempted }) setLabelsFetchAttempted: (attempted: boolean) => set({ labelsFetchAttempted: attempted }),
// Node operation state
nodeToExpand: null,
nodeToPrune: null,
// Event trigger methods for node operations
triggerNodeExpand: (nodeId: string | null) => set({ nodeToExpand: nodeId }),
triggerNodePrune: (nodeId: string | null) => set({ nodeToPrune: nodeId }),
})) }))
const useGraphStore = createSelectors(useGraphStoreBase) const useGraphStore = createSelectors(useGraphStoreBase)

View File

@@ -16,6 +16,13 @@ interface BackendState {
setErrorMessage: (message: string, messageTitle: string) => void setErrorMessage: (message: string, messageTitle: string) => void
} }
interface AuthState {
isAuthenticated: boolean;
isGuestMode: boolean; // Add guest mode flag
login: (token: string, isGuest?: boolean) => void;
logout: () => void;
}
const useBackendStateStoreBase = create<BackendState>()((set) => ({ const useBackendStateStoreBase = create<BackendState>()((set) => ({
health: true, health: true,
message: null, message: null,
@@ -57,3 +64,60 @@ const useBackendStateStoreBase = create<BackendState>()((set) => ({
const useBackendState = createSelectors(useBackendStateStoreBase) const useBackendState = createSelectors(useBackendStateStoreBase)
export { useBackendState } export { useBackendState }
// Helper function to check if token is a guest token.
// Returns true only when the token is a well-formed JWT whose payload
// carries role === 'guest'; any malformed/undecodable token yields false.
const isGuestToken = (token: string): boolean => {
  try {
    // JWT tokens are in the format: header.payload.signature
    const parts = token.split('.');
    if (parts.length !== 3) return false;

    // JWT segments are base64url-encoded (RFC 7515): '-'/'_' instead of
    // '+'/'/', with '=' padding stripped. atob() only accepts standard
    // base64, so normalize first — otherwise valid guest tokens whose
    // payload contains '-'/'_' (or lacks padding) would throw here and be
    // misclassified as non-guest.
    const base64 = parts[1].replace(/-/g, '+').replace(/_/g, '/');
    const padded = base64 + '='.repeat((4 - (base64.length % 4)) % 4);

    // Decode the payload (second part)
    const payload = JSON.parse(atob(padded));

    // Check if the token has a role field with value "guest"
    return payload.role === 'guest';
  } catch (e) {
    console.error('Error parsing token:', e);
    return false;
  }
};
// Derive the initial auth flags from any API token persisted in localStorage,
// so a page reload keeps the session (and its guest-ness) intact.
const initAuthState = (): { isAuthenticated: boolean; isGuestMode: boolean } => {
  const storedToken = localStorage.getItem('LIGHTRAG-API-TOKEN');
  return storedToken
    ? { isAuthenticated: true, isGuestMode: isGuestToken(storedToken) }
    : { isAuthenticated: false, isGuestMode: false };
};
// Global authentication store. Initial state is hydrated once from
// localStorage so a page reload keeps the user logged in; login/logout keep
// the persisted token and the in-memory flags in sync.
export const useAuthStore = create<AuthState>(set => {
  // Get initial state from localStorage
  const initialState = initAuthState();

  return {
    isAuthenticated: initialState.isAuthenticated,
    isGuestMode: initialState.isGuestMode,

    // Persist the token and mark the session authenticated; isGuest marks a
    // login-free (guest) session.
    login: (token, isGuest = false) => {
      localStorage.setItem('LIGHTRAG-API-TOKEN', token);
      set({
        isAuthenticated: true,
        isGuestMode: isGuest
      });
    },

    // Drop the persisted token and clear both auth flags.
    logout: () => {
      localStorage.removeItem('LIGHTRAG-API-TOKEN');
      set({
        isAuthenticated: false,
        isGuestMode: false
      });
    }
  };
});

View File

@@ -26,5 +26,5 @@
"@/*": ["./src/*"] "@/*": ["./src/*"]
} }
}, },
"include": ["src", "vite.config.ts"] "include": ["src", "vite.config.ts", "src/vite-env.d.ts"]
} }

View File

@@ -1,6 +1,6 @@
import { defineConfig } from 'vite' import { defineConfig } from 'vite'
import path from 'path' import path from 'path'
import { webuiPrefix } from '@/lib/constants'
import react from '@vitejs/plugin-react-swc' import react from '@vitejs/plugin-react-swc'
import tailwindcss from '@tailwindcss/vite' import tailwindcss from '@tailwindcss/vite'
@@ -12,7 +12,8 @@ export default defineConfig({
'@': path.resolve(__dirname, './src') '@': path.resolve(__dirname, './src')
} }
}, },
base: './', // base: import.meta.env.VITE_BASE_URL || '/webui/',
base: webuiPrefix,
build: { build: {
outDir: path.resolve(__dirname, '../lightrag/api/webui'), outDir: path.resolve(__dirname, '../lightrag/api/webui'),
emptyOutDir: true emptyOutDir: true