Merge branch 'improve-property-tooltip' into loginPage

This commit is contained in:
choizhang
2025-03-15 00:11:50 +08:00
57 changed files with 1822 additions and 606 deletions

.gitattributes vendored Normal file

@@ -0,0 +1 @@
lightrag/api/webui/** -diff

.gitignore vendored

@@ -64,3 +64,6 @@ gui/
# unit-test files
test_*
# Cline files
memory-bank/


@@ -3,16 +3,21 @@ repos:
rev: v5.0.0
hooks:
- id: trailing-whitespace
exclude: ^lightrag/api/webui/
- id: end-of-file-fixer
exclude: ^lightrag/api/webui/
- id: requirements-txt-fixer
exclude: ^lightrag/api/webui/
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.6.4
hooks:
- id: ruff-format
exclude: ^lightrag/api/webui/
- id: ruff
args: [--fix, --ignore=E402]
exclude: ^lightrag/api/webui/
- repo: https://github.com/mgedmin/check-manifest
@@ -20,3 +25,4 @@ repos:
hooks:
- id: check-manifest
stages: [manual]
exclude: ^lightrag/api/webui/

README.md

@@ -37,28 +37,30 @@ This repository hosts the code of LightRAG. The structure of this code is based
</br>
<details>
<summary style="font-size: 1.4em; font-weight: bold; cursor: pointer; display: list-item;">
🎉 News
</summary>
- [X] [2025.02.05]🎯📢Our team has released [VideoRAG](https://github.com/HKUDS/VideoRAG) understanding extremely long-context videos.
- [X] [2025.01.13]🎯📢Our team has released [MiniRAG](https://github.com/HKUDS/MiniRAG) making RAG simpler with small models.
- [X] [2025.01.06]🎯📢You can now [use PostgreSQL for Storage](#using-postgresql-for-storage).
- [X] [2024.12.31]🎯📢LightRAG now supports [deletion by document ID](https://github.com/HKUDS/LightRAG?tab=readme-ov-file#delete).
- [X] [2024.11.25]🎯📢LightRAG now supports seamless integration of [custom knowledge graphs](https://github.com/HKUDS/LightRAG?tab=readme-ov-file#insert-custom-kg), empowering users to enhance the system with their own domain expertise.
- [X] [2024.11.19]🎯📢A comprehensive guide to LightRAG is now available on [LearnOpenCV](https://learnopencv.com/lightrag). Many thanks to the blog author.
- [X] [2024.11.12]🎯📢LightRAG now supports [Oracle Database 23ai for all storage types (KV, vector, and graph)](https://github.com/HKUDS/LightRAG/blob/main/examples/lightrag_oracle_demo.py).
- [X] [2024.11.11]🎯📢LightRAG now supports [deleting entities by their names](https://github.com/HKUDS/LightRAG?tab=readme-ov-file#delete).
- [X] [2024.11.09]🎯📢Introducing the [LightRAG Gui](https://lightrag-gui.streamlit.app), which allows you to insert, query, visualize, and download LightRAG knowledge.
- [X] [2024.11.04]🎯📢You can now [use Neo4J for Storage](https://github.com/HKUDS/LightRAG?tab=readme-ov-file#using-neo4j-for-storage).
- [X] [2024.10.29]🎯📢LightRAG now supports multiple file types, including PDF, DOC, PPT, and CSV via `textract`.
- [X] [2024.10.20]🎯📢We've added a new feature to LightRAG: Graph Visualization.
- [X] [2024.10.18]🎯📢We've added a link to a [LightRAG Introduction Video](https://youtu.be/oageL-1I0GE). Thanks to the author!
- [X] [2024.10.17]🎯📢We have created a [Discord channel](https://discord.gg/yF2MmDJyGJ)! Welcome to join for sharing and discussions! 🎉🎉
- [X] [2024.10.16]🎯📢LightRAG now supports [Ollama models](https://github.com/HKUDS/LightRAG?tab=readme-ov-file#quick-start)!
- [X] [2024.10.15]🎯📢LightRAG now supports [Hugging Face models](https://github.com/HKUDS/LightRAG?tab=readme-ov-file#quick-start)!
</details>
@@ -82,16 +84,20 @@ This repository hosts the code of LightRAG. The structure of this code is based
cd LightRAG
pip install -e .
```
* Install from PyPI
```bash
pip install lightrag-hku
```
## Quick Start
* [Video demo](https://www.youtube.com/watch?v=g21royNJ4fw) of running LightRAG locally.
* All the code can be found in the `examples` directory.
* Set your OpenAI API key in the environment if using OpenAI models: `export OPENAI_API_KEY="sk-..."`.
* Download the demo text "A Christmas Carol by Charles Dickens":
```bash
curl https://raw.githubusercontent.com/gusye1234/nano-graphrag/main/tests/mock_data.txt > ./book.txt
```
@@ -187,6 +193,7 @@ class QueryParam:
<summary> <b>Using OpenAI-like APIs</b> </summary>
* LightRAG also supports OpenAI-like chat/embedding APIs:
```python
async def llm_model_func(
prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
@@ -225,6 +232,7 @@ async def initialize_rag():
return rag
```
</details>
<details>
@@ -252,12 +260,14 @@ rag = LightRAG(
),
)
```
</details>
<details>
<summary> <b>Using Ollama Models</b> </summary>
### Overview
If you want to use Ollama models, you need to pull the model you plan to use as well as an embedding model, for example `nomic-embed-text`.
Then you only need to set LightRAG as follows:
@@ -281,31 +291,37 @@ rag = LightRAG(
```
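For reference, a minimal configuration sketch (assuming `ollama_model_complete` and `ollama_embed` from `lightrag.llm.ollama` and `EmbeddingFunc` from `lightrag.utils`; adjust the names to your installed version):

```python
from lightrag import LightRAG
from lightrag.llm.ollama import ollama_model_complete, ollama_embed
from lightrag.utils import EmbeddingFunc

rag = LightRAG(
    working_dir="./dickens",
    llm_model_func=ollama_model_complete,
    llm_model_name="qwen2",
    embedding_func=EmbeddingFunc(
        embedding_dim=768,  # nomic-embed-text produces 768-dimensional vectors
        max_token_size=8192,
        func=lambda texts: ollama_embed(texts, embed_model="nomic-embed-text"),
    ),
)
```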
### Increasing context size
For LightRAG to work properly, the context size should be at least 32k tokens. By default, Ollama models have a context size of 8k. You can increase it in one of two ways:
#### Increasing the `num_ctx` parameter in the Modelfile
1. Pull the model:
```bash
ollama pull qwen2
```
2. Display the model file:
```bash
ollama show --modelfile qwen2 > Modelfile
```
3. Edit the Modelfile by adding the following line:
```bash
PARAMETER num_ctx 32768
```
4. Create the modified model:
```bash
ollama create -f Modelfile qwen2m
```
#### Setting `num_ctx` via the Ollama API
You can use the `llm_model_kwargs` parameter to configure Ollama:
```python
@@ -325,6 +341,7 @@ rag = LightRAG(
),
)
```
#### Low RAM GPUs
To run this experiment on a low-RAM GPU, you should select a small model and tune the context window (increasing the context increases memory consumption). For example, running this Ollama example on a repurposed mining GPU with 6 GB of RAM required setting the context size to 26k while using `gemma2:2b`. It was able to find 197 entities and 19 relations on `book.txt`.
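A hedged sketch of such a low-VRAM setup (parameter names taken from the examples and the parameter table below; the exact values depend on your GPU):

```python
rag = LightRAG(
    working_dir="./dickens",
    llm_model_func=ollama_model_complete,
    llm_model_name="gemma2:2b",
    llm_model_max_token_size=26000,                    # keep LightRAG's own token budget in sync
    llm_model_kwargs={"options": {"num_ctx": 26000}},  # Ollama context window
    # embedding_func=...  (configure embeddings as in the Ollama example above)
)
```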
@@ -402,6 +419,7 @@ if __name__ == "__main__":
```
#### For detailed documentation and examples, see:
- [LlamaIndex Documentation](lightrag/llm/Readme.md)
- [Direct OpenAI Example](examples/lightrag_llamaindex_direct_demo.py)
- [LiteLLM Proxy Example](examples/lightrag_llamaindex_litellm_demo.py)
@@ -483,13 +501,16 @@ print(response_custom)
We've introduced a new function `query_with_separate_keyword_extraction` to enhance the keyword extraction capabilities. This function separates the keyword extraction process from the user's prompt, focusing solely on the query to improve the relevance of extracted keywords.
##### How It Works
The function operates by dividing the input into two parts:
- `User Query`
- `Prompt`
It then performs keyword extraction exclusively on the `user query`. This separation ensures that the extraction process is focused and relevant, unaffected by any additional language in the `prompt`. It also allows the `prompt` to serve purely for response formatting, maintaining the intent and clarity of the user's original question.
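A minimal usage sketch (the method name comes from this section; the exact signature is an assumption):

```python
# Keywords are extracted from `query` only; `prompt` only shapes the final answer.
response = rag.query_with_separate_keyword_extraction(
    query="Explain the law of gravity",
    prompt="Provide a detailed explanation suitable for high school students studying physics.",
    param=QueryParam(mode="hybrid"),
)
print(response)
```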
##### Usage Example
This example shows how to tailor the function for educational content, focusing on detailed explanations for older students.
```python
@@ -563,6 +584,7 @@ custom_kg = {
rag.insert_custom_kg(custom_kg)
```
</details>
## Insert
@@ -593,6 +615,7 @@ rag.insert(["TEXT1", "TEXT2", "TEXT3", ...]) # Documents will be processed in b
```
The `insert_batch_size` parameter in `addon_params` controls how many documents are processed in each batch during insertion (see the sketch after this list). This is useful for:
- Managing memory usage with large document collections
- Optimizing processing speed
- Providing better progress tracking
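A small sketch of setting the batch size (assuming `gpt_4o_mini_complete` from `lightrag.llm.openai` as the LLM function):

```python
from lightrag import LightRAG
from lightrag.llm.openai import gpt_4o_mini_complete

rag = LightRAG(
    working_dir="./dickens",
    llm_model_func=gpt_4o_mini_complete,
    addon_params={"insert_batch_size": 4},  # process 4 documents per batch
)
rag.insert(["TEXT1", "TEXT2", "TEXT3", "TEXT4", "TEXT5"])  # inserted in batches of 4
```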
@@ -647,6 +670,7 @@ text_content = textract.process(file_path)
rag.insert(text_content.decode('utf-8'))
```
</details>
## Storage
@@ -685,6 +709,7 @@ async def initialize_rag():
return rag
```
See `test_neo4j.py` for a working example.
</details>
@@ -693,6 +718,7 @@ see test_neo4j.py for a working example.
<summary> <b>Using PostgreSQL for Storage</b> </summary>
For production-level scenarios, you will most likely want to leverage an enterprise solution. PostgreSQL can provide a one-stop solution for you as a KV store, vector DB (pgvector), and graph DB (Apache AGE).
* PostgreSQL is lightweight; the whole binary distribution, including all necessary plugins, can be zipped to 40MB. See the [Windows Release](https://github.com/ShanGor/apache-age-windows/releases/tag/PG17%2Fv1.5.0-rc0); it is also easy to install on Linux/Mac.
* If you prefer Docker and are a beginner, start with this image to avoid hiccups (do read the overview): https://hub.docker.com/r/shangor/postgres-for-rag
* How to start? See [examples/lightrag_zhipu_postgres_demo.py](https://github.com/HKUDS/LightRAG/blob/main/examples/lightrag_zhipu_postgres_demo.py). A configuration sketch is shown below.
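A configuration sketch, assuming the PostgreSQL storage implementations bundled with LightRAG (`PGKVStorage`, `PGVectorStorage`, `PGGraphStorage`, `PGDocStatusStorage`; names may vary by version) and connection settings supplied via `config.ini` or environment variables:

```python
rag = LightRAG(
    working_dir="./pg_cache",
    llm_model_func=gpt_4o_mini_complete,
    kv_storage="PGKVStorage",
    doc_status_storage="PGDocStatusStorage",
    vector_storage="PGVectorStorage",
    graph_storage="PGGraphStorage",
)
```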
@@ -735,6 +761,7 @@ For production level scenarios you will most likely want to leverage an enterpri
> It is a known issue of the release version: https://github.com/apache/age/pull/1721
>
> You can compile AGE from source code to fix it.
>
</details>
@@ -742,9 +769,11 @@ For production level scenarios you will most likely want to leverage an enterpri
<summary> <b>Using Faiss for Storage</b> </summary>
- Install the required dependencies:
```
pip install faiss-cpu
```
You can also install `faiss-gpu` if you have GPU support.
- Here we are using `sentence-transformers`, but you can also use the `OpenAIEmbedding` model with `3072` dimensions, as sketched below.
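A sketch of such a setup (the storage class name matches `FaissVectorDBStorage` in `lightrag/kg`; the threshold key comes from the parameter table below; the embedding model is only an example):

```python
import numpy as np
from sentence_transformers import SentenceTransformer
from lightrag import LightRAG
from lightrag.llm.openai import gpt_4o_mini_complete
from lightrag.utils import EmbeddingFunc

model = SentenceTransformer("all-MiniLM-L6-v2")

async def embedding_func(texts: list[str]) -> np.ndarray:
    return model.encode(texts, convert_to_numpy=True)

rag = LightRAG(
    working_dir="./dickens",
    llm_model_func=gpt_4o_mini_complete,
    embedding_func=EmbeddingFunc(
        embedding_dim=384,  # all-MiniLM-L6-v2 output dimension
        max_token_size=8192,
        func=embedding_func,
    ),
    vector_storage="FaissVectorDBStorage",
    vector_db_storage_cls_kwargs={"cosine_better_than_threshold": 0.3},
)
```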
@@ -810,6 +839,7 @@ relation = rag.create_relation("Google", "Gmail", {
"weight": 2.0
})
```
</details>
<details>
@@ -835,6 +865,7 @@ updated_relation = rag.edit_relation("Google", "Google Mail", {
"weight": 3.0
})
```
</details>
All operations are available in both synchronous and asynchronous versions. The asynchronous versions have the prefix "a" (e.g., `acreate_entity`, `aedit_relation`).
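For example (a sketch; argument shapes follow the editing examples above):

```python
# Synchronous call
entity = rag.create_entity("Google", {"description": "Technology company", "entity_type": "organization"})

# Asynchronous equivalent, prefixed with "a"
async def add_entity():
    return await rag.acreate_entity("Google", {"description": "Technology company", "entity_type": "organization"})
```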
@@ -851,6 +882,55 @@ All operations are available in both synchronous and asynchronous versions. The
These operations maintain data consistency across both the graph database and vector database components, ensuring your knowledge graph remains coherent.
## Data Export Functions
### Overview
LightRAG allows you to export your knowledge graph data in various formats for analysis, sharing, and backup purposes. The system supports exporting entities, relations, and relationship data.
### Export Functions
#### Basic Usage
```python
# Basic CSV export (default format)
rag.export_data("knowledge_graph.csv")
# Specify any format
rag.export_data("output.xlsx", file_format="excel")
```
#### Different File Formats Supported
```python
# Export data in CSV format
rag.export_data("graph_data.csv", file_format="csv")
# Export data in Excel sheet
rag.export_data("graph_data.xlsx", file_format="excel")
# Export data in markdown format
rag.export_data("graph_data.md", file_format="md")
# Export data in Text
rag.export_data("graph_data.txt", file_format="txt")
```
### Additional Options
Include vector embeddings in the export (optional):
```python
rag.export_data("complete_data.csv", include_vector_data=True)
```
### Data Included in Export
All exports include:
* Entity information (names, IDs, metadata)
* Relation data (connections between entities)
* Relationship information from vector database
## Entity Merging
<details>
@@ -913,6 +993,7 @@ rag.merge_entities(
```
When merging entities:
* All relationships from source entities are redirected to the target entity
* Duplicate relationships are intelligently merged
* Self-relationships (loops) are prevented
@@ -946,6 +1027,7 @@ rag.clear_cache(modes=["local"])
```
Valid modes are:
- `"default"`: Extraction cache
- `"naive"`: Naive search cache
- `"local"`: Local search cache
@@ -960,33 +1042,33 @@ Valid modes are:
<details>
<summary> Parameters </summary>
| **Parameter** | **Type** | **Explanation** | **Default** |
| -------------------------------------------------- | ----------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- |
| **working\_dir** | `str` | Directory where the cache will be stored | `lightrag_cache+timestamp` |
| **kv\_storage** | `str` | Storage type for documents and text chunks. Supported types: `JsonKVStorage`, `OracleKVStorage` | `JsonKVStorage` |
| **vector\_storage** | `str` | Storage type for embedding vectors. Supported types: `NanoVectorDBStorage`, `OracleVectorDBStorage` | `NanoVectorDBStorage` |
| **graph\_storage** | `str` | Storage type for graph edges and nodes. Supported types: `NetworkXStorage`, `Neo4JStorage`, `OracleGraphStorage` | `NetworkXStorage` |
| **chunk\_token\_size** | `int` | Maximum token size per chunk when splitting documents | `1200` |
| **chunk\_overlap\_token\_size** | `int` | Overlap token size between two chunks when splitting documents | `100` |
| **tiktoken\_model\_name** | `str` | Model name for the Tiktoken encoder used to calculate token numbers | `gpt-4o-mini` |
| **entity\_extract\_max\_gleaning** | `int` | Number of loops in the entity extraction process, appending history messages | `1` |
| **entity\_summary\_to\_max\_tokens** | `int` | Maximum token size for each entity summary | `500` |
| **node\_embedding\_algorithm** | `str` | Algorithm for node embedding (currently not used) | `node2vec` |
| **node2vec\_params** | `dict` | Parameters for node embedding | `{"dimensions": 1536,"num_walks": 10,"walk_length": 40,"window_size": 2,"iterations": 3,"random_seed": 3,}` |
| **embedding\_func** | `EmbeddingFunc` | Function to generate embedding vectors from text | `openai_embed` |
| **embedding\_batch\_num** | `int` | Maximum batch size for embedding processes (multiple texts sent per batch) | `32` |
| **embedding\_func\_max\_async** | `int` | Maximum number of concurrent asynchronous embedding processes | `16` |
| **llm\_model\_func** | `callable` | Function for LLM generation | `gpt_4o_mini_complete` |
| **llm\_model\_name** | `str` | LLM model name for generation | `meta-llama/Llama-3.2-1B-Instruct` |
| **llm\_model\_max\_token\_size** | `int` | Maximum token size for LLM generation (affects entity relation summaries) | `32768` (default value changed by env var `MAX_TOKENS`) |
| **llm\_model\_max\_async** | `int` | Maximum number of concurrent asynchronous LLM processes | `16` (default value changed by env var `MAX_ASYNC`) |
| **llm\_model\_kwargs** | `dict` | Additional parameters for LLM generation | |
| **vector\_db\_storage\_cls\_kwargs** | `dict` | Additional parameters for vector database, like setting the threshold for nodes and relations retrieval. | `cosine_better_than_threshold: 0.2` (default value changed by env var `COSINE_THRESHOLD`) |
| **enable\_llm\_cache** | `bool` | If `TRUE`, stores LLM results in cache; repeated prompts return cached responses | `TRUE` |
| **enable\_llm\_cache\_for\_entity\_extract** | `bool` | If `TRUE`, stores LLM results in cache for entity extraction; Good for beginners to debug your application | `TRUE` |
| **addon\_params** | `dict` | Additional parameters, e.g., `{"example_number": 1, "language": "Simplified Chinese", "entity_types": ["organization", "person", "geo", "event"], "insert_batch_size": 10}`: sets example limit, output language, and batch size for document processing | `example_number: all examples, language: English, insert_batch_size: 10` |
| **convert\_response\_to\_json\_func** | `callable` | Not used | `convert_response_to_json` |
| **embedding\_cache\_config** | `dict` | Configuration for question-answer caching. Contains three parameters:<br>- `enabled`: Boolean value to enable/disable cache lookup functionality. When enabled, the system will check cached responses before generating new answers.<br>- `similarity_threshold`: Float value (0-1), similarity threshold. When a new question's similarity with a cached question exceeds this threshold, the cached answer will be returned directly without calling the LLM.<br>- `use_llm_check`: Boolean value to enable/disable LLM similarity verification. When enabled, LLM will be used as a secondary check to verify the similarity between questions before returning cached answers. | Default: `{"enabled": False, "similarity_threshold": 0.95, "use_llm_check": False}` |
</details>
@@ -996,12 +1078,15 @@ Valid modes are:
<summary>Click to view error handling details</summary>
The API includes comprehensive error handling:
- File not found errors (404)
- Processing errors (500)
- Supports multiple file encodings (UTF-8 and GBK)
</details>
## API
LightRAG can be installed with API support to serve a FastAPI interface for data upload, indexing, RAG operations, rescanning of the input folder, and more.
[LightRag API](lightrag/api/README.md)
@@ -1035,7 +1120,6 @@ net.show('knowledge_graph.html')
<details>
<summary> <b>Graph visualization with Neo4j</b> </summary>
* The following code can be found in `examples/graph_visual_with_neo4j.py`
```python
@@ -1171,10 +1255,13 @@ LightRag can be installed with Tools support to add extra tools like the graphml
</details>
## Evaluation
### Dataset
The dataset used in LightRAG can be downloaded from [TommyChien/UltraDomain](https://huggingface.co/datasets/TommyChien/UltraDomain).
### Generate Query
LightRAG uses the following prompt to generate high-level queries, with the corresponding code in `example/generate_query.py`.
<details>
@@ -1203,9 +1290,11 @@ Output the results in the following structure:
- User 5: [user description]
...
```
</details>
### Batch Eval
To evaluate the performance of two RAG systems on high-level queries, LightRAG uses the following prompt, with the specific code available in `example/batch_eval.py`.
<details>
@@ -1253,37 +1342,40 @@ Output your evaluation in the following JSON format:
}}
}}
```
</details>
### Overall Performance Table
| | **Agriculture** | | **CS** | | **Legal** | | **Mix** | |
| --------------------------- | --------------------- | ------------------ | ------------ | ------------------ | --------------- | ------------------ | --------------- | ------------------ |
| | NaiveRAG | **LightRAG** | NaiveRAG | **LightRAG** | NaiveRAG | **LightRAG** | NaiveRAG | **LightRAG** |
| **Comprehensiveness** | 32.4% | **67.6%** | 38.4% | **61.6%** | 16.4% | **83.6%** | 38.8% | **61.2%** |
| **Diversity** | 23.6% | **76.4%** | 38.0% | **62.0%** | 13.6% | **86.4%** | 32.4% | **67.6%** |
| **Empowerment** | 32.4% | **67.6%** | 38.8% | **61.2%** | 16.4% | **83.6%** | 42.8% | **57.2%** |
| **Overall** | 32.4% | **67.6%** | 38.8% | **61.2%** | 15.2% | **84.8%** | 40.0% | **60.0%** |
| | RQ-RAG | **LightRAG** | RQ-RAG | **LightRAG** | RQ-RAG | **LightRAG** | RQ-RAG | **LightRAG** |
| **Comprehensiveness** | 31.6% | **68.4%** | 38.8% | **61.2%** | 15.2% | **84.8%** | 39.2% | **60.8%** |
| **Diversity** | 29.2% | **70.8%** | 39.2% | **60.8%** | 11.6% | **88.4%** | 30.8% | **69.2%** |
| **Empowerment** | 31.6% | **68.4%** | 36.4% | **63.6%** | 15.2% | **84.8%** | 42.4% | **57.6%** |
| **Overall** | 32.4% | **67.6%** | 38.0% | **62.0%** | 14.4% | **85.6%** | 40.0% | **60.0%** |
| | HyDE | **LightRAG** | HyDE | **LightRAG** | HyDE | **LightRAG** | HyDE | **LightRAG** |
| **Comprehensiveness** | 26.0% | **74.0%** | 41.6% | **58.4%** | 26.8% | **73.2%** | 40.4% | **59.6%** |
| **Diversity** | 24.0% | **76.0%** | 38.8% | **61.2%** | 20.0% | **80.0%** | 32.4% | **67.6%** |
| **Empowerment** | 25.2% | **74.8%** | 40.8% | **59.2%** | 26.0% | **74.0%** | 46.0% | **54.0%** |
| **Overall** | 24.8% | **75.2%** | 41.6% | **58.4%** | 26.4% | **73.6%** | 42.4% | **57.6%** |
| | GraphRAG | **LightRAG** | GraphRAG | **LightRAG** | GraphRAG | **LightRAG** | GraphRAG | **LightRAG** |
| **Comprehensiveness** | 45.6% | **54.4%** | 48.4% | **51.6%** | 48.4% | **51.6%** | **50.4%** | 49.6% |
| **Diversity** | 22.8% | **77.2%** | 40.8% | **59.2%** | 26.4% | **73.6%** | 36.0% | **64.0%** |
| **Empowerment** | 41.2% | **58.8%** | 45.2% | **54.8%** | 43.6% | **56.4%** | **50.8%** | 49.2% |
| **Overall** | 45.2% | **54.8%** | 48.0% | **52.0%** | 47.2% | **52.8%** | **50.4%** | 49.6% |
## Reproduce
All the code can be found in the `./reproduce` directory.
### Step-0 Extract Unique Contexts
First, we need to extract unique contexts in the datasets.
<details>
@@ -1340,9 +1432,11 @@ def extract_unique_contexts(input_directory, output_directory):
print("All files have been processed.")
```
</details>
### Step-1 Insert Contexts
For the extracted contexts, we insert them into the LightRAG system.
<details>
@@ -1366,6 +1460,7 @@ def insert_text(rag, file_path):
if retries == max_retries:
print("Insertion failed after exceeding the maximum number of retries")
```
</details>
### Step-2 Generate Queries
@@ -1390,9 +1485,11 @@ def get_summary(context, tot_tokens=2000):
return summary
```
</details>
### Step-3 Query
For the queries generated in Step-2, we will extract them and query LightRAG.
<details>
@@ -1409,6 +1506,7 @@ def extract_queries(file_path):
return queries
```
</details>
## Star History
@@ -1441,4 +1539,5 @@ archivePrefix={arXiv},
primaryClass={cs.IR}
}
```
**Thank you for your interest in our work!**


@@ -1,5 +1,5 @@
from .lightrag import LightRAG as LightRAG, QueryParam as QueryParam
__version__ = "1.2.5"
__version__ = "1.2.6"
__author__ = "Zirui Guo"
__url__ = "https://github.com/HKUDS/LightRAG"


@@ -59,7 +59,7 @@ logconfig_dict = {
},
"filters": {
"path_filter": {
"()": "lightrag.api.lightrag_server.LightragPathFilter",
"()": "lightrag.utils.LightragPathFilter",
},
},
"loggers": {


@@ -55,41 +55,6 @@ config = configparser.ConfigParser()
config.read("config.ini")
class LightragPathFilter(logging.Filter):
"""Filter for lightrag logger to filter out frequent path access logs"""
def __init__(self):
super().__init__()
# Define paths to be filtered
self.filtered_paths = ["/documents", "/health", "/webui/"]
def filter(self, record):
try:
# Check if record has the required attributes for an access log
if not hasattr(record, "args") or not isinstance(record.args, tuple):
return True
if len(record.args) < 5:
return True
# Extract method, path and status from the record args
method = record.args[1]
path = record.args[2]
status = record.args[4]
# Filter out successful GET requests to filtered paths
if (
method == "GET"
and (status == 200 or status == 304)
and path in self.filtered_paths
):
return False
return True
except Exception:
# In case of any error, let the message through
return True
def create_app(args):
# Setup logging
logger.setLevel(args.log_level)
@@ -177,6 +142,9 @@ def create_app(args):
if api_key
else "",
version=__api_version__,
openapi_url="/openapi.json", # Explicitly set OpenAPI schema URL
docs_url="/docs", # Explicitly set docs URL
redoc_url="/redoc", # Explicitly set redoc URL
openapi_tags=[{"name": "api"}],
lifespan=lifespan,
)
@@ -423,12 +391,24 @@ def create_app(args):
"update_status": update_status,
}
# Custom StaticFiles class to prevent caching of HTML files
class NoCacheStaticFiles(StaticFiles):
async def get_response(self, path: str, scope):
response = await super().get_response(path, scope)
if path.endswith(".html"):
response.headers["Cache-Control"] = (
"no-cache, no-store, must-revalidate"
)
response.headers["Pragma"] = "no-cache"
response.headers["Expires"] = "0"
return response
# Webui mount webui/index.html
static_dir = Path(__file__).parent / "webui"
static_dir.mkdir(exist_ok=True)
app.mount(
"/webui",
StaticFiles(directory=static_dir, html=True, check_dir=True),
NoCacheStaticFiles(directory=static_dir, html=True, check_dir=True),
name="webui",
)
@@ -516,7 +496,7 @@ def configure_logging():
},
"filters": {
"path_filter": {
"()": "lightrag.api.lightrag_server.LightragPathFilter",
"()": "lightrag.utils.LightragPathFilter",
},
},
}


@@ -99,6 +99,37 @@ class DocsStatusesResponse(BaseModel):
statuses: Dict[DocStatus, List[DocStatusResponse]] = {}
class PipelineStatusResponse(BaseModel):
"""Response model for pipeline status
Attributes:
autoscanned: Whether auto-scan has started
busy: Whether the pipeline is currently busy
job_name: Current job name (e.g., indexing files/indexing texts)
job_start: Job start time as ISO format string (optional)
docs: Total number of documents to be indexed
batchs: Number of batches for processing documents
cur_batch: Current processing batch
request_pending: Flag for pending request for processing
latest_message: Latest message from pipeline processing
history_messages: List of history messages
"""
autoscanned: bool = False
busy: bool = False
job_name: str = "Default Job"
job_start: Optional[str] = None
docs: int = 0
batchs: int = 0
cur_batch: int = 0
request_pending: bool = False
latest_message: str = ""
history_messages: Optional[List[str]] = None
class Config:
extra = "allow" # Allow additional fields from the pipeline status
class DocumentManager:
def __init__(
self,
@@ -247,7 +278,7 @@ async def pipeline_enqueue_file(rag: LightRAG, file_path: Path) -> bool:
if global_args["main_args"].document_loading_engine == "DOCLING":
if not pm.is_installed("docling"): # type: ignore
pm.install("docling")
from docling.document_converter import DocumentConverter
from docling.document_converter import DocumentConverter # type: ignore
converter = DocumentConverter()
result = converter.convert(file_path)
@@ -266,7 +297,7 @@ async def pipeline_enqueue_file(rag: LightRAG, file_path: Path) -> bool:
if global_args["main_args"].document_loading_engine == "DOCLING":
if not pm.is_installed("docling"): # type: ignore
pm.install("docling")
from docling.document_converter import DocumentConverter
from docling.document_converter import DocumentConverter # type: ignore
converter = DocumentConverter()
result = converter.convert(file_path)
@@ -286,7 +317,7 @@ async def pipeline_enqueue_file(rag: LightRAG, file_path: Path) -> bool:
if global_args["main_args"].document_loading_engine == "DOCLING":
if not pm.is_installed("docling"): # type: ignore
pm.install("docling")
from docling.document_converter import DocumentConverter
from docling.document_converter import DocumentConverter # type: ignore
converter = DocumentConverter()
result = converter.convert(file_path)
@@ -307,7 +338,7 @@ async def pipeline_enqueue_file(rag: LightRAG, file_path: Path) -> bool:
if global_args["main_args"].document_loading_engine == "DOCLING":
if not pm.is_installed("docling"): # type: ignore
pm.install("docling")
from docling.document_converter import DocumentConverter
from docling.document_converter import DocumentConverter # type: ignore
converter = DocumentConverter()
result = converter.convert(file_path)
@@ -718,17 +749,33 @@ def create_document_routes(
logger.error(traceback.format_exc())
raise HTTPException(status_code=500, detail=str(e))
@router.get("/pipeline_status", dependencies=[Depends(optional_api_key)])
async def get_pipeline_status():
@router.get(
"/pipeline_status",
dependencies=[Depends(optional_api_key)],
response_model=PipelineStatusResponse,
)
async def get_pipeline_status() -> PipelineStatusResponse:
"""
Get the current status of the document indexing pipeline.
This endpoint returns information about the current state of the document processing pipeline,
including whether it's busy, the current job name, when it started, how many documents
are being processed, how many batches there are, and which batch is currently being processed.
including the processing status, progress information, and history messages.
Returns:
dict: A dictionary containing the pipeline status information
PipelineStatusResponse: A response object containing:
- autoscanned (bool): Whether auto-scan has started
- busy (bool): Whether the pipeline is currently busy
- job_name (str): Current job name (e.g., indexing files/indexing texts)
- job_start (str, optional): Job start time as ISO format string
- docs (int): Total number of documents to be indexed
- batchs (int): Number of batches for processing documents
- cur_batch (int): Current processing batch
- request_pending (bool): Flag for pending request for processing
- latest_message (str): Latest message from pipeline processing
- history_messages (List[str], optional): List of history messages
Raises:
HTTPException: If an error occurs while retrieving pipeline status (500)
"""
try:
from lightrag.kg.shared_storage import get_namespace_data
@@ -746,7 +793,7 @@ def create_document_routes(
if status_dict.get("job_start"):
status_dict["job_start"] = str(status_dict["job_start"])
return status_dict
return PipelineStatusResponse(**status_dict)
except Exception as e:
logger.error(f"Error getting pipeline status: {str(e)}")
logger.error(traceback.format_exc())


@@ -0,0 +1,17 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta http-equiv="Cache-Control" content="no-cache, no-store, must-revalidate" />
<meta http-equiv="Pragma" content="no-cache" />
<meta http-equiv="Expires" content="0" />
<link rel="icon" type="image/svg+xml" href="./logo.png" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Lightrag</title>
<script type="module" crossorigin src="./assets/index-DwcJE583.js"></script>
<link rel="stylesheet" crossorigin href="./assets/index-BV5s8k-a.css">
</head>
<body>
<div id="root"></div>
</body>
</html>


@@ -156,7 +156,9 @@ class ChromaVectorDBStorage(BaseVectorStorage):
logger.error(f"Error during ChromaDB upsert: {str(e)}")
raise
async def query(self, query: str, top_k: int) -> list[dict[str, Any]]:
async def query(
self, query: str, top_k: int, ids: list[str] | None = None
) -> list[dict[str, Any]]:
try:
embedding = await self.embedding_func([query])


@@ -171,7 +171,9 @@ class FaissVectorDBStorage(BaseVectorStorage):
logger.info(f"Upserted {len(list_data)} vectors into Faiss index.")
return [m["__id__"] for m in list_data]
async def query(self, query: str, top_k: int) -> list[dict[str, Any]]:
async def query(
self, query: str, top_k: int, ids: list[str] | None = None
) -> list[dict[str, Any]]:
"""
Search by a textual query; returns top_k results with their metadata + similarity distance.
"""


@@ -101,7 +101,9 @@ class MilvusVectorDBStorage(BaseVectorStorage):
results = self._client.upsert(collection_name=self.namespace, data=list_data)
return results
async def query(self, query: str, top_k: int) -> list[dict[str, Any]]:
async def query(
self, query: str, top_k: int, ids: list[str] | None = None
) -> list[dict[str, Any]]:
embedding = await self.embedding_func([query])
results = self._client.search(
collection_name=self.namespace,


@@ -938,7 +938,9 @@ class MongoVectorDBStorage(BaseVectorStorage):
return list_data
async def query(self, query: str, top_k: int) -> list[dict[str, Any]]:
async def query(
self, query: str, top_k: int, ids: list[str] | None = None
) -> list[dict[str, Any]]:
"""Queries the vector database using Atlas Vector Search."""
# Generate the embedding
embedding = await self.embedding_func([query])


@@ -120,7 +120,9 @@ class NanoVectorDBStorage(BaseVectorStorage):
f"embedding is not 1-1 with data, {len(embeddings)} != {len(list_data)}"
)
async def query(self, query: str, top_k: int) -> list[dict[str, Any]]:
async def query(
self, query: str, top_k: int, ids: list[str] | None = None
) -> list[dict[str, Any]]:
# Execute embedding outside of lock to avoid long lock times
embedding = await self.embedding_func([query])
embedding = embedding[0]


@@ -553,18 +553,6 @@ class Neo4JStorage(BaseGraphStorage):
logger.error(f"Error during upsert: {str(e)}")
raise
@retry(
stop=stop_after_attempt(3),
wait=wait_exponential(multiplier=1, min=4, max=10),
retry=retry_if_exception_type(
(
neo4jExceptions.ServiceUnavailable,
neo4jExceptions.TransientError,
neo4jExceptions.WriteServiceUnavailable,
neo4jExceptions.ClientError,
)
),
)
@retry(
stop=stop_after_attempt(3),
wait=wait_exponential(multiplier=1, min=4, max=10),
@@ -666,14 +654,14 @@ class Neo4JStorage(BaseGraphStorage):
main_query = """
MATCH (n)
OPTIONAL MATCH (n)-[r]-()
WITH n, count(r) AS degree
WITH n, COALESCE(count(r), 0) AS degree
WHERE degree >= $min_degree
ORDER BY degree DESC
LIMIT $max_nodes
WITH collect({node: n}) AS filtered_nodes
UNWIND filtered_nodes AS node_info
WITH collect(node_info.node) AS kept_nodes, filtered_nodes
MATCH (a)-[r]-(b)
OPTIONAL MATCH (a)-[r]-(b)
WHERE a IN kept_nodes AND b IN kept_nodes
RETURN filtered_nodes AS node_info,
collect(DISTINCT r) AS relationships
@@ -703,7 +691,7 @@ class Neo4JStorage(BaseGraphStorage):
WITH start, nodes, relationships
UNWIND nodes AS node
OPTIONAL MATCH (node)-[r]-()
WITH node, count(r) AS degree, start, nodes, relationships
WITH node, COALESCE(count(r), 0) AS degree, start, nodes, relationships
WHERE node = start OR EXISTS((start)--(node)) OR degree >= $min_degree
ORDER BY
CASE
@@ -716,7 +704,7 @@ class Neo4JStorage(BaseGraphStorage):
WITH collect({node: node}) AS filtered_nodes
UNWIND filtered_nodes AS node_info
WITH collect(node_info.node) AS kept_nodes, filtered_nodes
MATCH (a)-[r]-(b)
OPTIONAL MATCH (a)-[r]-(b)
WHERE a IN kept_nodes AND b IN kept_nodes
RETURN filtered_nodes AS node_info,
collect(DISTINCT r) AS relationships
@@ -744,11 +732,7 @@ class Neo4JStorage(BaseGraphStorage):
result.nodes.append(
KnowledgeGraphNode(
id=f"{node_id}",
labels=[
label
for label in node.labels
if label != "base"
],
labels=[node.get("entity_id")],
properties=dict(node),
)
)
@@ -865,9 +849,7 @@ class Neo4JStorage(BaseGraphStorage):
# Create KnowledgeGraphNode for target
target_node = KnowledgeGraphNode(
id=f"{target_id}",
labels=[
label for label in b_node.labels if label != "base"
],
labels=list(f"{target_id}"),
properties=dict(b_node.properties),
)
@@ -907,9 +889,7 @@ class Neo4JStorage(BaseGraphStorage):
# Create initial KnowledgeGraphNode
start_node = KnowledgeGraphNode(
id=f"{node_record['n'].get('entity_id')}",
labels=[
label for label in node_record["n"].labels if label != "base"
],
labels=list(f"{node_record['n'].get('entity_id')}"),
properties=dict(node_record["n"].properties),
)
finally:

View File

@@ -417,7 +417,9 @@ class OracleVectorDBStorage(BaseVectorStorage):
self.db = None
#################### query method ###############
async def query(self, query: str, top_k: int) -> list[dict[str, Any]]:
async def query(
self, query: str, top_k: int, ids: list[str] | None = None
) -> list[dict[str, Any]]:
embeddings = await self.embedding_func([query])
embedding = embeddings[0]
# 转换精度


@@ -123,7 +123,9 @@ class QdrantVectorDBStorage(BaseVectorStorage):
)
return results
async def query(self, query: str, top_k: int) -> list[dict[str, Any]]:
async def query(
self, query: str, top_k: int, ids: list[str] | None = None
) -> list[dict[str, Any]]:
embedding = await self.embedding_func([query])
results = self._client.search(
collection_name=self.namespace,


@@ -306,7 +306,9 @@ class TiDBVectorDBStorage(BaseVectorStorage):
await ClientManager.release_client(self.db)
self.db = None
async def query(self, query: str, top_k: int) -> list[dict[str, Any]]:
async def query(
self, query: str, top_k: int, ids: list[str] | None = None
) -> list[dict[str, Any]]:
"""Search from tidb vector"""
embeddings = await self.embedding_func([query])
embedding = embeddings[0]


@@ -3,11 +3,14 @@ from __future__ import annotations
import asyncio
import configparser
import os
import csv
import warnings
from dataclasses import asdict, dataclass, field
from datetime import datetime
from functools import partial
from typing import Any, AsyncIterator, Callable, Iterator, cast, final
from typing import Any, AsyncIterator, Callable, Iterator, cast, final, Literal
import pandas as pd
from lightrag.kg import (
STORAGE_ENV_REQUIREMENTS,
@@ -1111,6 +1114,7 @@ class LightRAG:
# Prepare node data
node_data: dict[str, str] = {
"entity_id": entity_name,
"entity_type": entity_type,
"description": description,
"source_id": source_id,
@@ -1148,6 +1152,7 @@ class LightRAG:
await self.chunk_entity_relation_graph.upsert_node(
need_insert_id,
node_data={
"entity_id": need_insert_id,
"source_id": source_id,
"description": "UNKNOWN",
"entity_type": "UNKNOWN",
@@ -2157,6 +2162,7 @@ class LightRAG:
# Prepare node data with defaults if missing
node_data = {
"entity_id": entity_name,
"entity_type": entity_data.get("entity_type", "UNKNOWN"),
"description": entity_data.get("description", ""),
"source_id": entity_data.get("source_id", "manual"),
@@ -2592,6 +2598,322 @@ class LightRAG:
logger.error(f"Error merging entities: {e}")
raise
async def aexport_data(
self,
output_path: str,
file_format: Literal["csv", "excel", "md", "txt"] = "csv",
include_vector_data: bool = False,
) -> None:
"""
Asynchronously exports all entities, relations, and relationships to various formats.
Args:
output_path: The path to the output file (including extension).
file_format: Output format - "csv", "excel", "md", "txt".
- csv: Comma-separated values file
- excel: Microsoft Excel file with multiple sheets
- md: Markdown tables
- txt: Plain text formatted output
- table: Print formatted tables to console
include_vector_data: Whether to include data from the vector database.
"""
# Collect data
entities_data = []
relations_data = []
relationships_data = []
# --- Entities ---
all_entities = await self.chunk_entity_relation_graph.get_all_labels()
for entity_name in all_entities:
entity_info = await self.get_entity_info(
entity_name, include_vector_data=include_vector_data
)
entity_row = {
"entity_name": entity_name,
"source_id": entity_info["source_id"],
"graph_data": str(
entity_info["graph_data"]
), # Convert to string to ensure compatibility
}
if include_vector_data and "vector_data" in entity_info:
entity_row["vector_data"] = str(entity_info["vector_data"])
entities_data.append(entity_row)
# --- Relations ---
for src_entity in all_entities:
for tgt_entity in all_entities:
if src_entity == tgt_entity:
continue
edge_exists = await self.chunk_entity_relation_graph.has_edge(
src_entity, tgt_entity
)
if edge_exists:
relation_info = await self.get_relation_info(
src_entity, tgt_entity, include_vector_data=include_vector_data
)
relation_row = {
"src_entity": src_entity,
"tgt_entity": tgt_entity,
"source_id": relation_info["source_id"],
"graph_data": str(
relation_info["graph_data"]
), # Convert to string
}
if include_vector_data and "vector_data" in relation_info:
relation_row["vector_data"] = str(relation_info["vector_data"])
relations_data.append(relation_row)
# --- Relationships (from VectorDB) ---
all_relationships = await self.relationships_vdb.client_storage
for rel in all_relationships["data"]:
relationships_data.append(
{
"relationship_id": rel["__id__"],
"data": str(rel), # Convert to string for compatibility
}
)
# Export based on format
if file_format == "csv":
# CSV export
with open(output_path, "w", newline="", encoding="utf-8") as csvfile:
# Entities
if entities_data:
csvfile.write("# ENTITIES\n")
writer = csv.DictWriter(csvfile, fieldnames=entities_data[0].keys())
writer.writeheader()
writer.writerows(entities_data)
csvfile.write("\n\n")
# Relations
if relations_data:
csvfile.write("# RELATIONS\n")
writer = csv.DictWriter(
csvfile, fieldnames=relations_data[0].keys()
)
writer.writeheader()
writer.writerows(relations_data)
csvfile.write("\n\n")
# Relationships
if relationships_data:
csvfile.write("# RELATIONSHIPS\n")
writer = csv.DictWriter(
csvfile, fieldnames=relationships_data[0].keys()
)
writer.writeheader()
writer.writerows(relationships_data)
elif file_format == "excel":
# Excel export
entities_df = (
pd.DataFrame(entities_data) if entities_data else pd.DataFrame()
)
relations_df = (
pd.DataFrame(relations_data) if relations_data else pd.DataFrame()
)
relationships_df = (
pd.DataFrame(relationships_data)
if relationships_data
else pd.DataFrame()
)
with pd.ExcelWriter(output_path, engine="xlsxwriter") as writer:
if not entities_df.empty:
entities_df.to_excel(writer, sheet_name="Entities", index=False)
if not relations_df.empty:
relations_df.to_excel(writer, sheet_name="Relations", index=False)
if not relationships_df.empty:
relationships_df.to_excel(
writer, sheet_name="Relationships", index=False
)
elif file_format == "md":
# Markdown export
with open(output_path, "w", encoding="utf-8") as mdfile:
mdfile.write("# LightRAG Data Export\n\n")
# Entities
mdfile.write("## Entities\n\n")
if entities_data:
# Write header
mdfile.write("| " + " | ".join(entities_data[0].keys()) + " |\n")
mdfile.write(
"| "
+ " | ".join(["---"] * len(entities_data[0].keys()))
+ " |\n"
)
# Write rows
for entity in entities_data:
mdfile.write(
"| " + " | ".join(str(v) for v in entity.values()) + " |\n"
)
mdfile.write("\n\n")
else:
mdfile.write("*No entity data available*\n\n")
# Relations
mdfile.write("## Relations\n\n")
if relations_data:
# Write header
mdfile.write("| " + " | ".join(relations_data[0].keys()) + " |\n")
mdfile.write(
"| "
+ " | ".join(["---"] * len(relations_data[0].keys()))
+ " |\n"
)
# Write rows
for relation in relations_data:
mdfile.write(
"| "
+ " | ".join(str(v) for v in relation.values())
+ " |\n"
)
mdfile.write("\n\n")
else:
mdfile.write("*No relation data available*\n\n")
# Relationships
mdfile.write("## Relationships\n\n")
if relationships_data:
# Write header
mdfile.write(
"| " + " | ".join(relationships_data[0].keys()) + " |\n"
)
mdfile.write(
"| "
+ " | ".join(["---"] * len(relationships_data[0].keys()))
+ " |\n"
)
# Write rows
for relationship in relationships_data:
mdfile.write(
"| "
+ " | ".join(str(v) for v in relationship.values())
+ " |\n"
)
else:
mdfile.write("*No relationship data available*\n\n")
elif file_format == "txt":
# Plain text export
with open(output_path, "w", encoding="utf-8") as txtfile:
txtfile.write("LIGHTRAG DATA EXPORT\n")
txtfile.write("=" * 80 + "\n\n")
# Entities
txtfile.write("ENTITIES\n")
txtfile.write("-" * 80 + "\n")
if entities_data:
# Create fixed width columns
col_widths = {
k: max(len(k), max(len(str(e[k])) for e in entities_data))
for k in entities_data[0]
}
header = " ".join(k.ljust(col_widths[k]) for k in entities_data[0])
txtfile.write(header + "\n")
txtfile.write("-" * len(header) + "\n")
# Write rows
for entity in entities_data:
row = " ".join(
str(v).ljust(col_widths[k]) for k, v in entity.items()
)
txtfile.write(row + "\n")
txtfile.write("\n\n")
else:
txtfile.write("No entity data available\n\n")
# Relations
txtfile.write("RELATIONS\n")
txtfile.write("-" * 80 + "\n")
if relations_data:
# Create fixed width columns
col_widths = {
k: max(len(k), max(len(str(r[k])) for r in relations_data))
for k in relations_data[0]
}
header = " ".join(
k.ljust(col_widths[k]) for k in relations_data[0]
)
txtfile.write(header + "\n")
txtfile.write("-" * len(header) + "\n")
# Write rows
for relation in relations_data:
row = " ".join(
str(v).ljust(col_widths[k]) for k, v in relation.items()
)
txtfile.write(row + "\n")
txtfile.write("\n\n")
else:
txtfile.write("No relation data available\n\n")
# Relationships
txtfile.write("RELATIONSHIPS\n")
txtfile.write("-" * 80 + "\n")
if relationships_data:
# Create fixed width columns
col_widths = {
k: max(len(k), max(len(str(r[k])) for r in relationships_data))
for k in relationships_data[0]
}
header = " ".join(
k.ljust(col_widths[k]) for k in relationships_data[0]
)
txtfile.write(header + "\n")
txtfile.write("-" * len(header) + "\n")
# Write rows
for relationship in relationships_data:
row = " ".join(
str(v).ljust(col_widths[k]) for k, v in relationship.items()
)
txtfile.write(row + "\n")
else:
txtfile.write("No relationship data available\n\n")
else:
raise ValueError(
f"Unsupported file format: {file_format}. "
f"Choose from: csv, excel, md, txt"
)
if file_format is not None:
print(f"Data exported to: {output_path} with format: {file_format}")
else:
print("Data displayed as table format")
def export_data(
self,
output_path: str,
file_format: Literal["csv", "excel", "md", "txt"] = "csv",
include_vector_data: bool = False,
) -> None:
"""
Synchronously exports all entities, relations, and relationships to various formats.
Args:
output_path: The path to the output file (including extension).
file_format: Output format - "csv", "excel", "md", "txt".
- csv: Comma-separated values file
- excel: Microsoft Excel file with multiple sheets
- md: Markdown tables
- txt: Plain text formatted output
- table: Print formatted tables to console
include_vector_data: Whether to include data from the vector database.
"""
try:
loop = asyncio.get_event_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(
self.aexport_data(output_path, file_format, include_vector_data)
)
def merge_entities(
self,
source_entities: list[str],


@@ -76,6 +76,7 @@ class LightragPathFilter(logging.Filter):
super().__init__()
# Define paths to be filtered
self.filtered_paths = ["/documents", "/health", "/webui/"]
# self.filtered_paths = ["/health", "/webui/"]
def filter(self, record):
try:


@@ -63,6 +63,7 @@
"@types/node": "^22.13.5",
"@types/react": "^19.0.10",
"@types/react-dom": "^19.0.4",
"@types/react-i18next": "^8.1.0",
"@types/react-syntax-highlighter": "^15.5.13",
"@types/seedrandom": "^3.0.8",
"@vitejs/plugin-react-swc": "^3.8.0",
@@ -446,6 +447,8 @@
"@types/react-dom": ["@types/react-dom@19.0.4", "", { "peerDependencies": { "@types/react": "^19.0.0" } }, "sha512-4fSQ8vWFkg+TGhePfUzVmat3eC14TXYSsiiDSLI0dVLsrm9gZFABjPy/Qu6TKgl1tq1Bu1yDsuQgY3A3DOjCcg=="],
"@types/react-i18next": ["@types/react-i18next@8.1.0", "", { "dependencies": { "react-i18next": "*" } }, "sha512-d4xhcjX5b3roNMObRNMfb1HinHQlQLPo8xlDj60dnHeeAw2bBymR2cy/l1giJpHzo/ZFgSvgVUvIWr4kCrenCg=="],
"@types/react-syntax-highlighter": ["@types/react-syntax-highlighter@15.5.13", "", { "dependencies": { "@types/react": "*" } }, "sha512-uLGJ87j6Sz8UaBAooU0T6lWJ0dBmjZgN1PZTrj05TNql2/XpC6+4HhMT5syIdFUUt+FASfCeLLv4kBygNU+8qA=="],
"@types/react-transition-group": ["@types/react-transition-group@4.4.12", "", { "peerDependencies": { "@types/react": "*" } }, "sha512-8TV6R3h2j7a91c+1DXdJi3Syo69zzIZbz7Lg5tORM5LEJG7X/E6a1V3drRyBRZq7/utz7A+c4OgYLiLcYGHG6w=="],


@@ -2,6 +2,9 @@
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta http-equiv="Cache-Control" content="no-cache, no-store, must-revalidate" />
<meta http-equiv="Pragma" content="no-cache" />
<meta http-equiv="Expires" content="0" />
<link rel="icon" type="image/svg+xml" href="logo.png" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Lightrag</title>


@@ -72,6 +72,7 @@
"@types/node": "^22.13.5",
"@types/react": "^19.0.10",
"@types/react-dom": "^19.0.4",
"@types/react-i18next": "^8.1.0",
"@types/react-syntax-highlighter": "^15.5.13",
"@types/seedrandom": "^3.0.8",
"@vitejs/plugin-react-swc": "^3.8.0",


@@ -1,4 +1,6 @@
import { useState, useCallback } from 'react'
import ThemeProvider from '@/components/ThemeProvider'
import TabVisibilityProvider from '@/contexts/TabVisibilityProvider'
import MessageAlert from '@/components/MessageAlert'
import ApiKeyAlert from '@/components/ApiKeyAlert'
import StatusIndicator from '@/components/graph/StatusIndicator'
@@ -19,7 +21,7 @@ import { Tabs, TabsContent } from '@/components/ui/Tabs'
function App() {
const message = useBackendState.use.message()
const enableHealthCheck = useSettingsStore.use.enableHealthCheck()
const [currentTab] = useState(() => useSettingsStore.getState().currentTab)
const currentTab = useSettingsStore.use.currentTab()
const [apiKeyInvalid, setApiKeyInvalid] = useState(false)
// Health check
@@ -51,32 +53,36 @@ function App() {
}, [message, setApiKeyInvalid])
return (
<main className="flex h-screen w-screen overflow-x-hidden">
<Tabs
defaultValue={currentTab}
className="!m-0 flex grow flex-col !p-0"
onValueChange={handleTabChange}
>
<SiteHeader />
<div className="relative grow">
<TabsContent value="documents" className="absolute top-0 right-0 bottom-0 left-0">
<DocumentManager />
</TabsContent>
<TabsContent value="knowledge-graph" className="absolute top-0 right-0 bottom-0 left-0">
<GraphViewer />
</TabsContent>
<TabsContent value="retrieval" className="absolute top-0 right-0 bottom-0 left-0">
<RetrievalTesting />
</TabsContent>
<TabsContent value="api" className="absolute top-0 right-0 bottom-0 left-0">
<ApiSite />
</TabsContent>
</div>
</Tabs>
{enableHealthCheck && <StatusIndicator />}
{message !== null && !apiKeyInvalid && <MessageAlert />}
{apiKeyInvalid && <ApiKeyAlert />}
</main>
<ThemeProvider>
<TabVisibilityProvider>
<main className="flex h-screen w-screen overflow-x-hidden">
<Tabs
defaultValue={currentTab}
className="!m-0 flex grow flex-col !p-0"
onValueChange={handleTabChange}
>
<SiteHeader />
<div className="relative grow">
<TabsContent value="documents" className="absolute top-0 right-0 bottom-0 left-0">
<DocumentManager />
</TabsContent>
<TabsContent value="knowledge-graph" className="absolute top-0 right-0 bottom-0 left-0">
<GraphViewer />
</TabsContent>
<TabsContent value="retrieval" className="absolute top-0 right-0 bottom-0 left-0">
<RetrievalTesting />
</TabsContent>
<TabsContent value="api" className="absolute top-0 right-0 bottom-0 left-0">
<ApiSite />
</TabsContent>
</div>
</Tabs>
{enableHealthCheck && <StatusIndicator />}
{message !== null && !apiKeyInvalid && <MessageAlert />}
{apiKeyInvalid && <ApiKeyAlert />}
</main>
</TabVisibilityProvider>
</ThemeProvider>
)
}

View File

@@ -0,0 +1,66 @@
import { useState, useCallback } from 'react'
import { Popover, PopoverContent, PopoverTrigger } from '@/components/ui/Popover'
import Button from '@/components/ui/Button'
import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from '@/components/ui/Select'
import { useSettingsStore } from '@/stores/settings'
import { PaletteIcon } from 'lucide-react'
import { useTranslation } from 'react-i18next'
export default function AppSettings() {
const [opened, setOpened] = useState<boolean>(false)
const { t } = useTranslation()
const language = useSettingsStore.use.language()
const setLanguage = useSettingsStore.use.setLanguage()
const theme = useSettingsStore.use.theme()
const setTheme = useSettingsStore.use.setTheme()
const handleLanguageChange = useCallback((value: string) => {
setLanguage(value as 'en' | 'zh')
}, [setLanguage])
const handleThemeChange = useCallback((value: string) => {
setTheme(value as 'light' | 'dark' | 'system')
}, [setTheme])
return (
<Popover open={opened} onOpenChange={setOpened}>
<PopoverTrigger asChild>
<Button variant="outline" size="icon" className="h-9 w-9">
<PaletteIcon className="h-5 w-5" />
</Button>
</PopoverTrigger>
<PopoverContent side="bottom" align="end" className="w-56">
<div className="flex flex-col gap-4">
<div className="flex flex-col gap-2">
<label className="text-sm font-medium">{t('settings.language')}</label>
<Select value={language} onValueChange={handleLanguageChange}>
<SelectTrigger>
<SelectValue />
</SelectTrigger>
<SelectContent>
<SelectItem value="en">English</SelectItem>
<SelectItem value="zh"></SelectItem>
</SelectContent>
</Select>
</div>
<div className="flex flex-col gap-2">
<label className="text-sm font-medium">{t('settings.theme')}</label>
<Select value={theme} onValueChange={handleThemeChange}>
<SelectTrigger>
<SelectValue />
</SelectTrigger>
<SelectContent>
<SelectItem value="light">{t('settings.light')}</SelectItem>
<SelectItem value="dark">{t('settings.dark')}</SelectItem>
<SelectItem value="system">{t('settings.system')}</SelectItem>
</SelectContent>
</Select>
</div>
</div>
</PopoverContent>
</Popover>
)
}

View File

@@ -0,0 +1,24 @@
import { StrictMode, useEffect, useState } from 'react'
import { initializeI18n } from '@/i18n'
import App from '@/App'
export const Root = () => {
const [isI18nInitialized, setIsI18nInitialized] = useState(false)
useEffect(() => {
// Initialize i18n immediately with persisted language
initializeI18n().then(() => {
setIsI18nInitialized(true)
})
}, [])
if (!isI18nInitialized) {
return null // or a loading spinner
}
return (
<StrictMode>
<App />
</StrictMode>
)
}
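A minimal sketch (not part of this diff) of how an entry point could mount `Root` so the i18n gate runs before `App` renders; the actual main.tsx is not shown here, and the import path and container id are assumptions.

```tsx
// Hypothetical main.tsx — sketch only; '@/Root' and the #root element are assumptions.
import { createRoot } from 'react-dom/client'
import { Root } from '@/Root'

// Root renders nothing until initializeI18n() resolves, then mounts <App /> inside StrictMode.
createRoot(document.getElementById('root')!).render(<Root />)
```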

View File

@@ -1,4 +1,4 @@
import { createContext, useEffect, useState } from 'react'
import { createContext, useEffect } from 'react'
import { Theme, useSettingsStore } from '@/stores/settings'
type ThemeProviderProps = {
@@ -21,30 +21,32 @@ const ThemeProviderContext = createContext<ThemeProviderState>(initialState)
* Component that provides the theme state and setter function to its children.
*/
export default function ThemeProvider({ children, ...props }: ThemeProviderProps) {
const [theme, setTheme] = useState<Theme>(useSettingsStore.getState().theme)
const theme = useSettingsStore.use.theme()
const setTheme = useSettingsStore.use.setTheme()
useEffect(() => {
const root = window.document.documentElement
root.classList.remove('light', 'dark')
if (theme === 'system') {
const systemTheme = window.matchMedia('(prefers-color-scheme: dark)').matches
? 'dark'
: 'light'
root.classList.add(systemTheme)
setTheme(systemTheme)
return
}
const mediaQuery = window.matchMedia('(prefers-color-scheme: dark)')
const handleChange = (e: MediaQueryListEvent) => {
root.classList.remove('light', 'dark')
root.classList.add(e.matches ? 'dark' : 'light')
}
root.classList.add(theme)
root.classList.add(mediaQuery.matches ? 'dark' : 'light')
mediaQuery.addEventListener('change', handleChange)
return () => mediaQuery.removeEventListener('change', handleChange)
} else {
root.classList.add(theme)
}
}, [theme])
const value = {
theme,
setTheme: (theme: Theme) => {
useSettingsStore.getState().setTheme(theme)
setTheme(theme)
}
setTheme
}
return (

View File

@@ -13,15 +13,24 @@ const FocusOnNode = ({ node, move }: { node: string | null; move?: boolean }) =>
* When the selected item changes, highlighted the node and center the camera on it.
*/
useEffect(() => {
if (!node) return
sigma.getGraph().setNodeAttribute(node, 'highlighted', true)
if (move) {
gotoNode(node)
if (node) {
sigma.getGraph().setNodeAttribute(node, 'highlighted', true)
gotoNode(node)
} else {
// If no node is selected but move is true, reset to default view
sigma.setCustomBBox(null)
sigma.getCamera().animate({ x: 0.5, y: 0.5, ratio: 1 }, { duration: 0 })
}
useGraphStore.getState().setMoveToSelectedNode(false)
} else if (node) {
sigma.getGraph().setNodeAttribute(node, 'highlighted', true)
}
return () => {
sigma.getGraph().setNodeAttribute(node, 'highlighted', false)
if (node) {
sigma.getGraph().setNodeAttribute(node, 'highlighted', false)
}
}
}, [node, move, sigma, gotoNode])

View File

@@ -1,10 +1,11 @@
import { useLoadGraph, useRegisterEvents, useSetSettings, useSigma } from '@react-sigma/core'
import Graph from 'graphology'
// import { useLayoutCircular } from '@react-sigma/layout-circular'
import { useLayoutForceAtlas2 } from '@react-sigma/layout-forceatlas2'
import { useEffect } from 'react'
// import useRandomGraph, { EdgeType, NodeType } from '@/hooks/useRandomGraph'
import useLightragGraph, { EdgeType, NodeType } from '@/hooks/useLightragGraph'
import { EdgeType, NodeType } from '@/hooks/useLightragGraph'
import useTheme from '@/hooks/useTheme'
import * as Constants from '@/lib/constants'
@@ -21,7 +22,6 @@ const isButtonPressed = (ev: MouseEvent | TouchEvent) => {
}
const GraphControl = ({ disableHoverEffect }: { disableHoverEffect?: boolean }) => {
const { lightrageGraph } = useLightragGraph()
const sigma = useSigma<NodeType, EdgeType>()
const registerEvents = useRegisterEvents<NodeType, EdgeType>()
const setSettings = useSetSettings<NodeType, EdgeType>()
@@ -34,21 +34,25 @@ const GraphControl = ({ disableHoverEffect }: { disableHoverEffect?: boolean })
const { theme } = useTheme()
const hideUnselectedEdges = useSettingsStore.use.enableHideUnselectedEdges()
const enableEdgeEvents = useSettingsStore.use.enableEdgeEvents()
const renderEdgeLabels = useSettingsStore.use.showEdgeLabel()
const renderLabels = useSettingsStore.use.showNodeLabel()
const selectedNode = useGraphStore.use.selectedNode()
const focusedNode = useGraphStore.use.focusedNode()
const selectedEdge = useGraphStore.use.selectedEdge()
const focusedEdge = useGraphStore.use.focusedEdge()
const sigmaGraph = useGraphStore.use.sigmaGraph()
/**
* When component mount or maxIterations changes
* => load the graph and apply layout
*/
useEffect(() => {
// Create & load the graph
const graph = lightrageGraph()
loadGraph(graph)
assignLayout()
}, [assignLayout, loadGraph, lightrageGraph, maxIterations])
if (sigmaGraph) {
loadGraph(sigmaGraph as unknown as Graph<NodeType, EdgeType>)
assignLayout()
}
}, [assignLayout, loadGraph, sigmaGraph, maxIterations])
/**
* When component mount
@@ -58,39 +62,52 @@ const GraphControl = ({ disableHoverEffect }: { disableHoverEffect?: boolean })
const { setFocusedNode, setSelectedNode, setFocusedEdge, setSelectedEdge, clearSelection } =
useGraphStore.getState()
// Register the events
registerEvents({
enterNode: (event) => {
// Define event types
type NodeEvent = { node: string; event: { original: MouseEvent | TouchEvent } }
type EdgeEvent = { edge: string; event: { original: MouseEvent | TouchEvent } }
// Register all events, but edge events will only be processed if enableEdgeEvents is true
const events: Record<string, any> = {
enterNode: (event: NodeEvent) => {
if (!isButtonPressed(event.event.original)) {
setFocusedNode(event.node)
}
},
leaveNode: (event) => {
leaveNode: (event: NodeEvent) => {
if (!isButtonPressed(event.event.original)) {
setFocusedNode(null)
}
},
clickNode: (event) => {
clickNode: (event: NodeEvent) => {
setSelectedNode(event.node)
setSelectedEdge(null)
},
clickEdge: (event) => {
clickStage: () => clearSelection()
}
// Only add edge event handlers if enableEdgeEvents is true
if (enableEdgeEvents) {
events.clickEdge = (event: EdgeEvent) => {
setSelectedEdge(event.edge)
setSelectedNode(null)
},
enterEdge: (event) => {
}
events.enterEdge = (event: EdgeEvent) => {
if (!isButtonPressed(event.event.original)) {
setFocusedEdge(event.edge)
}
},
leaveEdge: (event) => {
}
events.leaveEdge = (event: EdgeEvent) => {
if (!isButtonPressed(event.event.original)) {
setFocusedEdge(null)
}
},
clickStage: () => clearSelection()
})
}, [registerEvents])
}
}
// Register the events
registerEvents(events)
}, [registerEvents, enableEdgeEvents])
/**
* When component mount or hovered node change
@@ -101,7 +118,14 @@ const GraphControl = ({ disableHoverEffect }: { disableHoverEffect?: boolean })
const labelColor = isDarkTheme ? Constants.labelColorDarkTheme : undefined
const edgeColor = isDarkTheme ? Constants.edgeColorDarkTheme : undefined
// Update all dynamic settings directly without recreating the sigma container
setSettings({
// Update display settings
enableEdgeEvents,
renderEdgeLabels,
renderLabels,
// Node reducer for node appearance
nodeReducer: (node, data) => {
const graph = sigma.getGraph()
const newData: NodeType & {
@@ -140,6 +164,8 @@ const GraphControl = ({ disableHoverEffect }: { disableHoverEffect?: boolean })
}
return newData
},
// Edge reducer for edge appearance
edgeReducer: (edge, data) => {
const graph = sigma.getGraph()
const newData = { ...data, hidden: false, labelColor, color: edgeColor }
@@ -181,7 +207,10 @@ const GraphControl = ({ disableHoverEffect }: { disableHoverEffect?: boolean })
sigma,
disableHoverEffect,
theme,
hideUnselectedEdges
hideUnselectedEdges,
enableEdgeEvents,
renderEdgeLabels,
renderLabels
])
return null

View File

@@ -1,37 +1,48 @@
import { useCallback } from 'react'
import { useCallback, useEffect, useRef } from 'react'
import { AsyncSelect } from '@/components/ui/AsyncSelect'
import { getGraphLabels } from '@/api/lightrag'
import { useSettingsStore } from '@/stores/settings'
import { useGraphStore } from '@/stores/graph'
import { labelListLimit } from '@/lib/constants'
import MiniSearch from 'minisearch'
import { useTranslation } from 'react-i18next'
const lastGraph: any = {
graph: null,
searchEngine: null,
labels: []
}
const GraphLabels = () => {
const { t } = useTranslation()
const label = useSettingsStore.use.queryLabel()
const graph = useGraphStore.use.sigmaGraph()
const allDatabaseLabels = useGraphStore.use.allDatabaseLabels()
const labelsLoadedRef = useRef(false)
const getSearchEngine = useCallback(async () => {
if (lastGraph.graph == graph) {
return {
labels: lastGraph.labels,
searchEngine: lastGraph.searchEngine
}
}
const labels = ['*'].concat(await getGraphLabels())
// Ensure query label exists
if (!labels.includes(useSettingsStore.getState().queryLabel)) {
useSettingsStore.getState().setQueryLabel(labels[0])
// Track if a fetch is in progress to prevent multiple simultaneous fetches
const fetchInProgressRef = useRef(false)
// Fetch labels once on component mount, using global flag to prevent duplicates
useEffect(() => {
// Check if we've already attempted to fetch labels in this session
const labelsFetchAttempted = useGraphStore.getState().labelsFetchAttempted
// Only fetch if we haven't attempted in this session and no fetch is in progress
if (!labelsFetchAttempted && !fetchInProgressRef.current) {
fetchInProgressRef.current = true
// Set global flag to indicate we've attempted to fetch in this session
useGraphStore.getState().setLabelsFetchAttempted(true)
console.log('Fetching graph labels (once per session)...')
useGraphStore.getState().fetchAllDatabaseLabels()
.then(() => {
labelsLoadedRef.current = true
fetchInProgressRef.current = false
})
.catch((error) => {
console.error('Failed to fetch labels:', error)
fetchInProgressRef.current = false
// Reset global flag to allow retry
useGraphStore.getState().setLabelsFetchAttempted(false)
})
}
}, []) // Empty dependency array ensures this only runs once on mount
const getSearchEngine = useCallback(() => {
// Create search engine
const searchEngine = new MiniSearch({
idField: 'id',
@@ -46,41 +57,32 @@ const GraphLabels = () => {
})
// Add documents
const documents = labels.map((str, index) => ({ id: index, value: str }))
const documents = allDatabaseLabels.map((str, index) => ({ id: index, value: str }))
searchEngine.addAll(documents)
lastGraph.graph = graph
lastGraph.searchEngine = searchEngine
lastGraph.labels = labels
return {
labels,
labels: allDatabaseLabels,
searchEngine
}
}, [graph])
}, [allDatabaseLabels])
const fetchData = useCallback(
async (query?: string): Promise<string[]> => {
const { labels, searchEngine } = await getSearchEngine()
const { labels, searchEngine } = getSearchEngine()
let result: string[] = labels
if (query) {
// Search labels
result = searchEngine.search(query).map((r) => labels[r.id])
result = searchEngine.search(query).map((r: { id: number }) => labels[r.id])
}
return result.length <= labelListLimit
? result
: [...result.slice(0, labelListLimit), t('graphLabels.andOthers', { count: result.length - labelListLimit })]
: [...result.slice(0, labelListLimit), '...']
},
[getSearchEngine]
)
const setQueryLabel = useCallback((label: string) => {
if (label.startsWith('And ') && label.endsWith(' others')) return
useSettingsStore.getState().setQueryLabel(label)
}, [])
return (
<AsyncSelect<string>
className="ml-2"
@@ -94,8 +96,38 @@ const GraphLabels = () => {
notFound={<div className="py-6 text-center text-sm">No labels found</div>}
label={t('graphPanel.graphLabels.label')}
placeholder={t('graphPanel.graphLabels.placeholder')}
value={label !== null ? label : ''}
onChange={setQueryLabel}
value={label !== null ? label : '*'}
onChange={(newLabel) => {
const currentLabel = useSettingsStore.getState().queryLabel
// select the last item means query all
if (newLabel === '...') {
newLabel = '*'
}
// Reset the fetch attempted flag to force a new data fetch
useGraphStore.getState().setGraphDataFetchAttempted(false)
// Clear current graph data to ensure complete reload when label changes
if (newLabel !== currentLabel) {
const graphStore = useGraphStore.getState();
graphStore.clearSelection();
// Reset the graph state but preserve the instance
if (graphStore.sigmaGraph) {
const nodes = Array.from(graphStore.sigmaGraph.nodes());
nodes.forEach(node => graphStore.sigmaGraph?.dropNode(node));
}
}
if (newLabel === currentLabel && newLabel !== '*') {
// reselecting the same item means query all
useSettingsStore.getState().setQueryLabel('*')
} else {
useSettingsStore.getState().setQueryLabel(newLabel)
}
}}
clearable={false} // Prevent clearing value on reselect
/>
)
}

View File

@@ -1,4 +1,4 @@
import { FC, useCallback, useMemo } from 'react'
import { FC, useCallback, useEffect, useMemo } from 'react'
import {
EdgeById,
NodeById,
@@ -28,6 +28,7 @@ function OptionComponent(item: OptionItem) {
}
const messageId = '__message_item'
// Reset this cache when graph changes to ensure fresh search results
const lastGraph: any = {
graph: null,
searchEngine: null
@@ -48,6 +49,15 @@ export const GraphSearchInput = ({
const { t } = useTranslation()
const graph = useGraphStore.use.sigmaGraph()
// Force reset the cache when graph changes
useEffect(() => {
if (graph) {
// Reset cache to ensure fresh search results with new graph data
lastGraph.graph = null;
lastGraph.searchEngine = null;
}
}, [graph]);
const searchEngine = useMemo(() => {
if (lastGraph.graph == graph) {
return lastGraph.searchEngine
@@ -85,8 +95,19 @@ export const GraphSearchInput = ({
const loadOptions = useCallback(
async (query?: string): Promise<OptionItem[]> => {
if (onFocus) onFocus(null)
if (!query || !searchEngine) return []
const result: OptionItem[] = searchEngine.search(query).map((r) => ({
if (!graph || !searchEngine) return []
// If no query, return first searchResultLimit nodes
if (!query) {
const nodeIds = graph.nodes().slice(0, searchResultLimit)
return nodeIds.map(id => ({
id,
type: 'nodes'
}))
}
// If there is a query, search the nodes
const result: OptionItem[] = searchEngine.search(query).map((r: { id: string }) => ({
id: r.id,
type: 'nodes'
}))
@@ -103,7 +124,7 @@ export const GraphSearchInput = ({
}
]
},
[searchEngine, onFocus]
[graph, searchEngine, onFocus, t]
)
return (

View File

@@ -96,9 +96,9 @@ const refineNodeProperties = (node: RawNodeType): NodeType => {
const neighbour = state.rawGraph.getNode(neighbourId)
if (neighbour) {
relationships.push({
type: isTarget ? 'Target' : 'Source',
type: 'Neighbour',
id: neighbourId,
label: neighbour.labels.join(', ')
label: neighbour.properties['entity_id'] ? neighbour.properties['entity_id'] : neighbour.labels.join(', ')
})
}
}
@@ -132,14 +132,22 @@ const PropertyRow = ({
onClick?: () => void
tooltip?: string
}) => {
const { t } = useTranslation()
const getPropertyNameTranslation = (name: string) => {
const translationKey = `graphPanel.propertiesView.node.propertyNames.${name}`
const translation = t(translationKey)
return translation === translationKey ? name : translation
}
return (
<div className="flex items-center gap-2">
<label className="text-primary/60 tracking-wide">{name}</label>:
<label className="text-primary/60 tracking-wide whitespace-nowrap">{getPropertyNameTranslation(name)}</label>:
<Text
className="hover:bg-primary/20 rounded p-1 text-ellipsis"
className="hover:bg-primary/20 rounded p-1 overflow-hidden text-ellipsis"
tooltipClassName="max-w-80"
text={value}
tooltip={tooltip || value}
tooltip={tooltip || (typeof value === 'string' ? value : JSON.stringify(value, null, 2))}
side="left"
onClick={onClick}
/>
@@ -174,7 +182,7 @@ const NodePropertiesView = ({ node }: { node: NodeType }) => {
{node.relationships.length > 0 && (
<>
<label className="text-md pl-1 font-bold tracking-wide text-teal-600/90">
{t('graphPanel.propertiesView.node.relationships')}
{t('graphPanel.propertiesView.node.relationships')}
</label>
<div className="bg-primary/5 max-h-96 overflow-auto rounded p-1">
{node.relationships.map(({ type, id, label }) => {

View File

@@ -8,9 +8,10 @@ import Input from '@/components/ui/Input'
import { controlButtonVariant } from '@/lib/constants'
import { useSettingsStore } from '@/stores/settings'
import { useBackendState } from '@/stores/state'
import { useGraphStore } from '@/stores/graph'
import { SettingsIcon } from 'lucide-react'
import { useTranslation } from "react-i18next";
import { SettingsIcon, RefreshCwIcon } from 'lucide-react'
import { useTranslation } from 'react-i18next';
/**
* Component that displays a checkbox with a label.
@@ -114,6 +115,7 @@ const LabeledNumberInput = ({
export default function Settings() {
const [opened, setOpened] = useState<boolean>(false)
const [tempApiKey, setTempApiKey] = useState<string>('')
const refreshLayout = useGraphStore.use.refreshLayout()
const showPropertyPanel = useSettingsStore.use.showPropertyPanel()
const showNodeSearchBar = useSettingsStore.use.showNodeSearchBar()
@@ -208,116 +210,126 @@ export default function Settings() {
const { t } = useTranslation();
return (
<Popover open={opened} onOpenChange={setOpened}>
<PopoverTrigger asChild>
<Button variant={controlButtonVariant} tooltip={t("graphPanel.sideBar.settings.settings")} size="icon">
<SettingsIcon />
</Button>
</PopoverTrigger>
<PopoverContent
side="right"
align="start"
className="mb-2 p-2"
onCloseAutoFocus={(e) => e.preventDefault()}
<>
<Button
variant={controlButtonVariant}
tooltip={t('graphPanel.sideBar.settings.refreshLayout')}
size="icon"
onClick={refreshLayout}
>
<div className="flex flex-col gap-2">
<LabeledCheckBox
checked={enableHealthCheck}
onCheckedChange={setEnableHealthCheck}
label={t("graphPanel.sideBar.settings.healthCheck")}
/>
<Separator />
<LabeledCheckBox
checked={showPropertyPanel}
onCheckedChange={setShowPropertyPanel}
label={t("graphPanel.sideBar.settings.showPropertyPanel")}
/>
<LabeledCheckBox
checked={showNodeSearchBar}
onCheckedChange={setShowNodeSearchBar}
label={t("graphPanel.sideBar.settings.showSearchBar")}
/>
<Separator />
<LabeledCheckBox
checked={showNodeLabel}
onCheckedChange={setShowNodeLabel}
label={t("graphPanel.sideBar.settings.showNodeLabel")}
/>
<LabeledCheckBox
checked={enableNodeDrag}
onCheckedChange={setEnableNodeDrag}
label={t("graphPanel.sideBar.settings.nodeDraggable")}
/>
<Separator />
<LabeledCheckBox
checked={showEdgeLabel}
onCheckedChange={setShowEdgeLabel}
label={t("graphPanel.sideBar.settings.showEdgeLabel")}
/>
<LabeledCheckBox
checked={enableHideUnselectedEdges}
onCheckedChange={setEnableHideUnselectedEdges}
label={t("graphPanel.sideBar.settings.hideUnselectedEdges")}
/>
<LabeledCheckBox
checked={enableEdgeEvents}
onCheckedChange={setEnableEdgeEvents}
label={t("graphPanel.sideBar.settings.edgeEvents")}
/>
<Separator />
<LabeledNumberInput
label={t("graphPanel.sideBar.settings.maxQueryDepth")}
min={1}
value={graphQueryMaxDepth}
onEditFinished={setGraphQueryMaxDepth}
/>
<LabeledNumberInput
label={t("graphPanel.sideBar.settings.minDegree")}
min={0}
value={graphMinDegree}
onEditFinished={setGraphMinDegree}
/>
<LabeledNumberInput
label={t("graphPanel.sideBar.settings.maxLayoutIterations")}
min={1}
max={20}
value={graphLayoutMaxIterations}
onEditFinished={setGraphLayoutMaxIterations}
/>
<Separator />
<RefreshCwIcon />
</Button>
<Popover open={opened} onOpenChange={setOpened}>
<PopoverTrigger asChild>
<Button variant={controlButtonVariant} tooltip={t('graphPanel.sideBar.settings.settings')} size="icon">
<SettingsIcon />
</Button>
</PopoverTrigger>
<PopoverContent
side="right"
align="start"
className="mb-2 p-2"
onCloseAutoFocus={(e) => e.preventDefault()}
>
<div className="flex flex-col gap-2">
<label className="text-sm font-medium">{t("graphPanel.sideBar.settings.apiKey")}</label>
<form className="flex h-6 gap-2" onSubmit={(e) => e.preventDefault()}>
<div className="w-0 flex-1">
<Input
type="password"
value={tempApiKey}
onChange={handleTempApiKeyChange}
placeholder={t("graphPanel.sideBar.settings.enterYourAPIkey")}
className="max-h-full w-full min-w-0"
autoComplete="off"
/>
</div>
<Button
onClick={setApiKey}
variant="outline"
size="sm"
className="max-h-full shrink-0"
>
{t("graphPanel.sideBar.settings.save")}
</Button>
</form>
<LabeledCheckBox
checked={enableHealthCheck}
onCheckedChange={setEnableHealthCheck}
label={t('graphPanel.sideBar.settings.healthCheck')}
/>
<Separator />
<LabeledCheckBox
checked={showPropertyPanel}
onCheckedChange={setShowPropertyPanel}
label={t('graphPanel.sideBar.settings.showPropertyPanel')}
/>
<LabeledCheckBox
checked={showNodeSearchBar}
onCheckedChange={setShowNodeSearchBar}
label={t('graphPanel.sideBar.settings.showSearchBar')}
/>
<Separator />
<LabeledCheckBox
checked={showNodeLabel}
onCheckedChange={setShowNodeLabel}
label={t('graphPanel.sideBar.settings.showNodeLabel')}
/>
<LabeledCheckBox
checked={enableNodeDrag}
onCheckedChange={setEnableNodeDrag}
label={t('graphPanel.sideBar.settings.nodeDraggable')}
/>
<Separator />
<LabeledCheckBox
checked={showEdgeLabel}
onCheckedChange={setShowEdgeLabel}
label={t('graphPanel.sideBar.settings.showEdgeLabel')}
/>
<LabeledCheckBox
checked={enableHideUnselectedEdges}
onCheckedChange={setEnableHideUnselectedEdges}
label={t('graphPanel.sideBar.settings.hideUnselectedEdges')}
/>
<LabeledCheckBox
checked={enableEdgeEvents}
onCheckedChange={setEnableEdgeEvents}
label={t('graphPanel.sideBar.settings.edgeEvents')}
/>
<Separator />
<LabeledNumberInput
label={t('graphPanel.sideBar.settings.maxQueryDepth')}
min={1}
value={graphQueryMaxDepth}
onEditFinished={setGraphQueryMaxDepth}
/>
<LabeledNumberInput
label={t('graphPanel.sideBar.settings.minDegree')}
min={0}
value={graphMinDegree}
onEditFinished={setGraphMinDegree}
/>
<LabeledNumberInput
label={t('graphPanel.sideBar.settings.maxLayoutIterations')}
min={1}
max={30}
value={graphLayoutMaxIterations}
onEditFinished={setGraphLayoutMaxIterations}
/>
<Separator />
<div className="flex flex-col gap-2">
<label className="text-sm font-medium">{t('graphPanel.sideBar.settings.apiKey')}</label>
<form className="flex h-6 gap-2" onSubmit={(e) => e.preventDefault()}>
<div className="w-0 flex-1">
<Input
type="password"
value={tempApiKey}
onChange={handleTempApiKeyChange}
placeholder={t('graphPanel.sideBar.settings.enterYourAPIkey')}
className="max-h-full w-full min-w-0"
autoComplete="off"
/>
</div>
<Button
onClick={setApiKey}
variant="outline"
size="sm"
className="max-h-full shrink-0"
>
{t('graphPanel.sideBar.settings.save')}
</Button>
</form>
</div>
</div>
</div>
</PopoverContent>
</Popover>
</PopoverContent>
</Popover>
</>
)
}

View File

@@ -0,0 +1,21 @@
import { useSettingsStore } from '@/stores/settings'
import { useTranslation } from 'react-i18next'
/**
* Component that displays current values of important graph settings
* Positioned to the right of the toolbar at the bottom-left corner
*/
const SettingsDisplay = () => {
const { t } = useTranslation()
const graphQueryMaxDepth = useSettingsStore.use.graphQueryMaxDepth()
const graphMinDegree = useSettingsStore.use.graphMinDegree()
return (
<div className="absolute bottom-2 left-[calc(2rem+2.5rem)] flex items-center gap-2 text-xs text-gray-400">
<div>{t('graphPanel.sideBar.settings.depth')}: {graphQueryMaxDepth}</div>
<div>{t('graphPanel.sideBar.settings.degree')}: {graphMinDegree}</div>
</div>
)
}
export default SettingsDisplay

View File

@@ -25,7 +25,7 @@ export default function QuerySettings() {
}, [])
return (
<Card className="flex shrink-0 flex-col">
<Card className="flex shrink-0 flex-col min-w-[180px]">
<CardHeader className="px-4 pt-4 pb-2">
<CardTitle>{t('retrievePanel.querySettings.parametersTitle')}</CardTitle>
<CardDescription>{t('retrievePanel.querySettings.parametersDescription')}</CardDescription>

View File

@@ -1,4 +1,4 @@
import { useState, useEffect, useCallback } from 'react'
import React, { useState, useEffect, useCallback } from 'react'
import { Loader2 } from 'lucide-react'
import { useDebounce } from '@/hooks/useDebounce'
@@ -193,7 +193,7 @@ export function AsyncSearch<T>({
</div>
)}
</div>
<CommandList hidden={!open || debouncedSearchTerm.length === 0}>
<CommandList hidden={!open}>
{error && <div className="text-destructive p-4 text-center">{error}</div>}
{loading && options.length === 0 && (loadingSkeleton || <DefaultLoadingSkeleton />)}
{!loading &&
@@ -204,7 +204,7 @@ export function AsyncSearch<T>({
))}
<CommandGroup>
{options.map((option, idx) => (
<>
<React.Fragment key={getOptionValue(option) + `-fragment-${idx}`}>
<CommandItem
key={getOptionValue(option) + `${idx}`}
value={getOptionValue(option)}
@@ -215,9 +215,9 @@ export function AsyncSearch<T>({
{renderOption(option)}
</CommandItem>
{idx !== options.length - 1 && (
<div key={idx} className="bg-foreground/10 h-[1px]" />
<div key={`divider-${idx}`} className="bg-foreground/10 h-[1px]" />
)}
</>
</React.Fragment>
))}
</CommandGroup>
</CommandList>

View File

@@ -0,0 +1,37 @@
import React, { useEffect } from 'react';
import { useTabVisibility } from '@/contexts/useTabVisibility';
interface TabContentProps {
tabId: string;
children: React.ReactNode;
className?: string;
}
/**
* TabContent component that manages visibility based on tab selection
* Works with the TabVisibilityContext to show/hide content based on active tab
*/
const TabContent: React.FC<TabContentProps> = ({ tabId, children, className = '' }) => {
const { isTabVisible, setTabVisibility } = useTabVisibility();
const isVisible = isTabVisible(tabId);
// Register this tab with the context when mounted
useEffect(() => {
setTabVisibility(tabId, true);
// Cleanup when unmounted
return () => {
setTabVisibility(tabId, false);
};
}, [tabId, setTabVisibility]);
// Use CSS to hide content instead of not rendering it
// This prevents components from unmounting when tabs are switched
return (
<div className={`${className} ${isVisible ? '' : 'hidden'}`}>
{children}
</div>
);
};
export default TabContent;
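As a hedged usage sketch (not from this commit), a panel wrapped in `TabContent` keeps its children mounted while hidden, which is the point of the CSS-based visibility above; the `tabId` value and import path below are assumptions.

```tsx
// Hypothetical usage of TabContent; "retrieval" and '@/components/TabContent' are assumptions.
import TabContent from '@/components/TabContent'

export default function RetrievalPanel() {
  return (
    // The children keep their state (inputs, scroll position) across tab
    // switches, because TabContent only toggles the `hidden` CSS class
    // instead of unmounting the subtree.
    <TabContent tabId="retrieval" className="h-full w-full">
      <textarea placeholder="Ask something..." />
    </TabContent>
  )
}
```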

View File

@@ -42,9 +42,13 @@ const TabsContent = React.forwardRef<
<TabsPrimitive.Content
ref={ref}
className={cn(
'ring-offset-background focus-visible:ring-ring mt-2 focus-visible:ring-2 focus-visible:ring-offset-2 focus-visible:outline-none',
'ring-offset-background focus-visible:ring-ring focus-visible:ring-2 focus-visible:ring-offset-2 focus-visible:outline-none',
'data-[state=inactive]:invisible data-[state=active]:visible',
'h-full w-full',
className
)}
// Force mounting of inactive tabs to preserve WebGL contexts
forceMount
{...props}
/>
))

View File

@@ -10,30 +10,43 @@ const TooltipTrigger = TooltipPrimitive.Trigger
const processTooltipContent = (content: string) => {
if (typeof content !== 'string') return content
return content.split('\\n').map((line, i) => (
<React.Fragment key={i}>
{line}
{i < content.split('\\n').length - 1 && <br />}
</React.Fragment>
))
return (
<div className="relative top-0 pt-1 whitespace-pre-wrap break-words">
{content}
</div>
)
}
const TooltipContent = React.forwardRef<
React.ComponentRef<typeof TooltipPrimitive.Content>,
React.ComponentPropsWithoutRef<typeof TooltipPrimitive.Content>
>(({ className, sideOffset = 4, children, ...props }, ref) => (
<TooltipPrimitive.Content
ref={ref}
sideOffset={sideOffset}
className={cn(
'bg-popover text-popover-foreground animate-in fade-in-0 zoom-in-95 data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=closed]:zoom-out-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2 z-50 mx-1 max-w-sm overflow-hidden rounded-md border px-3 py-2 text-sm shadow-md',
className
)}
{...props}
>
{typeof children === 'string' ? processTooltipContent(children) : children}
</TooltipPrimitive.Content>
))
React.ComponentPropsWithoutRef<typeof TooltipPrimitive.Content> & {
side?: 'top' | 'right' | 'bottom' | 'left'
align?: 'start' | 'center' | 'end'
}
>(({ className, side = 'left', align = 'start', children, ...props }, ref) => {
const contentRef = React.useRef<HTMLDivElement>(null);
React.useEffect(() => {
if (contentRef.current) {
contentRef.current.scrollTop = 0;
}
}, [children]);
return (
<TooltipPrimitive.Content
ref={ref}
side={side}
align={align}
className={cn(
'bg-popover text-popover-foreground animate-in fade-in-0 zoom-in-95 data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=closed]:zoom-out-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2 max-h-[60vh] overflow-y-auto whitespace-pre-wrap break-words rounded-md border px-3 py-2 text-sm shadow-md',
className
)}
{...props}
>
{typeof children === 'string' ? processTooltipContent(children) : children}
</TooltipPrimitive.Content>
);
})
TooltipContent.displayName = TooltipPrimitive.Content.displayName
export { Tooltip, TooltipTrigger, TooltipContent, TooltipProvider }
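A hedged consumer sketch for the reworked `TooltipContent` (not part of this commit): it now defaults to `side="left"` / `align="start"`, and long multi-line values wrap and scroll instead of being truncated. The import path is an assumption, and a Radix `TooltipProvider` is assumed to be required as usual.

```tsx
// Hypothetical consumer of the reworked TooltipContent; the import path is an assumption.
import { Tooltip, TooltipTrigger, TooltipContent, TooltipProvider } from '@/components/ui/Tooltip'

export function DescriptionHint({ text }: { text: string }) {
  return (
    <TooltipProvider>
      <Tooltip>
        <TooltipTrigger asChild>
          <span className="cursor-help underline decoration-dotted">description</span>
        </TooltipTrigger>
        {/* Defaults to side="left" / align="start"; long strings wrap
            (whitespace-pre-wrap) and scroll within max-h-[60vh]. */}
        <TooltipContent>{text}</TooltipContent>
      </Tooltip>
    </TooltipProvider>
  )
}
```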

View File

@@ -0,0 +1,53 @@
import React, { useState, useEffect, useMemo } from 'react';
import { TabVisibilityContext } from './context';
import { TabVisibilityContextType } from './types';
import { useSettingsStore } from '@/stores/settings';
interface TabVisibilityProviderProps {
children: React.ReactNode;
}
/**
* Provider component for the TabVisibility context
* Manages the visibility state of tabs throughout the application
*/
export const TabVisibilityProvider: React.FC<TabVisibilityProviderProps> = ({ children }) => {
// Get current tab from settings store
const currentTab = useSettingsStore.use.currentTab();
// Initialize visibility state with current tab as visible
const [visibleTabs, setVisibleTabs] = useState<Record<string, boolean>>(() => ({
[currentTab]: true
}));
// Update visibility when current tab changes
useEffect(() => {
setVisibleTabs((prev) => ({
...prev,
[currentTab]: true
}));
}, [currentTab]);
// Create the context value with memoization to prevent unnecessary re-renders
const contextValue = useMemo<TabVisibilityContextType>(
() => ({
visibleTabs,
setTabVisibility: (tabId: string, isVisible: boolean) => {
setVisibleTabs((prev) => ({
...prev,
[tabId]: isVisible,
}));
},
isTabVisible: (tabId: string) => !!visibleTabs[tabId],
}),
[visibleTabs]
);
return (
<TabVisibilityContext.Provider value={contextValue}>
{children}
</TabVisibilityContext.Provider>
);
};
export default TabVisibilityProvider;

View File

@@ -0,0 +1,12 @@
import { createContext } from 'react';
import { TabVisibilityContextType } from './types';
// Default context value
const defaultContext: TabVisibilityContextType = {
visibleTabs: {},
setTabVisibility: () => {},
isTabVisible: () => false,
};
// Create the context
export const TabVisibilityContext = createContext<TabVisibilityContextType>(defaultContext);

View File

@@ -0,0 +1,5 @@
export interface TabVisibilityContextType {
visibleTabs: Record<string, boolean>;
setTabVisibility: (tabId: string, isVisible: boolean) => void;
isTabVisible: (tabId: string) => boolean;
}

View File

@@ -0,0 +1,17 @@
import { useContext } from 'react';
import { TabVisibilityContext } from './context';
import { TabVisibilityContextType } from './types';
/**
* Custom hook to access the tab visibility context
* @returns The tab visibility context
*/
export const useTabVisibility = (): TabVisibilityContextType => {
const context = useContext(TabVisibilityContext);
if (!context) {
throw new Error('useTabVisibility must be used within a TabVisibilityProvider');
}
return context;
};
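A short sketch (not from this commit) of using the hook directly to pause background work while a tab is hidden, mirroring what DocumentManager does further down; the component must sit under `TabVisibilityProvider`, and the tab id and interval are assumptions.

```tsx
// Hypothetical component using useTabVisibility directly; it must be rendered
// under TabVisibilityProvider. The 'documents' tab id and 5s interval are assumptions.
import { useEffect } from 'react'
import { useTabVisibility } from '@/contexts/useTabVisibility'

export function PipelineStatusPoller({ onTick }: { onTick: () => void }) {
  const { isTabVisible } = useTabVisibility()
  const visible = isTabVisible('documents')

  useEffect(() => {
    // Skip polling entirely while the tab is hidden.
    if (!visible) return
    const interval = setInterval(onTick, 5000)
    return () => clearInterval(interval)
  }, [visible, onTick])

  return null
}
```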

View File

@@ -1,5 +1,40 @@
import { useState, useEffect } from 'react'
import { useTabVisibility } from '@/contexts/useTabVisibility'
import { backendBaseUrl } from '@/lib/constants'
import { useTranslation } from 'react-i18next'
export default function ApiSite() {
return <iframe src={backendBaseUrl + '/docs'} className="size-full" />
const { t } = useTranslation()
const { isTabVisible } = useTabVisibility()
const isApiTabVisible = isTabVisible('api')
const [iframeLoaded, setIframeLoaded] = useState(false)
// Load the iframe once on component mount
useEffect(() => {
if (!iframeLoaded) {
setIframeLoaded(true)
}
}, [iframeLoaded])
// Use CSS to hide content when tab is not visible
return (
<div className={`size-full ${isApiTabVisible ? '' : 'hidden'}`}>
{iframeLoaded ? (
<iframe
src={backendBaseUrl + '/docs'}
className="size-full w-full h-full"
style={{ width: '100%', height: '100%', border: 'none' }}
// Use key to ensure iframe doesn't reload
key="api-docs-iframe"
/>
) : (
<div className="flex h-full w-full items-center justify-center bg-background">
<div className="text-center">
<div className="mb-2 h-8 w-8 animate-spin rounded-full border-4 border-primary border-t-transparent"></div>
<p>{t('apiSite.loading')}</p>
</div>
</div>
)}
</div>
)
}

View File

@@ -1,5 +1,6 @@
import { useState, useEffect, useCallback } from 'react'
import { useState, useEffect, useCallback, useRef } from 'react'
import { useTranslation } from 'react-i18next'
import { useTabVisibility } from '@/contexts/useTabVisibility'
import Button from '@/components/ui/Button'
import {
Table,
@@ -26,6 +27,9 @@ export default function DocumentManager() {
const { t } = useTranslation()
const health = useBackendState.use.health()
const [docs, setDocs] = useState<DocsStatusesResponse | null>(null)
const { isTabVisible } = useTabVisibility()
const isDocumentsTabVisible = isTabVisible('documents')
const initialLoadRef = useRef(false)
const fetchDocuments = useCallback(async () => {
try {
@@ -48,11 +52,15 @@ export default function DocumentManager() {
} catch (err) {
toast.error(t('documentPanel.documentManager.errors.loadFailed', { error: errorMessage(err) }))
}
}, [setDocs])
}, [setDocs, t])
// Only fetch documents when the tab becomes visible for the first time
useEffect(() => {
fetchDocuments()
}, []) // eslint-disable-line react-hooks/exhaustive-deps
if (isDocumentsTabVisible && !initialLoadRef.current) {
fetchDocuments()
initialLoadRef.current = true
}
}, [isDocumentsTabVisible, fetchDocuments])
const scanDocuments = useCallback(async () => {
try {
@@ -61,21 +69,24 @@ export default function DocumentManager() {
} catch (err) {
toast.error(t('documentPanel.documentManager.errors.scanFailed', { error: errorMessage(err) }))
}
}, [])
}, [t])
// Only set up polling when the tab is visible and health is good
useEffect(() => {
if (!isDocumentsTabVisible || !health) {
return
}
const interval = setInterval(async () => {
if (!health) {
return
}
try {
await fetchDocuments()
} catch (err) {
toast.error(t('documentPanel.documentManager.errors.scanProgressFailed', { error: errorMessage(err) }))
}
}, 5000)
return () => clearInterval(interval)
}, [health, fetchDocuments])
}, [health, fetchDocuments, t, isDocumentsTabVisible])
return (
<Card className="!size-full !rounded-none !border-none">

View File

@@ -1,4 +1,5 @@
import { useEffect, useState, useCallback, useMemo } from 'react'
import { useEffect, useState, useCallback, useMemo, useRef } from 'react'
import { useTabVisibility } from '@/contexts/useTabVisibility'
// import { MiniMap } from '@react-sigma/minimap'
import { SigmaContainer, useRegisterEvents, useSigma } from '@react-sigma/core'
import { Settings as SigmaSettings } from 'sigma/settings'
@@ -17,6 +18,7 @@ import Settings from '@/components/graph/Settings'
import GraphSearch from '@/components/graph/GraphSearch'
import GraphLabels from '@/components/graph/GraphLabels'
import PropertiesView from '@/components/graph/PropertiesView'
import SettingsDisplay from '@/components/graph/SettingsDisplay'
import { useSettingsStore } from '@/stores/settings'
import { useGraphStore } from '@/stores/graph'
@@ -90,8 +92,12 @@ const GraphEvents = () => {
}
},
// Disable the autoscale at the first down interaction
mousedown: () => {
if (!sigma.getCustomBBox()) sigma.setCustomBBox(sigma.getBBox())
mousedown: (e) => {
// Only set custom BBox if it's a drag operation (mouse button is pressed)
const mouseEvent = e.original as MouseEvent;
if (mouseEvent.buttons !== 0 && !sigma.getCustomBBox()) {
sigma.setCustomBBox(sigma.getBBox())
}
}
})
}, [registerEvents, sigma, draggedNode])
@@ -101,27 +107,46 @@ const GraphEvents = () => {
const GraphViewer = () => {
const [sigmaSettings, setSigmaSettings] = useState(defaultSigmaSettings)
const sigmaRef = useRef<any>(null)
const initAttemptedRef = useRef(false)
const selectedNode = useGraphStore.use.selectedNode()
const focusedNode = useGraphStore.use.focusedNode()
const moveToSelectedNode = useGraphStore.use.moveToSelectedNode()
const isFetching = useGraphStore.use.isFetching()
const shouldRender = useGraphStore.use.shouldRender() // Rendering control state
// Get tab visibility
const { isTabVisible } = useTabVisibility()
const isGraphTabVisible = isTabVisible('knowledge-graph')
const showPropertyPanel = useSettingsStore.use.showPropertyPanel()
const showNodeSearchBar = useSettingsStore.use.showNodeSearchBar()
const renderLabels = useSettingsStore.use.showNodeLabel()
const enableEdgeEvents = useSettingsStore.use.enableEdgeEvents()
const enableNodeDrag = useSettingsStore.use.enableNodeDrag()
const renderEdgeLabels = useSettingsStore.use.showEdgeLabel()
// Handle component mount/unmount and tab visibility
useEffect(() => {
setSigmaSettings({
...defaultSigmaSettings,
enableEdgeEvents,
renderEdgeLabels,
renderLabels
})
}, [renderLabels, enableEdgeEvents, renderEdgeLabels])
// When component mounts or tab becomes visible
if (isGraphTabVisible && !shouldRender && !isFetching && !initAttemptedRef.current) {
// If tab is visible but graph is not rendering, try to enable rendering
useGraphStore.getState().setShouldRender(true)
initAttemptedRef.current = true
console.log('Graph viewer initialized')
}
// Cleanup function when component unmounts
return () => {
// Only log cleanup, don't actually clean up the WebGL context
// This allows the WebGL context to persist across tab switches
console.log('Graph viewer cleanup')
}
}, [isGraphTabVisible, shouldRender, isFetching])
// Initialize sigma settings once on component mount
// All dynamic settings will be updated in GraphControl using useSetSettings
useEffect(() => {
setSigmaSettings(defaultSigmaSettings)
}, [])
const onSearchFocus = useCallback((value: GraphSearchOption | null) => {
if (value === null) useGraphStore.getState().setFocusedNode(null)
@@ -142,43 +167,73 @@ const GraphViewer = () => {
[selectedNode]
)
// Since TabsContent now forces mounting of all tabs, we need to conditionally render
// the SigmaContainer based on visibility to avoid unnecessary rendering
return (
<SigmaContainer settings={sigmaSettings} className="!bg-background !size-full overflow-hidden">
<GraphControl />
<div className="relative h-full w-full">
{/* Only render the SigmaContainer when the tab is visible */}
{isGraphTabVisible ? (
<SigmaContainer
settings={sigmaSettings}
className="!bg-background !size-full overflow-hidden"
ref={sigmaRef}
>
<GraphControl />
{enableNodeDrag && <GraphEvents />}
{enableNodeDrag && <GraphEvents />}
<FocusOnNode node={autoFocusedNode} move={moveToSelectedNode} />
<FocusOnNode node={autoFocusedNode} move={moveToSelectedNode} />
<div className="absolute top-2 left-2 flex items-start gap-2">
<GraphLabels />
{showNodeSearchBar && (
<GraphSearch
value={searchInitSelectedNode}
onFocus={onSearchFocus}
onChange={onSearchSelect}
/>
)}
</div>
<div className="absolute top-2 left-2 flex items-start gap-2">
<GraphLabels />
{showNodeSearchBar && (
<GraphSearch
value={searchInitSelectedNode}
onFocus={onSearchFocus}
onChange={onSearchSelect}
/>
)}
</div>
<div className="bg-background/60 absolute bottom-2 left-2 flex flex-col rounded-xl border-2 backdrop-blur-lg">
<Settings />
<ZoomControl />
<LayoutsControl />
<FullScreenControl />
{/* <ThemeToggle /> */}
</div>
<div className="bg-background/60 absolute bottom-2 left-2 flex flex-col rounded-xl border-2 backdrop-blur-lg">
<Settings />
<ZoomControl />
<LayoutsControl />
<FullScreenControl />
{/* <ThemeToggle /> */}
</div>
{showPropertyPanel && (
<div className="absolute top-2 right-2">
<PropertiesView />
{showPropertyPanel && (
<div className="absolute top-2 right-2">
<PropertiesView />
</div>
)}
{/* <div className="absolute bottom-2 right-2 flex flex-col rounded-xl border-2">
<MiniMap width="100px" height="100px" />
</div> */}
<SettingsDisplay />
</SigmaContainer>
) : (
// Placeholder when tab is not visible
<div className="flex h-full w-full items-center justify-center">
<div className="text-center text-muted-foreground">
{/* Placeholder content */}
</div>
</div>
)}
{/* <div className="absolute bottom-2 right-2 flex flex-col rounded-xl border-2">
<MiniMap width="100px" height="100px" />
</div> */}
</SigmaContainer>
{/* Loading overlay - shown when data is loading */}
{isFetching && (
<div className="absolute inset-0 flex items-center justify-center bg-background/80 z-10">
<div className="text-center">
<div className="mb-2 h-8 w-8 animate-spin rounded-full border-4 border-primary border-t-transparent"></div>
<p>Loading Graph Data...</p>
</div>
</div>
)}
</div>
)
}

View File

@@ -1,6 +1,6 @@
import Button from '@/components/ui/Button'
import { SiteInfo } from '@/lib/constants'
import ThemeToggle from '@/components/ThemeToggle'
import AppSettings from '@/components/AppSettings'
import LanguageToggle from '@/components/LanguageToggle'
import { TabsList, TabsTrigger } from '@/components/ui/Tabs'
import { useSettingsStore } from '@/stores/settings'
@@ -77,23 +77,15 @@ export default function SiteHeader() {
<TabsNavigation />
</div>
<nav className="flex items-center gap-2">
<Button variant="ghost" size="icon" side="bottom" tooltip={t('header.projectRepository')}>
<a href={SiteInfo.github} target="_blank" rel="noopener noreferrer">
<GithubIcon className="size-4" aria-hidden="true" />
</a>
</Button>
<LanguageToggle />
<ThemeToggle />
<Button
variant="ghost"
size="icon"
side="bottom"
tooltip="Log Out"
onClick={handleLogout}
>
<LogOutIcon className="size-4" aria-hidden="true" />
</Button>
<nav className="flex items-center">
<div className="flex items-center gap-2">
<Button variant="ghost" size="icon" side="bottom" tooltip={t('header.projectRepository')}>
<a href={SiteInfo.github} target="_blank" rel="noopener noreferrer">
<GithubIcon className="size-4" aria-hidden="true" />
</a>
</Button>
<AppSettings />
</div>
</nav>
</header>
)

View File

@@ -1,11 +1,12 @@
import Graph, { DirectedGraph } from 'graphology'
import { useCallback, useEffect } from 'react'
import { useCallback, useEffect, useRef } from 'react'
import { randomColor, errorMessage } from '@/lib/utils'
import * as Constants from '@/lib/constants'
import { useGraphStore, RawGraph } from '@/stores/graph'
import { queryGraphs } from '@/api/lightrag'
import { useBackendState } from '@/stores/state'
import { useSettingsStore } from '@/stores/settings'
import { useTabVisibility } from '@/contexts/useTabVisibility'
import seedrandom from 'seedrandom'
@@ -136,15 +137,23 @@ const fetchGraph = async (label: string, maxDepth: number, minDegree: number) =>
return rawGraph
}
// Create a new graph instance with the raw graph data
const createSigmaGraph = (rawGraph: RawGraph | null) => {
// Always create a new graph instance
const graph = new DirectedGraph()
// Add nodes from raw graph data
for (const rawNode of rawGraph?.nodes ?? []) {
// Ensure we have fresh random positions for nodes
seedrandom(rawNode.id + Date.now().toString(), { global: true })
const x = Math.random()
const y = Math.random()
graph.addNode(rawNode.id, {
label: rawNode.labels.join(', '),
color: rawNode.color,
x: rawNode.x,
y: rawNode.y,
x: x,
y: y,
size: rawNode.size,
// for node-border
borderColor: Constants.nodeBorderColor,
@@ -152,6 +161,7 @@ const createSigmaGraph = (rawGraph: RawGraph | null) => {
})
}
// Add edges from raw graph data
for (const rawEdge of rawGraph?.edges ?? []) {
rawEdge.dynamicId = graph.addDirectedEdge(rawEdge.source, rawEdge.target, {
label: rawEdge.type || undefined
@@ -161,14 +171,30 @@ const createSigmaGraph = (rawGraph: RawGraph | null) => {
return graph
}
const lastQueryLabel = { label: '', maxQueryDepth: 0, minDegree: 0 }
const useLightrangeGraph = () => {
const queryLabel = useSettingsStore.use.queryLabel()
const rawGraph = useGraphStore.use.rawGraph()
const sigmaGraph = useGraphStore.use.sigmaGraph()
const maxQueryDepth = useSettingsStore.use.graphQueryMaxDepth()
const minDegree = useSettingsStore.use.graphMinDegree()
const isFetching = useGraphStore.use.isFetching()
// Get tab visibility
const { isTabVisible } = useTabVisibility()
const isGraphTabVisible = isTabVisible('knowledge-graph')
// Track previous parameters to detect actual changes
const prevParamsRef = useRef({ queryLabel, maxQueryDepth, minDegree })
// Use ref to track if data has been loaded and initial load
const dataLoadedRef = useRef(false)
const initialLoadRef = useRef(false)
// Check if parameters have changed
const paramsChanged =
prevParamsRef.current.queryLabel !== queryLabel ||
prevParamsRef.current.maxQueryDepth !== maxQueryDepth ||
prevParamsRef.current.minDegree !== minDegree
const getNode = useCallback(
(nodeId: string) => {
@@ -184,35 +210,131 @@ const useLightrangeGraph = () => {
[rawGraph]
)
useEffect(() => {
if (queryLabel) {
if (lastQueryLabel.label !== queryLabel ||
lastQueryLabel.maxQueryDepth !== maxQueryDepth ||
lastQueryLabel.minDegree !== minDegree) {
lastQueryLabel.label = queryLabel
lastQueryLabel.maxQueryDepth = maxQueryDepth
lastQueryLabel.minDegree = minDegree
// Track if a fetch is in progress to prevent multiple simultaneous fetches
const fetchInProgressRef = useRef(false)
// Data fetching logic - simplified but preserving TAB visibility check
useEffect(() => {
// Skip if fetch is already in progress
if (fetchInProgressRef.current) {
return
}
// If there's no query label, reset the graph
if (!queryLabel) {
if (rawGraph !== null || sigmaGraph !== null) {
const state = useGraphStore.getState()
state.reset()
fetchGraph(queryLabel, maxQueryDepth, minDegree).then((data) => {
// console.debug('Query label: ' + queryLabel)
state.setSigmaGraph(createSigmaGraph(data))
data?.buildDynamicMap()
state.setRawGraph(data)
state.setGraphDataFetchAttempted(false)
state.setLabelsFetchAttempted(false)
}
dataLoadedRef.current = false
initialLoadRef.current = false
return
}
// Check if parameters have changed
if (!isFetching && !fetchInProgressRef.current &&
(paramsChanged || !useGraphStore.getState().graphDataFetchAttempted)) {
// Only fetch data if the Graph tab is visible
if (!isGraphTabVisible) {
console.log('Graph tab not visible, skipping data fetch');
return;
}
// Set flags
fetchInProgressRef.current = true
useGraphStore.getState().setGraphDataFetchAttempted(true)
const state = useGraphStore.getState()
state.setIsFetching(true)
state.setShouldRender(false) // Disable rendering during data loading
// Clear selection and highlighted nodes before fetching new graph
state.clearSelection()
if (state.sigmaGraph) {
state.sigmaGraph.forEachNode((node) => {
state.sigmaGraph?.setNodeAttribute(node, 'highlighted', false)
})
}
} else {
const state = useGraphStore.getState()
state.reset()
state.setSigmaGraph(new DirectedGraph())
// Update parameter reference
prevParamsRef.current = { queryLabel, maxQueryDepth, minDegree }
console.log('Fetching graph data...')
// Use a local copy of the parameters
const currentQueryLabel = queryLabel
const currentMaxQueryDepth = maxQueryDepth
const currentMinDegree = minDegree
// Fetch graph data
fetchGraph(currentQueryLabel, currentMaxQueryDepth, currentMinDegree).then((data) => {
const state = useGraphStore.getState()
// Reset state
state.reset()
// Create and set new graph directly
const newSigmaGraph = createSigmaGraph(data)
data?.buildDynamicMap()
// Set new graph data
state.setSigmaGraph(newSigmaGraph)
state.setRawGraph(data)
// No longer need to extract labels from graph data
// Update flags
dataLoadedRef.current = true
initialLoadRef.current = true
fetchInProgressRef.current = false
// Reset camera view
state.setMoveToSelectedNode(true)
// Enable rendering if the tab is visible
state.setShouldRender(isGraphTabVisible)
state.setIsFetching(false)
}).catch((error) => {
console.error('Error fetching graph data:', error)
// Reset state on error
const state = useGraphStore.getState()
state.setIsFetching(false)
state.setShouldRender(isGraphTabVisible)
dataLoadedRef.current = false
fetchInProgressRef.current = false
state.setGraphDataFetchAttempted(false)
})
}
}, [queryLabel, maxQueryDepth, minDegree])
}, [queryLabel, maxQueryDepth, minDegree, isFetching, paramsChanged, isGraphTabVisible, rawGraph, sigmaGraph])
// Update rendering state and handle tab visibility changes
useEffect(() => {
// When tab becomes visible
if (isGraphTabVisible) {
// If we have data, enable rendering
if (rawGraph) {
useGraphStore.getState().setShouldRender(true)
}
// We no longer reset the fetch attempted flag here to prevent continuous API calls
} else {
// When tab becomes invisible, disable rendering
useGraphStore.getState().setShouldRender(false)
}
}, [isGraphTabVisible, rawGraph])
const lightrageGraph = useCallback(() => {
// If we already have a graph instance, return it
if (sigmaGraph) {
return sigmaGraph as Graph<NodeType, EdgeType>
}
// If no graph exists yet, create a new one and store it
console.log('Creating new Sigma graph instance')
const graph = new DirectedGraph()
useGraphStore.getState().setSigmaGraph(graph)
return graph as Graph<NodeType, EdgeType>

View File

@@ -0,0 +1,37 @@
import i18n from 'i18next'
import { initReactI18next } from 'react-i18next'
import { useSettingsStore } from '@/stores/settings'
import en from './locales/en.json'
import zh from './locales/zh.json'
// Function to sync i18n with store state
export const initializeI18n = async (): Promise<typeof i18n> => {
// Get initial language from store
const initialLanguage = useSettingsStore.getState().language
// Initialize with store language
await i18n.use(initReactI18next).init({
resources: {
en: { translation: en },
zh: { translation: zh }
},
lng: initialLanguage,
fallbackLng: 'en',
interpolation: {
escapeValue: false
}
})
// Subscribe to language changes
useSettingsStore.subscribe((state) => {
const currentLanguage = state.language
if (i18n.language !== currentLanguage) {
i18n.changeLanguage(currentLanguage)
}
})
return i18n
}
export default i18n
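Because `initializeI18n` subscribes to the settings store, any `setLanguage` call updates translations everywhere; a minimal sketch, relying only on the `setLanguage` action already shown above.

```ts
// Hypothetical helper; it relies only on the setLanguage action used by AppSettings.
// The subscription registered in initializeI18n() notices the change and calls
// i18n.changeLanguage() for every mounted component.
import { useSettingsStore } from '@/stores/settings'

export function switchLanguage(lang: 'en' | 'zh') {
  useSettingsStore.getState().setLanguage(lang)
}
```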

View File

@@ -1,7 +1,7 @@
import { ButtonVariantType } from '@/components/ui/Button'
export const backendBaseUrl = 'http://localhost:9621/'
export const webuiPrefix = '/webui'
export const webuiPrefix = ''
export const controlButtonVariant: ButtonVariantType = 'ghost'
@@ -16,8 +16,8 @@ export const edgeColorDarkTheme = '#969696'
export const edgeColorSelected = '#F57F17'
export const edgeColorHighlighted = '#B2EBF2'
export const searchResultLimit = 20
export const labelListLimit = 40
export const searchResultLimit = 50
export const labelListLimit = 100
export const minNodeSize = 4
export const maxNodeSize = 20

View File

@@ -1,4 +1,11 @@
{
"settings": {
"language": "Language",
"theme": "Theme",
"light": "Light",
"dark": "Dark",
"system": "System"
},
"header": {
"documents": "Documents",
"knowledgeGraph": "Knowledge Graph",
@@ -91,9 +98,12 @@
"maxQueryDepth": "Max Query Depth",
"minDegree": "Minimum Degree",
"maxLayoutIterations": "Max Layout Iterations",
"depth": "Depth",
"degree": "Degree",
"apiKey": "API Key",
"enterYourAPIkey": "Enter your API key",
"save": "Save"
"save": "Save",
"refreshLayout": "Refresh Layout"
},
"zoomControl": {
@@ -152,7 +162,14 @@
"labels": "Labels",
"degree": "Degree",
"properties": "Properties",
"relationships": "Relationships"
"relationships": "Relationships",
"propertyNames": {
"description": "Description",
"entity_id": "Name",
"entity_type": "Type",
"source_id": "SrcID",
"Neighbour": "Neigh"
}
},
"edge": {
"title": "Relationship",
@@ -242,5 +259,8 @@
"streamResponse": "Stream Response",
"streamResponseTooltip": "If True, enables streaming output for real-time responses"
}
},
"apiSite": {
"loading": "Loading API Documentation..."
}
}

View File

@@ -1,4 +1,11 @@
{
"settings": {
"language": "语言",
"theme": "主题",
"light": "浅色",
"dark": "深色",
"system": "系统"
},
"header": {
"documents": "文档",
"knowledgeGraph": "知识图谱",
@@ -6,8 +13,8 @@
"api": "API",
"projectRepository": "项目仓库",
"themeToggle": {
"switchToLight": "切换到色主题",
"switchToDark": "切换到色主题"
"switchToLight": "切换到色主题",
"switchToDark": "切换到色主题"
}
},
"login": {
@@ -24,35 +31,35 @@
},
"documentPanel": {
"clearDocuments": {
"button": "清",
"tooltip": "清文档",
"title": "清文档",
"confirm": "确定要清所有文档吗?",
"button": "清",
"tooltip": "清文档",
"title": "清文档",
"confirm": "确定要清所有文档吗?",
"confirmButton": "确定",
"success": "文档已成功清除",
"failed": "清文档失败:\n{{message}}",
"error": "清文档失败:\n{{error}}"
"success": "文档清空成功",
"failed": "清文档失败\n{{message}}",
"error": "清文档失败\n{{error}}"
},
"uploadDocuments": {
"button": "上传",
"tooltip": "上传文档",
"title": "上传文档",
"description": "拖放文档到此处或点击浏览",
"uploading": "正在上传 {{name}}: {{percent}}%",
"success": "上传成功:\n{{name}} 上传成",
"failed": "上传失败:\n{{name}}\n{{message}}",
"error": "上传失败:\n{{name}}\n{{error}}",
"description": "拖拽文件到此处或点击浏览",
"uploading": "正在上传 {{name}}{{percent}}%",
"success": "上传成功\n{{name}} 上传成",
"failed": "上传失败\n{{name}}\n{{message}}",
"error": "上传失败\n{{name}}\n{{error}}",
"generalError": "上传失败\n{{error}}",
"fileTypes": "支持的文件类型: TXT, MD, DOCX, PDF, PPTX, RTF, ODT, EPUB, HTML, HTM, TEX, JSON, XML, YAML, YML, CSV, LOG, CONF, INI, PROPERTIES, SQL, BAT, SH, C, CPP, PY, JAVA, JS, TS, SWIFT, GO, RB, PHP, CSS, SCSS, LESS"
"fileTypes": "支持的文件类型TXT, MD, DOCX, PDF, PPTX, RTF, ODT, EPUB, HTML, HTM, TEX, JSON, XML, YAML, YML, CSV, LOG, CONF, INI, PROPERTIES, SQL, BAT, SH, C, CPP, PY, JAVA, JS, TS, SWIFT, GO, RB, PHP, CSS, SCSS, LESS"
},
"documentManager": {
"title": "文档管理",
"scanButton": "扫描",
"scanTooltip": "扫描文档",
"uploadedTitle": "已上传文档",
"uploadedDescription": "已上传文档及其状态列表。",
"emptyTitle": "无文档",
"emptyDescription": "尚未上传任何文档",
"uploadedDescription": "已上传文档列表及其状态",
"emptyTitle": "无文档",
"emptyDescription": "还没有上传任何文档",
"columns": {
"id": "ID",
"summary": "摘要",
@@ -66,7 +73,7 @@
"status": {
"completed": "已完成",
"processing": "处理中",
"pending": "待处理",
"pending": "等待中",
"failed": "失败"
},
"errors": {
@@ -86,39 +93,39 @@
"showNodeLabel": "显示节点标签",
"nodeDraggable": "节点可拖动",
"showEdgeLabel": "显示边标签",
"hideUnselectedEdges": "隐藏未选中边",
"hideUnselectedEdges": "隐藏未选中边",
"edgeEvents": "边事件",
"maxQueryDepth": "最大查询深度",
"minDegree": "最小度数",
"maxLayoutIterations": "最大布局迭代次数",
"apiKey": "API 密钥",
"enterYourAPIkey": "输入您的 API 密钥",
"save": "保存"
"depth": "深度",
"degree": "邻边",
"apiKey": "API密钥",
"enterYourAPIkey": "输入您的API密钥",
"save": "保存",
"refreshLayout": "刷新布局"
},
"zoomControl": {
"zoomIn": "放大",
"zoomOut": "缩小",
"resetZoom": "重置缩放"
},
"layoutsControl": {
"startAnimation": "开始布局动画",
"stopAnimation": "停止布局动画",
"layoutGraph": "布局",
"layoutGraph": "布局",
"layouts": {
"Circular": "环形布局",
"Circlepack": "圆形打包布局",
"Random": "随机布局",
"Noverlaps": "无重叠布局",
"Force Directed": "力导向布局",
"Force Atlas": "力导向图谱布局"
"Circular": "环形",
"Circlepack": "圆形打包",
"Random": "随机",
"Noverlaps": "无重叠",
"Force Directed": "力导向",
"Force Atlas": "力"
}
},
"fullScreenControl": {
"fullScreen": "全屏",
"windowed": "窗口模式"
"windowed": "窗口"
}
},
"statusIndicator": {
@@ -130,17 +137,17 @@
"storageInfo": "存储信息",
"workingDirectory": "工作目录",
"inputDirectory": "输入目录",
"llmConfig": "LLM 配置",
"llmBinding": "LLM 绑定",
"llmBindingHost": "LLM 绑定主机",
"llmModel": "LLM 模型",
"maxTokens": "最大 Token 数",
"llmConfig": "LLM配置",
"llmBinding": "LLM绑定",
"llmBindingHost": "LLM绑定主机",
"llmModel": "LLM模型",
"maxTokens": "最大令牌数",
"embeddingConfig": "嵌入配置",
"embeddingBinding": "嵌入绑定",
"embeddingBindingHost": "嵌入绑定主机",
"embeddingModel": "嵌入模型",
"storageConfig": "存储配置",
"kvStorage": "KV 存储",
"kvStorage": "KV存储",
"docStatusStorage": "文档状态存储",
"graphStorage": "图存储",
"vectorStorage": "向量存储"
@@ -152,96 +159,93 @@
"labels": "标签",
"degree": "度数",
"properties": "属性",
"relationships": "关系"
"relationships": "关系",
"propertyNames": {
"description": "描述",
"entity_id": "名称",
"entity_type": "类型",
"source_id": "信源ID",
"Neighbour": "邻接"
}
},
"edge": {
"title": "关系",
"id": "ID",
"type": "类型",
"source": "源",
"target": "目标",
"source": "源节点",
"target": "目标节点",
"properties": "属性"
}
},
"search": {
"placeholder": "搜索节点...",
"message": "以及其它 {count} "
"message": "还有 {count} "
},
"graphLabels": {
"selectTooltip": "选择查询标签",
"noLabels": "未找到标签",
"label": "标签",
"placeholder": "搜索标签...",
"andOthers": "以及其它 {count} 个"
"andOthers": "还有 {count} 个"
}
},
"retrievePanel": {
"chatMessage": {
"copyTooltip": "复制到剪贴板",
"copyError": "无法复制文本到剪贴板"
"copyError": "复制文本到剪贴板失败"
},
"retrieval": {
"startPrompt": "在下面输入您的查询开始检索",
"clear": "清",
"startPrompt": "输入查询开始检索",
"clear": "清",
"send": "发送",
"placeholder": "输入您的查询...",
"error": "错误:无法获取响应"
"placeholder": "输入查询...",
"error": "错误:获取响应失败"
},
"querySettings": {
"parametersTitle": "参数设置",
"parametersTitle": "参数",
"parametersDescription": "配置查询参数",
"queryMode": "查询模式",
"queryModeTooltip": "选择检索策略:\n• 朴素:不使用高级技术的基本搜索\n• 本地:基于上下文信息检索\n• 全局:利用全局知识库\n• 混合:结合本地和全局检索\n• 综合:集成知识图谱向量检索",
"queryModeTooltip": "选择检索策略:\n• Naive基础搜索无高级技术\n• Local上下文相关信息检索\n• Global:利用全局知识库\n• Hybrid:结合本地和全局检索\n• Mix整合知识图谱向量检索",
"queryModeOptions": {
"naive": "朴素",
"local": "本地",
"global": "全局",
"hybrid": "混合",
"mix": "合"
"mix": "合"
},
"responseFormat": "响应格式",
"responseFormatTooltip": "定义响应格式。例如:\n• 多段落\n• 单段落\n• 项目符号",
"responseFormatTooltip": "定义响应格式。例如:\n• 多段落\n• 单段落\n• 要点",
"responseFormatOptions": {
"multipleParagraphs": "多段落",
"singleParagraph": "单段落",
"bulletPoints": "项目符号"
"multipleParagraphs": "多段落",
"singleParagraph": "单段落",
"bulletPoints": "要点"
},
"topK": "Top K 结果数",
"topKTooltip": "要检索的前 K 个项目数量。在“本地”模式下表示实体,在“全局”模式下表示关系",
"topKPlaceholder": "结果数",
"maxTokensTextUnit": "文本单元最大 Token 数",
"maxTokensTextUnitTooltip": "每个检索到的文本块允许的最大 Token 数",
"maxTokensGlobalContext": "全局上下文最大 Token 数",
"maxTokensGlobalContextTooltip": "在全局检索中为关系描述分配的最大 Token 数",
"maxTokensLocalContext": "本地上下文最大 Token 数",
"maxTokensLocalContextTooltip": "在本地检索中为实体描述分配的最大 Token 数",
"topK": "Top K结果",
"topKTooltip": "检索的顶部项目数。在'local'模式下表示实体,在'global'模式下表示关系",
"topKPlaceholder": "结果数量",
"maxTokensTextUnit": "文本单元最大令牌数",
"maxTokensTextUnitTooltip": "每个检索文本块允许的最大令牌数",
"maxTokensGlobalContext": "全局上下文最大令牌数",
"maxTokensGlobalContextTooltip": "全局检索中关系描述的最大令牌数",
"maxTokensLocalContext": "本地上下文最大令牌数",
"maxTokensLocalContextTooltip": "本地检索中实体描述的最大令牌数",
"historyTurns": "历史轮次",
"historyTurnsTooltip": "响应上下文中考虑的完整对话轮次(用户-助手对)",
"historyTurnsPlaceholder": "历史轮次的数量",
"historyTurnsTooltip": "响应上下文中考虑的完整对话轮次(用户-助手对)数量",
"historyTurnsPlaceholder": "历史轮次",
"hlKeywords": "高级关键词",
"hlKeywordsTooltip": "检索优先考虑的高级关键词。用逗号分隔",
"hlKeywordsTooltip": "检索优先考虑的高级关键词列表。用逗号分隔",
"hlkeywordsPlaceHolder": "输入关键词",
"llKeywords": "低级关键词",
"llKeywordsTooltip": "用于化检索点的低级关键词。用逗号分隔",
"onlyNeedContext": "仅需要上下文",
"onlyNeedContextTooltip": "如果为 True则仅返回检索到的上下文而不会生成回复",
"onlyNeedPrompt": "仅需要提示",
"onlyNeedPromptTooltip": "如果为 True则仅返回生成的提示而不会生成回复",
"llKeywordsTooltip": "用于化检索点的低级关键词列表。用逗号分隔",
"onlyNeedContext": "仅需上下文",
"onlyNeedContextTooltip": "如果为True仅返回检索到的上下文而不生成响应",
"onlyNeedPrompt": "仅需提示",
"onlyNeedPromptTooltip": "如果为True仅返回生成的提示而不产生响应",
"streamResponse": "流式响应",
"streamResponseTooltip": "如果为 True启用流式输出以获得实时响应"
"streamResponseTooltip": "如果为True启用实时流式输出响应"
}
},
"apiSite": {
"loading": "正在加载 API 文档..."
}
}

View File

@@ -5,6 +5,7 @@ import AppRouter from './AppRouter'
import "./i18n";
createRoot(document.getElementById('root')!).render(
<StrictMode>
<AppRouter />

View File

@@ -1,6 +1,7 @@
import { create } from 'zustand'
import { createSelectors } from '@/lib/utils'
import { DirectedGraph } from 'graphology'
import { getGraphLabels } from '@/api/lightrag'
export type RawNodeType = {
id: string
@@ -65,9 +66,17 @@ interface GraphState {
rawGraph: RawGraph | null
sigmaGraph: DirectedGraph | null
allDatabaseLabels: string[]
moveToSelectedNode: boolean
isFetching: boolean
shouldRender: boolean
// Global flags to track data fetching attempts
graphDataFetchAttempted: boolean
labelsFetchAttempted: boolean
refreshLayout: () => void
setSelectedNode: (nodeId: string | null, moveToSelectedNode?: boolean) => void
setFocusedNode: (nodeId: string | null) => void
setSelectedEdge: (edgeId: string | null) => void
@@ -79,19 +88,47 @@ interface GraphState {
setRawGraph: (rawGraph: RawGraph | null) => void
setSigmaGraph: (sigmaGraph: DirectedGraph | null) => void
setAllDatabaseLabels: (labels: string[]) => void
fetchAllDatabaseLabels: () => Promise<void>
setIsFetching: (isFetching: boolean) => void
setShouldRender: (shouldRender: boolean) => void
// Methods to set global flags
setGraphDataFetchAttempted: (attempted: boolean) => void
setLabelsFetchAttempted: (attempted: boolean) => void
}
const useGraphStoreBase = create<GraphState>()((set) => ({
const useGraphStoreBase = create<GraphState>()((set, get) => ({
selectedNode: null,
focusedNode: null,
selectedEdge: null,
focusedEdge: null,
moveToSelectedNode: false,
isFetching: false,
shouldRender: false,
// Initialize global flags
graphDataFetchAttempted: false,
labelsFetchAttempted: false,
rawGraph: null,
sigmaGraph: null,
allDatabaseLabels: ['*'],
refreshLayout: () => {
const currentGraph = get().sigmaGraph;
if (currentGraph) {
get().clearSelection();
get().setSigmaGraph(null);
setTimeout(() => {
get().setSigmaGraph(currentGraph);
}, 10);
}
},
setIsFetching: (isFetching: boolean) => set({ isFetching }),
setShouldRender: (shouldRender: boolean) => set({ shouldRender }),
setSelectedNode: (nodeId: string | null, moveToSelectedNode?: boolean) =>
set({ selectedNode: nodeId, moveToSelectedNode }),
setFocusedNode: (nodeId: string | null) => set({ focusedNode: nodeId }),
@@ -104,25 +141,58 @@ const useGraphStoreBase = create<GraphState>()((set) => ({
selectedEdge: null,
focusedEdge: null
}),
reset: () =>
reset: () => {
// Get the existing graph
const existingGraph = get().sigmaGraph;
// If we have an existing graph, clear it by removing all nodes
if (existingGraph) {
const nodes = Array.from(existingGraph.nodes());
nodes.forEach(node => existingGraph.dropNode(node));
}
set({
selectedNode: null,
focusedNode: null,
selectedEdge: null,
focusedEdge: null,
rawGraph: null,
sigmaGraph: null,
moveToSelectedNode: false
}),
// Keep the existing graph instance but with cleared data
moveToSelectedNode: false,
shouldRender: false
});
},
setRawGraph: (rawGraph: RawGraph | null) =>
set({
rawGraph
}),
setSigmaGraph: (sigmaGraph: DirectedGraph | null) => set({ sigmaGraph }),
setSigmaGraph: (sigmaGraph: DirectedGraph | null) => {
// Replace graph instance, no need to keep WebGL context
set({ sigmaGraph });
},
setMoveToSelectedNode: (moveToSelectedNode?: boolean) => set({ moveToSelectedNode })
setAllDatabaseLabels: (labels: string[]) => set({ allDatabaseLabels: labels }),
fetchAllDatabaseLabels: async () => {
try {
console.log('Fetching all database labels...');
const labels = await getGraphLabels();
set({ allDatabaseLabels: ['*', ...labels] });
return;
} catch (error) {
console.error('Failed to fetch all database labels:', error);
set({ allDatabaseLabels: ['*'] });
throw error;
}
},
setMoveToSelectedNode: (moveToSelectedNode?: boolean) => set({ moveToSelectedNode }),
// Methods to set global flags
setGraphDataFetchAttempted: (attempted: boolean) => set({ graphDataFetchAttempted: attempted }),
setLabelsFetchAttempted: (attempted: boolean) => set({ labelsFetchAttempted: attempted })
}))
const useGraphStore = createSelectors(useGraphStoreBase)
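A hedged sketch (hook name and store export assumed) of how the new `labelsFetchAttempted` flag and `fetchAllDatabaseLabels` are meant to combine: the flag makes the initial fetch run once even when the effect fires twice, e.g. under React StrictMode.

    import { useEffect } from 'react'
    import { useGraphStore } from '@/stores/graph'

    export function useDatabaseLabels() {
      // createSelectors exposes per-field hooks via `.use`
      const labels = useGraphStore.use.allDatabaseLabels()

      useEffect(() => {
        const store = useGraphStore.getState()
        if (!store.labelsFetchAttempted) {
          store.setLabelsFetchAttempted(true)        // mark before awaiting so a re-run is a no-op
          store.fetchAllDatabaseLabels().catch(() => {
            store.setLabelsFetchAttempted(false)     // allow a retry after a failed request
          })
        }
      }, [])

      return labels
    }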

View File

@@ -5,6 +5,7 @@ import { defaultQueryLabel } from '@/lib/constants'
import { Message, QueryRequest } from '@/api/lightrag'
type Theme = 'dark' | 'light' | 'system'
type Language = 'en' | 'zh'
type Tab = 'documents' | 'knowledge-graph' | 'retrieval' | 'api'
type Language = 'en' | 'zh'
@@ -48,7 +49,7 @@ interface SettingsState {
setTheme: (theme: Theme) => void
language: Language
setLanguage: (language: Language) => void
setLanguage: (lang: Language) => void
enableHealthCheck: boolean
setEnableHealthCheck: (enable: boolean) => void
@@ -62,7 +63,6 @@ const useSettingsStoreBase = create<SettingsState>()(
(set) => ({
theme: 'system',
language: 'en',
showPropertyPanel: true,
showNodeSearchBar: true,
@@ -75,7 +75,7 @@ const useSettingsStoreBase = create<SettingsState>()(
graphQueryMaxDepth: 3,
graphMinDegree: 0,
graphLayoutMaxIterations: 10,
graphLayoutMaxIterations: 15,
queryLabel: defaultQueryLabel,
@@ -104,7 +104,15 @@ const useSettingsStoreBase = create<SettingsState>()(
setTheme: (theme: Theme) => set({ theme }),
setLanguage: (language: Language) => set({ language }),
setLanguage: (language: Language) => {
set({ language })
// Update i18n after state is updated
import('i18next').then(({ default: i18n }) => {
if (i18n.language !== language) {
i18n.changeLanguage(language)
}
})
},
setGraphLayoutMaxIterations: (iterations: number) =>
set({
@@ -136,7 +144,7 @@ const useSettingsStoreBase = create<SettingsState>()(
{
name: 'settings-storage',
storage: createJSONStorage(() => localStorage),
version: 7,
version: 8,
migrate: (state: any, version: number) => {
if (version < 2) {
state.showEdgeLabel = false
@@ -173,7 +181,11 @@ const useSettingsStoreBase = create<SettingsState>()(
}
if (version < 7) {
state.graphQueryMaxDepth = 3
state.graphLayoutMaxIterations = 10
state.graphLayoutMaxIterations = 15
}
if (version < 8) {
state.graphMinDegree = 0
state.language = 'en'
}
return state
}
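For context, a sketch of what the bumped persist version does on the client (storage shape follows zustand's persist middleware defaults; values illustrative):

    // Illustrative only: how zustand/persist applies the migration above
    const persisted = JSON.parse(localStorage.getItem('settings-storage') ?? '{}')
    // persisted ≈ { state: { theme: 'system', language: 'en', ... }, version: 7 }
    // Since persisted.version (7) < 8, persist calls migrate(persisted.state, 7),
    // which runs every `if (version < N)` branch up to 8 and backfills
    // graphMinDegree and language before the store is rehydrated.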

View File

@@ -27,7 +27,9 @@ export default defineConfig({
target: import.meta.env.VITE_BACKEND_URL || 'http://localhost:9621',
changeOrigin: true,
rewrite: endpoint === '/api' ?
(path) => path.replace(/^\/api/, '') : undefined
(path) => path.replace(/^\/api/, '') :
endpoint === '/docs' || endpoint === '/openapi.json' ?
(path) => path : undefined
}
])
) : {}
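With this change the generated proxy table roughly expands as below (endpoint list and `target` assumed; the full config is not shown in this hunk): `/api` is stripped from forwarded paths, while `/docs` and `/openapi.json` are proxied through unchanged so the backend's API documentation page can load via the dev server.

    // Illustrative expansion of Object.fromEntries(...) above
    // proxy: {
    //   '/api':          { target, changeOrigin: true, rewrite: (path) => path.replace(/^\/api/, '') },
    //   '/docs':         { target, changeOrigin: true, rewrite: (path) => path },
    //   '/openapi.json': { target, changeOrigin: true, rewrite: (path) => path },
    // }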

View File

@@ -4,6 +4,12 @@ future
# Basic modules
gensim
# Additional Packages for export Functionality
pandas>=2.0.0
# Extra libraries are installed when needed using pipmaster
pipmaster
pydantic
python-dotenv
@@ -13,5 +19,4 @@ tenacity
# LLM packages
tiktoken
# Extra libraries are installed when needed using pipmaster
xlsxwriter>=3.1.0