diff --git a/LICENSE b/LICENSE index c65e8258..3152fbcd 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2025 Gustavo Ye +Copyright (c) 2025 LarFii Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/README.md b/README.md index d6d22522..6bc2fb3f 100644 --- a/README.md +++ b/README.md @@ -12,7 +12,7 @@


@@ -637,7 +637,7 @@ if __name__ == "__main__": | **llm\_model\_kwargs** | `dict` | Additional parameters for LLM generation | | | **vector\_db\_storage\_cls\_kwargs** | `dict` | Additional parameters for vector database (currently not used) | | | **enable\_llm\_cache** | `bool` | If `TRUE`, stores LLM results in cache; repeated prompts return cached responses | `TRUE` | -| **enable\_llm\_cache\_for\_entity\_extract** | `bool` | If `TRUE`, stores LLM results in cache for entity extraction; useful when debugging your application | `FALSE` | +| **enable\_llm\_cache\_for\_entity\_extract** | `bool` | If `TRUE`, stores LLM results in cache for entity extraction; useful when debugging your application | `TRUE` | | **addon\_params** | `dict` | Additional parameters, e.g., `{"example_number": 1, "language": "Simplified Chinese", "entity_types": ["organization", "person", "geo", "event"], "insert_batch_size": 10}`: sets example limit, output language, and batch size for document processing | `example_number: all examples, language: English, insert_batch_size: 10` | | **convert\_response\_to\_json\_func** | `callable` | Not used | `convert_response_to_json` | | **embedding\_cache\_config** | `dict` | Configuration for question-answer caching. Contains three parameters:
- `enabled`: Boolean value to enable/disable cache lookup functionality. When enabled, the system will check cached responses before generating new answers.
- `similarity_threshold`: Float value (0-1), similarity threshold. When a new question's similarity with a cached question exceeds this threshold, the cached answer will be returned directly without calling the LLM.
- `use_llm_check`: Boolean value to enable/disable LLM similarity verification. When enabled, LLM will be used as a secondary check to verify the similarity between questions before returning cached answers. | Default: `{"enabled": False, "similarity_threshold": 0.95, "use_llm_check": False}` | @@ -892,69 +892,6 @@ def extract_queries(file_path): ``` -## Code Structure - -```python -. -├── .github/ -│ ├── workflows/ -│ │ └── linting.yaml -├── examples/ -│ ├── batch_eval.py -│ ├── generate_query.py -│ ├── graph_visual_with_html.py -│ ├── graph_visual_with_neo4j.py -│ ├── insert_custom_kg.py -│ ├── lightrag_api_openai_compatible_demo.py -│ ├── lightrag_api_oracle_demo..py -│ ├── lightrag_azure_openai_demo.py -│ ├── lightrag_bedrock_demo.py -│ ├── lightrag_hf_demo.py -│ ├── lightrag_lmdeploy_demo.py -│ ├── lightrag_nvidia_demo.py -│ ├── lightrag_ollama_demo.py -│ ├── lightrag_openai_compatible_demo.py -│ ├── lightrag_openai_demo.py -│ ├── lightrag_oracle_demo.py -│ ├── lightrag_siliconcloud_demo.py -│ └── vram_management_demo.py -├── lightrag/ -│ ├── api/ -│ │ ├── lollms_lightrag_server.py -│ │ ├── ollama_lightrag_server.py -│ │ ├── openai_lightrag_server.py -│ │ ├── azure_openai_lightrag_server.py -│ │ └── requirements.txt -│ ├── kg/ -│ │ ├── __init__.py -│ │ ├── oracle_impl.py -│ │ └── neo4j_impl.py -│ ├── __init__.py -│ ├── base.py -│ ├── lightrag.py -│ ├── llm.py -│ ├── operate.py -│ ├── prompt.py -│ ├── storage.py -│ └── utils.py -├── reproduce/ -│ ├── Step_0.py -│ ├── Step_1_openai_compatible.py -│ ├── Step_1.py -│ ├── Step_2.py -│ ├── Step_3_openai_compatible.py -│ └── Step_3.py -├── .gitignore -├── .pre-commit-config.yaml -├── get_all_edges_nx.py -├── LICENSE -├── README.md -├── requirements.txt -├── setup.py -├── test_neo4j.py -└── test.py -``` - ## Install with API Support LightRAG provides optional API support through FastAPI servers that add RAG capabilities to existing LLM services. 
You can install LightRAG with API support in two ways: diff --git a/contributor-readme.MD b/contributor-README.md similarity index 100% rename from contributor-readme.MD rename to contributor-README.md diff --git a/examples/test_split_by_character.ipynb b/examples/test_split_by_character.ipynb new file mode 100644 index 00000000..e8e08b92 --- /dev/null +++ b/examples/test_split_by_character.ipynb @@ -0,0 +1,1296 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "4b5690db12e34685", + "metadata": { + "ExecuteTime": { + "end_time": "2025-01-09T03:40:58.307102Z", + "start_time": "2025-01-09T03:40:51.935233Z" + } + }, + "outputs": [], + "source": [ + "import os\n", + "import logging\n", + "import numpy as np\n", + "from lightrag import LightRAG, QueryParam\n", + "from lightrag.llm import openai_complete_if_cache, openai_embedding\n", + "from lightrag.utils import EmbeddingFunc\n", + "import nest_asyncio" + ] + }, + { + "cell_type": "markdown", + "id": "dd17956ec322b361", + "metadata": {}, + "source": "#### split by character" + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "8c8ee7c061bf9159", + "metadata": { + "ExecuteTime": { + "end_time": "2025-01-09T03:41:13.961167Z", + "start_time": "2025-01-09T03:41:13.958357Z" + } + }, + "outputs": [], + "source": [ + "nest_asyncio.apply()\n", + "WORKING_DIR = \"../../llm_rag/paper_db/R000088_test1\"\n", + "logging.basicConfig(format=\"%(levelname)s:%(message)s\", level=logging.INFO)\n", + "if not os.path.exists(WORKING_DIR):\n", + " os.mkdir(WORKING_DIR)\n", + "API = os.environ.get(\"DOUBAO_API_KEY\")" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "a5009d16e0851dca", + "metadata": { + "ExecuteTime": { + "end_time": "2025-01-09T03:41:16.862036Z", + "start_time": "2025-01-09T03:41:16.859306Z" + } + }, + "outputs": [], + "source": [ + "async def llm_model_func(\n", + " prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs\n", + ") -> str:\n", + " return await openai_complete_if_cache(\n", + " \"ep-20241218114828-2tlww\",\n", + " prompt,\n", + " system_prompt=system_prompt,\n", + " history_messages=history_messages,\n", + " api_key=API,\n", + " base_url=\"https://ark.cn-beijing.volces.com/api/v3\",\n", + " **kwargs,\n", + " )\n", + "\n", + "\n", + "async def embedding_func(texts: list[str]) -> np.ndarray:\n", + " return await openai_embedding(\n", + " texts,\n", + " model=\"ep-20241231173413-pgjmk\",\n", + " api_key=API,\n", + " base_url=\"https://ark.cn-beijing.volces.com/api/v3\",\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "397fcad24ce4d0ed", + "metadata": { + "ExecuteTime": { + "end_time": "2025-01-09T03:41:24.950307Z", + "start_time": "2025-01-09T03:41:24.940353Z" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:lightrag:Logger initialized for working directory: ../../llm_rag/paper_db/R000088_test1\n", + "INFO:lightrag:Load KV llm_response_cache with 0 data\n", + "INFO:lightrag:Load KV full_docs with 0 data\n", + "INFO:lightrag:Load KV text_chunks with 0 data\n", + "INFO:nano-vectordb:Init {'embedding_dim': 4096, 'metric': 'cosine', 'storage_file': '../../llm_rag/paper_db/R000088_test1/vdb_entities.json'} 0 data\n", + "INFO:nano-vectordb:Init {'embedding_dim': 4096, 'metric': 'cosine', 'storage_file': '../../llm_rag/paper_db/R000088_test1/vdb_relationships.json'} 0 data\n", + "INFO:nano-vectordb:Init {'embedding_dim': 4096, 'metric': 'cosine', 'storage_file': 
'../../llm_rag/paper_db/R000088_test1/vdb_chunks.json'} 0 data\n", + "INFO:lightrag:Loaded document status storage with 0 records\n" + ] + } + ], + "source": [ + "rag = LightRAG(\n", + " working_dir=WORKING_DIR,\n", + " llm_model_func=llm_model_func,\n", + " embedding_func=EmbeddingFunc(\n", + " embedding_dim=4096, max_token_size=8192, func=embedding_func\n", + " ),\n", + " chunk_token_size=512,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "1dc3603677f7484d", + "metadata": { + "ExecuteTime": { + "end_time": "2025-01-09T03:41:37.947456Z", + "start_time": "2025-01-09T03:41:37.941901Z" + } + }, + "outputs": [], + "source": [ + "with open(\n", + " \"../../llm_rag/example/R000088/auto/R000088_full_txt.md\", \"r\", encoding=\"utf-8\"\n", + ") as f:\n", + " content = f.read()\n", + "\n", + "\n", + "async def embedding_func(texts: list[str]) -> np.ndarray:\n", + " return await openai_embedding(\n", + " texts,\n", + " model=\"ep-20241231173413-pgjmk\",\n", + " api_key=API,\n", + " base_url=\"https://ark.cn-beijing.volces.com/api/v3\",\n", + " )\n", + "\n", + "\n", + "async def get_embedding_dim():\n", + " test_text = [\"This is a test sentence.\"]\n", + " embedding = await embedding_func(test_text)\n", + " embedding_dim = embedding.shape[1]\n", + " return embedding_dim" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "6844202606acfbe5", + "metadata": { + "ExecuteTime": { + "end_time": "2025-01-09T03:41:39.608541Z", + "start_time": "2025-01-09T03:41:39.165057Z" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n" + ] + } + ], + "source": [ + "embedding_dimension = await get_embedding_dim()" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "d6273839d9681403", + "metadata": { + "ExecuteTime": { + "end_time": "2025-01-09T03:44:34.295345Z", + "start_time": "2025-01-09T03:41:48.324171Z" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:lightrag:Processing 1 new unique documents\n", + "Processing batch 1: 0%| | 0/1 [00:00标签中,针对每个问题详细分析你的思考过程。然后在<回答>标签中给出所有问题的最终答案。\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "7a6491385b050095", + "metadata": { + "ExecuteTime": { + "end_time": "2025-01-09T03:45:40.829111Z", + "start_time": "2025-01-09T03:45:13.530298Z" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n", + "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n", + "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n", + "INFO:lightrag:Local query uses 5 entites, 12 relations, 3 text units\n", + "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n", + "INFO:lightrag:Global query uses 8 entites, 5 relations, 4 text units\n", + "INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "<分析>\n", + "1. 
**该文献主要研究的问题是什么?**\n", + " - 思考过程:通过浏览论文内容,查找作者明确阐述研究目的的部分。文中多处提及“Our study was performed to explore whether folic acid treatment was associated with cancer outcomes and all-cause mortality after extended follow-up”,表明作者旨在探究叶酸治疗与癌症结局及全因死亡率之间的关系,尤其是在经过长期随访后。\n", + "2. **该文献采用什么方法进行分析?**\n", + " - 思考过程:寻找描述研究方法和数据分析过程的段落。文中提到“Survival curves were constructed using the Kaplan-Meier method and differences in survival between groups were analyzed using the log-rank test. Estimates of hazard ratios (HRs) with 95% CIs were obtained by using Cox proportional hazards regression models stratified by trial”,可以看出作者使用了Kaplan-Meier法构建生存曲线、log-rank检验分析组间生存差异以及Cox比例风险回归模型估计风险比等方法。\n", + "3. **该文献的主要结论是什么?**\n", + " - 思考过程:定位到论文中总结结论的部分,如“Conclusion Treatment with folic acid plus vitamin $\\mathsf{B}_{12}$ was associated with increased cancer outcomes and all-cause mortality in patients with ischemic heart disease in Norway, where there is no folic acid fortification of foods”,可知作者得出叶酸加维生素$\\mathsf{B}_{12}$治疗与癌症结局和全因死亡率增加有关的结论。\n", + "<回答>\n", + "1. 该文献主要研究的问题是:叶酸治疗与癌症结局及全因死亡率之间的关系,尤其是在经过长期随访后,叶酸治疗是否与癌症结局和全因死亡率相关。\n", + "2. 该文献采用的分析方法包括:使用Kaplan-Meier法构建生存曲线、log-rank检验分析组间生存差异、Cox比例风险回归模型估计风险比等。\n", + "3. 该文献的主要结论是:在挪威没有叶酸强化食品的情况下,叶酸加维生素$\\mathsf{B}_{12}$治疗与缺血性心脏病患者的癌症结局和全因死亡率增加有关。\n", + "\n", + "**参考文献**\n", + "- [VD] In2Norwegianhomocysteine-lowering trialsamongpatientswithischemicheart disease, there was a statistically nonsignificantincreaseincancerincidenceinthe groupsassignedtofolicacidtreatment.15,16 Our study was performed to explore whetherfolicacidtreatmentwasassociatedwithcanceroutcomesandall-cause mortality after extended follow-up.\n", + "- [VD] Survivalcurveswereconstructedusing theKaplan-Meiermethodanddifferences insurvivalbetweengroupswereanalyzed usingthelog-ranktest.Estimatesofhazard ratios (HRs) with $95\\%$ CIs were obtainedbyusingCoxproportionalhazards regressionmodelsstratifiedbytrial.\n", + "- [VD] Conclusion Treatment with folic acid plus vitamin $\\mathsf{B}_{12}$ was associated with increased cancer outcomes and all-cause mortality in patients with ischemic heart disease in Norway, where there is no folic acid fortification of foods.\n" + ] + } + ], + "source": [ + "resp = rag.query(prompt1, param=QueryParam(mode=\"mix\", top_k=5))\n", + "print(resp)" + ] + }, + { + "cell_type": "markdown", + "id": "4e5bfad24cb721a8", + "metadata": {}, + "source": "#### split by character only" + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "44e2992dc95f8ce0", + "metadata": { + "ExecuteTime": { + "end_time": "2025-01-09T03:47:40.988796Z", + "start_time": "2025-01-09T03:47:40.982648Z" + } + }, + "outputs": [], + "source": [ + "WORKING_DIR = \"../../llm_rag/paper_db/R000088_test2\"\n", + "if not os.path.exists(WORKING_DIR):\n", + " os.mkdir(WORKING_DIR)" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "62c63385d2d973d5", + "metadata": { + "ExecuteTime": { + "end_time": "2025-01-09T03:51:39.951329Z", + "start_time": "2025-01-09T03:49:15.218976Z" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:lightrag:Logger initialized for working directory: ../../llm_rag/paper_db/R000088_test2\n", + "INFO:lightrag:Load KV llm_response_cache with 0 data\n", + "INFO:lightrag:Load KV full_docs with 0 data\n", + "INFO:lightrag:Load KV text_chunks with 0 data\n", + "INFO:nano-vectordb:Init {'embedding_dim': 4096, 'metric': 'cosine', 'storage_file': '../../llm_rag/paper_db/R000088_test2/vdb_entities.json'} 0 data\n", + 
"INFO:nano-vectordb:Init {'embedding_dim': 4096, 'metric': 'cosine', 'storage_file': '../../llm_rag/paper_db/R000088_test2/vdb_relationships.json'} 0 data\n", + "INFO:nano-vectordb:Init {'embedding_dim': 4096, 'metric': 'cosine', 'storage_file': '../../llm_rag/paper_db/R000088_test2/vdb_chunks.json'} 0 data\n", + "INFO:lightrag:Loaded document status storage with 0 records\n", + "INFO:lightrag:Processing 1 new unique documents\n", + "Processing batch 1: 0%| | 0/1 [00:00\n", + "- **该文献主要研究的问题是什么?**\n", + " - **思考过程**:通过浏览论文的标题、摘要、引言等部分,寻找关于研究目的和问题的描述。论文标题为“Cancer Incidence and Mortality After Treatment With Folic Acid and Vitamin B12”,摘要中的“Objective”部分明确指出研究目的是“To evaluate effects of treatment with B vitamins on cancer outcomes and all-cause mortality in 2 randomized controlled trials”。因此,可以确定该文献主要研究的问题是评估B族维生素治疗对两项随机对照试验中癌症结局和全因死亡率的影响。\n", + "- **该文献采用什么方法进行分析?**\n", + " - **思考过程**:在论文的“METHODS”部分详细描述了研究方法。文中提到这是一个对两项随机、双盲、安慰剂对照临床试验(Norwegian Vitamin [NORVIT] trial和Western Norway B Vitamin Intervention Trial [WENBIT])数据的联合分析,并进行了观察性的试验后随访。具体包括对参与者进行分组干预(不同剂量的叶酸、维生素B12、维生素B6或安慰剂),收集临床信息和血样,分析循环B族维生素、同型半胱氨酸和可替宁等指标,并进行基因分型等,还涉及到多种统计分析方法,如计算预期癌症发生率、构建生存曲线、进行Cox比例风险回归模型分析等。\n", + "- **该文献的主要结论是什么?**\n", + " - **思考过程**:在论文的“Results”和“Conclusion”部分寻找主要结论。研究结果表明,在治疗期间,接受叶酸加维生素B12治疗的参与者血清叶酸浓度显著增加,且在后续随访中,该组癌症发病率、癌症死亡率和全因死亡率均有所上升,主要是肺癌发病率增加,而维生素B6治疗未显示出显著影响。结论部分明确指出“Treatment with folic acid plus vitamin $\\mathsf{B}_{12}$ was associated with increased cancer outcomes and all-cause mortality in patients with ischemic heart disease in Norway, where there is no folic acid fortification of foods”。\n", + "\n", + "\n", + "<回答>\n", + "- **主要研究问题**:评估B族维生素治疗对两项随机对照试验中癌症结局和全因死亡率的影响。\n", + "- **研究方法**:采用对两项随机、双盲、安慰剂对照临床试验(Norwegian Vitamin [NORVIT] trial和Western Norway B Vitamin Intervention Trial [WENBIT])数据的联合分析,并进行观察性的试验后随访,涉及分组干预、多种指标检测以及多种统计分析方法。\n", + "- **主要结论**:在挪威(食品中未添加叶酸),对于缺血性心脏病患者,叶酸加维生素B12治疗与癌症结局和全因死亡率的增加有关,而维生素B6治疗未显示出显著影响。\n", + "\n", + "**参考文献**\n", + "- [VD] Cancer Incidence and Mortality After Treatment With Folic Acid and Vitamin B12\n", + "- [VD] METHODS Study Design, Participants, and Study Intervention\n", + "- [VD] RESULTS\n", + "- [VD] Conclusion\n", + "- [VD] Objective To evaluate effects of treatment with B vitamins on cancer outcomes and all-cause mortality in 2 randomized controlled trials.\n" + ] + } + ], + "source": [ + "resp = rag.query(prompt1, param=QueryParam(mode=\"mix\", top_k=5))\n", + "print(resp)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7ba6fa79a2550d10", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.6" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/lightrag/__init__.py b/lightrag/__init__.py index cd2ccf04..b8037813 100644 --- a/lightrag/__init__.py +++ b/lightrag/__init__.py @@ -1,5 +1,5 @@ from .lightrag import LightRAG as LightRAG, QueryParam as QueryParam -__version__ = "1.0.9" +__version__ = "1.1.0" __author__ = "Zirui Guo" __url__ = "https://github.com/HKUDS/LightRAG" diff --git a/lightrag/lightrag.py b/lightrag/lightrag.py index 05de8d9f..596fbdbf 100644 --- a/lightrag/lightrag.py +++ b/lightrag/lightrag.py @@ -45,6 +45,7 @@ from .storage import ( 
from .prompt import GRAPH_FIELD_SEP + # future KG integrations # from .kg.ArangoDB_impl import ( @@ -167,7 +168,7 @@ class LightRAG: # LLM llm_model_func: callable = gpt_4o_mini_complete # hf_model_complete# - llm_model_name: str = "meta-llama/Llama-3.2-1B-Instruct" #'meta-llama/Llama-3.2-1B'#'google/gemma-2-2b-it' + llm_model_name: str = "meta-llama/Llama-3.2-1B-Instruct" # 'meta-llama/Llama-3.2-1B'#'google/gemma-2-2b-it' llm_model_max_token_size: int = 32768 llm_model_max_async: int = 16 llm_model_kwargs: dict = field(default_factory=dict) @@ -177,7 +178,7 @@ class LightRAG: enable_llm_cache: bool = True # Sometimes the LLM fails at extracting entities; this flag lets us continue without incurring further LLM cost - enable_llm_cache_for_entity_extract: bool = False + enable_llm_cache_for_entity_extract: bool = True # extension addon_params: dict = field(default_factory=dict) @@ -186,6 +187,10 @@ class LightRAG: # Add new field for document status storage type doc_status_storage: str = field(default="JsonDocStatusStorage") + # Custom Chunking Function + chunking_func: callable = chunking_by_token_size + chunking_func_kwargs: dict = field(default_factory=dict) + def __post_init__(self): log_file = os.path.join("lightrag.log") set_logger(log_file) @@ -313,15 +318,25 @@ class LightRAG: "JsonDocStatusStorage": JsonDocStatusStorage, } - def insert(self, string_or_strings): + def insert( + self, string_or_strings, split_by_character=None, split_by_character_only=False + ): loop = always_get_an_event_loop() - return loop.run_until_complete(self.ainsert(string_or_strings)) + return loop.run_until_complete( + self.ainsert(string_or_strings, split_by_character, split_by_character_only) + ) - async def ainsert(self, string_or_strings): + async def ainsert( + self, string_or_strings, split_by_character=None, split_by_character_only=False + ): """Insert documents with checkpoint support Args: string_or_strings: Single document string or list of document strings + split_by_character: if not None, split the document by this character; any resulting chunk longer than + chunk_token_size is split again by token size. + split_by_character_only: if True, split by this character only; this parameter is ignored when + split_by_character is None.
""" if isinstance(string_or_strings, str): string_or_strings = [string_or_strings] @@ -358,7 +373,7 @@ class LightRAG: batch_docs = dict(list(new_docs.items())[i : i + batch_size]) for doc_id, doc in tqdm_async( - batch_docs.items(), desc=f"Processing batch {i//batch_size + 1}" + batch_docs.items(), desc=f"Processing batch {i // batch_size + 1}" ): try: # Update status to processing @@ -377,11 +392,14 @@ class LightRAG: **dp, "full_doc_id": doc_id, } - for dp in chunking_by_token_size( + for dp in self.chunking_func( doc["content"], + split_by_character=split_by_character, + split_by_character_only=split_by_character_only, overlap_token_size=self.chunk_overlap_token_size, max_token_size=self.chunk_token_size, tiktoken_model=self.tiktoken_model_name, + **self.chunking_func_kwargs, ) } @@ -453,6 +471,73 @@ class LightRAG: # Ensure all indexes are updated after each document await self._insert_done() + def insert_custom_chunks(self, full_text: str, text_chunks: list[str]): + loop = always_get_an_event_loop() + return loop.run_until_complete( + self.ainsert_custom_chunks(full_text, text_chunks) + ) + + async def ainsert_custom_chunks(self, full_text: str, text_chunks: list[str]): + update_storage = False + try: + doc_key = compute_mdhash_id(full_text.strip(), prefix="doc-") + new_docs = {doc_key: {"content": full_text.strip()}} + + _add_doc_keys = await self.full_docs.filter_keys([doc_key]) + new_docs = {k: v for k, v in new_docs.items() if k in _add_doc_keys} + if not len(new_docs): + logger.warning("This document is already in the storage.") + return + + update_storage = True + logger.info(f"[New Docs] inserting {len(new_docs)} docs") + + inserting_chunks = {} + for chunk_text in text_chunks: + chunk_text_stripped = chunk_text.strip() + chunk_key = compute_mdhash_id(chunk_text_stripped, prefix="chunk-") + + inserting_chunks[chunk_key] = { + "content": chunk_text_stripped, + "full_doc_id": doc_key, + } + + _add_chunk_keys = await self.text_chunks.filter_keys( + list(inserting_chunks.keys()) + ) + inserting_chunks = { + k: v for k, v in inserting_chunks.items() if k in _add_chunk_keys + } + if not len(inserting_chunks): + logger.warning("All chunks are already in the storage.") + return + + logger.info(f"[New Chunks] inserting {len(inserting_chunks)} chunks") + + await self.chunks_vdb.upsert(inserting_chunks) + + logger.info("[Entity Extraction]...") + maybe_new_kg = await extract_entities( + inserting_chunks, + knowledge_graph_inst=self.chunk_entity_relation_graph, + entity_vdb=self.entities_vdb, + relationships_vdb=self.relationships_vdb, + global_config=asdict(self), + ) + + if maybe_new_kg is None: + logger.warning("No new entities and relationships found") + return + else: + self.chunk_entity_relation_graph = maybe_new_kg + + await self.full_docs.upsert(new_docs) + await self.text_chunks.upsert(inserting_chunks) + + finally: + if update_storage: + await self._insert_done() + async def _insert_done(self): tasks = [] for storage_inst in [ diff --git a/lightrag/operate.py b/lightrag/operate.py index b2c4d215..7216c07f 100644 --- a/lightrag/operate.py +++ b/lightrag/operate.py @@ -4,7 +4,6 @@ import re from tqdm.asyncio import tqdm as tqdm_async from typing import Union from collections import Counter, defaultdict -import warnings from .utils import ( logger, clean_str, @@ -34,23 +33,61 @@ import time def chunking_by_token_size( - content: str, overlap_token_size=128, max_token_size=1024, tiktoken_model="gpt-4o" + content: str, + split_by_character=None, + split_by_character_only=False, + 
overlap_token_size=128, + max_token_size=1024, + tiktoken_model="gpt-4o", + **kwargs, ): tokens = encode_string_by_tiktoken(content, model_name=tiktoken_model) results = [] - for index, start in enumerate( - range(0, len(tokens), max_token_size - overlap_token_size) - ): - chunk_content = decode_tokens_by_tiktoken( - tokens[start : start + max_token_size], model_name=tiktoken_model - ) - results.append( - { - "tokens": min(max_token_size, len(tokens) - start), - "content": chunk_content.strip(), - "chunk_order_index": index, - } - ) + if split_by_character: + raw_chunks = content.split(split_by_character) + new_chunks = [] + if split_by_character_only: + for chunk in raw_chunks: + _tokens = encode_string_by_tiktoken(chunk, model_name=tiktoken_model) + new_chunks.append((len(_tokens), chunk)) + else: + for chunk in raw_chunks: + _tokens = encode_string_by_tiktoken(chunk, model_name=tiktoken_model) + if len(_tokens) > max_token_size: + for start in range( + 0, len(_tokens), max_token_size - overlap_token_size + ): + chunk_content = decode_tokens_by_tiktoken( + _tokens[start : start + max_token_size], + model_name=tiktoken_model, + ) + new_chunks.append( + (min(max_token_size, len(_tokens) - start), chunk_content) + ) + else: + new_chunks.append((len(_tokens), chunk)) + for index, (_len, chunk) in enumerate(new_chunks): + results.append( + { + "tokens": _len, + "content": chunk.strip(), + "chunk_order_index": index, + } + ) + else: + for index, start in enumerate( + range(0, len(tokens), max_token_size - overlap_token_size) + ): + chunk_content = decode_tokens_by_tiktoken( + tokens[start : start + max_token_size], model_name=tiktoken_model + ) + results.append( + { + "tokens": min(max_token_size, len(tokens) - start), + "content": chunk_content.strip(), + "chunk_order_index": index, + } + ) return results @@ -574,15 +611,22 @@ async def kg_query( logger.warning("low_level_keywords and high_level_keywords is empty") return PROMPTS["fail_response"] if ll_keywords == [] and query_param.mode in ["local", "hybrid"]: - logger.warning("low_level_keywords is empty") - return PROMPTS["fail_response"] - else: - ll_keywords = ", ".join(ll_keywords) + logger.warning( + "low_level_keywords is empty, switching from %s mode to global mode", + query_param.mode, + ) + query_param.mode = "global" if hl_keywords == [] and query_param.mode in ["global", "hybrid"]: - logger.warning("high_level_keywords is empty") - return PROMPTS["fail_response"] - else: - hl_keywords = ", ".join(hl_keywords) + logger.warning( + "high_level_keywords is empty, switching from %s mode to local mode", + query_param.mode, + ) + query_param.mode = "local" + + ll_keywords = ", ".join(ll_keywords) if ll_keywords else "" + hl_keywords = ", ".join(hl_keywords) if hl_keywords else "" + + logger.info("Using %s mode for query processing", query_param.mode) # Build context keywords = [ll_keywords, hl_keywords] @@ -648,78 +692,52 @@ async def _build_query_context( # ll_entities_context, ll_relations_context, ll_text_units_context = "", "", "" # hl_entities_context, hl_relations_context, hl_text_units_context = "", "", "" - ll_kewwords, hl_keywrds = query[0], query[1] - if query_param.mode in ["local", "hybrid"]: - if ll_kewwords == "": - ll_entities_context, ll_relations_context, ll_text_units_context = ( - "", - "", - "", - ) - warnings.warn( - "Low Level context is None. 
Return empty Low entity/relationship/source" - ) - query_param.mode = "global" - else: - ( - ll_entities_context, - ll_relations_context, - ll_text_units_context, - ) = await _get_node_data( - ll_kewwords, - knowledge_graph_inst, - entities_vdb, - text_chunks_db, - query_param, - ) - if query_param.mode in ["global", "hybrid"]: - if hl_keywrds == "": - hl_entities_context, hl_relations_context, hl_text_units_context = ( - "", - "", - "", - ) - warnings.warn( - "High Level context is None. Return empty High entity/relationship/source" - ) - query_param.mode = "local" - else: - ( - hl_entities_context, - hl_relations_context, - hl_text_units_context, - ) = await _get_edge_data( - hl_keywrds, - knowledge_graph_inst, - relationships_vdb, - text_chunks_db, - query_param, - ) - if ( - hl_entities_context == "" - and hl_relations_context == "" - and hl_text_units_context == "" - ): - logger.warn("No high level context found. Switching to local mode.") - query_param.mode = "local" - if query_param.mode == "hybrid": + ll_keywords, hl_keywords = query[0], query[1] + + if query_param.mode == "local": + entities_context, relations_context, text_units_context = await _get_node_data( + ll_keywords, + knowledge_graph_inst, + entities_vdb, + text_chunks_db, + query_param, + ) + elif query_param.mode == "global": + entities_context, relations_context, text_units_context = await _get_edge_data( + hl_keywords, + knowledge_graph_inst, + relationships_vdb, + text_chunks_db, + query_param, + ) + else: # hybrid mode + ( + ll_entities_context, + ll_relations_context, + ll_text_units_context, + ) = await _get_node_data( + ll_keywords, + knowledge_graph_inst, + entities_vdb, + text_chunks_db, + query_param, + ) + ( + hl_entities_context, + hl_relations_context, + hl_text_units_context, + ) = await _get_edge_data( + hl_keywords, + knowledge_graph_inst, + relationships_vdb, + text_chunks_db, + query_param, + ) entities_context, relations_context, text_units_context = combine_contexts( [hl_entities_context, ll_entities_context], [hl_relations_context, ll_relations_context], [hl_text_units_context, ll_text_units_context], ) - elif query_param.mode == "local": - entities_context, relations_context, text_units_context = ( - ll_entities_context, - ll_relations_context, - ll_text_units_context, - ) - elif query_param.mode == "global": - entities_context, relations_context, text_units_context = ( - hl_entities_context, - hl_relations_context, - hl_text_units_context, - ) return f""" -----Entities----- ```csv diff --git a/requirements.txt b/requirements.txt index 79249e7e..48c25ff8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,38 +1,38 @@ accelerate -aioboto3~=13.3.0 -aiofiles~=24.1.0 -aiohttp~=3.11.11 -asyncpg~=0.30.0 +aioboto3 +aiofiles +aiohttp +asyncpg # database packages graspologic gremlinpython hnswlib nano-vectordb -neo4j~=5.27.0 -networkx~=3.2.1 +neo4j +networkx -numpy~=2.2.0 -ollama~=0.4.4 -openai~=1.58.1 +numpy +ollama +openai oracledb -psycopg-pool~=3.2.4 -psycopg[binary,pool]~=3.2.3 -pydantic~=2.10.4 +psycopg-pool +psycopg[binary,pool] +pydantic pymilvus pymongo pymysql -python-dotenv~=1.0.1 -pyvis~=0.3.2 -setuptools~=70.0.0 +python-dotenv +pyvis +setuptools # lmdeploy[all] -sqlalchemy~=2.0.36 -tenacity~=9.0.0 +sqlalchemy +tenacity # LLM packages -tiktoken~=0.8.0 -torch~=2.5.1+cu121 -tqdm~=4.67.1 -transformers~=4.47.1 +tiktoken +torch +tqdm +transformers xxhash
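
The new `split_by_character` path in `chunking_by_token_size` can be driven directly from `insert`. A minimal sketch, assuming a `LightRAG` instance configured as in the notebook above; the working directory, document path, and default model setup are placeholders:

```python
from lightrag import LightRAG

# Placeholder setup; in practice supply llm_model_func/embedding_func
# as shown in the notebook cells above.
rag = LightRAG(working_dir="./rag_storage", chunk_token_size=512)

with open("./paper.md", "r", encoding="utf-8") as f:
    content = f.read()

# Split on newlines first; any piece longer than chunk_token_size is
# then re-split by token size with chunk_overlap_token_size overlap.
rag.insert(content, split_by_character="\n")

# Or split strictly on the character, keeping over-length pieces intact
# (each piece becomes one chunk regardless of its token count):
# rag.insert(content, split_by_character="\n", split_by_character_only=True)
```

Note that with this diff, entity-extraction caching is also on by default (`enable_llm_cache_for_entity_extract: bool = True`), so repeated inserts of similar text avoid redundant LLM calls.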
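The new `chunking_func` / `chunking_func_kwargs` fields let callers swap in their own chunker. A sketch under the same placeholder assumptions: `chunking_by_paragraph` below is a hypothetical splitter, not part of the library; it must accept the keyword arguments `ainsert` forwards and return dicts shaped like `chunking_by_token_size`'s output. It reuses the `encode_string_by_tiktoken` helper from `lightrag.utils`, which `operate.py` also uses.

```python
from lightrag import LightRAG
from lightrag.utils import encode_string_by_tiktoken


def chunking_by_paragraph(
    content: str,
    split_by_character=None,
    split_by_character_only=False,
    overlap_token_size=128,
    max_token_size=1024,
    tiktoken_model="gpt-4o",
    **kwargs,  # receives chunking_func_kwargs
):
    # One chunk per blank-line-separated paragraph, mirroring the
    # {"tokens", "content", "chunk_order_index"} shape used upstream.
    results = []
    paragraphs = [p.strip() for p in content.split("\n\n") if p.strip()]
    for index, paragraph in enumerate(paragraphs):
        tokens = encode_string_by_tiktoken(paragraph, model_name=tiktoken_model)
        results.append(
            {
                "tokens": len(tokens),
                "content": paragraph,
                "chunk_order_index": index,
            }
        )
    return results


rag = LightRAG(
    working_dir="./rag_storage",
    chunking_func=chunking_by_paragraph,
    chunking_func_kwargs={},  # forwarded to the chunker as **kwargs
)
```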
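For documents that are already chunked, the new `insert_custom_chunks` entry point skips the chunker entirely and runs entity extraction on the chunks as given; chunks whose content hash is already in storage are filtered out before upserting. A minimal sketch (paths are placeholders; `rag` is an instance configured as above):

```python
with open("./paper.md", "r", encoding="utf-8") as f:
    full_text = f.read()

# Pre-computed chunks from any external pipeline.
text_chunks = [p for p in full_text.split("\n\n") if p.strip()]

rag.insert_custom_chunks(full_text, text_chunks)
```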
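Finally, the README table above describes the three `embedding_cache_config` parameters. A sketch of enabling question-answer cache lookup, using the documented keys and defaults (the threshold value is illustrative):

```python
rag = LightRAG(
    working_dir="./rag_storage",
    embedding_cache_config={
        "enabled": True,               # check cached responses before calling the LLM
        "similarity_threshold": 0.95,  # return the cached answer above this similarity
        "use_llm_check": False,        # optional secondary LLM similarity verification
    },
)
```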