Refactor storage implementations to support both single and multi-process modes

• Add shared storage management module
• Support process/thread lock based on mode

yangdx committed 2025-02-26 05:38:38 +08:00
parent 8050b0f91b
commit 2752a764ae
10 changed files with 608 additions and 623 deletions
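
The diff below is one of the ten changed files: the JSON KV backend drops its private Manager bookkeeping and instead imports get_namespace_data and get_storage_lock from the new shared storage module. That module is not part of this file's diff, so what follows is only a minimal sketch of the interface the refactored code relies on, assuming a startup hook that picks thread or process primitives based on the worker count; the names initialize_share_data and _is_multiprocess and the workers parameter are illustrative, not taken from the commit.

# shared_storage.py -- illustrative sketch, not the module shipped in this commit
import threading
from multiprocessing import Manager
from typing import Any, Dict

_manager = None
_is_multiprocess = False      # hypothetical flag: set once at startup
_storage_lock: Any = None
_shared_dicts: Dict[str, Any] = {}


def initialize_share_data(workers: int = 1) -> None:
    """Choose process- or thread-level primitives based on the worker count."""
    global _manager, _is_multiprocess, _storage_lock
    if workers > 1:
        # Multi-process mode: a manager-backed lock and dicts can be shared
        # across worker processes.
        _manager = Manager()
        _is_multiprocess = True
        _storage_lock = _manager.Lock()
    else:
        # Single-process mode: plain objects are enough and much cheaper.
        _is_multiprocess = False
        _storage_lock = threading.Lock()


def get_storage_lock():
    """Return the lock matching the current mode (both support `with`)."""
    return _storage_lock


def get_namespace_data(namespace: str):
    """Return the shared dict for a namespace, creating it on first access."""
    if namespace not in _shared_dicts:
        _shared_dicts[namespace] = _manager.dict() if _is_multiprocess else {}
    return _shared_dicts[namespace]

A real implementation must also create the Manager in the main process before workers are forked or spawned, and guard first-time namespace creation with the lock; the sketch glosses over both.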


@@ -1,8 +1,6 @@
-import asyncio
 import os
 from dataclasses import dataclass
 from typing import Any, final
-import threading

 from lightrag.base import (
     BaseKVStorage,
@@ -12,26 +10,7 @@ from lightrag.utils import (
     logger,
     write_json,
 )
-from lightrag.api.utils_api import manager as main_process_manager
-
-# Global variables for shared memory management
-_init_lock = threading.Lock()
-_manager = None
-_shared_kv_data = None
-
-
-def _get_manager():
-    """Get or create the global manager instance"""
-    global _manager, _shared_kv_data
-    with _init_lock:
-        if _manager is None:
-            try:
-                _manager = main_process_manager
-                _shared_kv_data = _manager.dict()
-            except Exception as e:
-                logger.error(f"Failed to initialize shared memory manager: {e}")
-                raise RuntimeError(f"Shared memory initialization failed: {e}")
-    return _manager
+from .shared_storage import get_namespace_data, get_storage_lock


 @final
@@ -39,57 +18,49 @@ def _get_manager():
 class JsonKVStorage(BaseKVStorage):
     def __post_init__(self):
         working_dir = self.global_config["working_dir"]
-        self._file_name = os.path.join(working_dir, f"kv_store_{self.namespace}.json")
-        self._lock = asyncio.Lock()
-
-        # Ensure manager is initialized
-        _get_manager()
-
-        # Get or create namespace data
-        if self.namespace not in _shared_kv_data:
-            with _init_lock:
-                if self.namespace not in _shared_kv_data:
-                    try:
-                        initial_data = load_json(self._file_name) or {}
-                        _shared_kv_data[self.namespace] = initial_data
-                    except Exception as e:
-                        logger.error(f"Failed to initialize shared data for namespace {self.namespace}: {e}")
-                        raise RuntimeError(f"Shared data initialization failed: {e}")
-
-        try:
-            self._data = _shared_kv_data[self.namespace]
-            logger.info(f"Load KV {self.namespace} with {len(self._data)} data")
-        except Exception as e:
-            logger.error(f"Failed to access shared memory: {e}")
-            raise RuntimeError(f"Cannot access shared memory: {e}")
+        self._storage_lock = get_storage_lock()
+        self._data = get_namespace_data(self.namespace)
+        with self._storage_lock:
+            if not self._data:
+                self._file_name = os.path.join(working_dir, f"kv_store_{self.namespace}.json")
+                self._data: dict[str, Any] = load_json(self._file_name) or {}
+                logger.info(f"Load KV {self.namespace} with {len(self._data)} data")

     async def index_done_callback(self) -> None:
-        write_json(self._data, self._file_name)
+        # File writes must be locked to keep multiple processes from writing
+        # at the same time and corrupting the file
+        with self._storage_lock:
+            write_json(self._data, self._file_name)

     async def get_by_id(self, id: str) -> dict[str, Any] | None:
-        return self._data.get(id)
+        with self._storage_lock:
+            return self._data.get(id)

     async def get_by_ids(self, ids: list[str]) -> list[dict[str, Any]]:
-        return [
-            (
-                {k: v for k, v in self._data[id].items()}
-                if self._data.get(id, None)
-                else None
-            )
-            for id in ids
-        ]
+        with self._storage_lock:
+            return [
+                (
+                    {k: v for k, v in self._data[id].items()}
+                    if self._data.get(id, None)
+                    else None
+                )
+                for id in ids
+            ]

     async def filter_keys(self, keys: set[str]) -> set[str]:
-        return set(keys) - set(self._data.keys())
+        with self._storage_lock:
+            return set(keys) - set(self._data.keys())

     async def upsert(self, data: dict[str, dict[str, Any]]) -> None:
         logger.info(f"Inserting {len(data)} to {self.namespace}")
         if not data:
             return
-        left_data = {k: v for k, v in data.items() if k not in self._data}
-        self._data.update(left_data)
+        with self._storage_lock:
+            left_data = {k: v for k, v in data.items() if k not in self._data}
+            self._data.update(left_data)

     async def delete(self, ids: list[str]) -> None:
-        for doc_id in ids:
-            self._data.pop(doc_id, None)
+        with self._storage_lock:
+            for doc_id in ids:
+                self._data.pop(doc_id, None)
         await self.index_done_callback()
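
Taken together, the hunks replace the per-instance asyncio.Lock and the double-checked _init_lock initialization with a single storage lock whose concrete type follows the server mode, and every read or write of self._data now happens under it. A small usage sketch of the refactored class follows, assuming an import path from the repo layout and constructor fields from the dataclass base; neither is shown in this excerpt.

import asyncio

# Import path assumed from the package layout implied by the relative import
# in the diff; it is not shown in this commit excerpt.
from lightrag.kg.json_kv_impl import JsonKVStorage


async def main() -> None:
    # Assumes the shared storage module was initialized at startup in
    # single-process mode (e.g. the initialize_share_data() sketch above).
    # Constructor fields are assumed from the dataclass base; embedding_func
    # is unused by this backend.
    kv = JsonKVStorage(
        namespace="full_docs",
        global_config={"working_dir": "./rag_storage"},
        embedding_func=None,
    )
    await kv.upsert({"doc-1": {"content": "hello world"}})
    print(await kv.get_by_id("doc-1"))               # {'content': 'hello world'}
    print(await kv.filter_keys({"doc-1", "doc-2"}))  # {'doc-2'}
    await kv.index_done_callback()                   # persists the JSON file under the lock


asyncio.run(main())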