diff --git a/lightrag/api/lightrag_server.py b/lightrag/api/lightrag_server.py
index e1b24731..3bfa4943 100644
--- a/lightrag/api/lightrag_server.py
+++ b/lightrag/api/lightrag_server.py
@@ -13,18 +13,6 @@ from fastapi import (
 from typing import Dict
 import threading
 
-# Global progress tracker
-scan_progress: Dict = {
-    "is_scanning": False,
-    "current_file": "",
-    "indexed_count": 0,
-    "total_files": 0,
-    "progress": 0,
-}
-
-# Lock for thread-safe operations
-progress_lock = threading.Lock()
-
 import json
 import os
 
@@ -34,7 +22,7 @@ import logging
 import argparse
 import time
 import re
-from typing import List, Dict, Any, Optional, Union
+from typing import List, Any, Optional, Union
 
 from lightrag import LightRAG, QueryParam
 from lightrag.api import __api_version__
@@ -57,8 +45,21 @@ import pipmaster as pm
 
 from dotenv import load_dotenv
 
+# Load environment variables
 load_dotenv()
 
+# Global progress tracker
+scan_progress: Dict = {
+    "is_scanning": False,
+    "current_file": "",
+    "indexed_count": 0,
+    "total_files": 0,
+    "progress": 0,
+}
+
+# Lock for thread-safe operations
+progress_lock = threading.Lock()
+
 
 def estimate_tokens(text: str) -> int:
     """Estimate the number of tokens in text
diff --git a/lightrag/utils.py b/lightrag/utils.py
index 8ab052b6..4f06d718 100644
--- a/lightrag/utils.py
+++ b/lightrag/utils.py
@@ -463,7 +463,7 @@ def quantize_embedding(embedding: Union[np.ndarray, list], bits=8) -> tuple:
     # Convert list to numpy array if needed
     if isinstance(embedding, list):
         embedding = np.array(embedding)
-
+
     # Calculate min/max values for reconstruction
     min_val = embedding.min()
     max_val = embedding.max()