Fixed missing imports bug and fixed linting
.gitignore (vendored, 2 changes)
@@ -23,4 +23,4 @@ examples/input/
 examples/output/
 .DS_Store
 #Remove config.ini from repo
-*.ini
\ No newline at end of file
+*.ini
@@ -684,7 +684,9 @@ def create_app(args):
                     trace_exception(e)
                     logging.error(f"Error indexing file {file_path}: {str(e)}")

-        ASCIIColors.info(f"Indexed {len(new_files)} documents from {args.input_dir}")
+        ASCIIColors.info(
+            f"Indexed {len(new_files)} documents from {args.input_dir}"
+        )
     except Exception as e:
         logging.error(f"Error during startup indexing: {str(e)}")
     yield
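The wrapped ASCIIColors.info call above is the linting half of this commit in miniature: calls that overrun the formatter's length limit are exploded so each argument sits on its own line. A minimal runnable sketch of the before and after, assuming Black's default 88-column limit and the ascii_colors package that provides this logger:

    from ascii_colors import ASCIIColors

    new_files = ["doc1.txt", "doc2.txt"]  # stand-in data for this sketch
    input_dir = "./inputs"

    # Before: one long call, flagged once it crosses the length limit.
    # ASCIIColors.info(f"Indexed {len(new_files)} documents from {input_dir}")

    # After: the same call with the argument on its own line, as in the hunk above.
    ASCIIColors.info(
        f"Indexed {len(new_files)} documents from {input_dir}"
    )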
@@ -917,7 +919,6 @@ def create_app(args):
         else:
             logging.warning(f"No content extracted from file: {file_path}")

-
     @app.post("/documents/scan", dependencies=[Depends(optional_api_key)])
     async def scan_for_new_documents():
         """
@@ -622,7 +622,7 @@
             const data = await response.json();
             // Convert indexed_files to array if it's not already
             const files = Array.isArray(data.indexed_files) ? data.indexed_files : data.indexed_files.split(',');
-
+
             healthInfo.innerHTML = `
                 <div class="space-y-4">
                     <div class="flex items-center">
@@ -1,6 +1,7 @@
 import httpx
+from typing import Literal


 class APIStatusError(Exception):
     """Raised when an API response has a status code of 4xx or 5xx."""

@@ -8,14 +9,19 @@ class APIStatusError(Exception):
     status_code: int
     request_id: str | None

-    def __init__(self, message: str, *, response: httpx.Response, body: object | None) -> None:
+    def __init__(
+        self, message: str, *, response: httpx.Response, body: object | None
+    ) -> None:
         super().__init__(message, response.request, body=body)
         self.response = response
         self.status_code = response.status_code
         self.request_id = response.headers.get("x-request-id")


 class APIConnectionError(Exception):
-    def __init__(self, *, message: str = "Connection error.", request: httpx.Request) -> None:
+    def __init__(
+        self, *, message: str = "Connection error.", request: httpx.Request
+    ) -> None:
         super().__init__(message, request, body=None)

@@ -46,10 +52,7 @@ class UnprocessableEntityError(APIStatusError):
 class RateLimitError(APIStatusError):
     status_code: Literal[429] = 429  # pyright: ignore[reportIncompatibleVariableOverride]


-class APITimeoutError(APIConnectionError):
-    def __init__(self, request: httpx.Request) -> None:
-        super().__init__(message="Request timed out.", request=request)
-
-
 class BadRequestError(APIStatusError):
     status_code: Literal[400] = 400  # pyright: ignore[reportIncompatibleVariableOverride]
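The Literal annotations above are the "missing imports" half of the commit title: status_code is annotated with Literal[...], but the name was never imported, so merely importing the module raised NameError. A minimal sketch of the failure mode and the one-line fix (class names taken from the diff; the httpx plumbing is omitted):

    from typing import Literal  # the import this commit adds


    class APIStatusError(Exception):
        """Raised when an API response has a status code of 4xx or 5xx."""


    class RateLimitError(APIStatusError):
        # Without the typing import above, evaluating this annotation raises
        # NameError: name 'Literal' is not defined.
        status_code: Literal[429] = 429


    print(RateLimitError.status_code)  # prints: 429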
@@ -1,6 +1,7 @@
 import os
 from tqdm.asyncio import tqdm as tqdm_async
 from dataclasses import dataclass

+# aioredis is a depricated library, replaced with redis
 from redis.asyncio import Redis
 from lightrag.utils import logger
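The comment added above records a real migration: the standalone aioredis package is deprecated, and its asyncio client now ships inside redis-py (4.2 and later) as redis.asyncio. A minimal sketch of the replacement import in use, assuming a Redis server on localhost:6379:

    import asyncio

    from redis.asyncio import Redis  # replaces `import aioredis`


    async def main() -> None:
        client = Redis(host="localhost", port=6379)
        print(await client.ping())  # True when the server is reachable
        await client.aclose()  # redis-py >= 5; older releases use close()


    asyncio.run(main())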
@@ -42,7 +42,7 @@ __status__ = "Production"


 import os
-import pipmaster as pm # Pipmaster for dynamic library install
+import pipmaster as pm  # Pipmaster for dynamic library install

 # install specific modules
 if not pm.is_installed("openai"):
@@ -71,6 +71,7 @@ from lightrag.utils import (

 import numpy as np

+
 @retry(
     stop=stop_after_attempt(3),
     wait=wait_exponential(multiplier=1, min=4, max=10),
@@ -153,6 +154,7 @@ async def azure_openai_complete(
         return locate_json_string_body_from_string(result)
     return result

+
 @wrap_embedding_func_with_attrs(embedding_dim=1536, max_token_size=8191)
 @retry(
     stop=stop_after_attempt(3),
@@ -185,4 +187,3 @@ async def azure_openai_embed(
         model=model, input=texts, encoding_format="float"
     )
     return np.array([dp.embedding for dp in response.data])
-
@@ -41,12 +41,12 @@ __author__ = "lightrag Team"
 __status__ = "Production"

 import sys
 import copy
 import os
 import json

-import pipmaster as pm # Pipmaster for dynamic library install
+import pipmaster as pm  # Pipmaster for dynamic library install

 if not pm.is_installed("aioboto3"):
     pm.install("aioboto3")
 if not pm.is_installed("tenacity"):
@@ -60,15 +60,11 @@ from tenacity import (
     retry_if_exception_type,
 )

-from lightrag.exceptions import (
-    APIConnectionError,
-    RateLimitError,
-    APITimeoutError,
-)
 from lightrag.utils import (
     locate_json_string_body_from_string,
 )


 class BedrockError(Exception):
     """Generic error for issues related to Amazon Bedrock"""

@@ -42,7 +42,7 @@ __status__ = "Production"

 import copy
 import os
-import pipmaster as pm # Pipmaster for dynamic library install
+import pipmaster as pm  # Pipmaster for dynamic library install

 # install specific modules
 if not pm.is_installed("transformers"):
@@ -69,9 +69,11 @@ from lightrag.utils import (
     locate_json_string_body_from_string,
 )
+
 import torch
 import numpy as np

 os.environ["TOKENIZERS_PARALLELISM"] = "false"

+
 @lru_cache(maxsize=1)
 def initialize_hf_model(model_name):
     hf_tokenizer = AutoTokenizer.from_pretrained(
@@ -155,7 +157,6 @@ async def hf_model_if_cache(
     return response_text


-
 async def hf_model_complete(
     prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
 ) -> str:
@@ -39,7 +39,7 @@ __author__ = "lightrag Team"
 __status__ = "Production"

 import os
-import pipmaster as pm # Pipmaster for dynamic library install
+import pipmaster as pm  # Pipmaster for dynamic library install

 # install specific modules
 if not pm.is_installed("lmdeploy"):
@@ -47,25 +47,8 @@ if not pm.is_installed("lmdeploy"):
 if not pm.is_installed("tenacity"):
     pm.install("tenacity")

-from tenacity import (
-    retry,
-    stop_after_attempt,
-    wait_exponential,
-    retry_if_exception_type,
-)
-
-from lightrag.utils import (
-    wrap_embedding_func_with_attrs,
-    locate_json_string_body_from_string,
-    safe_unicode_decode,
-    logger,
-)
-
-from lightrag.types import GPTKeywordExtractionFormat
-from functools import lru_cache
-
 import numpy as np
-from typing import Union
 import aiohttp

@@ -101,4 +84,3 @@ async def jina_embed(
     }
     data_list = await fetch_data(url, headers, data)
     return np.array([dp["embedding"] for dp in data_list])
-
@@ -40,7 +40,7 @@ __version__ = "1.0.0"
 __author__ = "lightrag Team"
 __status__ = "Production"

-import pipmaster as pm # Pipmaster for dynamic library install
+import pipmaster as pm  # Pipmaster for dynamic library install

 # install specific modules
 if not pm.is_installed("lmdeploy"):
@@ -63,6 +63,7 @@ from tenacity import (

 from functools import lru_cache

+
 @lru_cache(maxsize=1)
 def initialize_lmdeploy_pipeline(
     model,
@@ -187,4 +188,4 @@ async def lmdeploy_model_if_cache(
         session_id=1,
     ):
         response += res.response
-    return response
\ No newline at end of file
+    return response
@@ -62,11 +62,13 @@ __status__ = "Production"
 __project_url__ = "https://github.com/ParisNeo/lollms"
 __doc_url__ = "https://github.com/ParisNeo/lollms/docs"
 import sys
+
 if sys.version_info < (3, 9):
     from typing import AsyncIterator
 else:
     from collections.abc import AsyncIterator
-import pipmaster as pm # Pipmaster for dynamic library install
+import pipmaster as pm  # Pipmaster for dynamic library install
+
 if not pm.is_installed("aiohttp"):
     pm.install("aiohttp")
 if not pm.is_installed("tenacity"):
@@ -89,6 +91,7 @@ from lightrag.exceptions import (
 from typing import Union, List
 import numpy as np

+
 @retry(
     stop=stop_after_attempt(3),
     wait=wait_exponential(multiplier=1, min=4, max=10),
@@ -185,7 +188,6 @@ async def lollms_model_complete(
     )


-
 async def lollms_embed(
     texts: List[str], embed_model=None, base_url="http://localhost:9600", **kwargs
 ) -> np.ndarray:
@@ -219,4 +221,4 @@ async def lollms_embed(
             result = await response.json()
             embeddings.append(result["vector"])

-    return np.array(embeddings)
\ No newline at end of file
+    return np.array(embeddings)
@@ -41,15 +41,14 @@ __author__ = "lightrag Team"
 __status__ = "Production"


-
 import sys
 import os

 if sys.version_info < (3, 9):
-    from typing import AsyncIterator
+    pass
 else:
-    from collections.abc import AsyncIterator
-import pipmaster as pm # Pipmaster for dynamic library install
+    pass
+import pipmaster as pm  # Pipmaster for dynamic library install

 # install specific modules
 if not pm.is_installed("openai"):
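The bare pass statements introduced above (and in two later hunks) are the footprint of automated unused-import removal: when the only statement in a branch is deleted, the tool must leave pass behind to keep the if/else syntactically valid. A hypothetical before-and-after sketch of that transformation:

    import sys

    # Before: AsyncIterator was imported here but never used in this module.
    # if sys.version_info < (3, 9):
    #     from typing import AsyncIterator
    # else:
    #     from collections.abc import AsyncIterator

    # After: the unused imports are gone, but each branch still needs a body.
    if sys.version_info < (3, 9):
        pass
    else:
        pass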
@@ -70,15 +69,12 @@ from tenacity import (

 from lightrag.utils import (
     wrap_embedding_func_with_attrs,
     locate_json_string_body_from_string,
     safe_unicode_decode,
     logger,
 )

-from lightrag.types import GPTKeywordExtractionFormat
-
 import numpy as np


 @wrap_embedding_func_with_attrs(embedding_dim=2048, max_token_size=512)
 @retry(
     stop=stop_after_attempt(3),
@@ -41,11 +41,12 @@ __author__ = "lightrag Team"
 __status__ = "Production"

 import sys

 if sys.version_info < (3, 9):
     from typing import AsyncIterator
 else:
     from collections.abc import AsyncIterator
-import pipmaster as pm # Pipmaster for dynamic library install
+import pipmaster as pm  # Pipmaster for dynamic library install
+
 # install specific modules
 if not pm.is_installed("ollama"):
@@ -114,6 +115,7 @@ async def ollama_model_if_cache(
     else:
         return response["message"]["content"]

+
 async def ollama_model_complete(
     prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
 ) -> Union[str, AsyncIterator[str]]:
@@ -129,6 +131,7 @@ async def ollama_model_complete(
         **kwargs,
     )

+
 async def ollama_embedding(texts: list[str], embed_model, **kwargs) -> np.ndarray:
     """
     Deprecated in favor of `embed`.
@@ -152,4 +155,4 @@ async def ollama_embed(texts: list[str], embed_model, **kwargs) -> np.ndarray:
     kwargs["headers"] = headers
     ollama_client = ollama.Client(**kwargs)
     data = ollama_client.embed(model=embed_model, input=texts)
-    return data["embeddings"]
\ No newline at end of file
+    return data["embeddings"]
@@ -41,7 +41,6 @@ __author__ = "lightrag Team"
 __status__ = "Production"


-
 import sys
 import os

@@ -49,7 +48,7 @@ if sys.version_info < (3, 9):
     from typing import AsyncIterator
 else:
     from collections.abc import AsyncIterator
-import pipmaster as pm # Pipmaster for dynamic library install
+import pipmaster as pm  # Pipmaster for dynamic library install

 # install specific modules
 if not pm.is_installed("openai"):
@@ -78,6 +77,7 @@ from lightrag.types import GPTKeywordExtractionFormat
 import numpy as np
 from typing import Union

+
 @retry(
     stop=stop_after_attempt(3),
     wait=wait_exponential(multiplier=1, min=4, max=10),
@@ -141,7 +141,6 @@ async def openai_complete_if_cache(
     return content


-
 async def openai_complete(
     prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
 ) -> Union[str, AsyncIterator[str]]:
@@ -205,7 +204,6 @@ async def nvidia_openai_complete(
     return result


-
 @wrap_embedding_func_with_attrs(embedding_dim=1536, max_token_size=8192)
 @retry(
     stop=stop_after_attempt(3),
@@ -39,23 +39,18 @@ __author__ = "lightrag Team"
 __status__ = "Production"
-
 import sys
-import copy
-import os
-import json
-
 if sys.version_info < (3, 9):
-    from typing import AsyncIterator
+    pass
 else:
-    from collections.abc import AsyncIterator
-import pipmaster as pm # Pipmaster for dynamic library install
+    pass
+import pipmaster as pm  # Pipmaster for dynamic library install

 # install specific modules
 if not pm.is_installed("lmdeploy"):
     pm.install("lmdeploy")

 from openai import (
     AsyncOpenAI,
     AsyncAzureOpenAI,
     APIConnectionError,
     RateLimitError,
     APITimeoutError,
@@ -67,19 +62,12 @@ from tenacity import (
     retry_if_exception_type,
 )

-from lightrag.utils import (
-    wrap_embedding_func_with_attrs,
-    locate_json_string_body_from_string,
-    safe_unicode_decode,
-    logger,
-)
-
-from lightrag.types import GPTKeywordExtractionFormat
-from functools import lru_cache
-
 import numpy as np
-from typing import Union
 import aiohttp
 import base64
 import struct


 @retry(
     stop=stop_after_attempt(3),
@@ -45,18 +45,16 @@ import re
 import json

 if sys.version_info < (3, 9):
-    from typing import AsyncIterator
+    pass
 else:
-    from collections.abc import AsyncIterator
-import pipmaster as pm # Pipmaster for dynamic library install
-
+    pass
+import pipmaster as pm  # Pipmaster for dynamic library install
 # install specific modules
 if not pm.is_installed("zhipuai"):
     pm.install("zhipuai")

 from openai import (
     AsyncOpenAI,
     AsyncAzureOpenAI,
     APIConnectionError,
     RateLimitError,
     APITimeoutError,
@@ -70,17 +68,15 @@ from tenacity import (

 from lightrag.utils import (
     wrap_embedding_func_with_attrs,
     locate_json_string_body_from_string,
     safe_unicode_decode,
     logger,
 )

 from lightrag.types import GPTKeywordExtractionFormat
-from functools import lru_cache
-
 import numpy as np
 from typing import Union, List, Optional, Dict


 @retry(
     stop=stop_after_attempt(3),
     wait=wait_exponential(multiplier=1, min=4, max=10),
@@ -247,4 +243,4 @@ async def zhipu_embedding(
     except Exception as e:
         raise Exception(f"Error calling ChatGLM Embedding API: {str(e)}")

-    return np.array(embeddings)
\ No newline at end of file
+    return np.array(embeddings)
@@ -6,7 +6,6 @@ from dataclasses import dataclass
 from typing import Any, Union, cast, Dict
 import networkx as nx
 import numpy as np
-import pipmaster as pm

 from nano_vectordb import NanoVectorDB
 import time
@@ -1,6 +1,7 @@
 from pydantic import BaseModel
+from typing import List


 class GPTKeywordExtractionFormat(BaseModel):
     high_level_keywords: List[str]
     low_level_keywords: List[str]
@@ -535,7 +535,8 @@ class CacheData:
     min_val: Optional[float] = None
     max_val: Optional[float] = None
     mode: str = "default"
-    cache_type: str ="query"
+    cache_type: str = "query"
+

 async def save_to_cache(hashing_kv, cache_data: CacheData):
     if hashing_kv is None or hasattr(cache_data.content, "__aiter__"):
@@ -1,7 +1,6 @@
 accelerate
 aiofiles
 aiohttp
-redis
 asyncpg
 configparser
@@ -30,6 +29,7 @@ python-docx
 python-dotenv
 python-pptx
 pyvis
+redis
 setuptools
 sqlalchemy
 tenacity
@@ -39,4 +39,3 @@ tenacity
 tiktoken
 tqdm
 xxhash
-