From ea41d08b9fbd5eade74ce04ffe0a870ad73278f0 Mon Sep 17 00:00:00 2001 From: Yannick Stephan Date: Tue, 18 Feb 2025 20:05:51 +0100 Subject: [PATCH] Remove torch from the requirements of the LightRAG server --- README.md | 3 +++ lightrag/api/requirements.txt | 1 - lightrag/llm/hf.py | 49 +++++------------------------------ 3 files changed, 9 insertions(+), 44 deletions(-) diff --git a/README.md b/README.md index eaa058ad..e675efc7 100644 --- a/README.md +++ b/README.md @@ -246,6 +246,9 @@ rag = LightRAG( Using Hugging Face Models * If you want to use Hugging Face models, you only need to set LightRAG as follows: + +See `lightrag_hf_demo.py` for a working example. + ```python from lightrag.llm import hf_model_complete, hf_embed from transformers import AutoModel, AutoTokenizer diff --git a/lightrag/api/requirements.txt b/lightrag/api/requirements.txt index d0d00e29..0e7dfc90 100644 --- a/lightrag/api/requirements.txt +++ b/lightrag/api/requirements.txt @@ -6,5 +6,4 @@ python-dotenv python-multipart tenacity tiktoken -torch uvicorn diff --git a/lightrag/llm/hf.py b/lightrag/llm/hf.py index 075fc357..d678c611 100644 --- a/lightrag/llm/hf.py +++ b/lightrag/llm/hf.py @@ -1,47 +1,7 @@ -""" -Hugging face LLM Interface Module -========================== - -This module provides interfaces for interacting with Hugging face's language models, -including text generation and embedding capabilities. 
- -Author: Lightrag team -Created: 2024-01-24 -License: MIT License - -Copyright (c) 2024 Lightrag - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -Version: 1.0.0 - -Change Log: -- 1.0.0 (2024-01-24): Initial release - * Added async chat completion support - * Added embedding generation - * Added stream response capability - -Dependencies: - - transformers - - numpy - - pipmaster - - Python >= 3.10 - -Usage: - from llm_interfaces.hf import hf_model_complete, hf_embed -""" - -__version__ = "1.0.0" -__author__ = "lightrag Team" -__status__ = "Production" - import copy import os +from functools import lru_cache + import pipmaster as pm # Pipmaster for dynamic library install # install specific modules @@ -51,9 +11,10 @@ if not pm.is_installed("torch"): pm.install("torch") if not pm.is_installed("tenacity"): pm.install("tenacity") +if not pm.is_installed("numpy"): + pm.install("numpy") from transformers import AutoTokenizer, AutoModelForCausalLM -from functools import lru_cache from tenacity import ( retry, stop_after_attempt,