Removed never-used method

This commit is contained in:
Yannick Stephan
2025-02-18 19:38:04 +01:00
parent 161baa6f08
commit 24ae083284
2 changed files with 8 additions and 46 deletions

View File

@@ -13,7 +13,9 @@ if not pm.is_installed("ollama"):
if not pm.is_installed("tenacity"): if not pm.is_installed("tenacity"):
pm.install("tenacity") pm.install("tenacity")
import ollama import ollama
from tenacity import ( from tenacity import (
retry, retry,
stop_after_attempt, stop_after_attempt,
@@ -26,7 +28,7 @@ from lightrag.exceptions import (
APITimeoutError, APITimeoutError,
) )
from lightrag.api import __api_version__ from lightrag.api import __api_version__
from lightrag.utils import extract_reasoning
import numpy as np import numpy as np
from typing import Union from typing import Union
@@ -38,7 +40,7 @@ from typing import Union
(RateLimitError, APIConnectionError, APITimeoutError) (RateLimitError, APIConnectionError, APITimeoutError)
), ),
) )
async def ollama_model_if_cache( async def _ollama_model_if_cache(
model, model,
prompt, prompt,
system_prompt=None, system_prompt=None,
@@ -46,7 +48,7 @@ async def ollama_model_if_cache(
**kwargs, **kwargs,
) -> Union[str, AsyncIterator[str]]: ) -> Union[str, AsyncIterator[str]]:
stream = True if kwargs.get("stream") else False stream = True if kwargs.get("stream") else False
reasoning_tag = kwargs.pop("reasoning_tag", None)
kwargs.pop("max_tokens", None) kwargs.pop("max_tokens", None)
# kwargs.pop("response_format", None) # allow json # kwargs.pop("response_format", None) # allow json
host = kwargs.pop("host", None) host = kwargs.pop("host", None)
@@ -84,11 +86,7 @@ async def ollama_model_if_cache(
response and can simply be trimmed. response and can simply be trimmed.
""" """
return ( return model_response
model_response
if reasoning_tag is None
else extract_reasoning(model_response, reasoning_tag).response_content
)
async def ollama_model_complete( async def ollama_model_complete(
@@ -98,7 +96,7 @@ async def ollama_model_complete(
if keyword_extraction: if keyword_extraction:
kwargs["format"] = "json" kwargs["format"] = "json"
model_name = kwargs["hashing_kv"].global_config["llm_model_name"] model_name = kwargs["hashing_kv"].global_config["llm_model_name"]
return await ollama_model_if_cache( return await _ollama_model_if_cache(
model_name, model_name,
prompt, prompt,
system_prompt=system_prompt, system_prompt=system_prompt,

View File

@@ -18,13 +18,7 @@ import tiktoken
from lightrag.prompt import PROMPTS from lightrag.prompt import PROMPTS
import pipmaster as pm # Pipmaster for dynamic library install
# install specific modules
if not pm.is_installed("bs4"):
pm.install("bs4")
import bs4
VERBOSE_DEBUG = os.getenv("VERBOSE", "false").lower() == "true" VERBOSE_DEBUG = os.getenv("VERBOSE", "false").lower() == "true"
@@ -90,12 +84,6 @@ class EmbeddingFunc:
return await self.func(*args, **kwargs) return await self.func(*args, **kwargs)
@dataclass
class ReasoningResponse:
reasoning_content: str | None
response_content: str
tag: str
def locate_json_string_body_from_string(content: str) -> str | None: def locate_json_string_body_from_string(content: str) -> str | None:
"""Locate the JSON string body from a string""" """Locate the JSON string body from a string"""
@@ -728,27 +716,3 @@ def get_conversation_turns(
return "\n".join(formatted_turns) return "\n".join(formatted_turns)
def extract_reasoning(response: str, tag: str) -> ReasoningResponse:
"""Extract the reasoning section and the following section from the LLM response.
Args:
response: LLM response
tag: Tag to extract
Returns:
ReasoningResponse: Reasoning section and following section
"""
soup = bs4.BeautifulSoup(response, "html.parser")
reasoning_section = soup.find(tag)
if reasoning_section is None:
return ReasoningResponse(None, response, tag)
reasoning_content = reasoning_section.get_text().strip()
after_reasoning_section = reasoning_section.next_sibling
if after_reasoning_section is None:
return ReasoningResponse(reasoning_content, "", tag)
after_reasoning_content = after_reasoning_section.get_text().strip()
return ReasoningResponse(reasoning_content, after_reasoning_content, tag)