From 13c67c2bcffbf50c6a7be1fb4bd6a259ab515f18 Mon Sep 17 00:00:00 2001
From: Larfii <834462287@qq.com>
Date: Mon, 7 Oct 2024 17:28:18 +0800
Subject: [PATCH] update

---
 lightrag/lightrag.py |  6 +-----
 lightrag/llm.py      | 17 ++---------------
 2 files changed, 3 insertions(+), 20 deletions(-)

diff --git a/lightrag/lightrag.py b/lightrag/lightrag.py
index 9311480e..f11e868b 100644
--- a/lightrag/lightrag.py
+++ b/lightrag/lightrag.py
@@ -97,11 +97,7 @@ class LightRAG:
     addon_params: dict = field(default_factory=dict)
     convert_response_to_json_func: callable = convert_response_to_json
 
-    def __post_init__(self):
-        # use proxy
-        os.environ['http_proxy'] = 'http://127.0.0.1:7890'
-        os.environ['https_proxy'] = 'http://127.0.0.1:7890'
-
+    def __post_init__(self):
         log_file = os.path.join(self.working_dir, "lightrag.log")
         set_logger(log_file)
         logger.info(f"Logger initialized for working directory: {self.working_dir}")
diff --git a/lightrag/llm.py b/lightrag/llm.py
index dbf6abe4..4b4f7e94 100644
--- a/lightrag/llm.py
+++ b/lightrag/llm.py
@@ -17,7 +17,7 @@ from .utils import compute_args_hash, wrap_embedding_func_with_attrs
     retry=retry_if_exception_type((RateLimitError, APIConnectionError, Timeout)),
 )
 async def openai_complete_if_cache(
-    model, prompt, api_key='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'
+    model, prompt, api_key=''
     , system_prompt=None, history_messages=[], **kwargs
 ) -> str:
     openai_async_client = AsyncOpenAI(api_key=api_key)
@@ -72,26 +72,13 @@ async def gpt_4o_mini_complete(
     wait=wait_exponential(multiplier=1, min=4, max=10),
     retry=retry_if_exception_type((RateLimitError, APIConnectionError, Timeout)),
 )
-async def openai_embedding(texts: list[str]) -> np.ndarray:
-    api_key = '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'
+async def openai_embedding(texts: list[str], api_key='') -> np.ndarray:
     openai_async_client = AsyncOpenAI(api_key=api_key)
     response = await openai_async_client.embeddings.create(
         model="text-embedding-3-small", input=texts, encoding_format="float"
     )
     return np.array([dp.embedding for dp in response.data])
 
-async def moonshot_complete(
-    prompt, system_prompt=None, history_messages=[], **kwargs
-) -> str:
-    return await openai_complete_if_cache(
-        "moonshot-v1-128k",
-        prompt,
-        api_key='sk-OsvLvHgFFH3tz6Yhym3OAhcTfZ9y7rHEgQ3JDLmnuLpTw9C0',
-        system_prompt=system_prompt,
-        history_messages=history_messages,
-        **kwargs,
-    )
-
 if __name__ == "__main__":
     import asyncio