diff --git a/lightrag/llm/ollama.py b/lightrag/llm/ollama.py
index 3541bd67..296e263e 100644
--- a/lightrag/llm/ollama.py
+++ b/lightrag/llm/ollama.py
@@ -66,6 +66,7 @@ from lightrag.exceptions import (
     RateLimitError,
     APITimeoutError,
 )
+from lightrag.api import __api_version__
 from lightrag.utils import extract_reasoning
 import numpy as np
 from typing import Union
@@ -93,11 +94,12 @@ async def ollama_model_if_cache(
     timeout = kwargs.pop("timeout", None)
     kwargs.pop("hashing_kv", None)
     api_key = kwargs.pop("api_key", None)
-    headers = (
-        {"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"}
-        if api_key
-        else {"Content-Type": "application/json"}
-    )
+    headers = {
+        "Content-Type": "application/json",
+        "User-Agent": f"LightRAG/{__api_version__}",
+    }
+    if api_key:
+        headers["Authorization"] = f"Bearer {api_key}"
     ollama_client = ollama.AsyncClient(host=host, timeout=timeout, headers=headers)
     messages = []
     if system_prompt:
@@ -161,11 +163,12 @@ async def ollama_embedding(texts: list[str], embed_model, **kwargs) -> np.ndarra
 
 async def ollama_embed(texts: list[str], embed_model, **kwargs) -> np.ndarray:
     api_key = kwargs.pop("api_key", None)
-    headers = (
-        {"Content-Type": "application/json", "Authorization": api_key}
-        if api_key
-        else {"Content-Type": "application/json"}
-    )
+    headers = {
+        "Content-Type": "application/json",
+        "User-Agent": f"LightRAG/{__api_version__}",
+    }
+    if api_key:
+        headers["Authorization"] = api_key
     kwargs["headers"] = headers
     ollama_client = ollama.Client(**kwargs)
     data = ollama_client.embed(model=embed_model, input=texts)
diff --git a/lightrag/llm/openai.py b/lightrag/llm/openai.py
index 3f939d62..ca451bcf 100644
--- a/lightrag/llm/openai.py
+++ b/lightrag/llm/openai.py
@@ -73,6 +73,7 @@ from lightrag.utils import (
     logger,
 )
 from lightrag.types import GPTKeywordExtractionFormat
+from lightrag.api import __api_version__
 import numpy as np
 from typing import Union
 
@@ -102,8 +103,13 @@ async def openai_complete_if_cache(
     if api_key:
         os.environ["OPENAI_API_KEY"] = api_key
 
+    default_headers = {
+        "User-Agent": f"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_8) LightRAG/{__api_version__}",
+        "Content-Type": "application/json"
+    }
     openai_async_client = (
-        AsyncOpenAI() if base_url is None else AsyncOpenAI(base_url=base_url)
+        AsyncOpenAI(default_headers=default_headers) if base_url is None
+        else AsyncOpenAI(base_url=base_url, default_headers=default_headers)
     )
     kwargs.pop("hashing_kv", None)
     kwargs.pop("keyword_extraction", None)
@@ -287,8 +293,13 @@ async def openai_embed(
     if api_key:
         os.environ["OPENAI_API_KEY"] = api_key
 
+    default_headers = {
+        "User-Agent": f"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_8) LightRAG/{__api_version__}",
+        "Content-Type": "application/json"
+    }
     openai_async_client = (
-        AsyncOpenAI() if base_url is None else AsyncOpenAI(base_url=base_url)
+        AsyncOpenAI(default_headers=default_headers) if base_url is None
+        else AsyncOpenAI(base_url=base_url, default_headers=default_headers)
     )
     response = await openai_async_client.embeddings.create(
         model=model, input=texts, encoding_format="float"
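
For quick sanity-checking outside the package, a minimal standalone sketch of the header logic the Ollama side now uses is shown below. `build_headers` and the stubbed `__api_version__` value are illustrative only (the real value is imported from `lightrag.api`); the OpenAI side passes an equivalent dict to the client via `default_headers=`.

```python
# Standalone sketch of the header construction added in lightrag/llm/ollama.py.
# `__api_version__` is stubbed here so the snippet runs without importing lightrag.
__api_version__ = "0.0.0"  # placeholder value, not the real API version


def build_headers(api_key: str | None = None) -> dict[str, str]:
    # Always advertise a LightRAG User-Agent; attach Authorization only
    # when an API key is actually supplied.
    headers = {
        "Content-Type": "application/json",
        "User-Agent": f"LightRAG/{__api_version__}",
    }
    if api_key:
        headers["Authorization"] = f"Bearer {api_key}"
    return headers


if __name__ == "__main__":
    assert "Authorization" not in build_headers()
    assert build_headers("secret")["Authorization"] == "Bearer secret"
    print(build_headers()["User-Agent"])  # -> LightRAG/0.0.0
```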