From 2760433634275ca1dc1a28802fe58612e8110760 Mon Sep 17 00:00:00 2001
From: yangdx
Date: Thu, 6 Feb 2025 22:55:22 +0800
Subject: [PATCH] Add LightRAG version to User-Agent header for better request
 tracking
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

• Add User-Agent header with version info
• Update header creation in Ollama client
• Update header creation in OpenAI client
• Ensure consistent header format
• Include Mozilla UA string for OpenAI
---
 lightrag/llm/ollama.py | 23 +++++++++++++----------
 lightrag/llm/openai.py | 15 +++++++++++++--
 2 files changed, 26 insertions(+), 12 deletions(-)

diff --git a/lightrag/llm/ollama.py b/lightrag/llm/ollama.py
index 19f560e7..c65954f1 100644
--- a/lightrag/llm/ollama.py
+++ b/lightrag/llm/ollama.py
@@ -66,6 +66,7 @@ from lightrag.exceptions import (
     RateLimitError,
     APITimeoutError,
 )
+from lightrag.api import __api_version__
 
 import numpy as np
 from typing import Union
@@ -91,11 +92,12 @@ async def ollama_model_if_cache(
     timeout = kwargs.pop("timeout", None)
     kwargs.pop("hashing_kv", None)
     api_key = kwargs.pop("api_key", None)
-    headers = (
-        {"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"}
-        if api_key
-        else {"Content-Type": "application/json"}
-    )
+    headers = {
+        "Content-Type": "application/json",
+        "User-Agent": f"LightRAG/{__api_version__}"
+    }
+    if api_key:
+        headers["Authorization"] = f"Bearer {api_key}"
     ollama_client = ollama.AsyncClient(host=host, timeout=timeout, headers=headers)
     messages = []
     if system_prompt:
@@ -147,11 +149,12 @@ async def ollama_embedding(texts: list[str], embed_model, **kwargs) -> np.ndarra
 
 async def ollama_embed(texts: list[str], embed_model, **kwargs) -> np.ndarray:
     api_key = kwargs.pop("api_key", None)
-    headers = (
-        {"Content-Type": "application/json", "Authorization": api_key}
-        if api_key
-        else {"Content-Type": "application/json"}
-    )
+    headers = {
+        "Content-Type": "application/json",
+        "User-Agent": f"LightRAG/{__api_version__}"
+    }
+    if api_key:
+        headers["Authorization"] = api_key
     kwargs["headers"] = headers
     ollama_client = ollama.Client(**kwargs)
     data = ollama_client.embed(model=embed_model, input=texts)
diff --git a/lightrag/llm/openai.py b/lightrag/llm/openai.py
index 3f939d62..ca451bcf 100644
--- a/lightrag/llm/openai.py
+++ b/lightrag/llm/openai.py
@@ -73,6 +73,7 @@ from lightrag.utils import (
     logger,
 )
 from lightrag.types import GPTKeywordExtractionFormat
+from lightrag.api import __api_version__
 
 import numpy as np
 from typing import Union
@@ -102,8 +103,13 @@ async def openai_complete_if_cache(
     if api_key:
         os.environ["OPENAI_API_KEY"] = api_key
 
+    default_headers = {
+        "User-Agent": f"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_8) LightRAG/{__api_version__}",
+        "Content-Type": "application/json"
+    }
     openai_async_client = (
-        AsyncOpenAI() if base_url is None else AsyncOpenAI(base_url=base_url)
+        AsyncOpenAI(default_headers=default_headers) if base_url is None
+        else AsyncOpenAI(base_url=base_url, default_headers=default_headers)
     )
     kwargs.pop("hashing_kv", None)
     kwargs.pop("keyword_extraction", None)
@@ -287,8 +293,13 @@ async def openai_embed(
     if api_key:
         os.environ["OPENAI_API_KEY"] = api_key
 
+    default_headers = {
+        "User-Agent": f"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_8) LightRAG/{__api_version__}",
+        "Content-Type": "application/json"
+    }
     openai_async_client = (
-        AsyncOpenAI() if base_url is None else AsyncOpenAI(base_url=base_url)
+        AsyncOpenAI(default_headers=default_headers) if base_url is None
+        else AsyncOpenAI(base_url=base_url, default_headers=default_headers)
     )
     response = await openai_async_client.embeddings.create(
         model=model, input=texts, encoding_format="float"