From 991fa339864ba2e17254c85870e97cf80a627ac4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Enrique=20Catal=C3=A1?=
Date: Thu, 13 Feb 2025 22:45:03 +0100
Subject: [PATCH] Enable LiteLLM proxy with embedding_binding_host

---
 lightrag/api/lightrag_server.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/lightrag/api/lightrag_server.py b/lightrag/api/lightrag_server.py
index fe52f592..4cc6f775 100644
--- a/lightrag/api/lightrag_server.py
+++ b/lightrag/api/lightrag_server.py
@@ -1021,7 +1021,8 @@ def create_app(args):
             if args.embedding_binding == "azure_openai"
             else openai_embed(
                 texts,
-                model=args.embedding_model,  # no host is used for openai,
+                model=args.embedding_model,
+                base_url=args.embedding_binding_host,  # relevant when using LiteLLM as a proxy for Azure OpenAI
                 api_key=args.embedding_binding_api_key,
             ),
         )
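
Usage sketch (not part of the patch): with the change above, pointing the embedding binding at a LiteLLM proxy reduces to passing the proxy's OpenAI-compatible URL as base_url, which is what --embedding-binding-host now feeds through. The snippet below mirrors the keyword arguments shown in the hunk; the import path, proxy address, model alias, and API key are illustrative assumptions, not values taken from this patch.

    import asyncio

    from lightrag.llm import openai_embed  # import path assumed; adjust to your lightrag version

    async def main():
        # base_url corresponds to --embedding-binding-host after this change;
        # here it points at an assumed local LiteLLM proxy.
        vectors = await openai_embed(
            ["hello world"],
            model="text-embedding-3-small",           # embedding model/alias configured in LiteLLM (assumed)
            base_url="http://localhost:4000",         # assumed LiteLLM proxy endpoint
            api_key="sk-anything-the-proxy-accepts",  # proxy-side key, if the proxy enforces one
        )
        print(len(vectors), len(vectors[0]))  # number of embeddings, embedding dimension

    asyncio.run(main())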