From d644ee52f53416adc90f560d98d52016345441a6 Mon Sep 17 00:00:00 2001 From: 90houlaoheshang <907333918@qq.com> Date: Wed, 6 Nov 2024 10:48:59 +0800 Subject: [PATCH] =?UTF-8?q?feat(lightrag):=20=E6=B7=BB=E5=8A=A0=E7=8E=AF?= =?UTF-8?q?=E5=A2=83=E5=8F=98=E9=87=8F=E6=8E=A7=E5=88=B6=E5=B5=8C=E5=85=A5?= =?UTF-8?q?=E5=B1=82=E6=9C=80=E5=A4=A7=20token=20=E6=95=B0=E9=87=8F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 在 lightrag_api_openai_compatible_demo.py 中,使用环境变量 EMBEDDING_MAX_TOKEN_SIZE 来设置嵌入层的最大 token 数量,默认值为 8192 --- examples/lightrag_api_openai_compatible_demo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/lightrag_api_openai_compatible_demo.py b/examples/lightrag_api_openai_compatible_demo.py index 94475199..bc56ac59 100644 --- a/examples/lightrag_api_openai_compatible_demo.py +++ b/examples/lightrag_api_openai_compatible_demo.py @@ -60,7 +60,7 @@ rag = LightRAG( working_dir=WORKING_DIR, llm_model_func=llm_model_func, embedding_func=EmbeddingFunc(embedding_dim=asyncio.run(get_embedding_dim()), - max_token_size=8192, + max_token_size=int(os.environ.get("EMBEDDING_MAX_TOKEN_SIZE", 8192)), func=embedding_func), )