Merge branch 'HKUDS:main' into main

Saifeddine ALOUI authored on 2025-01-24 13:38:20 +01:00, committed by GitHub
3 changed files with 5 additions and 8 deletions


@@ -551,10 +551,10 @@ def get_api_key_dependency(api_key: Optional[str]):
 def create_app(args):
     # Verify that bindings arer correctly setup
-    if args.llm_binding not in ["lollms", "ollama", "openai"]:
+    if args.llm_binding not in ["lollms", "ollama", "openai", "azure_openai"]:
         raise Exception("llm binding not supported")
-    if args.embedding_binding not in ["lollms", "ollama", "openai"]:
+    if args.embedding_binding not in ["lollms", "ollama", "openai", "azure_openai"]:
         raise Exception("embedding binding not supported")
     # Add SSL validation
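Note on the hunk above: it only widens the allow-list so Azure OpenAI can be selected for both the LLM and the embedding backend. A minimal sketch of the same validation pattern, assuming an argparse-style args object with llm_binding and embedding_binding attributes (the helper name and the set constant are illustrative, not part of the repo):

    from argparse import Namespace

    SUPPORTED_BINDINGS = {"lollms", "ollama", "openai", "azure_openai"}

    def verify_bindings(args: Namespace) -> None:
        # Fail fast if either binding is not a supported backend.
        if args.llm_binding not in SUPPORTED_BINDINGS:
            raise Exception("llm binding not supported")
        if args.embedding_binding not in SUPPORTED_BINDINGS:
            raise Exception("embedding binding not supported")

    # After this change, "azure_openai" passes the check:
    verify_bindings(Namespace(llm_binding="azure_openai", embedding_binding="openai"))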


@@ -469,9 +469,8 @@ class LightRAG:
error_msg = f"Failed to process document {doc_id}: {str(e)}\n{traceback.format_exc()}" error_msg = f"Failed to process document {doc_id}: {str(e)}\n{traceback.format_exc()}"
logger.error(error_msg) logger.error(error_msg)
continue continue
else:
finally: # Only update index when processing succeeds
# Ensure all indexes are updated after each document
await self._insert_done() await self._insert_done()
def insert_custom_chunks(self, full_text: str, text_chunks: list[str]): def insert_custom_chunks(self, full_text: str, text_chunks: list[str]):
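Note on the hunk above: with finally, _insert_done() ran even when the except branch hit continue, so the index was updated for documents that failed; with else, it runs only when the try block finished without raising. A small self-contained sketch of that try/except/else behavior (all names here are hypothetical, only the control-flow shape mirrors the change):

    import asyncio

    async def insert_done() -> None:
        print("index updated")

    async def process(docs: dict[str, bool]) -> None:
        for doc_id, should_fail in docs.items():
            try:
                if should_fail:
                    raise ValueError(f"cannot process {doc_id}")
            except ValueError as e:
                print(f"skipped: {e}")
                continue
            else:
                # Runs only if the try block raised nothing.
                await insert_done()

    asyncio.run(process({"doc-1": False, "doc-2": True}))
    # doc-1 updates the index; doc-2 is skipped without an update.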


@@ -479,9 +479,7 @@ async def handle_cache(hashing_kv, args_hash, prompt, mode="default"):
     quantized = min_val = max_val = None
     if is_embedding_cache_enabled:
         # Use embedding cache
-        embedding_model_func = hashing_kv.global_config[
-            "embedding_func"
-        ].func  # ["func"]
+        embedding_model_func = hashing_kv.global_config["embedding_func"]["func"]
         llm_model_func = hashing_kv.global_config.get("llm_model_func")
         current_embedding = await embedding_model_func([prompt])
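Note on the hunk above: the lookup is collapsed to a single subscript, which implies the embedding_func entry in global_config is exposed as a mapping holding the callable under a "func" key rather than as an object with a .func attribute. A tiny sketch under that assumption (fake_embed and the dict layout are illustrative only, not the repo's actual config):

    import asyncio

    async def fake_embed(texts: list[str]) -> list[list[float]]:
        # Stand-in async embedding function: one dummy vector per input text.
        return [[0.0, 1.0] for _ in texts]

    global_config = {"embedding_func": {"func": fake_embed}}

    embedding_model_func = global_config["embedding_func"]["func"]
    print(asyncio.run(embedding_model_func(["hello"])))  # [[0.0, 1.0]]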