diff --git a/lightrag/llm.py b/lightrag/llm.py index d725ea85..e0277248 100644 --- a/lightrag/llm.py +++ b/lightrag/llm.py @@ -29,7 +29,7 @@ from transformers import AutoTokenizer, AutoModelForCausalLM from .utils import ( wrap_embedding_func_with_attrs, locate_json_string_body_from_string, - safe_unicode_decode, + safe_unicode_decode, logger, ) import sys @@ -69,6 +69,11 @@ async def openai_complete_if_cache( messages.extend(history_messages) messages.append({"role": "user", "content": prompt}) + # Log the query input sent to the LLM for debugging + logger.debug("===== Query Input to LLM =====") + logger.debug(f"Query: {prompt}") + logger.debug(f"System prompt: {system_prompt}") + logger.debug("Full context:") if "response_format" in kwargs: response = await openai_async_client.beta.chat.completions.parse( model=model, messages=messages, **kwargs diff --git a/lightrag/prompt.py b/lightrag/prompt.py index b62f02b5..d5674f15 100644 @@ -8,7 +8,7 @@ PROMPTS["DEFAULT_RECORD_DELIMITER"] = "##" PROMPTS["DEFAULT_COMPLETION_DELIMITER"] = "<|COMPLETE|>" PROMPTS["process_tickers"] = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"] -PROMPTS["DEFAULT_ENTITY_TYPES"] = ["organization", "person", "geo", "event"] +PROMPTS["DEFAULT_ENTITY_TYPES"] = ["organization", "person", "geo", "event", "category"] PROMPTS["entity_extraction"] = """-Goal- Given a text document that is potentially relevant to this activity and a list of entity types, identify all entities of those types from the text and all relationships among the identified entities.