diff --git a/README.md b/README.md
index 480b8d00..cf1d86aa 100644
--- a/README.md
+++ b/README.md
@@ -361,7 +361,7 @@ class QueryParam:
     - "hybrid": Combines local and global retrieval methods.
     - "naive": Performs a basic search without advanced techniques.
     - "mix": Integrates knowledge graph and vector retrieval.
-    """
+    """
     only_need_context: bool = False
     """If True, only returns the retrieved context without generating a response."""
     response_type: str = "Multiple Paragraphs"
diff --git a/lightrag/base.py b/lightrag/base.py
index ae5ce92e..0e3f1dc6 100644
--- a/lightrag/base.py
+++ b/lightrag/base.py
@@ -62,13 +62,13 @@ class QueryParam:
     max_token_for_local_context: int = 4000
     """Maximum number of tokens allocated for entity descriptions in local retrieval."""

-    hl_keywords: List[str] = field(default_factory=list)
+    hl_keywords: list[str] = field(default_factory=list)
     """List of high-level keywords to prioritize in retrieval."""

-    ll_keywords: List[str] = field(default_factory=list)
+    ll_keywords: list[str] = field(default_factory=list)
     """List of low-level keywords to refine retrieval focus."""

-    conversation_history: List[dict[str, Any]] = field(default_factory=list)
+    conversation_history: list[dict[str, Any]] = field(default_factory=list)
     """Stores past conversation history to maintain context.
     Format: [{"role": "user/assistant", "content": "message"}].
     """
@@ -76,6 +76,7 @@ class QueryParam:
     history_turns: int = 3
     """Number of complete conversation turns (user-assistant pairs) to consider in the response context."""

+
 @dataclass
 class StorageNameSpace:
     namespace: str
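
For orientation, here is a minimal usage sketch of `QueryParam` built only from the fields visible in this diff. It is hypothetical and not part of the change: the import path, the presence of `mode` on the `lightrag/base.py` dataclass, and all example values are assumptions.

```python
# Hypothetical usage sketch (import path and example values are assumptions;
# only the field names shown in the diff above come from the source).
from lightrag.base import QueryParam

param = QueryParam(
    mode="mix",  # assumed field; the README documents it as KG + vector retrieval
    only_need_context=False,              # generate a response, not just the context
    response_type="Multiple Paragraphs",
    hl_keywords=["retrieval-augmented generation"],       # high-level focus terms
    ll_keywords=["entity descriptions", "token budget"],  # low-level refinement terms
    conversation_history=[
        {"role": "user", "content": "What does QueryParam control?"},
        {"role": "assistant", "content": "Retrieval mode, token budgets, and history handling."},
    ],
    history_turns=3,  # keep the last 3 user/assistant pairs in the response context
)
```

Since these fields now use the built-in `list[...]` generics, `typing.List` is no longer needed for them; the change only touches annotations, so runtime behavior is unchanged on Python 3.9+ (or 3.7+ with `from __future__ import annotations`).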