Update README for QueryParam description

yangdx
2025-05-05 18:30:49 +08:00
parent 027c67a73c
commit 9aedf1b38a
2 changed files with 95 additions and 40 deletions


@@ -201,37 +201,65 @@ if __name__ == "__main__":
```python
import os
from dataclasses import dataclass, field
from typing import Callable, Literal


@dataclass
class QueryParam:
    """Configuration parameters for query execution in LightRAG."""

    mode: Literal["local", "global", "hybrid", "naive", "mix", "bypass"] = "global"
    """Specifies the retrieval mode:
    - "local": Focuses on context-dependent information.
    - "global": Utilizes global knowledge.
    - "hybrid": Combines local and global retrieval methods.
    - "naive": Performs a basic search without advanced techniques.
    - "mix": Integrates knowledge graph and vector retrieval.
    """

    only_need_context: bool = False
    """If True, only returns the retrieved context without generating a response."""

    only_need_prompt: bool = False
    """If True, only returns the generated prompt without producing a response."""

    response_type: str = "Multiple Paragraphs"
    """Defines the response format. Examples: 'Multiple Paragraphs', 'Single Paragraph', 'Bullet Points'."""

    stream: bool = False
    """If True, enables streaming output for real-time responses."""

    top_k: int = int(os.getenv("TOP_K", "60"))
    """Number of top items to retrieve. Represents entities in 'local' mode and relationships in 'global' mode."""

    max_token_for_text_unit: int = int(os.getenv("MAX_TOKEN_TEXT_CHUNK", "4000"))
    """Maximum number of tokens allowed for each retrieved text chunk."""

    max_token_for_global_context: int = int(
        os.getenv("MAX_TOKEN_RELATION_DESC", "4000")
    )
    """Maximum number of tokens allocated for relationship descriptions in global retrieval."""

    max_token_for_local_context: int = int(os.getenv("MAX_TOKEN_ENTITY_DESC", "4000"))
    """Maximum number of tokens allocated for entity descriptions in local retrieval."""

    hl_keywords: list[str] = field(default_factory=list)
    """List of high-level keywords to prioritize in retrieval."""

    ll_keywords: list[str] = field(default_factory=list)
    """List of low-level keywords to refine retrieval focus."""

    conversation_history: list[dict[str, str]] = field(default_factory=list)
    """Stores past conversation history to maintain context.
    Format: [{"role": "user/assistant", "content": "message"}].
    """

    history_turns: int = 3
    """Number of complete conversation turns (user-assistant pairs) to consider in the response context."""

    ids: list[str] | None = None
    """List of ids to filter the results."""

    model_func: Callable[..., object] | None = None
    """Optional override for the LLM model function to use for this specific query.
    If provided, this will be used instead of the global model function.
    This allows using different models for different query modes.
    """
    ...
```
> The default value of `top_k` can be changed via the `TOP_K` environment variable.
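
A minimal sketch of overriding the `top_k` default, assuming the import path shown in the project README (`from lightrag import QueryParam`). Because the default is evaluated at class-definition time via `os.getenv("TOP_K", "60")`, the environment variable must be set before the import:

```python
import os

# Assumption: TOP_K takes effect only if set before QueryParam is imported,
# since the default is computed when the class body is executed.
os.environ["TOP_K"] = "100"

from lightrag import QueryParam  # import path as shown in the project README

# An explicit argument always wins over the environment-derived default.
param = QueryParam(mode="mix", response_type="Bullet Points", top_k=40)

# Sketch of use with an already-initialized LightRAG instance named `rag`:
# answer = rag.query("What are the top themes in this story?", param=param)
print(param.top_k)  # -> 40
```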


@@ -237,38 +237,65 @@ Use QueryParam to control the behavior of your query:
```python
import os
from dataclasses import dataclass, field
from typing import Callable, Literal


@dataclass
class QueryParam:
    """Configuration parameters for query execution in LightRAG."""

    mode: Literal["local", "global", "hybrid", "naive", "mix", "bypass"] = "global"
    """Specifies the retrieval mode:
    - "local": Focuses on context-dependent information.
    - "global": Utilizes global knowledge.
    - "hybrid": Combines local and global retrieval methods.
    - "naive": Performs a basic search without advanced techniques.
    - "mix": Integrates knowledge graph and vector retrieval.
    """

    only_need_context: bool = False
    """If True, only returns the retrieved context without generating a response."""

    only_need_prompt: bool = False
    """If True, only returns the generated prompt without producing a response."""

    response_type: str = "Multiple Paragraphs"
    """Defines the response format. Examples: 'Multiple Paragraphs', 'Single Paragraph', 'Bullet Points'."""

    stream: bool = False
    """If True, enables streaming output for real-time responses."""

    top_k: int = int(os.getenv("TOP_K", "60"))
    """Number of top items to retrieve. Represents entities in 'local' mode and relationships in 'global' mode."""

    max_token_for_text_unit: int = int(os.getenv("MAX_TOKEN_TEXT_CHUNK", "4000"))
    """Maximum number of tokens allowed for each retrieved text chunk."""

    max_token_for_global_context: int = int(
        os.getenv("MAX_TOKEN_RELATION_DESC", "4000")
    )
    """Maximum number of tokens allocated for relationship descriptions in global retrieval."""

    max_token_for_local_context: int = int(os.getenv("MAX_TOKEN_ENTITY_DESC", "4000"))
    """Maximum number of tokens allocated for entity descriptions in local retrieval."""

    hl_keywords: list[str] = field(default_factory=list)
    """List of high-level keywords to prioritize in retrieval."""

    ll_keywords: list[str] = field(default_factory=list)
    """List of low-level keywords to refine retrieval focus."""

    conversation_history: list[dict[str, str]] = field(default_factory=list)
    """Stores past conversation history to maintain context.
    Format: [{"role": "user/assistant", "content": "message"}].
    """

    history_turns: int = 3
    """Number of complete conversation turns (user-assistant pairs) to consider in the response context."""

    ids: list[str] | None = None
    """List of ids to filter the results."""

    model_func: Callable[..., object] | None = None
    """Optional override for the LLM model function to use for this specific query.
    If provided, this will be used instead of the global model function.
    This allows using different models for different query modes.
    """
    ...
```
> The default value of `top_k` can be changed via the `TOP_K` environment variable.
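
For multi-turn use, a hedged sketch of carrying prior turns into a follow-up query, assuming the same `from lightrag import QueryParam` import and an already-initialized LightRAG instance named `rag`:

```python
from lightrag import QueryParam

# History entries follow the format documented on conversation_history:
# [{"role": "user/assistant", "content": "message"}]
history = [
    {"role": "user", "content": "Who is the story's protagonist?"},
    {"role": "assistant", "content": "The story follows Scrooge."},
]

param = QueryParam(
    mode="hybrid",
    conversation_history=history,
    history_turns=1,  # consider only the most recent user-assistant pair
)

# Sketch: `rag` stands for an initialized LightRAG instance.
# follow_up = rag.query("How does he change by the end?", param=param)
```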