Merge branch 'HKUDS:main' into main

Yannick Stephan authored 2025-02-26 12:10:43 +01:00 (committed by GitHub)
51 changed files with 3817 additions and 3073 deletions

lightrag/operate.py

@@ -5,6 +5,7 @@ import json
 import re
 from typing import Any, AsyncIterator
 from collections import Counter, defaultdict
 from .utils import (
     logger,
     clean_str,
@@ -23,6 +24,7 @@ from .utils import (
     CacheData,
     statistic_data,
     get_conversation_turns,
+    verbose_debug,
 )
 from .base import (
     BaseGraphStorage,
@@ -33,6 +35,10 @@ from .base import (
 )
 from .prompt import GRAPH_FIELD_SEP, PROMPTS
 import time
+from dotenv import load_dotenv
+
+# Load environment variables
+load_dotenv(override=True)


 def chunking_by_token_size(
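A note on the hunk above: load_dotenv(override=True) makes values from a local .env file win over variables already present in the process environment, which can surprise deployments configured through the shell. A minimal sketch of that behavior, not part of the diff; the LLM_MODEL variable name is hypothetical:

    # Assumes a .env file in the working directory containing: LLM_MODEL=from-dotenv
    import os
    from dotenv import load_dotenv

    os.environ["LLM_MODEL"] = "from-shell"
    load_dotenv(override=True)     # with override=True, the .env value replaces the shell value
    print(os.getenv("LLM_MODEL"))  # -> from-dotenv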
@@ -295,7 +301,7 @@ async def _merge_edges_then_upsert(
                 node_data={
                     "source_id": source_id,
                     "description": description,
-                    "entity_type": '"UNKNOWN"',
+                    "entity_type": "UNKNOWN",
                 },
             )
     description = await _handle_entity_relation_summary(
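A note on the one-line fix above: the old value '"UNKNOWN"' is a string whose first and last characters are literal double quotes, so the stored entity_type would carry those quotes into anything that compares or displays the type. A quick illustration, not part of the diff:

    old = '"UNKNOWN"'   # the quotes are part of the value
    new = "UNKNOWN"
    print(len(old), len(new))   # 9 7
    print(old == new)           # False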
@@ -375,9 +381,8 @@ async def extract_entities(
     continue_prompt = PROMPTS["entiti_continue_extraction"]
     if_loop_prompt = PROMPTS["entiti_if_loop_extraction"]

-    already_processed = 0
-    already_entities = 0
-    already_relations = 0
+    processed_chunks = 0
+    total_chunks = len(ordered_chunks)

     async def _user_llm_func_with_cache(
         input_text: str, history_messages: list[dict[str, str]] = None
@@ -431,7 +436,7 @@ async def extract_entities(
             chunk_key_dp (tuple[str, TextChunkSchema]):
                 ("chunck-xxxxxx", {"tokens": int, "content": str, "full_doc_id": str, "chunk_order_index": int})
         """
-        nonlocal already_processed, already_entities, already_relations
+        nonlocal processed_chunks
         chunk_key = chunk_key_dp[0]
         chunk_dp = chunk_key_dp[1]
         content = chunk_dp["content"]
@@ -488,12 +493,11 @@ async def extract_entities(
                 maybe_edges[(if_relation["src_id"], if_relation["tgt_id"])].append(
                     if_relation
                 )
-        already_processed += 1
-        already_entities += len(maybe_nodes)
-        already_relations += len(maybe_edges)
-        logger.debug(
-            f"Processed {already_processed} chunks, {already_entities} entities(duplicated), {already_relations} relations(duplicated)\r",
+        processed_chunks += 1
+        entities_count = len(maybe_nodes)
+        relations_count = len(maybe_edges)
+        logger.info(
+            f"  Chunk {processed_chunks}/{total_chunks}: extracted {entities_count} entities and {relations_count} relationships (deduplicated)"
         )
         return dict(maybe_nodes), dict(maybe_edges)
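A note on the progress-logging refactor above: three cumulative duplicate counters are replaced by one per-chunk counter measured against a known total, and the message moves from DEBUG to INFO. A standalone sketch of the pattern, not part of the diff; the names are stand-ins rather than the repository's API:

    import logging

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger("lightrag")

    chunks = ["chunk a", "chunk b", "chunk c"]
    total_chunks = len(chunks)
    processed_chunks = 0

    for chunk in chunks:
        maybe_nodes = {"entity_1": []}       # stand-in extraction results
        maybe_edges = {("src", "tgt"): []}
        processed_chunks += 1
        logger.info(
            f"  Chunk {processed_chunks}/{total_chunks}: extracted "
            f"{len(maybe_nodes)} entities and {len(maybe_edges)} relationships (deduplicated)"
        )

In the real code the increment happens inside concurrently scheduled coroutines via nonlocal; because asyncio runs them on a single thread and there is no await between the read and the write, the counter needs no lock.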
@@ -532,8 +536,12 @@ async def extract_entities(
logger.info("Didn't extract any relationships")
logger.info(
f"New entities or relationships extracted, entities:{all_entities_data}, relationships:{all_relationships_data}"
f"Extracted {len(all_entities_data)} entities and {len(all_relationships_data)} relationships (deduplicated)"
)
verbose_debug(
f"New entities:{all_entities_data}, relationships:{all_relationships_data}"
)
verbose_debug(f"New relationships:{all_relationships_data}")
if entity_vdb is not None:
data_for_vdb = {
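A note on verbose_debug, which this merge introduces at several call sites: the full entity and relationship payloads move out of the default log stream and behind an opt-in helper. A plausible shape for such a helper, assuming it gates on a VERBOSE environment variable; the actual implementation lives in lightrag/utils.py and may differ:

    import logging
    import os

    logger = logging.getLogger("lightrag")

    def verbose_debug(msg: str, *args, **kwargs) -> None:
        # Emit large payloads only when explicitly requested, so the
        # default DEBUG/INFO output stays readable.
        if os.getenv("VERBOSE", "false").lower() == "true":
            logger.debug(msg, *args, **kwargs)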