chunk split retry

Author: 童石渊
Date: 2025-01-07 16:26:12 +08:00
Parent: 059e3882f1
Commit: 6b19401dc6
3 changed files with 886 additions and 135 deletions


@@ -268,7 +268,7 @@ class LightRAG:
self.llm_model_func,
hashing_kv=self.llm_response_cache
if self.llm_response_cache
and hasattr(self.llm_response_cache, "global_config")
else self.key_string_value_json_storage_cls(
namespace="llm_response_cache",
global_config=asdict(self),
@@ -316,7 +316,9 @@ class LightRAG:
def insert(self, string_or_strings, split_by_character=None):
loop = always_get_an_event_loop()
return loop.run_until_complete(
self.ainsert(string_or_strings, split_by_character)
)
async def ainsert(self, string_or_strings, split_by_character):
"""Insert documents with checkpoint support
@@ -357,10 +359,10 @@ class LightRAG:
# Process documents in batches
batch_size = self.addon_params.get("insert_batch_size", 10)
for i in range(0, len(new_docs), batch_size):
batch_docs = dict(list(new_docs.items())[i : i + batch_size])
for doc_id, doc in tqdm_async(
batch_docs.items(), desc=f"Processing batch {i // batch_size + 1}"
):
try:
# Update status to processing
@@ -548,7 +550,7 @@ class LightRAG:
# Check if nodes exist in the knowledge graph
for need_insert_id in [src_id, tgt_id]:
if not (
await self.chunk_entity_relation_graph.has_node(need_insert_id)
):
await self.chunk_entity_relation_graph.upsert_node(
need_insert_id,
@@ -597,9 +599,9 @@ class LightRAG:
"src_id": dp["src_id"],
"tgt_id": dp["tgt_id"],
"content": dp["keywords"]
+ dp["src_id"]
+ dp["tgt_id"]
+ dp["description"],
}
for dp in all_relationships_data
}
@@ -624,7 +626,7 @@ class LightRAG:
asdict(self),
hashing_kv=self.llm_response_cache
if self.llm_response_cache
and hasattr(self.llm_response_cache, "global_config")
else self.key_string_value_json_storage_cls(
namespace="llm_response_cache",
global_config=asdict(self),
@@ -640,7 +642,7 @@ class LightRAG:
asdict(self),
hashing_kv=self.llm_response_cache
if self.llm_response_cache
and hasattr(self.llm_response_cache, "global_config")
else self.key_string_value_json_storage_cls(
namespace="llm_response_cache",
global_config=asdict(self),
@@ -659,7 +661,7 @@ class LightRAG:
asdict(self),
hashing_kv=self.llm_response_cache
if self.llm_response_cache
and hasattr(self.llm_response_cache, "global_config")
else self.key_string_value_json_storage_cls(
namespace="llm_response_cache",
global_config=asdict(self),
@@ -900,7 +902,7 @@ class LightRAG:
dp
for dp in self.entities_vdb.client_storage["data"]
if chunk_id
in (dp.get("source_id") or "").split(GRAPH_FIELD_SEP)
]
if entities_with_chunk:
logger.error(
@@ -912,7 +914,7 @@ class LightRAG:
dp
for dp in self.relationships_vdb.client_storage["data"]
if chunk_id
in (dp.get("source_id") or "").split(GRAPH_FIELD_SEP)
]
if relations_with_chunk:
logger.error(
@@ -929,7 +931,7 @@ class LightRAG:
return asyncio.run(self.adelete_by_doc_id(doc_id))
async def get_entity_info(
self, entity_name: str, include_vector_data: bool = False
):
"""Get detailed information of an entity
@@ -980,7 +982,7 @@ class LightRAG:
tracemalloc.stop()
async def get_relation_info(
self, src_entity: str, tgt_entity: str, include_vector_data: bool = False
):
"""Get detailed information of a relationship
@@ -1022,7 +1024,7 @@ class LightRAG:
return result
def get_relation_info_sync(
self, src_entity: str, tgt_entity: str, include_vector_data: bool = False
):
"""Synchronous version of getting relationship information


@@ -34,7 +34,11 @@ import time
def chunking_by_token_size(
content: str,
split_by_character=None,
overlap_token_size=128,
max_token_size=1024,
tiktoken_model="gpt-4o",
):
tokens = encode_string_by_tiktoken(content, model_name=tiktoken_model)
results = []
@@ -44,11 +48,16 @@ def chunking_by_token_size(
for chunk in raw_chunks:
_tokens = encode_string_by_tiktoken(chunk, model_name=tiktoken_model)
if len(_tokens) > max_token_size:
for start in range(
0, len(_tokens), max_token_size - overlap_token_size
):
chunk_content = decode_tokens_by_tiktoken(
_tokens[start : start + max_token_size],
model_name=tiktoken_model,
)
new_chunks.append(
(min(max_token_size, len(_tokens) - start), chunk_content)
)
else:
new_chunks.append((len(_tokens), chunk))
for index, (_len, chunk) in enumerate(new_chunks):
@@ -61,10 +70,10 @@ def chunking_by_token_size(
)
else:
for index, start in enumerate(
range(0, len(tokens), max_token_size - overlap_token_size)
):
chunk_content = decode_tokens_by_tiktoken(
tokens[start : start + max_token_size], model_name=tiktoken_model
)
results.append(
{
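
The reformatting above wraps the core of this commit: when `split_by_character` is set, the text is split on that string first, and only pieces longer than `max_token_size` are re-chunked into overlapping token windows. Below is a self-contained sketch of that control flow, using whitespace "tokens" instead of tiktoken so it runs without dependencies (`chunk_sketch`, `encode`, and `decode` are illustrative stand-ins, not LightRAG APIs):

```python
def chunk_sketch(content, split_by_character=None, overlap_token_size=2, max_token_size=8):
    def encode(s):  # stand-in for encode_string_by_tiktoken
        return s.split()

    def decode(tokens):  # stand-in for decode_tokens_by_tiktoken
        return " ".join(tokens)

    new_chunks = []
    if split_by_character:
        for piece in content.split(split_by_character):
            tokens = encode(piece)
            if len(tokens) > max_token_size:
                # Oversized piece: overlapping token windows, as in the hunk above.
                for start in range(0, len(tokens), max_token_size - overlap_token_size):
                    window = tokens[start : start + max_token_size]
                    new_chunks.append((len(window), decode(window)))
            else:
                new_chunks.append((len(tokens), piece))
    else:
        # No separator given: pure token-window chunking over the whole text.
        tokens = encode(content)
        for start in range(0, len(tokens), max_token_size - overlap_token_size):
            window = tokens[start : start + max_token_size]
            new_chunks.append((len(window), decode(window)))
    return [
        {"tokens": n, "content": text.strip(), "chunk_order_index": i}
        for i, (n, text) in enumerate(new_chunks)
    ]


print(chunk_sketch("a b c\n# d e f g h i j k l m n", split_by_character="\n#"))
```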
@@ -77,9 +86,9 @@ def chunking_by_token_size(
async def _handle_entity_relation_summary(
entity_or_relation_name: str,
description: str,
global_config: dict,
) -> str:
use_llm_func: callable = global_config["llm_model_func"]
llm_max_tokens = global_config["llm_model_max_token_size"]
@@ -108,8 +117,8 @@ async def _handle_entity_relation_summary(
async def _handle_single_entity_extraction(
record_attributes: list[str],
chunk_key: str,
):
if len(record_attributes) < 4 or record_attributes[0] != '"entity"':
return None
@@ -129,8 +138,8 @@ async def _handle_single_entity_extraction(
async def _handle_single_relationship_extraction(
record_attributes: list[str],
chunk_key: str,
):
if len(record_attributes) < 5 or record_attributes[0] != '"relationship"':
return None
@@ -156,10 +165,10 @@ async def _handle_single_relationship_extraction(
async def _merge_nodes_then_upsert(
entity_name: str,
nodes_data: list[dict],
knowledge_graph_inst: BaseGraphStorage,
global_config: dict,
):
already_entity_types = []
already_source_ids = []
@@ -203,11 +212,11 @@ async def _merge_nodes_then_upsert(
async def _merge_edges_then_upsert(
src_id: str,
tgt_id: str,
edges_data: list[dict],
knowledge_graph_inst: BaseGraphStorage,
global_config: dict,
):
already_weights = []
already_source_ids = []
@@ -270,12 +279,12 @@ async def _merge_edges_then_upsert(
async def extract_entities(
chunks: dict[str, TextChunkSchema],
knowledge_graph_inst: BaseGraphStorage,
entity_vdb: BaseVectorStorage,
relationships_vdb: BaseVectorStorage,
global_config: dict,
llm_response_cache: BaseKVStorage = None,
) -> Union[BaseGraphStorage, None]:
use_llm_func: callable = global_config["llm_model_func"]
entity_extract_max_gleaning = global_config["entity_extract_max_gleaning"]
@@ -327,13 +336,13 @@ async def extract_entities(
already_relations = 0
async def _user_llm_func_with_cache(
input_text: str, history_messages: list[dict[str, str]] = None
) -> str:
if enable_llm_cache_for_entity_extract and llm_response_cache:
need_to_restore = False
if (
global_config["embedding_cache_config"]
and global_config["embedding_cache_config"]["enabled"]
):
new_config = global_config.copy()
new_config["embedding_cache_config"] = None
@@ -435,7 +444,7 @@ async def extract_entities(
already_relations += len(maybe_edges)
now_ticks = PROMPTS["process_tickers"][
already_processed % len(PROMPTS["process_tickers"])
]
print(
f"{now_ticks} Processed {already_processed} chunks, {already_entities} entities(duplicated), {already_relations} relations(duplicated)\r",
end="",
@@ -445,10 +454,10 @@ async def extract_entities(
results = []
for result in tqdm_async(
asyncio.as_completed([_process_single_content(c) for c in ordered_chunks]),
total=len(ordered_chunks),
desc="Extracting entities from chunks",
unit="chunk",
):
results.append(await result)
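
The loop above is the concurrency pattern `extract_entities` uses throughout: schedule every per-chunk coroutine at once, then drain them with `asyncio.as_completed` under a progress bar (`tqdm_async` is `tqdm.asyncio.tqdm` in lightrag). A runnable sketch of just that pattern, assuming only that `tqdm` is installed:

```python
import asyncio

from tqdm.asyncio import tqdm as tqdm_async


async def process(i: int) -> int:
    # Stand-in for one per-chunk LLM extraction call.
    await asyncio.sleep(0.01 * (i % 3))
    return i * i


async def main() -> list[int]:
    coros = [process(i) for i in range(10)]
    results = []
    # Futures are yielded in completion order, not submission order;
    # the caller merges results afterwards, so ordering doesn't matter here.
    for fut in tqdm_async(
        asyncio.as_completed(coros), total=len(coros), desc="chunks", unit="chunk"
    ):
        results.append(await fut)
    return results


print(asyncio.run(main()))
```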
@@ -462,32 +471,32 @@ async def extract_entities(
logger.info("Inserting entities into storage...")
all_entities_data = []
for result in tqdm_async(
asyncio.as_completed(
[
_merge_nodes_then_upsert(k, v, knowledge_graph_inst, global_config)
for k, v in maybe_nodes.items()
]
),
total=len(maybe_nodes),
desc="Inserting entities",
unit="entity",
):
all_entities_data.append(await result)
logger.info("Inserting relationships into storage...")
all_relationships_data = []
for result in tqdm_async(
asyncio.as_completed(
[
_merge_edges_then_upsert(
k[0], k[1], v, knowledge_graph_inst, global_config
)
for k, v in maybe_edges.items()
]
),
total=len(maybe_edges),
desc="Inserting relationships",
unit="relationship",
):
all_relationships_data.append(await result)
@@ -518,9 +527,9 @@ async def extract_entities(
"src_id": dp["src_id"],
"tgt_id": dp["tgt_id"],
"content": dp["keywords"]
+ dp["src_id"]
+ dp["tgt_id"]
+ dp["description"],
"metadata": {
"created_at": dp.get("metadata", {}).get("created_at", time.time())
},
@@ -533,14 +542,14 @@ async def extract_entities(
async def kg_query(
query,
knowledge_graph_inst: BaseGraphStorage,
entities_vdb: BaseVectorStorage,
relationships_vdb: BaseVectorStorage,
text_chunks_db: BaseKVStorage[TextChunkSchema],
query_param: QueryParam,
global_config: dict,
hashing_kv: BaseKVStorage = None,
) -> str:
# Handle cache
use_model_func = global_config["llm_model_func"]
@@ -660,12 +669,12 @@ async def kg_query(
async def _build_query_context(
query: list,
knowledge_graph_inst: BaseGraphStorage,
entities_vdb: BaseVectorStorage,
relationships_vdb: BaseVectorStorage,
text_chunks_db: BaseKVStorage[TextChunkSchema],
query_param: QueryParam,
):
# ll_entities_context, ll_relations_context, ll_text_units_context = "", "", ""
# hl_entities_context, hl_relations_context, hl_text_units_context = "", "", ""
@@ -718,9 +727,9 @@ async def _build_query_context(
query_param,
)
if (
hl_entities_context == ""
and hl_relations_context == ""
and hl_text_units_context == ""
):
logger.warn("No high level context found. Switching to local mode.")
query_param.mode = "local"
@@ -759,11 +768,11 @@ async def _build_query_context(
async def _get_node_data(
query,
knowledge_graph_inst: BaseGraphStorage,
entities_vdb: BaseVectorStorage,
text_chunks_db: BaseKVStorage[TextChunkSchema],
query_param: QueryParam,
):
# get similar entities
results = await entities_vdb.query(query, top_k=query_param.top_k)
@@ -850,10 +859,10 @@ async def _get_node_data(
async def _find_most_related_text_unit_from_entities(
node_datas: list[dict],
query_param: QueryParam,
text_chunks_db: BaseKVStorage[TextChunkSchema],
knowledge_graph_inst: BaseGraphStorage,
):
text_units = [
split_string_by_multi_markers(dp["source_id"], [GRAPH_FIELD_SEP])
@@ -893,8 +902,8 @@ async def _find_most_related_text_unit_from_entities(
if this_edges:
for e in this_edges:
if (
e[1] in all_one_hop_text_units_lookup
and c_id in all_one_hop_text_units_lookup[e[1]]
):
all_text_units_lookup[c_id]["relation_counts"] += 1
@@ -924,9 +933,9 @@ async def _find_most_related_text_unit_from_entities(
async def _find_most_related_edges_from_entities(
node_datas: list[dict],
query_param: QueryParam,
knowledge_graph_inst: BaseGraphStorage,
):
all_related_edges = await asyncio.gather(
*[knowledge_graph_inst.get_node_edges(dp["entity_name"]) for dp in node_datas]
@@ -964,11 +973,11 @@ async def _find_most_related_edges_from_entities(
async def _get_edge_data(
keywords,
knowledge_graph_inst: BaseGraphStorage,
relationships_vdb: BaseVectorStorage,
text_chunks_db: BaseKVStorage[TextChunkSchema],
query_param: QueryParam,
):
results = await relationships_vdb.query(keywords, top_k=query_param.top_k)
@@ -1066,9 +1075,9 @@ async def _get_edge_data(
async def _find_most_related_entities_from_relationships(
edge_datas: list[dict],
query_param: QueryParam,
knowledge_graph_inst: BaseGraphStorage,
):
entity_names = []
seen = set()
@@ -1103,10 +1112,10 @@ async def _find_most_related_entities_from_relationships(
async def _find_related_text_unit_from_relationships(
edge_datas: list[dict],
query_param: QueryParam,
text_chunks_db: BaseKVStorage[TextChunkSchema],
knowledge_graph_inst: BaseGraphStorage,
):
text_units = [
split_string_by_multi_markers(dp["source_id"], [GRAPH_FIELD_SEP])
@@ -1172,12 +1181,12 @@ def combine_contexts(entities, relationships, sources):
async def naive_query(
query,
chunks_vdb: BaseVectorStorage,
text_chunks_db: BaseKVStorage[TextChunkSchema],
query_param: QueryParam,
global_config: dict,
hashing_kv: BaseKVStorage = None,
):
# Handle cache
use_model_func = global_config["llm_model_func"]
@@ -1235,7 +1244,7 @@ async def naive_query(
if len(response) > len(sys_prompt):
response = (
response[len(sys_prompt) :]
.replace(sys_prompt, "")
.replace("user", "")
.replace("model", "")
@@ -1263,15 +1272,15 @@ async def naive_query(
async def mix_kg_vector_query(
query,
knowledge_graph_inst: BaseGraphStorage,
entities_vdb: BaseVectorStorage,
relationships_vdb: BaseVectorStorage,
chunks_vdb: BaseVectorStorage,
text_chunks_db: BaseKVStorage[TextChunkSchema],
query_param: QueryParam,
global_config: dict,
hashing_kv: BaseKVStorage = None,
) -> str:
"""
Hybrid retrieval implementation combining knowledge graph and vector search.
@@ -1296,7 +1305,7 @@ async def mix_kg_vector_query(
# Reuse keyword extraction logic from kg_query
example_number = global_config["addon_params"].get("example_number", None)
if example_number and example_number < len(
PROMPTS["keywords_extraction_examples"]
):
examples = "\n".join(
PROMPTS["keywords_extraction_examples"][: int(example_number)]
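
The query-side functions touched above (`kg_query`, `naive_query`, `mix_kg_vector_query`) are all reached from `LightRAG.query` via `QueryParam.mode`. A usage sketch mirroring the test notebook below (hedged: only "hybrid" is exercised by the notebook; the other mode strings are assumed from the function names in this diff, and `mix_kg_vector_query` backs a combined mode as well):

```python
from lightrag import QueryParam

question = "Which cohorts does the paper analyze?"
# `rag` is an initialized LightRAG instance, as in the notebook below.
for mode in ("naive", "local", "global", "hybrid"):
    print(mode, rag.query(question, param=QueryParam(mode=mode)))
```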

test.ipynb (new file, 740 lines)

@@ -0,0 +1,740 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "4b5690db12e34685",
"metadata": {
"ExecuteTime": {
"end_time": "2025-01-07T05:38:34.174205Z",
"start_time": "2025-01-07T05:38:29.978194Z"
}
},
"outputs": [],
"source": [
"import os\n",
"import logging\n",
"import numpy as np\n",
"from lightrag import LightRAG, QueryParam\n",
"from lightrag.llm import openai_complete_if_cache, openai_embedding\n",
"from lightrag.utils import EmbeddingFunc\n",
"import nest_asyncio"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "8c8ee7c061bf9159",
"metadata": {
"ExecuteTime": {
"end_time": "2025-01-07T05:38:37.440083Z",
"start_time": "2025-01-07T05:38:37.437666Z"
}
},
"outputs": [],
"source": [
"nest_asyncio.apply()\n",
"WORKING_DIR = \"../llm_rag/paper_db/R000088_test2\"\n",
"logging.basicConfig(format=\"%(levelname)s:%(message)s\", level=logging.INFO)\n",
"if not os.path.exists(WORKING_DIR):\n",
" os.mkdir(WORKING_DIR)\n",
"os.environ[\"doubao_api\"] = \"6b890250-0cf6-4eb1-aa82-9c9d711398a7\""
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "a5009d16e0851dca",
"metadata": {
"ExecuteTime": {
"end_time": "2025-01-07T05:38:42.594315Z",
"start_time": "2025-01-07T05:38:42.590800Z"
}
},
"outputs": [],
"source": [
"async def llm_model_func(\n",
" prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs\n",
") -> str:\n",
" return await openai_complete_if_cache(\n",
" \"ep-20241218114828-2tlww\",\n",
" prompt,\n",
" system_prompt=system_prompt,\n",
" history_messages=history_messages,\n",
" api_key=os.getenv(\"doubao_api\"),\n",
" base_url=\"https://ark.cn-beijing.volces.com/api/v3\",\n",
" **kwargs,\n",
" )\n",
"\n",
"\n",
"async def embedding_func(texts: list[str]) -> np.ndarray:\n",
" return await openai_embedding(\n",
" texts,\n",
" model=\"ep-20241231173413-pgjmk\",\n",
" api_key=os.getenv(\"doubao_api\"),\n",
" base_url=\"https://ark.cn-beijing.volces.com/api/v3\",\n",
" )"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "397fcad24ce4d0ed",
"metadata": {
"ExecuteTime": {
"end_time": "2025-01-07T05:38:44.016901Z",
"start_time": "2025-01-07T05:38:44.006291Z"
}
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:lightrag:Logger initialized for working directory: ../llm_rag/paper_db/R000088_test2\n",
"INFO:lightrag:Load KV llm_response_cache with 0 data\n",
"INFO:lightrag:Load KV full_docs with 0 data\n",
"INFO:lightrag:Load KV text_chunks with 0 data\n",
"INFO:nano-vectordb:Init {'embedding_dim': 4096, 'metric': 'cosine', 'storage_file': '../llm_rag/paper_db/R000088_test2/vdb_entities.json'} 0 data\n",
"INFO:nano-vectordb:Init {'embedding_dim': 4096, 'metric': 'cosine', 'storage_file': '../llm_rag/paper_db/R000088_test2/vdb_relationships.json'} 0 data\n",
"INFO:nano-vectordb:Init {'embedding_dim': 4096, 'metric': 'cosine', 'storage_file': '../llm_rag/paper_db/R000088_test2/vdb_chunks.json'} 0 data\n",
"INFO:lightrag:Loaded document status storage with 0 records\n"
]
}
],
"source": [
"rag = LightRAG(\n",
" working_dir=WORKING_DIR,\n",
" llm_model_func=llm_model_func,\n",
" embedding_func=EmbeddingFunc(\n",
" embedding_dim=4096, max_token_size=8192, func=embedding_func\n",
" ),\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "1dc3603677f7484d",
"metadata": {
"ExecuteTime": {
"end_time": "2025-01-07T05:38:47.509111Z",
"start_time": "2025-01-07T05:38:47.501997Z"
}
},
"outputs": [],
"source": [
"with open(\n",
" \"../llm_rag/example/R000088/auto/R000088_full_txt.md\", \"r\", encoding=\"utf-8\"\n",
") as f:\n",
" content = f.read()\n",
"\n",
"\n",
"async def embedding_func(texts: list[str]) -> np.ndarray:\n",
" return await openai_embedding(\n",
" texts,\n",
" model=\"ep-20241231173413-pgjmk\",\n",
" api_key=os.getenv(\"doubao_api\"),\n",
" base_url=\"https://ark.cn-beijing.volces.com/api/v3\",\n",
" )\n",
"\n",
"\n",
"async def get_embedding_dim():\n",
" test_text = [\"This is a test sentence.\"]\n",
" embedding = await embedding_func(test_text)\n",
" embedding_dim = embedding.shape[1]\n",
" return embedding_dim"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "6844202606acfbe5",
"metadata": {
"ExecuteTime": {
"end_time": "2025-01-07T05:38:50.666764Z",
"start_time": "2025-01-07T05:38:50.247712Z"
}
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n"
]
}
],
"source": [
"embedding_dimension = await get_embedding_dim()"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "d6273839d9681403",
"metadata": {
"ExecuteTime": {
"end_time": "2025-01-07T05:42:33.085507Z",
"start_time": "2025-01-07T05:38:56.789348Z"
}
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:lightrag:Processing 1 new unique documents\n",
"Processing batch 1: 0%| | 0/1 [00:00<?, ?it/s]INFO:lightrag:Inserting 22 vectors to chunks\n",
"\n",
"Generating embeddings: 0%| | 0/1 [00:00<?, ?batch/s]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
"\n",
"Generating embeddings: 100%|██████████| 1/1 [00:03<00:00, 3.85s/batch]\u001b[A\n",
"\n",
"Extracting entities from chunks: 0%| | 0/22 [00:00<?, ?chunk/s]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"⠙ Processed 1 chunks, 7 entities(duplicated), 6 relations(duplicated)\r"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n",
"Extracting entities from chunks: 5%|▍ | 1/22 [00:23<08:21, 23.90s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"⠹ Processed 2 chunks, 12 entities(duplicated), 15 relations(duplicated)\r"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n",
"Extracting entities from chunks: 9%|▉ | 2/22 [00:26<03:50, 11.51s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"⠸ Processed 3 chunks, 20 entities(duplicated), 22 relations(duplicated)\r"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n",
"Extracting entities from chunks: 14%|█▎ | 3/22 [00:34<03:08, 9.93s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"⠼ Processed 4 chunks, 30 entities(duplicated), 30 relations(duplicated)\r"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n",
"Extracting entities from chunks: 18%|█▊ | 4/22 [00:37<02:09, 7.21s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"⠴ Processed 5 chunks, 39 entities(duplicated), 39 relations(duplicated)\r"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n",
"Extracting entities from chunks: 23%|██▎ | 5/22 [00:38<01:19, 4.70s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"⠦ Processed 6 chunks, 39 entities(duplicated), 39 relations(duplicated)\r"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n",
"Extracting entities from chunks: 27%|██▋ | 6/22 [00:38<00:53, 3.32s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"⠧ Processed 7 chunks, 47 entities(duplicated), 50 relations(duplicated)\r"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n",
"Extracting entities from chunks: 32%|███▏ | 7/22 [00:39<00:39, 2.65s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"⠇ Processed 8 chunks, 56 entities(duplicated), 58 relations(duplicated)\r"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n",
"Extracting entities from chunks: 36%|███▋ | 8/22 [00:40<00:29, 2.13s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"⠏ Processed 9 chunks, 63 entities(duplicated), 69 relations(duplicated)\r"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n",
"Extracting entities from chunks: 41%|████ | 9/22 [00:47<00:43, 3.38s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"⠋ Processed 10 chunks, 81 entities(duplicated), 81 relations(duplicated)\r"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n",
"Extracting entities from chunks: 45%|████▌ | 10/22 [00:48<00:32, 2.73s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"⠙ Processed 11 chunks, 92 entities(duplicated), 89 relations(duplicated)\r"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n",
"Extracting entities from chunks: 50%|█████ | 11/22 [01:01<01:05, 5.99s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"⠹ Processed 12 chunks, 107 entities(duplicated), 107 relations(duplicated)\r"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n",
"Extracting entities from chunks: 55%|█████▍ | 12/22 [01:10<01:09, 6.94s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"⠸ Processed 13 chunks, 127 entities(duplicated), 126 relations(duplicated)\r"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n",
"Extracting entities from chunks: 59%|█████▉ | 13/22 [01:16<00:59, 6.59s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"⠼ Processed 14 chunks, 151 entities(duplicated), 137 relations(duplicated)\r"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n",
"Extracting entities from chunks: 64%|██████▎ | 14/22 [01:16<00:37, 4.68s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"⠴ Processed 15 chunks, 161 entities(duplicated), 144 relations(duplicated)\r"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n",
"Extracting entities from chunks: 68%|██████▊ | 15/22 [01:17<00:23, 3.31s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"⠦ Processed 16 chunks, 176 entities(duplicated), 154 relations(duplicated)\r"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n",
"Extracting entities from chunks: 73%|███████▎ | 16/22 [01:19<00:18, 3.04s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"⠧ Processed 17 chunks, 189 entities(duplicated), 162 relations(duplicated)\r"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n",
"Extracting entities from chunks: 77%|███████▋ | 17/22 [01:21<00:13, 2.80s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"⠇ Processed 18 chunks, 207 entities(duplicated), 186 relations(duplicated)\r"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n",
"Extracting entities from chunks: 82%|████████▏ | 18/22 [01:38<00:28, 7.06s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"⠏ Processed 19 chunks, 222 entities(duplicated), 200 relations(duplicated)\r"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n",
"Extracting entities from chunks: 86%|████████▋ | 19/22 [01:44<00:19, 6.61s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"⠋ Processed 20 chunks, 310 entities(duplicated), 219 relations(duplicated)\r"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n",
"Extracting entities from chunks: 91%|█████████ | 20/22 [02:12<00:26, 13.19s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"⠙ Processed 21 chunks, 345 entities(duplicated), 263 relations(duplicated)\r"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n",
"Extracting entities from chunks: 95%|█████████▌| 21/22 [02:32<00:15, 15.15s/chunk]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"⠹ Processed 22 chunks, 417 entities(duplicated), 285 relations(duplicated)\r"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n",
"Extracting entities from chunks: 100%|██████████| 22/22 [03:21<00:00, 9.18s/chunk]\u001b[A\n",
"INFO:lightrag:Inserting entities into storage...\n",
"\n",
"Inserting entities: 100%|██████████| 327/327 [00:00<00:00, 13446.31entity/s]\n",
"INFO:lightrag:Inserting relationships into storage...\n",
"\n",
"Inserting relationships: 100%|██████████| 272/272 [00:00<00:00, 16740.29relationship/s]\n",
"INFO:lightrag:Inserting 327 vectors to entities\n",
"\n",
"Generating embeddings: 0%| | 0/11 [00:00<?, ?batch/s]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
"\n",
"Generating embeddings: 9%|▉ | 1/11 [00:00<00:09, 1.02batch/s]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
"\n",
"Generating embeddings: 18%|█▊ | 2/11 [00:02<00:09, 1.07s/batch]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
"\n",
"Generating embeddings: 27%|██▋ | 3/11 [00:02<00:06, 1.33batch/s]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
"\n",
"Generating embeddings: 36%|███▋ | 4/11 [00:02<00:04, 1.67batch/s]\u001b[A\n",
"Generating embeddings: 45%|████▌ | 5/11 [00:03<00:03, 1.93batch/s]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
"\n",
"Generating embeddings: 55%|█████▍ | 6/11 [00:03<00:02, 2.15batch/s]\u001b[A\n",
"Generating embeddings: 64%|██████▎ | 7/11 [00:03<00:01, 2.33batch/s]\u001b[A\n",
"Generating embeddings: 73%|███████▎ | 8/11 [00:04<00:01, 2.46batch/s]\u001b[A\n",
"Generating embeddings: 82%|████████▏ | 9/11 [00:04<00:00, 2.55batch/s]\u001b[A\n",
"Generating embeddings: 91%|█████████ | 10/11 [00:05<00:00, 2.64batch/s]\u001b[A\n",
"Generating embeddings: 100%|██████████| 11/11 [00:05<00:00, 2.04batch/s]\u001b[A\n",
"INFO:lightrag:Inserting 272 vectors to relationships\n",
"\n",
"Generating embeddings: 0%| | 0/9 [00:00<?, ?batch/s]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
"\n",
"Generating embeddings: 11%|█ | 1/9 [00:01<00:11, 1.39s/batch]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
"\n",
"Generating embeddings: 22%|██▏ | 2/9 [00:02<00:07, 1.01s/batch]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
"\n",
"Generating embeddings: 33%|███▎ | 3/9 [00:02<00:04, 1.40batch/s]\u001b[AINFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
"\n",
"Generating embeddings: 44%|████▍ | 4/9 [00:02<00:02, 1.74batch/s]\u001b[A\n",
"Generating embeddings: 56%|█████▌ | 5/9 [00:03<00:01, 2.01batch/s]\u001b[A\n",
"Generating embeddings: 67%|██████▋ | 6/9 [00:03<00:01, 2.23batch/s]\u001b[A\n",
"Generating embeddings: 78%|███████▊ | 7/9 [00:03<00:00, 2.39batch/s]\u001b[A\n",
"Generating embeddings: 89%|████████▉ | 8/9 [00:04<00:00, 2.52batch/s]\u001b[A\n",
"Generating embeddings: 100%|██████████| 9/9 [00:04<00:00, 1.93batch/s]\u001b[A\n",
"INFO:lightrag:Writing graph with 331 nodes, 272 edges\n",
"Processing batch 1: 100%|██████████| 1/1 [03:36<00:00, 216.27s/it]\n"
]
}
],
"source": [
"# rag.insert(content)\n",
"rag.insert(content, split_by_character=\"\\n#\")"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "c4f9ae517151a01d",
"metadata": {
"ExecuteTime": {
"end_time": "2025-01-07T05:42:50.044809Z",
"start_time": "2025-01-07T05:42:50.041256Z"
}
},
"outputs": [],
"source": [
"prompt1 = \"\"\"\n",
"你是一名经验丰富的论文分析科学家,你的任务是对一篇英文学术研究论文进行关键信息提取并深入分析。\n",
"\n",
"请按照以下步骤进行分析:\n",
"1. 对于论文的分析对象相关问题:\n",
" - 仔细查找论文中的研究队列相关信息,确定分析对象来自哪些研究队列。\n",
" - 查看如果来自多个队列,文中是单独分析还是联合分析。\n",
" - 找出这些队列的名称。\n",
" - 确定这些队列开展的国家有哪些(注意:“澳门”记为“中国澳门”,“香港”记为“中国香港”,“台湾”记为“中国台湾”,其余采用国家回答)。\n",
" - 明确队列研究对象的性别分布(“男性”、“女性”或“全体”)。\n",
" - 查找队列收集结束时,研究对象年龄分布(平均值/中位值、标准差或范围若信息缺失则根据年龄推理规则进行推理当论文只提供了队列开展时对象的年龄应根据队列结束时间推算最终年龄范围。例如1989建立队列时年龄为25 - 42岁随访至2011年结束则推算年龄范围为47 - 64岁。\n",
" - 确定队列研究时间线,即哪一年开始收集信息/建立队列哪一年结束若信息缺失则根据队列时间线推理规则进行推理如论文只提供了建立队列时间为1995进行了10年的随访则推算队列结束时间为2005年。\n",
" - 找出队列结束时实际参与研究人数是多少。\n",
"首先在<分析>标签中,针对每个问题详细分析你的思考过程。然后在<回答>标签中给出所有问题的最终答案。\"\"\""
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "7a6491385b050095",
"metadata": {
"ExecuteTime": {
"end_time": "2025-01-07T05:43:24.751628Z",
"start_time": "2025-01-07T05:42:50.865679Z"
}
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:lightrag:kw_prompt result:\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\n",
" \"high_level_keywords\": [\"英文学术研究论文分析\", \"关键信息提取\", \"深入分析\"],\n",
" \"low_level_keywords\": [\"研究队列\", \"队列名称\", \"队列开展国家\", \"性别分布\", \"年龄分布\", \"队列研究时间线\", \"实际参与研究人数\"]\n",
"}\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
"INFO:lightrag:Local query uses 60 entites, 38 relations, 6 text units\n",
"INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/embeddings \"HTTP/1.1 200 OK\"\n",
"INFO:lightrag:Global query uses 72 entites, 60 relations, 4 text units\n",
"INFO:httpx:HTTP Request: POST https://ark.cn-beijing.volces.com/api/v3/chat/completions \"HTTP/1.1 200 OK\"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"<分析>\n",
"- **分析对象来自哪些研究队列及是单独分析还是联合分析**\n",
" 通过查找论文内容发现文中提到“This is a combined analysis of data from 2 randomized, double-blind, placebo-controlled clinical trials (Norwegian Vitamin [NORVIT] trial15 and Western Norway B Vitamin Intervention Trial [WENBIT]16)”明确是对两个队列的数据进行联合分析队列名称分别为“Norwegian Vitamin (NORVIT) trial”和“Western Norway B Vitamin Intervention Trial (WENBIT)”。\n",
"- **队列开展的国家**\n",
" 文中多次提及研究在挪威进行如“combined analyses and extended follow-up of 2 vitamin B intervention trials among patients with ischemic heart disease in Norway”所以确定研究开展的国家是挪威。\n",
"- **队列研究对象的性别分布**\n",
" 从“Mean (SD) age was 62.3 (11.0) years and 23.5% of participants were women”可知研究对象包含男性和女性即全体。\n",
"- **队列收集结束时研究对象年龄分布**\n",
" 已知“Mean (SD) age was 62.3 (11.0) years”是基线时年龄信息“Median (interquartile range) duration of extended follow-up through December 31, 2007, was 78 (61 - 90) months”由于随访的中位时间是78个月约6.5年所以可推算队列收集结束时研究对象年龄均值约为62.3 + 6.5 = 68.8岁标准差仍为11.0年)。\n",
"- **队列研究时间线**\n",
" 根据“2 randomized, double-blind, placebo-controlled clinical trials (Norwegian Vitamin [NORVIT] trial15 and Western Norway B Vitamin Intervention Trial [WENBIT]16) conducted between 1998 and 2005, and an observational posttrial follow-up through December 31, 2007”可知队列开始收集信息时间为1998年结束时间为2007年12月31日。\n",
"- **队列结束时实际参与研究人数**\n",
" 由“A total of 6837 individuals were included in the combined analyses, of whom 6261 (91.6%) participated in posttrial follow-up”可知队列结束时实际参与研究人数为6261人。\n",
"</分析>\n",
"\n",
"<回答>\n",
"- 分析对象来自“Norwegian Vitamin (NORVIT) trial”和“Western Norway B Vitamin Intervention Trial (WENBIT)”两个研究队列,文中是对这两个队列的数据进行联合分析。\n",
"- 队列开展的国家是挪威。\n",
"- 队列研究对象的性别分布为全体。\n",
"- 队列收集结束时研究对象年龄分布均值约为68.8岁标准差为11.0年。\n",
"- 队列研究时间线为1998年开始收集信息/建立队列2007年12月31日结束。\n",
"- 队列结束时实际参与研究人数是6261人。\n"
]
}
],
"source": [
"print(rag.query(prompt1, param=QueryParam(mode=\"hybrid\")))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "fef9d06983da47af",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.6"
}
},
"nbformat": 4,
"nbformat_minor": 5
}