remove tqdm and cleaned readme and ollama

README.md
@@ -344,16 +344,6 @@ rag = LightRAG(
     ),
 )
 ```
-#### Fully functional example
-
-There fully functional example `examples/lightrag_ollama_demo.py` that utilizes `gemma2:2b` model, runs only 4 requests in parallel and set context size to 32k.
-
-#### Using "Thinking" Models (e.g., DeepSeek)
-
-To return only the model's response, you can pass `reasoning_tag` in `llm_model_kwargs`.
-
-For example, for DeepSeek models, `reasoning_tag` should be set to `think`.
-
 #### Low RAM GPUs
 
 In order to run this experiment on low RAM GPU you should select small model and tune context window (increasing context increase memory consumption). For example, running this ollama example on repurposed mining GPU with 6Gb of RAM required to set context size to 26k while using `gemma2:2b`. It was able to find 197 entities and 19 relations on `book.txt`.
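Taken together with the removed "Thinking models" note, the kept Low RAM GPUs paragraph implies a concrete configuration. Below is a minimal sketch of that setup; the `ollama_model_complete`/`ollama_embed` helpers and `EmbeddingFunc` appear in the hunks further down, while the working directory, the `nomic-embed-text` embedder, and its 768 dimension are illustrative assumptions, not part of this diff.

```python
# Sketch only: LightRAG + Ollama tuned for a 6 GB GPU, per the Low RAM GPUs
# note above. Embedder name/dimension and working_dir are assumed values.
from lightrag import LightRAG
from lightrag.llm.ollama import ollama_model_complete, ollama_embed
from lightrag.utils import EmbeddingFunc

rag = LightRAG(
    working_dir="./rag_storage",
    llm_model_func=ollama_model_complete,
    llm_model_name="gemma2:2b",  # small model, as the note recommends
    llm_model_kwargs={
        "host": "http://localhost:11434",
        "options": {"num_ctx": 26000},  # 26k context fit the 6 GB card
        # "reasoning_tag": "think",     # per the removed DeepSeek note
    },
    embedding_func=EmbeddingFunc(
        embedding_dim=768,  # dimension of the assumed nomic-embed-text model
        max_token_size=8192,
        func=lambda texts: ollama_embed(
            texts, embed_model="nomic-embed-text", host="http://localhost:11434"
        ),
    ),
)
```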
@@ -7,5 +7,4 @@ python-multipart
 tenacity
 tiktoken
 torch
-tqdm
 uvicorn
@@ -22,7 +22,6 @@ if not pm.is_installed("faiss"):
 
 try:
     import faiss
-    from tqdm.asyncio import tqdm as tqdm_async
 except ImportError as e:
     raise ImportError(
         "`faiss` library is not installed. Please install it via pip: `pip install faiss`."
@@ -109,16 +108,7 @@ class FaissVectorDBStorage(BaseVectorStorage):
             for i in range(0, len(contents), self._max_batch_size)
         ]
 
-        pbar = tqdm_async(
-            total=len(batches), desc="Generating embeddings", unit="batch"
-        )
-
-        async def wrapped_task(batch):
-            result = await self.embedding_func(batch)
-            pbar.update(1)
-            return result
-
-        embedding_tasks = [wrapped_task(batch) for batch in batches]
+        embedding_tasks = [self.embedding_func(batch) for batch in batches]
         embeddings_list = await asyncio.gather(*embedding_tasks)
 
         # Flatten the list of arrays
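This Faiss hunk sets the pattern that the Milvus, Mongo, NanoVectorDB, Postgres, Qdrant, and TiDB hunks below repeat: the tqdm progress bar and its `wrapped_task` indirection drop out, leaving one task per batch gathered in input order. A self-contained sketch of the resulting idiom, with an illustrative `embed_batch` standing in for `self.embedding_func`:

```python
# Toy sketch of the post-refactor pattern: batch the inputs, fire one task per
# batch, and gather results in input order. embed_batch is illustrative, not
# from the diff.
import asyncio
import numpy as np

async def embed_batch(batch: list[str]) -> np.ndarray:
    await asyncio.sleep(0)                # pretend to call an embedding model
    return np.zeros((len(batch), 4))      # fixed-size dummy vectors

async def embed_all(contents: list[str], max_batch_size: int = 32) -> np.ndarray:
    batches = [
        contents[i : i + max_batch_size]
        for i in range(0, len(contents), max_batch_size)
    ]
    embedding_tasks = [embed_batch(batch) for batch in batches]
    embeddings_list = await asyncio.gather(*embedding_tasks)
    return np.concatenate(embeddings_list)  # flatten the list of arrays

print(asyncio.run(embed_all([f"doc {i}" for i in range(70)])).shape)  # (70, 4)
```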
@@ -1,7 +1,6 @@
 import asyncio
 import os
 from typing import Any, final
-from tqdm.asyncio import tqdm as tqdm_async
 from dataclasses import dataclass
 import numpy as np
 from lightrag.utils import logger
@@ -94,15 +93,7 @@ class MilvusVectorDBStorage(BaseVectorStorage):
             for i in range(0, len(contents), self._max_batch_size)
         ]
 
-        async def wrapped_task(batch):
-            result = await self.embedding_func(batch)
-            pbar.update(1)
-            return result
-
-        embedding_tasks = [wrapped_task(batch) for batch in batches]
-        pbar = tqdm_async(
-            total=len(embedding_tasks), desc="Generating embeddings", unit="batch"
-        )
+        embedding_tasks = [self.embedding_func(batch) for batch in batches]
         embeddings_list = await asyncio.gather(*embedding_tasks)
 
         embeddings = np.concatenate(embeddings_list)
@@ -2,7 +2,6 @@ import os
 from dataclasses import dataclass
 import numpy as np
 import configparser
-from tqdm.asyncio import tqdm as tqdm_async
 import asyncio
 
 from typing import Any, List, Union, final
@@ -854,17 +853,8 @@ class MongoVectorDBStorage(BaseVectorStorage):
             for i in range(0, len(contents), self._max_batch_size)
         ]
 
-        async def wrapped_task(batch):
-            result = await self.embedding_func(batch)
-            pbar.update(1)
-            return result
-
-        embedding_tasks = [wrapped_task(batch) for batch in batches]
-        pbar = tqdm_async(
-            total=len(embedding_tasks), desc="Generating embeddings", unit="batch"
-        )
+        embedding_tasks = [self.embedding_func(batch) for batch in batches]
         embeddings_list = await asyncio.gather(*embedding_tasks)
-
         embeddings = np.concatenate(embeddings_list)
         for i, d in enumerate(list_data):
             d["vector"] = np.array(embeddings[i], dtype=np.float32).tolist()
@@ -1,7 +1,6 @@
 import asyncio
 import os
 from typing import Any, final
-from tqdm.asyncio import tqdm as tqdm_async
 from dataclasses import dataclass
 import numpy as np
 
@@ -71,15 +70,7 @@ class NanoVectorDBStorage(BaseVectorStorage):
             for i in range(0, len(contents), self._max_batch_size)
         ]
 
-        async def wrapped_task(batch):
-            result = await self.embedding_func(batch)
-            pbar.update(1)
-            return result
-
-        embedding_tasks = [wrapped_task(batch) for batch in batches]
-        pbar = tqdm_async(
-            total=len(embedding_tasks), desc="Generating embeddings", unit="batch"
-        )
+        embedding_tasks = [self.embedding_func(batch) for batch in batches]
         embeddings_list = await asyncio.gather(*embedding_tasks)
 
         embeddings = np.concatenate(embeddings_list)
@@ -41,7 +41,6 @@ if not pm.is_installed("asyncpg"):
 
 try:
     import asyncpg
-    from tqdm.asyncio import tqdm as tqdm_async
 
 except ImportError as e:
     raise ImportError(
@@ -380,15 +379,7 @@ class PGVectorStorage(BaseVectorStorage):
             for i in range(0, len(contents), self._max_batch_size)
         ]
 
-        async def wrapped_task(batch):
-            result = await self.embedding_func(batch)
-            pbar.update(1)
-            return result
-
-        embedding_tasks = [wrapped_task(batch) for batch in batches]
-        pbar = tqdm_async(
-            total=len(embedding_tasks), desc="Generating embeddings", unit="batch"
-        )
+        embedding_tasks = [self.embedding_func(batch) for batch in batches]
         embeddings_list = await asyncio.gather(*embedding_tasks)
 
         embeddings = np.concatenate(embeddings_list)
@@ -1,7 +1,6 @@
 import asyncio
 import os
 from typing import Any, final
-from tqdm.asyncio import tqdm as tqdm_async
 from dataclasses import dataclass
 import numpy as np
 import hashlib
@@ -110,15 +109,7 @@ class QdrantVectorDBStorage(BaseVectorStorage):
             for i in range(0, len(contents), self._max_batch_size)
         ]
 
-        async def wrapped_task(batch):
-            result = await self.embedding_func(batch)
-            pbar.update(1)
-            return result
-
-        embedding_tasks = [wrapped_task(batch) for batch in batches]
-        pbar = tqdm_async(
-            total=len(embedding_tasks), desc="Generating embeddings", unit="batch"
-        )
+        embedding_tasks = [self.embedding_func(batch) for batch in batches]
         embeddings_list = await asyncio.gather(*embedding_tasks)
 
         embeddings = np.concatenate(embeddings_list)
@@ -1,6 +1,5 @@
 import os
 from typing import Any, final
-from tqdm.asyncio import tqdm as tqdm_async
 from dataclasses import dataclass
 import pipmaster as pm
 import configparser
@@ -51,7 +50,8 @@ class RedisKVStorage(BaseKVStorage):
 
     async def upsert(self, data: dict[str, dict[str, Any]]) -> None:
         pipe = self._redis.pipeline()
-        for k, v in tqdm_async(data.items(), desc="Upserting"):
+
+        for k, v in data.items():
             pipe.set(f"{self.namespace}:{k}", json.dumps(v))
         await pipe.execute()
 
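The Redis upsert keeps its pipeline, so all SET commands still reach the server in one round trip; only the per-key progress bar is gone. A standalone sketch of the same idiom, assuming redis-py's asyncio client (host, port, and namespace are placeholders):

```python
# Standalone sketch of the pipelined upsert idiom, assuming redis-py's
# asyncio client; the namespace:key layout mirrors the hunk above.
import asyncio
import json
from redis.asyncio import Redis

async def upsert(namespace: str, data: dict[str, dict]) -> None:
    r = Redis(host="localhost", port=6379)
    pipe = r.pipeline()
    for k, v in data.items():
        pipe.set(f"{namespace}:{k}", json.dumps(v))  # queued, not yet sent
    await pipe.execute()                             # one round trip for all keys
    await r.aclose()                                 # close client (redis-py >= 5)

asyncio.run(upsert("kv_store", {"doc-1": {"content": "hello"}}))
```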
@@ -7,7 +7,6 @@ import numpy as np
 
 from lightrag.types import KnowledgeGraph
 
-from tqdm import tqdm
 
 from ..base import BaseGraphStorage, BaseKVStorage, BaseVectorStorage
 from ..namespace import NameSpace, is_namespace
@@ -270,15 +269,8 @@ class TiDBVectorDBStorage(BaseVectorStorage):
             for i in range(0, len(contents), self._max_batch_size)
         ]
         embedding_tasks = [self.embedding_func(batch) for batch in batches]
-        embeddings_list = []
-        for f in tqdm(
-            asyncio.as_completed(embedding_tasks),
-            total=len(embedding_tasks),
-            desc="Generating embeddings",
-            unit="batch",
-        ):
-            embeddings = await f
-            embeddings_list.append(embeddings)
+        embeddings_list = await asyncio.gather(*embedding_tasks)
         embeddings = np.concatenate(embeddings_list)
         for i, d in enumerate(list_data):
             d["content_vector"] = embeddings[i]
@@ -4,7 +4,7 @@ if sys.version_info < (3, 9):
     from typing import AsyncIterator
 else:
     from collections.abc import AsyncIterator
 
 import pipmaster as pm  # Pipmaster for dynamic library install
 
 # install specific modules
@@ -48,7 +48,7 @@ async def _ollama_model_if_cache(
     **kwargs,
 ) -> Union[str, AsyncIterator[str]]:
     stream = True if kwargs.get("stream") else False
 
     kwargs.pop("max_tokens", None)
     # kwargs.pop("response_format", None) # allow json
     host = kwargs.pop("host", None)
@@ -129,4 +129,4 @@ async def ollama_embed(texts: list[str], embed_model, **kwargs) -> np.ndarray:
     kwargs["headers"] = headers
     ollama_client = ollama.Client(**kwargs)
     data = ollama_client.embed(model=embed_model, input=texts)
     return data["embeddings"]
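For context, the embedding path that survives this cleanup boils down to a plain `ollama` client call. A short sketch of the same calls used standalone; the host and model name are placeholder assumptions:

```python
# Sketch: the same ollama client calls as in the hunk above, standalone.
# Host and model name are placeholders.
import ollama

client = ollama.Client(host="http://localhost:11434")
data = client.embed(model="nomic-embed-text", input=["hello", "world"])
print(len(data["embeddings"]))  # one vector per input text
```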
@@ -3,7 +3,6 @@ from __future__ import annotations
 import asyncio
 import json
 import re
-from tqdm.asyncio import tqdm as tqdm_async
 from typing import Any, AsyncIterator
 from collections import Counter, defaultdict
 from .utils import (
|
|||||||
)
|
)
|
||||||
return dict(maybe_nodes), dict(maybe_edges)
|
return dict(maybe_nodes), dict(maybe_edges)
|
||||||
|
|
||||||
results = []
|
tasks = [_process_single_content(c) for c in ordered_chunks]
|
||||||
for result in tqdm_async(
|
results = await asyncio.gather(*tasks)
|
||||||
asyncio.as_completed([_process_single_content(c) for c in ordered_chunks]),
|
|
||||||
total=len(ordered_chunks),
|
|
||||||
desc="Level 2 - Extracting entities and relationships",
|
|
||||||
unit="chunk",
|
|
||||||
position=1,
|
|
||||||
leave=False,
|
|
||||||
):
|
|
||||||
results.append(await result)
|
|
||||||
|
|
||||||
maybe_nodes = defaultdict(list)
|
maybe_nodes = defaultdict(list)
|
||||||
maybe_edges = defaultdict(list)
|
maybe_edges = defaultdict(list)
|
||||||
@@ -518,41 +509,20 @@ async def extract_entities(
             maybe_nodes[k].extend(v)
         for k, v in m_edges.items():
             maybe_edges[tuple(sorted(k))].extend(v)
-    logger.debug("Inserting entities into storage...")
-    all_entities_data = []
-    for result in tqdm_async(
-        asyncio.as_completed(
-            [
-                _merge_nodes_then_upsert(k, v, knowledge_graph_inst, global_config)
-                for k, v in maybe_nodes.items()
-            ]
-        ),
-        total=len(maybe_nodes),
-        desc="Level 3 - Inserting entities",
-        unit="entity",
-        position=2,
-        leave=False,
-    ):
-        all_entities_data.append(await result)
-
-    logger.debug("Inserting relationships into storage...")
-    all_relationships_data = []
-    for result in tqdm_async(
-        asyncio.as_completed(
-            [
-                _merge_edges_then_upsert(
-                    k[0], k[1], v, knowledge_graph_inst, global_config
-                )
-                for k, v in maybe_edges.items()
-            ]
-        ),
-        total=len(maybe_edges),
-        desc="Level 3 - Inserting relationships",
-        unit="relationship",
-        position=3,
-        leave=False,
-    ):
-        all_relationships_data.append(await result)
+
+    all_entities_data = await asyncio.gather(
+        *[
+            _merge_nodes_then_upsert(k, v, knowledge_graph_inst, global_config)
+            for k, v in maybe_nodes.items()
+        ]
+    )
+
+    all_relationships_data = await asyncio.gather(
+        *[
+            _merge_edges_then_upsert(k[0], k[1], v, knowledge_graph_inst, global_config)
+            for k, v in maybe_edges.items()
+        ]
+    )
+
 if not len(all_entities_data) and not len(all_relationships_data):
         logger.warning(
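A nuance worth noting in this hunk and the extraction hunk above: the old `asyncio.as_completed` loops appended results in completion order, while `asyncio.gather` returns them in submission order, so the entity and relationship lists are now deterministically ordered. A tiny demonstration:

```python
# Demonstration: gather preserves submission order even when tasks finish
# out of order, unlike as_completed.
import asyncio

async def work(i: int) -> int:
    await asyncio.sleep(0.01 * (5 - i))  # later tasks finish first
    return i

async def main() -> None:
    results = await asyncio.gather(*[work(i) for i in range(5)])
    print(results)  # [0, 1, 2, 3, 4] regardless of completion order

asyncio.run(main())
```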
@@ -19,7 +19,6 @@ import tiktoken
 from lightrag.prompt import PROMPTS
 
-
 
 VERBOSE_DEBUG = os.getenv("VERBOSE", "false").lower() == "true"
 
 
@@ -84,7 +83,6 @@ class EmbeddingFunc:
         return await self.func(*args, **kwargs)
 
-
 
 def locate_json_string_body_from_string(content: str) -> str | None:
     """Locate the JSON string body from a string"""
     try:
@@ -715,4 +713,3 @@ def get_conversation_turns(
     )
-
     return "\n".join(formatted_turns)
 
@@ -2,7 +2,6 @@ import re
 import json
 import asyncio
 from lightrag import LightRAG, QueryParam
-from tqdm import tqdm
 
 
 def extract_queries(file_path):
@@ -44,7 +43,7 @@ def run_queries_and_save_to_json(
         result_file.write("[\n")
         first_entry = True
 
-        for query_text in tqdm(queries, desc="Processing queries", unit="query"):
+        for query_text in queries:
             result, error = loop.run_until_complete(
                 process_query(query_text, rag_instance, query_param)
             )
@@ -3,7 +3,6 @@ import re
 import json
 import asyncio
 from lightrag import LightRAG, QueryParam
-from tqdm import tqdm
 from lightrag.llm.openai import openai_complete_if_cache, openai_embed
 from lightrag.utils import EmbeddingFunc
 import numpy as np
@@ -76,7 +75,7 @@ def run_queries_and_save_to_json(
         result_file.write("[\n")
         first_entry = True
 
-        for query_text in tqdm(queries, desc="Processing queries", unit="query"):
+        for query_text in queries:
             result, error = loop.run_until_complete(
                 process_query(query_text, rag_instance, query_param)
             )
@@ -22,7 +22,6 @@ tenacity
 
 # LLM packages
 tiktoken
-tqdm
 xxhash
 
 # Extra libraries are installed when needed using pipmaster