From b6b2e6977367bbad760c29a840c65c5b3ba7207a Mon Sep 17 00:00:00 2001
From: LarFii <834462287@qq.com>
Date: Mon, 11 Nov 2024 10:45:22 +0800
Subject: [PATCH] Linting

---
 Dockerfile                                 |  2 +-
 README.md                                  |  9 +++----
 .../lightrag_api_openai_compatible_demo.py | 19 +++++++++-----
 lightrag/__init__.py                       |  2 +-
 lightrag/kg/__init__.py                    |  2 --
 lightrag/kg/neo4j_impl.py                  |  4 +--
 lightrag/lightrag.py                       |  1 -
 lightrag/operate.py                        | 25 +++++++++----------
 test.py                                    |  2 +-
 test_neo4j.py                              |  2 +-
 10 files changed, 35 insertions(+), 33 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index 1b60c089..787816fe 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -53,4 +53,4 @@ VOLUME /data /logs
 EXPOSE 7474 7473 7687
 
 ENTRYPOINT ["tini", "-g", "--", "/startup/docker-entrypoint.sh"]
-CMD ["neo4j"]
\ No newline at end of file
+CMD ["neo4j"]
diff --git a/README.md b/README.md
index ec53d444..3cbacab7 100644
--- a/README.md
+++ b/README.md
@@ -196,7 +196,7 @@ rag = LightRAG(
 ### Using Neo4J for Storage
 
 * For production level scenarios you will most likely want to leverage an enterprise solution
-* for KG storage. Running Neo4J in Docker is recommended for seamless local testing. 
+* for KG storage. Running Neo4J in Docker is recommended for seamless local testing.
 * See: https://hub.docker.com/_/neo4j
 
 
@@ -209,7 +209,7 @@ When you launch the project be sure to override the default KG: NetworkS
 by specifying kg="Neo4JStorage".
 
 # Note: Default settings use NetworkX
-#Initialize LightRAG with Neo4J implementation. 
+#Initialize LightRAG with Neo4J implementation.
 WORKING_DIR = "./local_neo4jWorkDir"
 
 rag = LightRAG(
@@ -503,8 +503,8 @@ pip install fastapi uvicorn pydantic
 export RAG_DIR="your_index_directory" # Optional: Defaults to "index_default"
 export OPENAI_BASE_URL="Your OpenAI API base URL" # Optional: Defaults to "https://api.openai.com/v1"
 export OPENAI_API_KEY="Your OpenAI API key" # Required
-export LLM_MODEL="Your LLM model" # Optional: Defaults to "gpt-4o-mini" 
-export EMBEDDING_MODEL="Your embedding model" # Optional: Defaults to "text-embedding-3-large" 
+export LLM_MODEL="Your LLM model" # Optional: Defaults to "gpt-4o-mini"
+export EMBEDDING_MODEL="Your embedding model" # Optional: Defaults to "text-embedding-3-large"
 ```
 
 3. Run the API server:
@@ -923,4 +923,3 @@ primaryClass={cs.IR}
 }
 ```
 **Thank you for your interest in our work!**
-
diff --git a/examples/lightrag_api_openai_compatible_demo.py b/examples/lightrag_api_openai_compatible_demo.py
index 20a05a5f..39001b10 100644
--- a/examples/lightrag_api_openai_compatible_demo.py
+++ b/examples/lightrag_api_openai_compatible_demo.py
@@ -33,7 +33,7 @@ if not os.path.exists(WORKING_DIR):
 
 
 async def llm_model_func(
-        prompt, system_prompt=None, history_messages=[], **kwargs
+    prompt, system_prompt=None, history_messages=[], **kwargs
 ) -> str:
     return await openai_complete_if_cache(
         LLM_MODEL,
@@ -66,9 +66,11 @@ async def get_embedding_dim():
 rag = LightRAG(
     working_dir=WORKING_DIR,
     llm_model_func=llm_model_func,
-    embedding_func=EmbeddingFunc(embedding_dim=asyncio.run(get_embedding_dim()),
-                                 max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
-                                 func=embedding_func),
+    embedding_func=EmbeddingFunc(
+        embedding_dim=asyncio.run(get_embedding_dim()),
+        max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
+        func=embedding_func,
+    ),
 )
 
 
@@ -99,8 +101,13 @@ async def query_endpoint(request: QueryRequest):
     try:
         loop = asyncio.get_event_loop()
         result = await loop.run_in_executor(
-            None, lambda: rag.query(request.query,
-                                    param=QueryParam(mode=request.mode, only_need_context=request.only_need_context))
+            None,
+            lambda: rag.query(
+                request.query,
+                param=QueryParam(
+                    mode=request.mode, only_need_context=request.only_need_context
+                ),
+            ),
         )
         return Response(status="success", data=result)
     except Exception as e:
diff --git a/lightrag/__init__.py b/lightrag/__init__.py
index 8e76a260..b73db1b9 100644
--- a/lightrag/__init__.py
+++ b/lightrag/__init__.py
@@ -1,5 +1,5 @@
 from .lightrag import LightRAG as LightRAG, QueryParam as QueryParam
 
-__version__ = "0.0.8"
+__version__ = "0.0.9"
 __author__ = "Zirui Guo"
 __url__ = "https://github.com/HKUDS/LightRAG"
diff --git a/lightrag/kg/__init__.py b/lightrag/kg/__init__.py
index de9c1f9a..087eaac9 100644
--- a/lightrag/kg/__init__.py
+++ b/lightrag/kg/__init__.py
@@ -1,3 +1 @@
 # print ("init package vars here. ......")
......") - - diff --git a/lightrag/kg/neo4j_impl.py b/lightrag/kg/neo4j_impl.py index 4a3a4e66..e6b33a9b 100644 --- a/lightrag/kg/neo4j_impl.py +++ b/lightrag/kg/neo4j_impl.py @@ -146,11 +146,11 @@ class Neo4JStorage(BaseGraphStorage): entity_name_label_target = target_node_id.strip('"') """ Find all edges between nodes of two given labels - + Args: source_node_label (str): Label of the source nodes target_node_label (str): Label of the target nodes - + Returns: list: List of all relationships/edges found """ diff --git a/lightrag/lightrag.py b/lightrag/lightrag.py index 2ae59f3b..3abe9185 100644 --- a/lightrag/lightrag.py +++ b/lightrag/lightrag.py @@ -61,7 +61,6 @@ def always_get_an_event_loop() -> asyncio.AbstractEventLoop: return loop - @dataclass class LightRAG: working_dir: str = field( diff --git a/lightrag/operate.py b/lightrag/operate.py index 04725d6a..e86388dc 100644 --- a/lightrag/operate.py +++ b/lightrag/operate.py @@ -560,19 +560,19 @@ async def _find_most_related_text_unit_from_entities( if not this_edges: continue all_one_hop_nodes.update([e[1] for e in this_edges]) - + all_one_hop_nodes = list(all_one_hop_nodes) all_one_hop_nodes_data = await asyncio.gather( *[knowledge_graph_inst.get_node(e) for e in all_one_hop_nodes] ) - + # Add null check for node data all_one_hop_text_units_lookup = { k: set(split_string_by_multi_markers(v["source_id"], [GRAPH_FIELD_SEP])) for k, v in zip(all_one_hop_nodes, all_one_hop_nodes_data) if v is not None and "source_id" in v # Add source_id check } - + all_text_units_lookup = {} for index, (this_text_units, this_edges) in enumerate(zip(text_units, edges)): for c_id in this_text_units: @@ -586,7 +586,7 @@ async def _find_most_related_text_unit_from_entities( and c_id in all_one_hop_text_units_lookup[e[1]] ): relation_counts += 1 - + chunk_data = await text_chunks_db.get_by_id(c_id) if chunk_data is not None and "content" in chunk_data: # Add content check all_text_units_lookup[c_id] = { @@ -594,29 +594,28 @@ async def _find_most_related_text_unit_from_entities( "order": index, "relation_counts": relation_counts, } - + # Filter out None values and ensure data has content all_text_units = [ - {"id": k, **v} - for k, v in all_text_units_lookup.items() + {"id": k, **v} + for k, v in all_text_units_lookup.items() if v is not None and v.get("data") is not None and "content" in v["data"] ] - + if not all_text_units: logger.warning("No valid text units found") return [] - + all_text_units = sorted( - all_text_units, - key=lambda x: (x["order"], -x["relation_counts"]) + all_text_units, key=lambda x: (x["order"], -x["relation_counts"]) ) - + all_text_units = truncate_list_by_token_size( all_text_units, key=lambda x: x["data"]["content"], max_token_size=query_param.max_token_for_text_unit, ) - + all_text_units = [t["data"] for t in all_text_units] return all_text_units diff --git a/test.py b/test.py index 35c03afe..84cbe373 100644 --- a/test.py +++ b/test.py @@ -1,6 +1,6 @@ import os from lightrag import LightRAG, QueryParam -from lightrag.llm import gpt_4o_mini_complete, gpt_4o_complete +from lightrag.llm import gpt_4o_mini_complete ######### # Uncomment the below two lines if running in a jupyter notebook to handle the async nature of rag.insert() # import nest_asyncio diff --git a/test_neo4j.py b/test_neo4j.py index 7b13734e..822cd7bc 100644 --- a/test_neo4j.py +++ b/test_neo4j.py @@ -1,6 +1,6 @@ import os from lightrag import LightRAG, QueryParam -from lightrag.llm import gpt_4o_mini_complete, gpt_4o_complete +from lightrag.llm import 
 #########
 # Uncomment the below two lines if running in a jupyter notebook to handle the async nature of rag.insert()
 # import nest_asyncio