commit d0c1844264
parent 958cb9627e
Author: LarFii
Date: 2024-11-11 10:45:22 +08:00

10 changed files with 35 additions and 33 deletions

View File

@@ -923,4 +923,3 @@ primaryClass={cs.IR}
 }
 ```
 **Thank you for your interest in our work!**

View File

@@ -33,7 +33,7 @@ if not os.path.exists(WORKING_DIR):
 async def llm_model_func(
     prompt, system_prompt=None, history_messages=[], **kwargs
 ) -> str:
     return await openai_complete_if_cache(
         LLM_MODEL,
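
Note: the hunk is truncated after LLM_MODEL. For orientation, here is a hedged sketch of what such a wrapper typically looks like in the LightRAG demos; the keyword forwarding and the LLM_MODEL default below are assumptions, not lines shown in this diff.

# Hedged sketch (not the file's verbatim contents) of the wrapper the hunk
# above truncates: it forwards prompt, system prompt and history to
# openai_complete_if_cache. The LLM_MODEL default below is an assumption.
import os
from lightrag.llm import openai_complete_if_cache

LLM_MODEL = os.environ.get("LLM_MODEL", "gpt-4o-mini")  # assumed default model name


async def llm_model_func(
    prompt, system_prompt=None, history_messages=[], **kwargs
) -> str:
    return await openai_complete_if_cache(
        LLM_MODEL,
        prompt,
        system_prompt=system_prompt,
        history_messages=history_messages,
        **kwargs,
    )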
@@ -66,9 +66,11 @@ async def get_embedding_dim():
 rag = LightRAG(
     working_dir=WORKING_DIR,
     llm_model_func=llm_model_func,
-    embedding_func=EmbeddingFunc(embedding_dim=asyncio.run(get_embedding_dim()),
-                                 max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
-                                 func=embedding_func),
+    embedding_func=EmbeddingFunc(
+        embedding_dim=asyncio.run(get_embedding_dim()),
+        max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
+        func=embedding_func,
+    ),
 )
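
Note: the change above is formatting only; the behaviour is unchanged. The embedding dimension is not hard-coded but measured once at startup by embedding a probe text. A minimal sketch of that pattern with a stand-in backend (the real demo presumably calls an embedding API; the dummy vectors and constant value below are illustrative):

# Minimal sketch of the dimension-probing pattern used above. The stand-in
# embedding backend returns zero vectors; a real deployment would call an
# embedding API here.
import asyncio
import numpy as np
from lightrag.utils import EmbeddingFunc

EMBEDDING_MAX_TOKEN_SIZE = 8192  # assumed value; the demo defines its own constant


async def embedding_func(texts: list[str]) -> np.ndarray:
    return np.zeros((len(texts), 3072))  # placeholder for a real embedding call


async def get_embedding_dim() -> int:
    # Embed one throwaway sentence and read the vector width, so the
    # EmbeddingFunc passed to LightRAG always matches the model.
    vecs = await embedding_func(["dimension probe"])
    return vecs.shape[1]


embedding = EmbeddingFunc(
    embedding_dim=asyncio.run(get_embedding_dim()),
    max_token_size=EMBEDDING_MAX_TOKEN_SIZE,
    func=embedding_func,
)
print(embedding.embedding_dim)  # 3072 with the placeholder backend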
@@ -99,8 +101,13 @@ async def query_endpoint(request: QueryRequest):
     try:
         loop = asyncio.get_event_loop()
         result = await loop.run_in_executor(
-            None, lambda: rag.query(request.query,
-            param=QueryParam(mode=request.mode, only_need_context=request.only_need_context))
+            None,
+            lambda: rag.query(
+                request.query,
+                param=QueryParam(
+                    mode=request.mode, only_need_context=request.only_need_context
+                ),
+            ),
         )
         return Response(status="success", data=result)
     except Exception as e:
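
Note: again a formatting-only change, but the pattern is worth spelling out. rag.query is called here as a blocking function, so the endpoint pushes it onto the default thread-pool executor, wrapped in a lambda because run_in_executor only forwards positional arguments. A self-contained sketch with a stand-in blocking call:

# Sketch of the run_in_executor pattern reformatted above, with a stand-in
# blocking function instead of rag.query. The lambda is needed because
# run_in_executor only passes positional arguments to the callable.
import asyncio
import time


def blocking_query(question: str, mode: str = "hybrid") -> str:
    time.sleep(0.5)  # stands in for a synchronous, potentially slow rag.query(...)
    return f"[{mode}] answer to {question!r}"


async def handle(question: str) -> str:
    loop = asyncio.get_event_loop()
    # Offload the blocking call so the event loop (e.g. a FastAPI worker)
    # keeps serving other requests while the query runs in a thread.
    return await loop.run_in_executor(
        None, lambda: blocking_query(question, mode="hybrid")
    )


print(asyncio.run(handle("What changed in this commit?")))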

View File

@@ -1,5 +1,5 @@
 from .lightrag import LightRAG as LightRAG, QueryParam as QueryParam
-__version__ = "0.0.8"
+__version__ = "0.0.9"
 __author__ = "Zirui Guo"
 __url__ = "https://github.com/HKUDS/LightRAG"

View File

@@ -1,3 +1 @@
# print ("init package vars here. ......") # print ("init package vars here. ......")

View File

@@ -61,7 +61,6 @@ def always_get_an_event_loop() -> asyncio.AbstractEventLoop:
         return loop
 @dataclass
 class LightRAG:
     working_dir: str = field(

View File

@@ -607,8 +607,7 @@ async def _find_most_related_text_unit_from_entities(
         return []
     all_text_units = sorted(
-        all_text_units,
-        key=lambda x: (x["order"], -x["relation_counts"])
+        all_text_units, key=lambda x: (x["order"], -x["relation_counts"])
     )
     all_text_units = truncate_list_by_token_size(
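
Note: the collapsed sorted(...) call keeps the same key: ascending chunk order first, and more relations first within the same order (the minus sign flips that sub-key to descending). A tiny illustration on made-up records:

# Illustration of the sort key from the hunk above, on made-up records.
units = [
    {"id": "c", "order": 1, "relation_counts": 2},
    {"id": "a", "order": 0, "relation_counts": 1},
    {"id": "b", "order": 0, "relation_counts": 5},
]
units = sorted(units, key=lambda x: (x["order"], -x["relation_counts"]))
print([u["id"] for u in units])  # ['b', 'a', 'c']: lower order first, more relations win a tie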

View File

@@ -1,6 +1,6 @@
 import os
 from lightrag import LightRAG, QueryParam
-from lightrag.llm import gpt_4o_mini_complete, gpt_4o_complete
+from lightrag.llm import gpt_4o_mini_complete
 #########
 # Uncomment the below two lines if running in a jupyter notebook to handle the async nature of rag.insert()
 # import nest_asyncio

View File

@@ -1,6 +1,6 @@
 import os
 from lightrag import LightRAG, QueryParam
-from lightrag.llm import gpt_4o_mini_complete, gpt_4o_complete
+from lightrag.llm import gpt_4o_mini_complete
 #########
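
Note: both example scripts drop gpt_4o_complete from the import because only gpt_4o_mini_complete is actually used. A hedged sketch of how these demos typically wire it up; the working directory, input file and query string below are illustrative, not taken from this diff:

# Hedged sketch of the typical demo flow around the remaining import;
# paths and the query string are illustrative.
import os
from lightrag import LightRAG, QueryParam
from lightrag.llm import gpt_4o_mini_complete

WORKING_DIR = "./demo_cache"  # illustrative working directory
if not os.path.exists(WORKING_DIR):
    os.mkdir(WORKING_DIR)

rag = LightRAG(
    working_dir=WORKING_DIR,
    llm_model_func=gpt_4o_mini_complete,  # gpt_4o_complete is no longer imported
)

with open("./book.txt", encoding="utf-8") as f:  # illustrative input file
    rag.insert(f.read())

print(rag.query("What are the top themes?", param=QueryParam(mode="hybrid")))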