pre-commit
@@ -10,10 +10,11 @@ WORKING_DIR = "./dickens"
 if not os.path.exists(WORKING_DIR):
     os.mkdir(WORKING_DIR)
 
+
 async def lmdeploy_model_complete(
     prompt=None, system_prompt=None, history_messages=[], **kwargs
 ) -> str:
     model_name = kwargs["hashing_kv"].global_config["llm_model_name"]
     return await lmdeploy_model_if_cache(
         model_name,
         prompt,
@@ -23,7 +24,7 @@ async def lmdeploy_model_complete(
         ## or model_name is a pytorch model on huggingface.co,
         ## you can refer to https://github.com/InternLM/lmdeploy/blob/main/lmdeploy/model.py
         ## for a list of chat_template available in lmdeploy.
-        chat_template = "llama3",
+        chat_template="llama3",
         # model_format ='awq', # if you are using awq quantization model.
         # quant_policy=8, # if you want to use online kv cache, 4=kv int4, 8=kv int8.
         **kwargs,
@@ -33,7 +34,7 @@ async def lmdeploy_model_complete
 rag = LightRAG(
     working_dir=WORKING_DIR,
     llm_model_func=lmdeploy_model_complete,
-    llm_model_name="meta-llama/Llama-3.1-8B-Instruct", # please use definite path for local model
+    llm_model_name="meta-llama/Llama-3.1-8B-Instruct",  # please use definite path for local model
     embedding_func=EmbeddingFunc(
         embedding_dim=384,
         max_token_size=5000,
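The changes in these hunks are formatting fixes applied by the project's pre-commit hooks: PEP 8 forbids spaces around "=" when it binds a keyword argument (E251), wants at least two spaces before an inline comment (E261), and two blank lines before a top-level definition (E302). A minimal sketch of the keyword-argument rule; the function here is illustrative, not from this repo:

def complete(prompt, chat_template="llama3"):
    # Illustrative helper: defaults use "=" with no surrounding spaces.
    return f"[{chat_template}] {prompt}"

# E251 applies at the call site too: chat_template="llama3", not chat_template = "llama3".
print(complete("hello", chat_template="llama3"))  # E261: two spaces before an inline comment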
|
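For context, these hunks come from a LightRAG example script that wires lmdeploy in as the completion backend. After the LightRAG(...) constructor shown above, such examples typically index a document and then run a query. A rough sketch, assuming the insert/query API from the LightRAG README; the file path and query text are placeholders:

from lightrag import QueryParam

# Assumes `rag` is the LightRAG instance built in the diff above.
with open("./book.txt", encoding="utf-8") as f:
    rag.insert(f.read())  # chunk, embed, and index the document under WORKING_DIR

# "hybrid" mode combines local (entity-level) and global (relation-level) retrieval.
print(rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid")))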