pre-commit

This commit is contained in:
tackhwa
2024-10-26 16:13:18 +08:00
parent 8deb30aa20
commit 2e703296d5

View File

@@ -10,6 +10,7 @@ WORKING_DIR = "./dickens"
if not os.path.exists(WORKING_DIR):
    os.mkdir(WORKING_DIR)

async def lmdeploy_model_complete(
    prompt=None, system_prompt=None, history_messages=[], **kwargs
) -> str:
@@ -23,7 +24,7 @@ async def lmdeploy_model_complete(
        ## or model_name is a pytorch model on huggingface.co,
        ## you can refer to https://github.com/InternLM/lmdeploy/blob/main/lmdeploy/model.py
        ## for a list of chat_template available in lmdeploy.
-       chat_template = "llama3",
+       chat_template="llama3",
        # model_format ='awq', # if you are using awq quantization model.
        # quant_policy=8, # if you want to use online kv cache, 4=kv int4, 8=kv int8.
        **kwargs,