update do_preprocess

tackhwa
2024-10-26 16:24:35 +08:00
parent 2e703296d5
commit 2cf3a85a0f


@@ -286,7 +286,9 @@ async def hf_model_if_cache(
     output = hf_model.generate(
         **input_ids, max_new_tokens=512, num_return_sequences=1, early_stopping=True
     )
-    response_text = hf_tokenizer.decode(output[0][len(inputs["input_ids"][0]):], skip_special_tokens=True)
+    response_text = hf_tokenizer.decode(
+        output[0][len(inputs["input_ids"][0]) :], skip_special_tokens=True
+    )
     if hashing_kv is not None:
         await hashing_kv.upsert({args_hash: {"return": response_text, "model": model}})
     return response_text
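
For context on the reformatted decode call: `hf_model.generate` returns the prompt token ids followed by the newly generated ones, so the slice `output[0][len(inputs["input_ids"][0]) :]` strips the echoed prompt before decoding. A minimal standalone sketch of the same pattern ("gpt2" is an illustrative checkpoint, not the model this repo loads):

# Minimal sketch, assuming a generic decoder-only HF checkpoint.
# generate() returns prompt + completion token ids, so decoding
# output[0] unsliced would echo the prompt back into the response.
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # illustrative only
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer("Hello, world", return_tensors="pt")
output = model.generate(**inputs, max_new_tokens=8)

prompt_len = len(inputs["input_ids"][0])
response_text = tokenizer.decode(output[0][prompt_len:], skip_special_tokens=True)
print(response_text)  # the completion only, without the original prompt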
@@ -323,19 +325,38 @@ async def ollama_model_if_cache(
 
 
 @lru_cache(maxsize=1)
-def initialize_lmdeploy_pipeline(model, tp=1, chat_template=None, log_level='WARNING', model_format='hf', quant_policy=0):
+def initialize_lmdeploy_pipeline(
+    model,
+    tp=1,
+    chat_template=None,
+    log_level="WARNING",
+    model_format="hf",
+    quant_policy=0,
+):
     from lmdeploy import pipeline, ChatTemplateConfig, TurbomindEngineConfig
+
     lmdeploy_pipe = pipeline(
         model_path=model,
-        backend_config=TurbomindEngineConfig(tp=tp, model_format=model_format, quant_policy=quant_policy),
-        chat_template_config=ChatTemplateConfig(model_name=chat_template) if chat_template else None,
-        log_level='WARNING')
+        backend_config=TurbomindEngineConfig(
+            tp=tp, model_format=model_format, quant_policy=quant_policy
+        ),
+        chat_template_config=ChatTemplateConfig(model_name=chat_template)
+        if chat_template
+        else None,
+        log_level="WARNING",
+    )
     return lmdeploy_pipe
 
 
 async def lmdeploy_model_if_cache(
-    model, prompt, system_prompt=None, history_messages=[],
-    chat_template=None, model_format='hf',quant_policy=0, **kwargs
+    model,
+    prompt,
+    system_prompt=None,
+    history_messages=[],
+    chat_template=None,
+    model_format="hf",
+    quant_policy=0,
+    **kwargs,
 ) -> str:
     """
     Args:
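
The reformatted `initialize_lmdeploy_pipeline` keeps its `@lru_cache(maxsize=1)` decorator, so the expensive TurboMind engine is built once and reused across calls with identical arguments. A hedged usage sketch (it assumes the function is importable from this module, and the model path is a placeholder, not the repo's default):

# Sketch only: the model path is illustrative and a GPU-backed
# lmdeploy install is assumed.
pipe_a = initialize_lmdeploy_pipeline("internlm/internlm2-chat-7b", tp=1)
pipe_b = initialize_lmdeploy_pipeline("internlm/internlm2-chat-7b", tp=1)
assert pipe_a is pipe_b  # lru_cache hit: the same pipeline object is returned

# Caveat: maxsize=1 means a call with different arguments evicts the cached
# pipeline, so alternating between two models rebuilds the engine each time.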
@@ -361,29 +382,30 @@ async def lmdeploy_model_if_cache(
         do_preprocess (bool): whether pre-process the messages. Default to
             True, which means chat_template will be applied.
         skip_special_tokens (bool): Whether or not to remove special tokens
-            in the decoding. Default to be False.
+            in the decoding. Default to be True.
         do_sample (bool): Whether or not to use sampling, use greedy decoding otherwise.
             Default to be False, which means greedy decoding will be applied.
     """
     try:
         import lmdeploy
         from lmdeploy import version_info, GenerationConfig
-    except:
+    except Exception:
         raise ImportError("Please install lmdeploy before intialize lmdeploy backend.")
 
     kwargs.pop("response_format", None)
     max_new_tokens = kwargs.pop("max_tokens", 512)
-    tp = kwargs.pop('tp', 1)
-    skip_special_tokens = kwargs.pop('skip_special_tokens', False)
-    do_preprocess = kwargs.pop('do_preprocess', True)
-    do_sample = kwargs.pop('do_sample', False)
+    tp = kwargs.pop("tp", 1)
+    skip_special_tokens = kwargs.pop("skip_special_tokens", True)
+    do_preprocess = kwargs.pop("do_preprocess", True)
+    do_sample = kwargs.pop("do_sample", False)
     gen_params = kwargs
 
     version = version_info
     if do_sample is not None and version < (0, 6, 0):
         raise RuntimeError(
-            '`do_sample` parameter is not supported by lmdeploy until '
-            f'v0.6.0, but currently using lmdeloy {lmdeploy.__version__}')
+            "`do_sample` parameter is not supported by lmdeploy until "
+            f"v0.6.0, but currently using lmdeloy {lmdeploy.__version__}"
+        )
     else:
         do_sample = True
         gen_params.update(do_sample=do_sample)
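
The version gate above relies on lmdeploy's `version_info` being a tuple of ints, which makes the minimum-version check a plain tuple comparison. A small sketch of the idiom, with an illustrative version value:

# Illustrative value only; the real tuple comes from lmdeploy.version_info.
version = (0, 5, 3)
if version < (0, 6, 0):
    # tuples compare element-wise, so (0, 5, 3) < (0, 6, 0) is True
    print("do_sample not supported; greedy decoding will be used")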
@@ -394,7 +416,8 @@ async def lmdeploy_model_if_cache(
         chat_template=chat_template,
         model_format=model_format,
         quant_policy=quant_policy,
-        log_level='WARNING')
+        log_level="WARNING",
+    )
 
     messages = []
     if system_prompt:
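
The lines that follow are truncated in this view; based on the visible `messages = []` and `if system_prompt:` context, the conversation is presumably assembled as the usual OpenAI-style role/content list. A hedged sketch of that shape, not the actual elided code:

# Assumed message shape; the real construction is truncated in this diff.
messages = []
if system_prompt:
    messages.append({"role": "system", "content": system_prompt})
messages.extend(history_messages)
messages.append({"role": "user", "content": prompt})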
@@ -410,11 +433,19 @@ async def lmdeploy_model_if_cache(
return if_cache_return["return"]
gen_config = GenerationConfig(
skip_special_tokens=skip_special_tokens, max_new_tokens=max_new_tokens, **gen_params)
skip_special_tokens=skip_special_tokens,
max_new_tokens=max_new_tokens,
**gen_params,
)
response = ""
async for res in lmdeploy_pipe.generate(messages, gen_config=gen_config,
do_preprocess=do_preprocess, stream_response=False, session_id=1):
async for res in lmdeploy_pipe.generate(
messages,
gen_config=gen_config,
do_preprocess=do_preprocess,
stream_response=False,
session_id=1,
):
response += res.response
if hashing_kv is not None:
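
Taken together, a hypothetical call after this commit could look like the following; the model path and prompts are placeholders, and `skip_special_tokens`/`do_preprocess` are spelled out at their new defaults for clarity:

# Hypothetical usage sketch; model path and prompts are illustrative only.
import asyncio

async def main():
    answer = await lmdeploy_model_if_cache(
        "internlm/internlm2-chat-7b",
        "Summarize retrieval-augmented generation in one sentence.",
        system_prompt="You are a concise assistant.",
        skip_special_tokens=True,  # the new default after this commit
        do_preprocess=True,        # apply the chat template
        max_tokens=256,            # popped into max_new_tokens inside
    )
    print(answer)

asyncio.run(main())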