Merge pull request #722 from danielaskdd/fix-mutable-default-param
Fix mutable default param
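For context, the bug this PR removes is the classic Python mutable-default-argument pitfall: a default such as history_messages=[] is evaluated once, when the function is defined, so every call that omits the argument shares and mutates the same list object. The diff below replaces each such default with a None sentinel and allocates a fresh list inside the function body. The following sketch is only an illustration of that pattern, with hypothetical function names that are not part of the repository:

def complete_buggy(prompt, history_messages=[]):
    # The [] default is created once at definition time and is reused
    # by every call that omits history_messages.
    history_messages.append({"role": "user", "content": prompt})
    return history_messages

def complete_fixed(prompt, history_messages=None):
    # None sentinel, as applied throughout this PR: each call that omits
    # the argument gets its own fresh list.
    if history_messages is None:
        history_messages = []
    history_messages.append({"role": "user", "content": prompt})
    return history_messages

print(len(complete_buggy("a")))  # 1
print(len(complete_buggy("b")))  # 2 -- history leaks across calls
print(len(complete_fixed("a")))  # 1
print(len(complete_fixed("b")))  # 1 -- each call starts with a clean history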
@@ -753,13 +753,15 @@ def create_app(args):
     async def openai_alike_model_complete(
         prompt,
         system_prompt=None,
-        history_messages=[],
+        history_messages=None,
         keyword_extraction=False,
         **kwargs,
     ) -> str:
         keyword_extraction = kwargs.pop("keyword_extraction", None)
         if keyword_extraction:
             kwargs["response_format"] = GPTKeywordExtractionFormat
+        if history_messages is None:
+            history_messages = []
         return await openai_complete_if_cache(
             args.llm_model,
             prompt,
@@ -773,13 +775,15 @@ def create_app(args):
     async def azure_openai_model_complete(
         prompt,
         system_prompt=None,
-        history_messages=[],
+        history_messages=None,
         keyword_extraction=False,
         **kwargs,
     ) -> str:
         keyword_extraction = kwargs.pop("keyword_extraction", None)
         if keyword_extraction:
             kwargs["response_format"] = GPTKeywordExtractionFormat
+        if history_messages is None:
+            history_messages = []
         return await azure_openai_complete_if_cache(
             args.llm_model,
             prompt,
@@ -89,11 +89,13 @@ async def openai_complete_if_cache(
     model,
     prompt,
     system_prompt=None,
-    history_messages=[],
+    history_messages=None,
     base_url=None,
     api_key=None,
     **kwargs,
 ) -> str:
+    if history_messages is None:
+        history_messages = []
     if api_key:
         os.environ["OPENAI_API_KEY"] = api_key
 
@@ -146,8 +148,14 @@ async def openai_complete_if_cache(
 
 
 async def openai_complete(
-    prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
+    prompt,
+    system_prompt=None,
+    history_messages=None,
+    keyword_extraction=False,
+    **kwargs,
 ) -> Union[str, AsyncIterator[str]]:
+    if history_messages is None:
+        history_messages = []
     keyword_extraction = kwargs.pop("keyword_extraction", None)
     if keyword_extraction:
         kwargs["response_format"] = "json"
@@ -162,8 +170,14 @@ async def openai_complete(
 
 
 async def gpt_4o_complete(
-    prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
+    prompt,
+    system_prompt=None,
+    history_messages=None,
+    keyword_extraction=False,
+    **kwargs,
 ) -> str:
+    if history_messages is None:
+        history_messages = []
     keyword_extraction = kwargs.pop("keyword_extraction", None)
     if keyword_extraction:
         kwargs["response_format"] = GPTKeywordExtractionFormat
@@ -177,8 +191,14 @@ async def gpt_4o_complete(
 
 
 async def gpt_4o_mini_complete(
-    prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
+    prompt,
+    system_prompt=None,
+    history_messages=None,
+    keyword_extraction=False,
+    **kwargs,
 ) -> str:
+    if history_messages is None:
+        history_messages = []
     keyword_extraction = kwargs.pop("keyword_extraction", None)
     if keyword_extraction:
         kwargs["response_format"] = GPTKeywordExtractionFormat
@@ -192,8 +212,14 @@ async def gpt_4o_mini_complete(
 
 
 async def nvidia_openai_complete(
-    prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
+    prompt,
+    system_prompt=None,
+    history_messages=None,
+    keyword_extraction=False,
+    **kwargs,
 ) -> str:
+    if history_messages is None:
+        history_messages = []
     keyword_extraction = kwargs.pop("keyword_extraction", None)
     result = await openai_complete_if_cache(
         "nvidia/llama-3.1-nemotron-70b-instruct", # context length 128k