fix: unexpected keyword argument error
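All of the model-complete wrappers touched below (`gpt_4o_complete`, `gpt_4o_mini_complete`, `nvidia_openai_complete`, `azure_openai_complete`, `bedrock_complete`, `hf_model_complete`, `ollama_model_complete`) accept `**kwargs` and forward them to an underlying `*_if_cache` call, and from there to the backend SDK. In some call paths the `keyword_extraction` flag ended up inside `kwargs` rather than binding to the named parameter, so it was forwarded verbatim and the SDK raised `TypeError: ... got an unexpected keyword argument 'keyword_extraction'`. The fix is the same one-line change in every wrapper: pop the flag out of `kwargs` at the top of the function so it is consumed there and, where the backend supports it, translated into a native option (`response_format` for OpenAI, `format="json"` for Ollama) before the remaining kwargs are forwarded.

A minimal, self-contained sketch of the failure mode and the fix; the names here are illustrative, not taken from the patch:

import asyncio

async def sdk_call(prompt, **kwargs):
    # Stand-in for the backend SDK call: like the real client, it rejects
    # keyword arguments it does not recognize.
    unknown = set(kwargs) - {"response_format"}
    if unknown:
        raise TypeError(f"unexpected keyword argument(s): {sorted(unknown)}")
    return f"completed {prompt!r} with {kwargs}"

async def complete(prompt, **kwargs):
    # The pattern this commit applies: consume the control flag so it never
    # reaches the SDK, translating it into a native option instead.
    keyword_extraction = kwargs.pop("keyword_extraction", None)
    if keyword_extraction:
        kwargs["response_format"] = "json"
    return await sdk_call(prompt, **kwargs)

# Before the fix, the flag would have reached sdk_call and raised; now it is absorbed.
print(asyncio.run(complete("extract keywords", keyword_extraction=True)))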
@@ -478,6 +478,7 @@ class GPTKeywordExtractionFormat(BaseModel):
 async def gpt_4o_complete(
     prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
 ) -> str:
+    keyword_extraction = kwargs.pop("keyword_extraction", None)
     if keyword_extraction:
         kwargs["response_format"] = GPTKeywordExtractionFormat
     return await openai_complete_if_cache(
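For the OpenAI wrappers, the popped flag is translated into the SDK's structured-output mechanism: `response_format` is set to the `GPTKeywordExtractionFormat` Pydantic model named in the hunk context. A rough sketch of what that looks like on the client side, assuming the openai>=1.40 Python SDK and its `beta.chat.completions.parse` helper; the schema's field names are a guess, and `openai_complete_if_cache` may route the request differently:

from pydantic import BaseModel
from openai import AsyncOpenAI

class GPTKeywordExtractionFormat(BaseModel):
    # Field names are assumed for illustration; they are not shown in this diff.
    high_level_keywords: list[str]
    low_level_keywords: list[str]

async def structured_completion(prompt: str) -> GPTKeywordExtractionFormat:
    client = AsyncOpenAI()
    response = await client.beta.chat.completions.parse(
        model="gpt-4o",
        messages=[{"role": "user", "content": prompt}],
        response_format=GPTKeywordExtractionFormat,  # Pydantic model, not a dict
    )
    return response.choices[0].message.parsed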
@@ -492,6 +493,7 @@ async def gpt_4o_complete(
 async def gpt_4o_mini_complete(
     prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
 ) -> str:
+    keyword_extraction = kwargs.pop("keyword_extraction", None)
     if keyword_extraction:
         kwargs["response_format"] = GPTKeywordExtractionFormat
     return await openai_complete_if_cache(
@@ -506,6 +508,7 @@ async def gpt_4o_mini_complete(
 async def nvidia_openai_complete(
     prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
 ) -> str:
+    keyword_extraction = kwargs.pop("keyword_extraction", None)
     result = await openai_complete_if_cache(
         "nvidia/llama-3.1-nemotron-70b-instruct",  # context length 128k
         prompt,
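In this hunk and the three that follow (`azure_openai_complete`, `bedrock_complete`, `hf_model_complete`), the popped value is not referenced within the visible context lines; presumably it is consumed after the `result = await ...` call completes, below the lines shown here, for example to post-process `result` into JSON.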
@@ -522,6 +525,7 @@ async def nvidia_openai_complete(
 async def azure_openai_complete(
     prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
 ) -> str:
+    keyword_extraction = kwargs.pop("keyword_extraction", None)
     result = await azure_openai_complete_if_cache(
         "conversation-4o-mini",
         prompt,
@@ -537,6 +541,7 @@ async def azure_openai_complete(
 async def bedrock_complete(
     prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
 ) -> str:
+    keyword_extraction = kwargs.pop("keyword_extraction", None)
     result = await bedrock_complete_if_cache(
         "anthropic.claude-3-haiku-20240307-v1:0",
         prompt,
@@ -552,6 +557,7 @@ async def bedrock_complete(
 async def hf_model_complete(
     prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
 ) -> str:
+    keyword_extraction = kwargs.pop("keyword_extraction", None)
     model_name = kwargs["hashing_kv"].global_config["llm_model_name"]
     result = await hf_model_if_cache(
         model_name,
@@ -568,6 +574,7 @@ async def hf_model_complete(
 async def ollama_model_complete(
     prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
 ) -> str:
+    keyword_extraction = kwargs.pop("keyword_extraction", None)
     if keyword_extraction:
         kwargs["format"] = "json"
     model_name = kwargs["hashing_kv"].global_config["llm_model_name"]
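Ollama has no `response_format` option; JSON output is requested through its `format` parameter instead, which is why this wrapper sets `kwargs["format"] = "json"` where the OpenAI wrappers set a response schema. A compact sketch of that per-backend translation (a hypothetical helper, not part of this patch):

def apply_keyword_extraction(backend: str, kwargs: dict) -> dict:
    # Consume the caller-facing flag and rewrite it as whatever option the
    # target backend actually understands; other backends simply drop it.
    if kwargs.pop("keyword_extraction", None):
        if backend == "openai":
            kwargs["response_format"] = "json"  # or a Pydantic schema, as above
        elif backend == "ollama":
            kwargs["format"] = "json"  # Ollama's JSON-mode switch
    return kwargs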