Merge pull request #720 from danielaskdd/add-keyword-extraction-param-for-llm

fix: add keyword_extraction param support to the LLM functions of the API server
Authored by zrguo on 2025-02-06 23:31:53 +08:00; committed by GitHub.


@@ -17,6 +17,7 @@ import argparse
 from typing import List, Any, Optional, Union, Dict
 from pydantic import BaseModel
 from lightrag import LightRAG, QueryParam
+from lightrag.types import GPTKeywordExtractionFormat
 from lightrag.api import __api_version__
 from lightrag.utils import EmbeddingFunc
 from enum import Enum
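
For reference, GPTKeywordExtractionFormat is the pydantic model that the structured-output response_format is asked to conform to. A minimal sketch of its shape, assuming the definition in lightrag/types.py at this revision (field names are an assumption, not verbatim from this diff):

# Assumed shape of lightrag/types.py at this revision
from typing import List
from pydantic import BaseModel

class GPTKeywordExtractionFormat(BaseModel):
    # High-level keywords capture overarching concepts or themes;
    # low-level keywords capture specific entities and details.
    high_level_keywords: List[str]
    low_level_keywords: List[str]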
@@ -756,6 +757,9 @@ def create_app(args):
         keyword_extraction=False,
         **kwargs,
     ) -> str:
+        keyword_extraction = kwargs.pop("keyword_extraction", None)
+        if keyword_extraction:
+            kwargs["response_format"] = GPTKeywordExtractionFormat
         return await openai_complete_if_cache(
             args.llm_model,
             prompt,
@@ -773,6 +777,9 @@ def create_app(args):
         keyword_extraction=False,
         **kwargs,
     ) -> str:
+        keyword_extraction = kwargs.pop("keyword_extraction", None)
+        if keyword_extraction:
+            kwargs["response_format"] = GPTKeywordExtractionFormat
         return await azure_openai_complete_if_cache(
             args.llm_model,
             prompt,
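
Both hunks apply the same pattern: pop keyword_extraction from kwargs so it is not forwarded to the underlying OpenAI client, and when it is truthy, request structured output via response_format. A caller-side sketch of how this is exercised (the invocation below is an assumption for illustration, not code from this PR):

# Assumed caller-side usage: LightRAG's query-time keyword extraction
# invokes the configured llm_model_func with keyword_extraction=True,
# expecting JSON that matches GPTKeywordExtractionFormat.
import json

async def demo(llm_model_func):
    raw = await llm_model_func(
        "Extract keywords from: how do transformers use attention?",
        keyword_extraction=True,  # translated into response_format by the wrappers above
    )
    data = json.loads(raw)
    print(data["high_level_keywords"], data["low_level_keywords"])

Popping the flag before forwarding **kwargs matters: the OpenAI client would reject an unexpected keyword_extraction argument, so the wrapper consumes it and passes only response_format downstream.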