Add /context query prefix to Ollama model simulation for LightRAG Server
@@ -23,6 +23,7 @@ class SearchMode(str, Enum):
     hybrid = "hybrid"
     mix = "mix"
     bypass = "bypass"
+    context = "context"
 
 
 class OllamaMessage(BaseModel):
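
For reference, a sketch of how SearchMode reads after this hunk; the members above hybrid are not visible in the diff and are assumed from LightRAG's other query modes:

from enum import Enum

class SearchMode(str, Enum):
    naive = "naive"        # assumed pre-existing members, not shown in this hunk
    local = "local"
    global_ = "global"     # attribute name assumed ("global" is a Python keyword)
    hybrid = "hybrid"
    mix = "mix"
    bypass = "bypass"
    context = "context"    # new: ask the server for retrieved context only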

@@ -111,6 +112,7 @@ def parse_query_mode(query: str) -> tuple[str, SearchMode]:
         "/hybrid ": SearchMode.hybrid,
         "/mix ": SearchMode.mix,
         "/bypass ": SearchMode.bypass,
+        "/context": SearchMode.context,
     }
 
     for prefix, mode in mode_map.items():
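
The loop in the last context line consumes this map. A minimal sketch of the matching logic, assuming the prefix is stripped from the query before dispatch; the real body lives in lightrag/api/ollama_api.py and may differ:

def parse_query_mode(query: str) -> tuple[str, SearchMode]:
    mode_map = {
        "/local ": SearchMode.local,      # assumed entries mirroring the other modes
        "/global ": SearchMode.global_,
        "/naive ": SearchMode.naive,
        "/hybrid ": SearchMode.hybrid,
        "/mix ": SearchMode.mix,
        "/bypass ": SearchMode.bypass,
        "/context": SearchMode.context,   # no trailing space, per this commit
    }
    for prefix, mode in mode_map.items():
        if query.startswith(prefix):
            # strip the routing token so it never reaches the LLM
            return query[len(prefix):].lstrip(), mode
    return query, SearchMode.hybrid       # assumed default when no prefix matches

Note that "/context" is registered without the trailing space the other prefixes carry, so it also matches a bare "/context" query; the lstrip() then removes any separating whitespace.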

@@ -354,10 +356,16 @@ class OllamaAPI:
             start_time = time.time_ns()
             prompt_tokens = estimate_tokens(cleaned_query)
 
+            if mode == SearchMode.context:
+                mode = SearchMode.hybrid
+                only_need_context = True
+            else:
+                only_need_context = False
+
             param_dict = {
                 "mode": mode,
                 "stream": request.stream,
-                "only_need_context": False,
+                "only_need_context": only_need_context,
                 "conversation_history": conversation_history,
                 "top_k": self.top_k,
             }
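
Taken together, a /context query is rewritten to a hybrid retrieval with only_need_context=True, so the server returns the retrieved context instead of a generated answer. A hypothetical client call, assuming a LightRAG server on its default port 9621 exposing the Ollama-style /api/chat route:

import requests

resp = requests.post(
    "http://localhost:9621/api/chat",   # host, port, and path are assumptions
    json={
        "model": "lightrag:latest",     # simulated model name, assumed default
        "stream": False,
        "messages": [
            # "/context" selects SearchMode.context; the handler above rewrites
            # it to hybrid mode and sets only_need_context=True in param_dict.
            {"role": "user", "content": "/context What entities are related to X?"}
        ],
    },
)
print(resp.json()["message"]["content"])  # the retrieved context text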