From c9baa4ebeb75c4b8920f302a985476fee92d9127 Mon Sep 17 00:00:00 2001 From: Andrii Lazarchuk Date: Tue, 22 Oct 2024 14:35:42 +0000 Subject: [PATCH] Finetune example to be able to run ollama example without the need to tweak context size in Modelfile --- examples/lightrag_ollama_demo.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/examples/lightrag_ollama_demo.py b/examples/lightrag_ollama_demo.py index 93196066..6070131f 100644 --- a/examples/lightrag_ollama_demo.py +++ b/examples/lightrag_ollama_demo.py @@ -15,9 +15,10 @@ if not os.path.exists(WORKING_DIR): rag = LightRAG( working_dir=WORKING_DIR, llm_model_func=ollama_model_complete, - llm_model_name="mistral:7b", - llm_model_max_async=2, - llm_model_kwargs={"host": "http://localhost:11434"}, + llm_model_name="gemma2:2b", + llm_model_max_async=4, + llm_model_max_token_size=32768, + llm_model_kwargs={"host": "http://localhost:11434", "options": {"num_ctx": 32768}}, embedding_func=EmbeddingFunc( embedding_dim=768, max_token_size=8192, @@ -27,7 +28,6 @@ rag = LightRAG( ), ) - with open("./book.txt") as f: rag.insert(f.read())