chore: added pre-commit-hooks and ruff formatting for commit-hooks

Sanketh Kumar
2024-10-19 09:43:17 +05:30
parent b854ab4737
commit 744dad339d
26 changed files with 635 additions and 393 deletions
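Note: the new .pre-commit-config.yaml itself is not part of the excerpt below, which only contains the Python files reformatted by the hooks. A typical configuration that produces exactly this kind of diff (double quotes, trailing commas, reflowed calls, unused-import removal, trailing-whitespace and end-of-file fixes) is sketched here; the hook repositories are real, but the pinned revisions are illustrative and not taken from the commit:

# .pre-commit-config.yaml -- illustrative sketch, not the file added by this commit
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.6.0  # revision pinned for illustration only
    hooks:
      - id: trailing-whitespace   # strips trailing spaces
      - id: end-of-file-fixer     # ensures files end with a single newline
  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.6.9  # revision pinned for illustration only
    hooks:
      - id: ruff                  # linter; --fix removes unused imports (F401) and f-prefixes on strings without placeholders (F541)
        args: [--fix]
      - id: ruff-format           # Black-compatible formatter: double quotes, trailing commas, reflowed calls

Running pre-commit run --all-files once after adding such a config yields a large, mostly mechanical diff like the one that follows.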

View File

@@ -1,4 +1,3 @@
-import os
 import re
 import json
 import jsonlines
@@ -9,28 +8,28 @@ from openai import OpenAI
 def batch_eval(query_file, result1_file, result2_file, output_file_path):
     client = OpenAI()
-    with open(query_file, 'r') as f:
+    with open(query_file, "r") as f:
         data = f.read()
-    queries = re.findall(r'- Question \d+: (.+)', data)
+    queries = re.findall(r"- Question \d+: (.+)", data)
-    with open(result1_file, 'r') as f:
+    with open(result1_file, "r") as f:
         answers1 = json.load(f)
-    answers1 = [i['result'] for i in answers1]
+    answers1 = [i["result"] for i in answers1]
-    with open(result2_file, 'r') as f:
+    with open(result2_file, "r") as f:
         answers2 = json.load(f)
-    answers2 = [i['result'] for i in answers2]
+    answers2 = [i["result"] for i in answers2]
     requests = []
     for i, (query, answer1, answer2) in enumerate(zip(queries, answers1, answers2)):
-        sys_prompt = f"""
+        sys_prompt = """
         ---Role---
         You are an expert tasked with evaluating two answers to the same question based on three criteria: **Comprehensiveness**, **Diversity**, and **Empowerment**.
         """
         prompt = f"""
-        You will evaluate two answers to the same question based on three criteria: **Comprehensiveness**, **Diversity**, and **Empowerment**.
+        You will evaluate two answers to the same question based on three criteria: **Comprehensiveness**, **Diversity**, and **Empowerment**.
         - **Comprehensiveness**: How much detail does the answer provide to cover all aspects and details of the question?
         - **Diversity**: How varied and rich is the answer in providing different perspectives and insights on the question?
@@ -69,7 +68,6 @@ def batch_eval(query_file, result1_file, result2_file, output_file_path):
         }}
         """
         request_data = {
             "custom_id": f"request-{i+1}",
             "method": "POST",
@@ -78,22 +76,21 @@ def batch_eval(query_file, result1_file, result2_file, output_file_path):
                 "model": "gpt-4o-mini",
                 "messages": [
                     {"role": "system", "content": sys_prompt},
-                    {"role": "user", "content": prompt}
+                    {"role": "user", "content": prompt},
                 ],
-            }
+            },
         }
         requests.append(request_data)
-    with jsonlines.open(output_file_path, mode='w') as writer:
+    with jsonlines.open(output_file_path, mode="w") as writer:
         for request in requests:
             writer.write(request)
     print(f"Batch API requests written to {output_file_path}")
     batch_input_file = client.files.create(
-        file=open(output_file_path, "rb"),
-        purpose="batch"
+        file=open(output_file_path, "rb"), purpose="batch"
     )
     batch_input_file_id = batch_input_file.id
@@ -101,12 +98,11 @@ def batch_eval(query_file, result1_file, result2_file, output_file_path):
         input_file_id=batch_input_file_id,
         endpoint="/v1/chat/completions",
         completion_window="24h",
-        metadata={
-            "description": "nightly eval job"
-        }
+        metadata={"description": "nightly eval job"},
     )
-    print(f'Batch {batch.id} has been created.')
+    print(f"Batch {batch.id} has been created.")
 if __name__ == "__main__":
-    batch_eval()
+    batch_eval()

View File

@@ -1,9 +1,8 @@
-import os
 from openai import OpenAI
 # os.environ["OPENAI_API_KEY"] = ""
 def openai_complete_if_cache(
     model="gpt-4o-mini", prompt=None, system_prompt=None, history_messages=[], **kwargs
 ) -> str:
@@ -47,10 +46,10 @@ if __name__ == "__main__":
        ...
    """
-    result = openai_complete_if_cache(model='gpt-4o-mini', prompt=prompt)
+    result = openai_complete_if_cache(model="gpt-4o-mini", prompt=prompt)
-    file_path = f"./queries.txt"
+    file_path = "./queries.txt"
     with open(file_path, "w") as file:
         file.write(result)
-    print(f"Queries written to {file_path}")
+    print(f"Queries written to {file_path}")

View File

@@ -122,4 +122,4 @@ print("\nResult (Global):")
 print(rag.query(query_text, param=QueryParam(mode="global")))
 print("\nResult (Hybrid):")
-print(rag.query(query_text, param=QueryParam(mode="hybrid")))
+print(rag.query(query_text, param=QueryParam(mode="hybrid")))

View File

@@ -20,13 +20,11 @@ rag = LightRAG(
     llm_model_func=bedrock_complete,
     llm_model_name="Anthropic Claude 3 Haiku // Amazon Bedrock",
     embedding_func=EmbeddingFunc(
-        embedding_dim=1024,
-        max_token_size=8192,
-        func=bedrock_embedding
-    )
+        embedding_dim=1024, max_token_size=8192, func=bedrock_embedding
+    ),
 )
-with open("./book.txt", 'r', encoding='utf-8') as f:
+with open("./book.txt", "r", encoding="utf-8") as f:
     rag.insert(f.read())
 for mode in ["naive", "local", "global", "hybrid"]:
@@ -34,8 +32,5 @@ for mode in ["naive", "local", "global", "hybrid"]:
     print(f"| {mode.capitalize()} |")
     print("+-" + "-" * len(mode) + "-+\n")
     print(
-        rag.query(
-            "What are the top themes in this story?",
-            param=QueryParam(mode=mode)
-        )
+        rag.query("What are the top themes in this story?", param=QueryParam(mode=mode))
     )

View File

@@ -1,10 +1,9 @@
 import os
-import sys
 from lightrag import LightRAG, QueryParam
 from lightrag.llm import hf_model_complete, hf_embedding
 from lightrag.utils import EmbeddingFunc
-from transformers import AutoModel,AutoTokenizer
+from transformers import AutoModel, AutoTokenizer
 WORKING_DIR = "./dickens"
@@ -13,16 +12,20 @@ if not os.path.exists(WORKING_DIR):
 rag = LightRAG(
     working_dir=WORKING_DIR,
-    llm_model_func=hf_model_complete,
-    llm_model_name='meta-llama/Llama-3.1-8B-Instruct',
+    llm_model_func=hf_model_complete,
+    llm_model_name="meta-llama/Llama-3.1-8B-Instruct",
     embedding_func=EmbeddingFunc(
         embedding_dim=384,
         max_token_size=5000,
         func=lambda texts: hf_embedding(
-            texts,
-            tokenizer=AutoTokenizer.from_pretrained("sentence-transformers/all-MiniLM-L6-v2"),
-            embed_model=AutoModel.from_pretrained("sentence-transformers/all-MiniLM-L6-v2")
-        )
+            texts,
+            tokenizer=AutoTokenizer.from_pretrained(
+                "sentence-transformers/all-MiniLM-L6-v2"
+            ),
+            embed_model=AutoModel.from_pretrained(
+                "sentence-transformers/all-MiniLM-L6-v2"
+            ),
+        ),
     ),
 )
@@ -31,13 +34,21 @@ with open("./book.txt") as f:
     rag.insert(f.read())
 # Perform naive search
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="naive")))
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+)
 # Perform local search
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="local")))
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+)
 # Perform global search
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="global")))
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+)
 # Perform hybrid search
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid")))
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+)

View File

@@ -11,15 +11,12 @@ if not os.path.exists(WORKING_DIR):
 rag = LightRAG(
     working_dir=WORKING_DIR,
-    llm_model_func=ollama_model_complete,
-    llm_model_name='your_model_name',
+    llm_model_func=ollama_model_complete,
+    llm_model_name="your_model_name",
     embedding_func=EmbeddingFunc(
         embedding_dim=768,
         max_token_size=8192,
-        func=lambda texts: ollama_embedding(
-            texts,
-            embed_model="nomic-embed-text"
-        )
+        func=lambda texts: ollama_embedding(texts, embed_model="nomic-embed-text"),
     ),
 )
@@ -28,13 +25,21 @@ with open("./book.txt") as f:
     rag.insert(f.read())
 # Perform naive search
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="naive")))
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+)
 # Perform local search
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="local")))
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+)
 # Perform global search
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="global")))
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+)
 # Perform hybrid search
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid")))
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+)

View File

@@ -6,10 +6,11 @@ from lightrag.utils import EmbeddingFunc
 import numpy as np
 WORKING_DIR = "./dickens"
 if not os.path.exists(WORKING_DIR):
     os.mkdir(WORKING_DIR)
 async def llm_model_func(
     prompt, system_prompt=None, history_messages=[], **kwargs
 ) -> str:
@@ -20,17 +21,19 @@ async def llm_model_func(
         history_messages=history_messages,
         api_key=os.getenv("UPSTAGE_API_KEY"),
         base_url="https://api.upstage.ai/v1/solar",
-        **kwargs
+        **kwargs,
     )
 async def embedding_func(texts: list[str]) -> np.ndarray:
     return await openai_embedding(
         texts,
         model="solar-embedding-1-large-query",
         api_key=os.getenv("UPSTAGE_API_KEY"),
-        base_url="https://api.upstage.ai/v1/solar"
+        base_url="https://api.upstage.ai/v1/solar",
     )
 # function test
 async def test_funcs():
     result = await llm_model_func("How are you?")
@@ -39,6 +42,7 @@ async def test_funcs():
     result = await embedding_func(["How are you?"])
     print("embedding_func: ", result)
 asyncio.run(test_funcs())
@@ -46,10 +50,8 @@ rag = LightRAG(
     working_dir=WORKING_DIR,
     llm_model_func=llm_model_func,
     embedding_func=EmbeddingFunc(
-        embedding_dim=4096,
-        max_token_size=8192,
-        func=embedding_func
-    )
+        embedding_dim=4096, max_token_size=8192, func=embedding_func
+    ),
 )
@@ -57,13 +59,21 @@ with open("./book.txt") as f:
     rag.insert(f.read())
 # Perform naive search
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="naive")))
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+)
 # Perform local search
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="local")))
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+)
 # Perform global search
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="global")))
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+)
 # Perform hybrid search
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid")))
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+)

View File

@@ -1,9 +1,7 @@
 import os
-import sys
 from lightrag import LightRAG, QueryParam
-from lightrag.llm import gpt_4o_mini_complete, gpt_4o_complete
-from transformers import AutoModel,AutoTokenizer
+from lightrag.llm import gpt_4o_mini_complete
 WORKING_DIR = "./dickens"
@@ -12,7 +10,7 @@ if not os.path.exists(WORKING_DIR):
 rag = LightRAG(
     working_dir=WORKING_DIR,
-    llm_model_func=gpt_4o_mini_complete
+    llm_model_func=gpt_4o_mini_complete,
     # llm_model_func=gpt_4o_complete
 )
@@ -21,13 +19,21 @@ with open("./book.txt") as f:
     rag.insert(f.read())
 # Perform naive search
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="naive")))
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="naive"))
+)
 # Perform local search
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="local")))
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="local"))
+)
 # Perform global search
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="global")))
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="global"))
+)
 # Perform hybrid search
-print(rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid")))
+print(
+    rag.query("What are the top themes in this story?", param=QueryParam(mode="hybrid"))
+)