### Overall Performance Table

|                      | **Agriculture** |              | **CS**   |              | **Legal** |              | **Mix**    |              |
|----------------------|-----------------|--------------|----------|--------------|-----------|--------------|------------|--------------|
|                      | NaiveRAG        | **LightRAG** | NaiveRAG | **LightRAG** | NaiveRAG  | **LightRAG** | NaiveRAG   | **LightRAG** |
| **Empowerment**      | 36.69%          | **63.31%**   | 45.09%   | **54.91%**   | 42.81%    | **57.19%**   | **52.94%** | 47.06%       |
| **Overall**          | 43.62%          | **56.38%**   | 45.98%   | **54.02%**   | 45.70%    | **54.30%**   | **51.86%** | 48.14%       |
## Reproduce

All the code can be found in the `./reproduce` directory.

### Step-0 Extract Unique Contexts

First, we extract the unique contexts from the datasets.
```python
import glob
import json
import os


def extract_unique_contexts(input_directory, output_directory):
    os.makedirs(output_directory, exist_ok=True)

    jsonl_files = glob.glob(os.path.join(input_directory, '*.jsonl'))
    print(f"Found {len(jsonl_files)} JSONL files.")

    for file_path in jsonl_files:
        filename = os.path.basename(file_path)
        name, ext = os.path.splitext(filename)
        output_filename = f"{name}_unique_contexts.json"
        output_path = os.path.join(output_directory, output_filename)

        # Dict keys deduplicate contexts while preserving insertion order.
        unique_contexts_dict = {}

        print(f"Processing file: {filename}")

        try:
            with open(file_path, 'r', encoding='utf-8') as infile:
                for line_number, line in enumerate(infile, start=1):
                    line = line.strip()
                    if not line:
                        continue
                    try:
                        json_obj = json.loads(line)
                        context = json_obj.get('context')
                        if context and context not in unique_contexts_dict:
                            unique_contexts_dict[context] = None
                    except json.JSONDecodeError as e:
                        print(f"JSON decoding error in file {filename} at line {line_number}: {e}")
        except FileNotFoundError:
            print(f"File not found: {filename}")
            continue
        except Exception as e:
            print(f"An error occurred while processing file {filename}: {e}")
            continue

        unique_contexts_list = list(unique_contexts_dict.keys())
        print(f"There are {len(unique_contexts_list)} unique `context` entries in the file {filename}.")

        try:
            with open(output_path, 'w', encoding='utf-8') as outfile:
                json.dump(unique_contexts_list, outfile, ensure_ascii=False, indent=4)
            print(f"Unique `context` entries have been saved to: {output_filename}")
        except Exception as e:
            print(f"An error occurred while saving to the file {output_filename}: {e}")

    print("All files have been processed.")
```
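The function can be driven by a small command-line wrapper; a minimal sketch follows, where the default `../datasets` input and `../datasets/unique_contexts` output paths are illustrative assumptions rather than paths fixed by the repository.

```python
import argparse

# Hypothetical driver script; the default paths are assumptions.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input_dir', type=str, default='../datasets')
    parser.add_argument('-o', '--output_dir', type=str, default='../datasets/unique_contexts')
    args = parser.parse_args()

    extract_unique_contexts(args.input_dir, args.output_dir)
```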
### Step-1 Insert Contexts

We insert the extracted contexts into the LightRAG system.

```python
import json
import time


def insert_text(rag, file_path):
    with open(file_path, mode='r') as f:
        unique_contexts = json.load(f)

    # Retry the batch insert a few times to ride out transient failures.
    retries = 0
    max_retries = 3
    while retries < max_retries:
        try:
            rag.insert(unique_contexts)
            break
        except Exception as e:
            retries += 1
            print(f"Insertion failed, retrying ({retries}/{max_retries}), error: {e}")
            time.sleep(10)
    if retries == max_retries:
        print("Insertion failed after exceeding the maximum number of retries")
```
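A minimal usage sketch, assuming a `LightRAG` instance built over a per-dataset working directory; the `agriculture` dataset name and the directory layout are assumptions for illustration.

```python
import os

from lightrag import LightRAG

# Illustrative setup: the dataset name and paths are assumptions.
cls = "agriculture"
WORKING_DIR = f"../{cls}"
os.makedirs(WORKING_DIR, exist_ok=True)

rag = LightRAG(working_dir=WORKING_DIR)
insert_text(rag, f"../datasets/unique_contexts/{cls}_unique_contexts.json")
```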
### Step-2 Generate Queries

We extract tokens from the first half and the second half of each context in the dataset, then combine them into a dataset description that is used to generate queries.

```python
from transformers import GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained('gpt2')


def get_summary(context, tot_tokens=2000):
    tokens = tokenizer.tokenize(context)
    half_tokens = tot_tokens // 2

    # Take half of the token budget from each side of the context,
    # skipping the first and last 1000 tokens.
    start_tokens = tokens[1000:1000 + half_tokens]
    end_tokens = tokens[-(1000 + half_tokens):-1000]

    summary_tokens = start_tokens + end_tokens
    summary = tokenizer.convert_tokens_to_string(summary_tokens)

    return summary
```
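The resulting summaries can then be joined into a single dataset description and embedded in a prompt asking an LLM to propose questions; the file layout and prompt wording below are assumptions for illustration, not the repository's exact prompt.

```python
import json

# Hypothetical query-generation setup; paths and prompt text are assumptions.
cls = "agriculture"
with open(f"../datasets/unique_contexts/{cls}_unique_contexts.json", mode='r') as f:
    unique_contexts = json.load(f)

summaries = [get_summary(context) for context in unique_contexts]
description = "\n\n".join(summaries)

prompt = f"""Given the following description of a dataset:

{description}

Please identify 5 potential users who would engage with this dataset,
and for each user generate 5 questions they might ask.
"""
```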
### Step-3 Query

We extract the queries generated in Step-2 and use them to query LightRAG.

```python
import re


def extract_queries(file_path):
    with open(file_path, 'r') as f:
        data = f.read()

    # Drop markdown bold markers so the regex sees plain text.
    data = data.replace('**', '')

    # Each query appears on a line of the form "- Question N: <text>".
    queries = re.findall(r'- Question \d+: (.+)', data)

    return queries
```
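A sketch of the query loop, assuming LightRAG's `QueryParam` configuration object; the questions file name and the `hybrid` retrieval mode are assumptions for illustration.

```python
from lightrag import LightRAG, QueryParam

# Illustrative query loop; the questions path is an assumption.
rag = LightRAG(working_dir="../agriculture")
queries = extract_queries("../datasets/questions/agriculture_questions.txt")

for query in queries:
    result = rag.query(query, param=QueryParam(mode="hybrid"))
    print(result)
```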
## Code Structure

```python
.
├── lightrag
│   ├── ...
│   ├── prompt.py
│   ├── storage.py
│   └── utils.py
├── reproduce
│   ├── Step_0.py
│   ├── Step_1.py
│   ├── Step_2.py
│   └── Step_3.py
├── LICENSE
├── README.md
├── requirements.txt
```