forked from teticio/llama-squad
test_encoder_squad.py
import csv
import json
from dataclasses import dataclass, field
from typing import Optional

from datasets import load_dataset
from tqdm import tqdm
from transformers import HfArgumentParser, pipeline
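

# Command-line options for the evaluation run.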
@dataclass
class ScriptArguments:
    model_name: Optional[str] = field(default="deepset/deberta-v3-large-squad2")
    batch_size: Optional[int] = field(default=8)
    output_csv_file: Optional[str] = field(default="results/results_encoder.csv")
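
# Parse command-line overrides for the fields above, e.g. (values shown are illustrative):
#   python test_encoder_squad.py --model_name deepset/deberta-v3-large-squad2 --batch_size 16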
parser = HfArgumentParser(ScriptArguments)
script_args = parser.parse_args_into_dataclasses()[0]
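
# Evaluate on the SQuAD v2 validation split.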
dataset = load_dataset("squad_v2")["validation"]
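
# Extractive question-answering pipeline. device=0 assumes a CUDA GPU is available;
# use device=-1 to run on the CPU instead.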
pipe = pipeline(
    "question-answering",
    model=script_args.model_name,
    tokenizer=script_args.model_name,
    device=0,
)
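

# Run the QA pipeline on a batch of examples and re-shape its per-example dicts into
# column-wise lists, the layout datasets.map expects when batched=True.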
def process_batch(batch):
    inputs = [
        {"question": q, "context": c}
        for q, c in zip(batch["question"], batch["context"])
    ]
    outputs = pipe(inputs)
    if not isinstance(outputs, list):
        outputs = [outputs]
    result = {key: [output[key] for output in outputs] for key in outputs[0].keys()}
    return result

results = dataset.map(process_batch, batched=True, batch_size=script_args.batch_size)
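
# Dump per-example results to CSV so individual predictions can be inspected.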
with open(script_args.output_csv_file, "w") as file:
    writer = csv.writer(file)
    writer.writerow(
        [
            "Context",
            "Question",
            "Correct answers",
            "Model answer",
            "Exact match",
        ]
    )
    for result in tqdm(results):
        context = result["context"]
        question = result["question"]
        answers = result["answers"]["text"]
        # SQuAD v2 questions with no gold answers get the "?" placeholder.
        if len(answers) == 0:
            answers = ["?"]
        # Low-confidence predictions are treated as "no answer", so they match the
        # "?" placeholder on unanswerable questions.
        model_answer = "?" if result["score"] < 0.5 else result["answer"].strip()
        # Strip a trailing ".", "," or ";" before comparing against the gold answers.
        if model_answer[-1] in [".", ",", ";"]:
            model_answer = model_answer[:-1]
        exact_match = model_answer in answers
        writer.writerow(
            [context, question, json.dumps(answers), model_answer, exact_match]
        )
        file.flush()