|
- import os
- import sys
- import json
- from model_url import get_model_resp, get_url_tokenizer
-
def run_predict(url, log_path, few_shot=True):
    """Evaluate the model served at *url* on the COPA validation set.

    For each COPA example, scores both candidate choices by the mean
    per-token log-probability of the choice tokens (continuation after the
    prompt) and predicts the higher-scoring one.  Prints running accuracy
    per example and writes the final accuracy to a text file under
    *log_path*.

    Args:
        url: Endpoint passed through to ``get_model_resp``.
        log_path: Directory where the result summary file is written.
        few_shot: Selects the output filename only (``copa_fewshot.txt``
            vs ``copa_zeroshot.txt``); the prompt itself is unchanged.

    Raises:
        ValueError: If the model response length does not match the
            tokenized input length (malformed logprobs from the server).
    """
    tokenizer = get_url_tokenizer()
    main_dir = os.path.dirname(os.path.abspath(__file__))
    file_dir = os.path.join(main_dir, "task_dataset", "COPA", "val.jsonl")
    count = 0
    correct_num = 0
    acc = 0
    with open(file_dir, "r", encoding="utf8") as f:
        # Iterate the file lazily instead of materializing it with readlines().
        for raw_line in f:
            line = json.loads(raw_line)
            premise = line["premise"]
            choice1 = line["choice1"]
            choice2 = line["choice2"]
            question = line["question"]
            label = line["label"]
            count += 1

            example = f"{premise}\nQuestion: What may be the {question}?\nAnswer:"
            input_str = [f"{example}{choice1}", f"{example}{choice2}"]

            # The prompt prefix is identical for both choices, so its token
            # length (the masked region) is a loop invariant — compute once.
            mask_length = len(tokenizer.encode(example))
            input_length_list = [len(tokenizer.encode(pred)) for pred in input_str]

            model_resp = get_model_resp(
                url=url, input_str=input_str, tokens_to_generate=0, top_k=1, logprobs=True
            )
            return_resp = []
            for resp_item, input_length in zip(model_resp, input_length_list):
                # Explicit check instead of `assert` (asserts vanish under -O):
                # the server returns one logprob per input token minus the first.
                if len(resp_item) != input_length - 1:
                    raise ValueError(
                        f"logprobs length {len(resp_item)} != expected {input_length - 1}"
                    )
                # Keep only the logprobs of the choice tokens (drop the prompt).
                return_resp.append(resp_item[mask_length - 1:input_length - 1])

            # Mean per-token logprob of each choice; pick the best (first max
            # on ties, matching np.argmax semantics).
            pred_list = [sum(logprobs) / len(logprobs) for logprobs in return_resp]
            answers_pred = max(range(len(pred_list)), key=pred_list.__getitem__)
            if answers_pred == int(label):
                correct_num += 1
            acc = correct_num / count
            print(f"copa, 准确率Acc:{acc}, number: {count}")

    if not few_shot:
        with open(os.path.join(log_path, 'copa_zeroshot.txt'), 'w') as file:
            file.write(f"copa, zero shot , Acc: {acc}, number: {count}")
    else:
        with open(os.path.join(log_path, 'copa_fewshot.txt'), 'w') as file:
            file.write(f"copa, few shot , Acc: {acc}, number: {count}")
|