
import torch

from models import THHGR, util
from argparams import get_args
from data_process.util import DataHelper
from data_process.eval_data import EvaluateDataset

import tqdm
import random
import numpy
import heapq
from sklearn.metrics import f1_score

def evaluate(
    model: THHGR.TransformerHHGR,
    tokenizer,
    g_eval,
    all_user,
    all_group,
    eval_gu,
    h_ul_coarse,
    h_ul_fine,
    h_gl_eval,
    group_dict,
    user_dict,
    k,
):
    """Run leave-one-out group-level ranking evaluation.

    Each entry of ``g_eval`` is ``((group, pos_item), neg_items)``. The
    positive item is shuffled in among its sampled negatives, the model
    scores all candidates for the group, and the top-``k`` ranking is
    checked for the positive. Hit indicators are summarised with
    micro/macro F1 and written through tqdm.

    Args:
        model: trained TransformerHHGR model.
        tokenizer: tokenizer forwarded to ``model.to_embedding``.
        g_eval: sequence of ``((group, pos_item), neg_items)`` pairs.
        all_user: user inputs for the embedding layer.
        all_group: group inputs for the embedding layer.
        eval_gu: group -> member-users mapping used by the "group" forward.
        h_ul_coarse: coarse user-level hypergraph incidence input.
        h_ul_fine: fine user-level hypergraph incidence input.
        h_gl_eval: group-level hypergraph incidence input.
        group_dict: group id lookup table forwarded to the model.
        user_dict: user id lookup table forwarded to the model.
        k: cutoff for HR@k / NDCG@k.

    Returns:
        None; metrics are reported via ``eval_tqdm.write``.
    """
    print("evaluating the group-level learning")  # typo "gorup" fixed

    # Pure evaluation: no gradients are needed. no_grad also makes the
    # gradient handling consistent — the original detached the user
    # embeddings but left the group branch attached to the graph.
    with torch.no_grad():
        all_user_embedding = model.to_embedding(all_user, tokenizer)
        user_embeds_coarse = model.hgcn_coarse(all_user_embedding, h_ul_coarse)
        user_embeds_fine = model.hgcn_fine(all_user_embedding, h_ul_fine)
        user_embeds_ul = user_embeds_coarse + user_embeds_fine

        all_group_embeds = model.to_embedding(all_group, tokenizer)
        group_embeddings = model.hgcn_gl(all_group_embeds, h_gl_eval)

        hrs, ndcgs = [], []

        with tqdm.tqdm(enumerate(g_eval), total=len(g_eval)) as eval_tqdm:
            for _, (pos_pair, neg_items) in eval_tqdm:
                group, pos_item = pos_pair

                # Build a fresh candidate list. The original appended the
                # positive item onto ``neg_items`` in place, which corrupts
                # the dataset for any subsequent evaluation pass.
                items = list(neg_items) + [pos_item]
                random.shuffle(items)

                groups_in = [group] * len(items)

                preds = model(
                    "group",
                    group_inputs=groups_in,
                    item_inputs=items,
                    user_embedding=user_embeds_ul,
                    group_embedding=group_embeddings,
                    group_users=eval_gu,
                    group_dict=group_dict,
                    user_dict=user_dict,
                    tokenizer=tokenizer,
                )
                preds_list = preds.cpu().squeeze(1).data.numpy().tolist()
                # zip avoids re-binding the outer enumerate index, which the
                # original shadowed with a second ``idx``.
                item_scores = dict(zip(items, preds_list))

                # Top-k item ids by predicted score.
                rank = heapq.nlargest(k, item_scores, key=item_scores.get)
                hrs.append(util.get_HitRatio(rank, pos_item))
                ndcgs.append(util.get_NDCG(rank, pos_item))

            # Hit indicators are 0/1, so F1 against an all-ones reference
            # summarises the hit rate.
            micro_f1 = f1_score([1] * len(hrs), hrs, average="micro")
            macro_f1 = f1_score([1] * len(hrs), hrs, average="macro")

            eval_tqdm.write(
                "k {:^3d} | micro f1 {:^7.4f} | macro f1 {:^7.4f}".format(
                    k, micro_f1, macro_f1
                )
            )

    return
-
-
-
- def CloudBrainEvaluate():
- args = get_args()
-
- model = torch.load("/model/thhgr.pth")
-
- data_helper = DataHelper(args.dataset)
- geval = EvaluateDataset(args.dataset, data_helper.num_items, args.num_negs, data_helper.item_dict)
-
- return
|