import os
import sys
sys.path.append(os.getcwd())

import pandas as pd
import numpy as np
from collections import defaultdict
from tqdm import tqdm
import math
import warnings
import pickle
import collections
from datetime import datetime

from utils import get_file_size
from config import logger, offline_mode
from config import shixun_itemcf_i2i_sim_data, need_metric_recall
from config import shixun_itemcf_recall_dict
from config import shixun_merge_emb_path
from config import samples_mode
from config import test_user_id
from matching.shixun.recall_comm import get_all_select_df
from matching.shixun.recall_comm import get_user_info_df, get_item_info_df
from matching.shixun.recall_comm import get_user_item_time_dict, get_user_item_dict
from matching.shixun.recall_comm import get_cf_hist_and_last_select, get_item_info_dict
from matching.shixun.recall_comm import get_item_topk_select, metrics_recall
from matching.shixun.item_wordem_i2i_sim import embedding_i2i_sim

tqdm.pandas()
warnings.filterwarnings('ignore')


def itemcf_sim(df, item_created_time_dict):
    """
    Compute the item-to-item similarity matrix: item-based CF plus association rules.
    :param df: item interaction (selection) data
    :param item_created_time_dict: dict of item creation times
    :return: item-to-item similarity matrix
    """
    # Reuse a previously computed similarity matrix if it already exists on disk
    if os.path.exists(shixun_itemcf_i2i_sim_data) and (get_file_size(shixun_itemcf_i2i_sim_data) > 1):
        i2i_sim_ = pickle.load(open(shixun_itemcf_i2i_sim_data, 'rb'))
        return i2i_sim_

    print("Building the dict of items selected by each user")
    user_item_dict = get_user_item_dict(df)

    # Compute item similarity
    i2i_sim = {}
    item_cnt = defaultdict(int)

    for user_id, item_list in tqdm(user_item_dict.items()):
        # Take temporal factors into account when refining item-based CF
        for loc1, (i, i_visits, i_challenges_count, i_avergestar, i_task, i_select_time) in enumerate(item_list):
            item_cnt[i] += 1
            i2i_sim.setdefault(i, {})

            for loc2, (j, j_visits, j_challenges_count, j_avergestar, j_task, j_select_time) in enumerate(item_list):
                if (i == j) or (i not in item_created_time_dict) or (j not in item_created_time_dict):
                    continue

                # Forward selection order weighs more than backward order
                loc_alpha = 1.0 if loc2 > loc1 else 0.7

                # Position weight (tunable parameters)
                loc_weight = loc_alpha * (0.9 ** (np.abs(loc2 - loc1) - 1))

                # Selection-time gap weight (tunable parameters)
                select_time_weight = loc_alpha * (np.exp(0.7 ** np.abs(i_select_time - j_select_time)))

                # Creation-time gap weight between the two items (tunable parameters)
                created_time_weight = loc_alpha * (np.exp(0.8 ** np.abs(item_created_time_dict[i] - item_created_time_dict[j])))

                # Visit-count gap weight between the two items (tunable parameters)
                visit_weight = loc_alpha * (np.exp(0.9 ** np.abs(i_visits - j_visits)))

                # Challenge-count gap weight between the two items (tunable parameters)
                challenges_weight = loc_alpha * (np.exp(0.5 ** np.abs(i_challenges_count - j_challenges_count)))

                # Average-star gap weight between the two items (tunable parameters)
                avergestar_weight = loc_alpha * (np.exp(0.8 ** np.abs(i_avergestar - j_avergestar)))

                # Pass-count gap weight between the two items (tunable parameters)
                task_weight = loc_alpha * (np.exp(0.5 ** np.abs(i_task - j_task)))

                i2i_sim[i].setdefault(j, 0)

                # Combine all the weights into the final item-to-item similarity
                i2i_sim[i][j] += (loc_weight * select_time_weight * created_time_weight
                                  * visit_weight * challenges_weight * avergestar_weight
                                  * task_weight) / math.log(len(item_list) + 1)

    i2i_sim_ = i2i_sim.copy()

    # Cosine-similarity denominator
    for i, related_items in i2i_sim.items():
        for j, wij in related_items.items():
            i2i_sim_[i][j] = wij / math.sqrt(item_cnt[i] * item_cnt[j])

    # Save the similarity matrix to disk
    pickle.dump(i2i_sim_, open(shixun_itemcf_i2i_sim_data, 'wb'))

    return i2i_sim_
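# Illustrative sketch only (not used by the pipeline): stripped of the extra
# behaviour/attribute weights above, the accumulation plus the final normalization
# is a standard cosine-style ItemCF similarity over user co-selection counts.
# The user_items argument here is a hypothetical {user_id: [item_id, ...]} dict.
def _plain_itemcf_sim_sketch(user_items):
    item_cnt = defaultdict(int)
    sim = {}
    for _, items in user_items.items():
        for i in items:
            item_cnt[i] += 1
            sim.setdefault(i, {})
            for j in items:
                if i == j:
                    continue
                # each co-selection contributes 1 / log(1 + list length),
                # which down-weights very active users
                sim[i].setdefault(j, 0.0)
                sim[i][j] += 1.0 / math.log(len(items) + 1)
    # cosine-style normalization by item popularity
    return {i: {j: w / math.sqrt(item_cnt[i] * item_cnt[j]) for j, w in js.items()}
            for i, js in sim.items()}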
# Item-based collaborative filtering recall
def item_based_recommend(user_id, user_item_time_dict, i2i_sim, sim_item_topk,
                         recall_item_num, item_topk_select,
                         item_created_time_dict, emb_i2i_sim):
    """
    Item-based CF + association-rule recall.
    :param user_id: user id
    :param user_item_time_dict: dict, each user's selected item sequence ordered by selection time,
        {user1: [(item1, time1), (item2, time2)..]...}
    :param i2i_sim: dict, item-to-item similarity matrix
    :param sim_item_topk: int, number of most similar items to take for each history item
    :param recall_item_num: int, number of items to recall
    :param item_topk_select: list, most frequently selected items, used to backfill the recall list
    :param item_created_time_dict: dict, item creation times
    :param emb_i2i_sim: dict, item embedding similarity matrix
    :return: recalled item list [(item1, score1), (item2, score2)...]
    """
    # Items the user has selected historically
    user_hist_items = user_item_time_dict[user_id]
    user_hist_items_ = {item_id for item_id, _ in user_hist_items}

    item_rank = {}
    for loc, (i, select_time) in enumerate(user_hist_items):
        for j, wij in sorted(i2i_sim[i].items(), key=lambda x: x[1], reverse=True)[:sim_item_topk]:
            if j in user_hist_items_:
                continue

            # Creation-time gap weight
            created_time_weight = np.exp(0.8 ** np.abs(item_created_time_dict[i] - item_created_time_dict[j]))

            # Position weight of the history item within the user's selection sequence
            loc_weight = (0.9 ** (len(user_hist_items) - loc))

            content_weight = 1.0
            if emb_i2i_sim.get(i, {}).get(j, None) is not None:
                content_weight += emb_i2i_sim[i][j]
            if emb_i2i_sim.get(j, {}).get(i, None) is not None:
                content_weight += emb_i2i_sim[j][i]

            item_rank.setdefault(j, 0)
            item_rank[j] += created_time_weight * loc_weight * content_weight * wij

    # Backfill with popular items if there are not enough candidates
    if len(item_rank) < recall_item_num:
        for i, item in enumerate(item_topk_select):
            # Backfilled items must not already be in the candidate list
            if item in item_rank:
                continue
            # Any negative score will do for backfilled items
            item_rank[item] = - i - 100
            # Stop once we have enough candidates
            if len(item_rank) == recall_item_num:
                break

    item_rank = sorted(item_rank.items(), key=lambda x: x[1], reverse=True)[:recall_item_num]

    return item_rank


def init_itemcf_recall():
    """
    Initialize the data used by the recall stage.
    """
    global train_hist_select_df
    global user_item_time_dict
    global i2i_sim, sim_item_topk
    global recall_item_num
    global item_topk_select
    global item_created_time_dict
    global emb_i2i_sim
    global train_last_select_df

    logger.info("Loading item interaction data")
    all_select_df = get_all_select_df(offline=offline_mode)

    logger.info("Loading item attribute data")
    item_info_df = get_item_info_df()

    logger.info("Loading user info data")
    users_info_df = get_user_info_df()

    all_select_df = all_select_df.merge(users_info_df, on='user_id')
    all_select_df = all_select_df.merge(item_info_df, on='shixun_id')

    sim_item_topk = 120
    recall_item_num = 100

    logger.info('Building item info dicts')
    item_visits_dict, item_trainee_dict, item_created_time_dict, \
        item_averge_star_dict, item_myshixuns_count_dict, item_challenges_count_dict = \
        get_item_info_dict(item_info_df)

    logger.info('Building the itemcf similarity matrix')
    i2i_sim = itemcf_sim(all_select_df, item_created_time_dict)

    logger.info('Building the item embedding similarity matrix')
    item_emb_df = pd.read_csv(shixun_merge_emb_path, sep='\t', encoding='utf-8')
    emb_i2i_sim = embedding_i2i_sim(item_emb_df, topk=recall_item_num)

    # For recall evaluation, hold out each user's last selection
    # If no evaluation is needed, use the full training set for recall directly
    if need_metric_recall:
        logger.info('Splitting interaction history and each user\'s last selection')
        train_hist_select_df, train_last_select_df = get_cf_hist_and_last_select(all_select_df)
    else:
        train_hist_select_df = all_select_df

    logger.info('Building the user -> selected items dict')
    user_item_time_dict = get_user_item_time_dict(train_hist_select_df)

    logger.info('Collecting the most frequently selected items')
    item_topk_select = get_item_topk_select(train_hist_select_df, k=recall_item_num)


def itemcf_recall(user_id, topk):
    """
    ItemCF recall entry point.
    """
    start_time = datetime.now()

    logger.info(f"User ID for this itemcf recall: {user_id}")

    recall_results = {}

    if user_id not in user_item_time_dict:
        return recall_results

    recall_results = item_based_recommend(user_id, user_item_time_dict,
                                          i2i_sim, topk + (topk // 2), topk,
                                          item_topk_select, item_created_time_dict,
                                          emb_i2i_sim)

    # Elapsed time in milliseconds
    end_time = datetime.now()
    cost_time_millisecond = round((end_time - start_time).total_seconds() * 1000.0, 3)

    logger.info(f"This recall took {cost_time_millisecond} ms")

    return recall_results
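# Illustrative sketch only (not called anywhere): the per-candidate score inside
# item_based_recommend above boils down to the CF similarity wij boosted by a
# creation-time proximity weight, a recency (position) weight over the user's
# history, and an optional embedding-similarity bonus. The parameter values mirror
# the ones used above; created_gap and emb_sim are hypothetical inputs.
def _candidate_score_sketch(wij, loc, hist_len, created_gap, emb_sim=None):
    created_time_weight = np.exp(0.8 ** abs(created_gap))  # closer creation times -> larger weight
    loc_weight = 0.9 ** (hist_len - loc)                   # more recent history items -> larger weight
    content_weight = 1.0 + (emb_sim if emb_sim is not None else 0.0)
    return created_time_weight * loc_weight * content_weight * wij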
def itemcf_recall_train():
    """
    ItemCF recall training and evaluation.
    """
    # Initialize the data used by recall
    init_itemcf_recall()

    # Only compute recall for all users and evaluate it in sampling mode;
    # doing this on the full data set would be very time-consuming
    if samples_mode and need_metric_recall:
        # Dict of recalled items per user
        user_recall_items_dict = collections.defaultdict(dict)

        logger.info('Building itemcf recall lists for all users')
        for user_id in tqdm(train_hist_select_df['user_id'].unique()):
            try:
                user_recall_items_dict[user_id] = item_based_recommend(user_id,
                    user_item_time_dict, i2i_sim, sim_item_topk, recall_item_num,
                    item_topk_select, item_created_time_dict, emb_i2i_sim)
            except Exception:
                continue

        logger.info('Saving itemcf recall results')
        pickle.dump(user_recall_items_dict, open(shixun_itemcf_recall_dict, 'wb'))

        logger.info('Evaluating itemcf recall')
        metrics_recall(user_recall_items_dict, train_last_select_df, topk=recall_item_num)


if __name__ == '__main__':
    itemcf_recall_train()

    recall_results = itemcf_recall(user_id=test_user_id, topk=20)
    print(recall_results)
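# Note: itemcf_recall() relies on the module-level globals set up in
# init_itemcf_recall(), so itemcf_recall_train() (or at least init_itemcf_recall())
# must run first, as the __main__ block above does. The returned recall list has
# the form [(shixun_id, score), ...], sorted by score in descending order.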