import collections
import pickle
import random
import faiss
import numpy as np
from deepctr.feature_column import SparseFeat, VarLenSparseFeat
from deepmatch.models import YoutubeDNN
from deepmatch.utils import sampledsoftmaxloss
from keras import Model
from keras.utils import pad_sequences
from sklearn.preprocessing import LabelEncoder
from tqdm import tqdm
from dataset import save_path
from multi_recall import user_multi_recall_dict, metric_recall, all_click_df
from utils import get_hist_and_last_click, metrics_recall


def gen_data_set(data, negsample=1):
    data.sort_values("click_timestamp", inplace=True)
    item_ids = data['click_article_id'].unique()
    train_set = []
    test_set = []
    for reviewerID, hist in tqdm(data.groupby('user_id')):
        pos_list = hist['click_article_id'].tolist()
        if negsample > 0:
            candidate_set = list(set(item_ids) - set(pos_list))  # sample negatives from articles the user has never clicked
            neg_list = np.random.choice(candidate_set, size=len(pos_list) * negsample, replace=True)  # draw negsample negatives per positive
        # if the user has only one click, keep this record in the training set as well,
        # otherwise the learned embeddings would miss this user/item
        if len(pos_list) == 1:
            train_set.append((reviewerID, [pos_list[0]], pos_list[0], 1, len(pos_list)))
            test_set.append((reviewerID, [pos_list[0]], pos_list[0], 1, len(pos_list)))
        # build positive/negative samples with a sliding window over the click sequence
        for i in range(1, len(pos_list)):
            hist = pos_list[:i]
            if i != len(pos_list) - 1:
                train_set.append((reviewerID, hist[::-1], pos_list[i], 1,
                                  len(hist[::-1])))  # positive sample [user_id, his_item, pos_item, label, len(his_item)]
                for negi in range(negsample):
                    train_set.append((reviewerID, hist[::-1], neg_list[i * negsample + negi], 0,
                                      len(hist[::-1])))  # negative sample [user_id, his_item, neg_item, label, len(his_item)]
            else:
                # the longest window (the last click) is held out as test data
                test_set.append((reviewerID, hist[::-1], pos_list[i], 1, len(hist[::-1])))
    random.shuffle(train_set)
    random.shuffle(test_set)
    return train_set, test_set
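
# A minimal worked sketch of what gen_data_set produces (hypothetical article ids a0..a3, negsample=0):
# for pos_list = [a0, a1, a2, a3] the sliding window yields
#   train_set: (uid, [a0], a1, 1, 1), (uid, [a1, a0], a2, 1, 2)
#   test_set:  (uid, [a2, a1, a0], a3, 1, 3)
# i.e. each history is reversed so the most recent click comes first, and the last click is held out for testing.
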
# pad the input data so that all sequence features share the same length
def gen_model_input(train_set, seq_max_len):
    train_uid = np.array([line[0] for line in train_set])
    train_seq = [line[1] for line in train_set]
    train_iid = np.array([line[2] for line in train_set])
    train_label = np.array([line[3] for line in train_set])
    train_hist_len = np.array([line[4] for line in train_set])
    train_seq_pad = pad_sequences(train_seq, maxlen=seq_max_len, padding='post', truncating='post', value=0)
    train_model_input = {"user_id": train_uid, "click_article_id": train_iid, "hist_article_id": train_seq_pad,
                         "hist_len": train_hist_len}
    return train_model_input, train_label
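
# Sketch of the resulting model input, assuming seq_max_len=3 and the toy sample above:
# the history [a1, a0] is right-padded to [a1, a0, 0], giving arrays shaped
#   {"user_id": (N,), "click_article_id": (N,), "hist_article_id": (N, seq_max_len), "hist_len": (N,)}
# together with a label array of length N (all 1s when negsample=0).
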

def youtubednn_u2i_dict(data, topk=20):
    sparse_features = ["click_article_id", "user_id"]
    SEQ_LEN = 30  # length of the user click sequence: shorter sequences are padded, longer ones are truncated

    user_profile_ = data[["user_id"]].drop_duplicates('user_id')
    item_profile_ = data[["click_article_id"]].drop_duplicates('click_article_id')

    # label-encode the categorical features
    features = ["click_article_id", "user_id"]
    feature_max_idx = {}
    for feature in features:
        lbe = LabelEncoder()
        data[feature] = lbe.fit_transform(data[feature])
        feature_max_idx[feature] = data[feature].max() + 1

    # extract the user and item profiles
    user_profile = data[["user_id"]].drop_duplicates('user_id')
    item_profile = data[["click_article_id"]].drop_duplicates('click_article_id')

    user_index_2_rawid = dict(zip(user_profile['user_id'], user_profile_['user_id']))
    item_index_2_rawid = dict(zip(item_profile['click_article_id'], item_profile_['click_article_id']))

    # split into training and test sets
    # deep models usually need a large amount of data, so the training samples are expanded
    # with a sliding window to keep the recall quality
    train_set, test_set = gen_data_set(data, 0)
    # build the model inputs
    train_model_input, train_label = gen_model_input(train_set, SEQ_LEN)
    test_model_input, test_label = gen_model_input(test_set, SEQ_LEN)

    # embedding dimension
    embedding_dim = 16

    # organize the data into the feature columns the model expects
    user_feature_columns = [SparseFeat('user_id', feature_max_idx['user_id'], embedding_dim),
                            VarLenSparseFeat(
                                SparseFeat('hist_article_id', feature_max_idx['click_article_id'], embedding_dim,
                                           embedding_name="click_article_id"), SEQ_LEN, 'mean', 'hist_len'), ]
    item_feature_columns = [SparseFeat('click_article_id', feature_max_idx['click_article_id'], embedding_dim)]

    # model definition
    # num_sampled: number of negatives drawn by the sampled softmax
    model = YoutubeDNN(user_feature_columns, item_feature_columns, num_sampled=5,
                       user_dnn_hidden_units=(64, embedding_dim))
    # compile the model
    model.compile(optimizer="adam", loss=sampledsoftmaxloss)

    # train the model; validation_split sets the validation ratio, 0 means training on the full data
    history = model.fit(train_model_input, train_label, batch_size=256, epochs=1, verbose=1, validation_split=0.0)

    # after training, extract the learned embeddings for both the user side and the item side
    test_user_model_input = test_model_input
    all_item_model_input = {"click_article_id": item_profile['click_article_id'].values}
    user_embedding_model = Model(inputs=model.user_input, outputs=model.user_embedding)
    item_embedding_model = Model(inputs=model.item_input, outputs=model.item_embedding)

    # save the item and user embeddings; they may be useful for ranking later,
    # but they have to be stored together with the original raw ids
    user_embs = user_embedding_model.predict(test_user_model_input, batch_size=2 ** 12)
    item_embs = item_embedding_model.predict(all_item_model_input, batch_size=2 ** 12)

    # L2-normalize the embeddings before saving
    user_embs = user_embs / np.linalg.norm(user_embs, axis=1, keepdims=True)
    item_embs = item_embs / np.linalg.norm(item_embs, axis=1, keepdims=True)

    # convert the embeddings into dicts keyed by the raw ids for easy lookup
    # (user_embs is row-aligned with test_user_model_input, so key it by those user ids)
    raw_user_id_emb_dict = {user_index_2_rawid[k]: v for k, v in zip(test_user_model_input['user_id'], user_embs)}
    raw_item_id_emb_dict = {item_index_2_rawid[k]: v for k, v in zip(item_profile['click_article_id'], item_embs)}

    # persist the embeddings locally
    pickle.dump(raw_user_id_emb_dict, open(save_path + 'user_youtube_emb.pkl', 'wb'))
    pickle.dump(raw_item_id_emb_dict, open(save_path + 'item_youtube_emb.pkl', 'wb'))

    # faiss nearest-neighbour search: for each user embedding, retrieve the topk most similar items
    index = faiss.IndexFlatIP(embedding_dim)
    # the embeddings were already normalized above, so no extra normalization is needed here
    # faiss.normalize_L2(user_embs)
    # faiss.normalize_L2(item_embs)
    index.add(item_embs)  # build the index over the item vectors
    sim, idx = index.search(np.ascontiguousarray(user_embs), topk)  # query the topk most similar items for each user
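
    # Because user_embs and item_embs were L2-normalized above, the inner-product search of
    # IndexFlatIP is equivalent to cosine similarity; sim has shape (n_users, topk) with the
    # scores and idx holds the row positions of the matched vectors inside the index.
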
    # aggregate the recalled items for each user
    user_recall_items_dict = collections.defaultdict(dict)
    # idx holds row positions in the faiss index (i.e. rows of item_profile), not encoded article
    # ids, so map each position back to its encoded id before looking up the raw id
    index_pos_2_item_id = item_profile['click_article_id'].values
    for target_idx, sim_value_list, rele_idx_list in tqdm(zip(test_user_model_input['user_id'], sim, idx)):
        target_raw_id = user_index_2_rawid[target_idx]
        # start from 1 to drop the item itself, so only topk-1 similar items are kept
        for rele_idx, sim_value in zip(rele_idx_list[1:], sim_value_list[1:]):
            rele_raw_id = item_index_2_rawid[index_pos_2_item_id[rele_idx]]
            user_recall_items_dict[target_raw_id][rele_raw_id] = user_recall_items_dict \
                .get(target_raw_id, {}).get(rele_raw_id, 0) + sim_value

    # sort each user's recalled items by similarity score
    user_recall_items_dict = {k: sorted(v.items(), key=lambda x: x[1], reverse=True) for k, v in
                              user_recall_items_dict.items()}

    # save the recall results
    pickle.dump(user_recall_items_dict, open(save_path + 'youtubednn_recall_dict.pkl', 'wb'))
    return user_recall_items_dict
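
# The returned user_recall_items_dict maps each raw user id to a list of (raw article id, score)
# pairs sorted by similarity, e.g. {user_raw_id: [(article_raw_id, 0.87), ...]} (scores illustrative).
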

if __name__ == '__main__':
    # for recall evaluation, the last click of each user is held out from the training data
    if not metric_recall:
        user_multi_recall_dict['youtubednn_recall'] = youtubednn_u2i_dict(all_click_df, topk=20)
    else:
        trn_hist_click_df, trn_last_click_df = get_hist_and_last_click(all_click_df)
        user_multi_recall_dict['youtubednn_recall'] = youtubednn_u2i_dict(trn_hist_click_df, topk=20)
        # evaluate the recall quality
        metrics_recall(user_multi_recall_dict['youtubednn_recall'], trn_last_click_df, topk=20)