# nfm_sum_multiply.py
# forked from siqi13579/ctr_Keras
import os
import numpy as np
import pandas as pd
from keras.layers import Dense, Input, Embedding, Activation, Reshape, Lambda, add, multiply
from keras.layers import BatchNormalization
from keras.models import Model
from keras.callbacks import EarlyStopping, ModelCheckpoint
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
from keras.optimizers import Adam
print('nfm_sum_multiply')
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
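# Optional sketch (not in the original script): with TF 1.x / standalone Keras,
# let TensorFlow grow GPU memory on demand instead of pre-allocating the whole card.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))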
file_path = "nfm_sum_multiply.h5"
batch_size = 256
epochs = 1
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
numer_columns = [col for col in train.columns if 'numer' in col]
cat_columns = [col for col in train.columns if 'cat' in col]
y_train = train['label'].values
y_test = test['label'].values
# ----------------------------------model-------------
def base_model(cat_columns, train, test):
    """NFM-style model: element-wise products of every pair of field embeddings,
    summed and passed through a small MLP to a sigmoid CTR prediction."""
    field_cnt = len(cat_columns)
    cat_field_input = []
    field_embedding = []
    for cat in cat_columns:
        inp = Input(shape=(1,))
        cat_field_input.append(inp)
        # vocabulary size: largest id seen in train or test, plus one
        nums = pd.concat((train[cat], test[cat])).max() + 1
        # fm embeddings: one 10-dimensional vector per field
        embed = Embedding(nums, 10, input_length=1, trainable=True)(inp)
        reshape = Reshape((10,))(embed)
        field_embedding.append(reshape)
    ####### fm layer: element-wise product of every field pair ##########
    inner_product = []
    for i in range(field_cnt):
        for j in range(i + 1, field_cnt):
            tmp = multiply([field_embedding[i], field_embedding[j]])
            inner_product.append(tmp)
    ####### dnn layer: sum-pool the pairwise products, then a two-layer MLP ##########
    embed_layer = add(inner_product)
    embed_layer = Dense(64)(embed_layer)
    embed_layer = BatchNormalization()(embed_layer)
    embed_layer = Activation('relu')(embed_layer)
    embed_layer = Dense(64)(embed_layer)
    embed_layer = BatchNormalization()(embed_layer)
    embed_layer = Activation('relu')(embed_layer)
    embed_layer = Dense(1)(embed_layer)
    ####### output layer ##########
    preds = Activation('sigmoid')(embed_layer)
    opt = Adam(0.001)
    model = Model(inputs=cat_field_input, outputs=preds)
    model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['acc'])
    return model
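# Note (not in the original script): the O(n^2) pairwise loop above is equivalent to
# NFM's bi-interaction pooling, sum_{i<j} v_i * v_j = 0.5 * ((sum_i v_i)^2 - sum_i v_i^2),
# which needs only O(n) layer ops. A hedged sketch (helper name is ours, not the repo's):
def bi_interaction_pooling(field_embedding):
    summed = add(field_embedding)                          # sum_i v_i
    square_of_sum = multiply([summed, summed])             # (sum_i v_i)^2, element-wise
    sum_of_square = add([multiply([v, v]) for v in field_embedding])  # sum_i v_i^2
    return Lambda(lambda t: 0.5 * (t[0] - t[1]))([square_of_sum, sum_of_square])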
# -------------------------------- training --------------------------------
cols = cat_columns + numer_columns
# every column (numeric ones included) is fed through an Embedding layer, so the
# numeric features are assumed to already hold small non-negative integer ids
x_train = train[cols].values
x_test = test[cols].values
# one 1-D array per column, matching the model's list of single-value inputs
x_train = list(x_train.T)
x_test = list(x_test.T)
early = EarlyStopping(monitor="val_loss", patience=2)
checkpoint = ModelCheckpoint(
    file_path, save_weights_only=True, verbose=1, save_best_only=True)
callbacks_list = [early, checkpoint]
model = base_model(cols, train, test)
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=epochs,
          validation_data=(x_test, y_test), callbacks=callbacks_list, shuffle=False)
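# Optional evaluation sketch (not in the original script): reload the best
# checkpointed weights and score the held-out set with ROC AUC, the usual CTR metric.
from sklearn.metrics import roc_auc_score

model.load_weights(file_path)
y_pred = model.predict(x_test, batch_size=batch_size).ravel()
print('test AUC: %.4f' % roc_auc_score(y_test, y_pred))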