I have the following Keras/TensorFlow code, but running it gives an error:
import numpy as np
import keras
from keras import backend as K
from keras import initializers
from keras.models import Sequential, Model, load_model, save_model
from keras.layers.core import Dense, Lambda, Activation
from keras.layers import Embedding, Input, Multiply, Reshape, Flatten
from keras.optimizers import Adagrad, Adam, SGD, RMSprop
from keras.regularizers import l2
from sklearn.metrics import average_precision_score, roc_auc_score
# Helper (unused below): returns a LeCun-uniform initializer object
def init_normal(shape, name=None):
    return initializers.lecun_uniform(seed=None)
def get_model(num_a, num_b, num_c, dim, regs=[0, 0, 0]):
    # One integer index per sample for each of the three inputs
    a = Input(shape=(1,), dtype='int32', name='a')
    b = Input(shape=(1,), dtype='int32', name='b')
    c = Input(shape=(1,), dtype='int32', name='c')
    Embedding_a = Embedding(input_dim=num_a, output_dim=dim, embeddings_initializer='uniform',
                            embeddings_regularizer=l2(regs[0]), input_length=1)
    Embedding_b = Embedding(input_dim=num_b, output_dim=dim, embeddings_initializer='uniform',
                            embeddings_regularizer=l2(regs[1]), input_length=1)
    Embedding_c = Embedding(input_dim=num_c, output_dim=dim, embeddings_initializer='uniform',
                            embeddings_regularizer=l2(regs[2]), input_length=1)
    a_latent = Flatten()(Embedding_a(a))
    b_latent = Flatten()(Embedding_b(b))
    c_latent = Flatten()(Embedding_c(c))
    # Element-wise product of the three latent vectors
    predict_vector = Multiply()([a_latent, b_latent, c_latent])
    prediction = Dense(1, activation='sigmoid', kernel_initializer='lecun_uniform',
                       name='prediction')(predict_vector)
    model = Model(inputs=[a, b, c], outputs=prediction)
    return model
def evaluate_model(model, test_pos, test_neg):
    global _model
    global _test_pos
    global _test_neg
    _model = model
    _test_pos = test_pos
    _test_neg = test_neg
    print(_test_neg)
    a, b, c, labels = [],[],[],[]
    for item in _test_pos:
        a.append(item[0])
        b.append(item[1])
        c.append(item[2])
        labels.append(1)
    for item in _test_neg:
        a.append(item[0])
        b.append(item[1])
        c.append(item[2])
        labels.append(0)
    a = np.array(a)
    b = np.array(b)
    c = np.array(c)
    predictions = _model.predict([a, b, c], batch_size=100, verbose=0).ravel()
    # average_precision_score gives AUPR; roc_auc_score gives AUC-ROC
    return average_precision_score(labels, predictions), roc_auc_score(labels, predictions)
model = get_model(4, 8, 12, 2, [0,0,0])
model.compile(optimizer=Adam(lr=0.001), loss='binary_crossentropy')
pos_test = [[0, 0, 2], [4, 8, 8], [2, 5, 4], [0, 0, 0]]
neg_test = [[3, 3, 2], [2, 1, 8], [1, 4, 1], [3, 3, 12]]
aupr, auc = evaluate_model(model, pos_test, neg_test)
print(aupr, auc)
However, it gives me the following error. Is there any way to fix it?
InvalidArgumentError (see above for traceback): indices[1,0] = 4 is not in [0, 4)
     [[Node: embedding_4/embedding_lookup = Gather[Tindices=DT_INT32, Tparams=DT_FLOAT, _class=["loc:@embedding_4/embeddings"], validate_indices=true, _device="/job:localhost/replica:0/task:0/cpu:0"](embedding_4/embeddings/read, _recv_a_1_0)]]
The problem is that you defined the embedding input_dim values as 4, 8 and 12, while they should be 5, 9 and 13, because input_dim of an Embedding layer must be max_index + 1. This is also clearly stated in the Keras docs:
Size of the vocabulary, i.e. maximum integer index + 1.
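In other words, Embedding(input_dim=4, ...) only has rows for indices 0 through 3, so looking up index 4 (the first value of pos_test[1] = [4, 8, 8]) fails exactly as your traceback shows. Here is a minimal sketch reproducing the behaviour, assuming the TensorFlow CPU backend (on GPU, out-of-range indices may silently return zeros instead of raising):
import numpy as np
from keras.models import Sequential
from keras.layers import Embedding

# A lookup table with 4 rows: valid indices are 0, 1, 2, 3
toy = Sequential([Embedding(input_dim=4, output_dim=2, input_length=1)])

print(toy.predict(np.array([[3]], dtype='int32')))  # works: 3 is in [0, 4)
toy.predict(np.array([[4]], dtype='int32'))         # InvalidArgumentError: 4 is not in [0, 4)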
How to fix the issue?
Change the get_model call to:
model = get_model(5, 9, 13, 2, [0, 0, 0])
Or, alternatively, re-index the data so the maximum indices fit within the original sizes:
pos_test = [[0, 0, 2], [3, 7, 7], [2, 5, 4], [0, 0, 0]]
neg_test = [[3, 3, 2], [2, 1, 7], [1, 4, 1], [3, 3, 11]]
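If you would rather not hard-code the sizes, you can also derive them from the data. A small sketch using the test lists from the question (in practice you would take the maxima over all of your data, not just the test split):
import numpy as np

pos_test = [[0, 0, 2], [4, 8, 8], [2, 5, 4], [0, 0, 0]]
neg_test = [[3, 3, 2], [2, 1, 8], [1, 4, 1], [3, 3, 12]]

all_triples = np.array(pos_test + neg_test)        # shape (8, 3)
num_a, num_b, num_c = all_triples.max(axis=0) + 1  # 5, 9, 13

model = get_model(num_a, num_b, num_c, 2, [0, 0, 0])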