# model.py
# Model using the attention layer: a bidirectional CuDNN LSTM feeding a
# bidirectional CuDNN GRU, with two attention heads and two pooling heads
# concatenated before the classifier.
from keras.models import Model
from keras.layers import (Input, Embedding, SpatialDropout1D, Bidirectional,
                          CuDNNLSTM, CuDNNGRU, Dense, Dropout,
                          GlobalAveragePooling1D, GlobalMaxPooling1D,
                          concatenate)

# `maxlen`, `max_features`, and `embed_size` are module-level constants, and
# `Attention` (a custom layer) and `f1` (a custom metric) are defined
# elsewhere in the repository; plausible sketches of both follow below.
def model_lstm_atten(embedding_matrix):
    inp = Input(shape=(maxlen,))
    # Frozen embedding layer initialised from the pre-trained matrix.
    x = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)(inp)
    x = SpatialDropout1D(0.1)(x)
    x = Bidirectional(CuDNNLSTM(40, return_sequences=True))(x)
    y = Bidirectional(CuDNNGRU(40, return_sequences=True))(x)
    atten_1 = Attention(maxlen)(x)  # skip connection: attention over the LSTM output
    atten_2 = Attention(maxlen)(y)
    avg_pool = GlobalAveragePooling1D()(y)
    max_pool = GlobalMaxPooling1D()(y)
    conc = concatenate([atten_1, atten_2, avg_pool, max_pool])
    conc = Dense(16, activation="relu")(conc)
    conc = Dropout(0.1)(conc)
    outp = Dense(1, activation="sigmoid")(conc)
    model = Model(inputs=inp, outputs=outp)
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=[f1])
    return model
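

# The custom `Attention` layer is not defined in this file. Below is a minimal
# sketch, assuming the widely used weighted-sum attention layer from Kaggle
# text-classification kernels (its `step_dim` corresponds to `maxlen` above);
# the actual implementation in this repository may differ.
from keras import backend as K
from keras.layers import Layer

class Attention(Layer):
    def __init__(self, step_dim, **kwargs):
        self.step_dim = step_dim
        self.features_dim = 0
        super(Attention, self).__init__(**kwargs)

    def build(self, input_shape):
        # One scoring weight per feature, one bias per timestep.
        self.features_dim = input_shape[-1]
        self.W = self.add_weight(name='{}_W'.format(self.name),
                                 shape=(input_shape[-1],),
                                 initializer='glorot_uniform')
        self.b = self.add_weight(name='{}_b'.format(self.name),
                                 shape=(input_shape[1],),
                                 initializer='zeros')
        super(Attention, self).build(input_shape)

    def call(self, x, mask=None):
        # Score each timestep, softmax over the sequence, then take the
        # attention-weighted sum of the timestep features.
        eij = K.reshape(K.dot(K.reshape(x, (-1, self.features_dim)),
                              K.reshape(self.W, (self.features_dim, 1))),
                        (-1, self.step_dim))
        eij = K.tanh(eij + self.b)
        a = K.exp(eij)
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
        return K.sum(x * K.expand_dims(a), axis=1)

    def compute_output_shape(self, input_shape):
        return input_shape[0], self.features_dim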
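

# `f1` is likewise defined elsewhere. A common batch-wise sketch (approximate,
# since the score is averaged per batch rather than computed over the full
# validation set):
def f1(y_true, y_pred):
    y_pred = K.round(K.clip(y_pred, 0, 1))
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    precision = tp / (K.sum(y_pred) + K.epsilon())
    recall = tp / (K.sum(K.round(K.clip(y_true, 0, 1))) + K.epsilon())
    return 2 * precision * recall / (precision + recall + K.epsilon())

# Hypothetical smoke test: the constants and random embedding matrix below are
# illustrative stand-ins, not values from this repository. Note the CuDNN
# layers require a CUDA-enabled GPU to run.
if __name__ == '__main__':
    import numpy as np
    maxlen, max_features, embed_size = 70, 50000, 300
    embedding_matrix = np.random.normal(size=(max_features, embed_size)).astype('float32')
    model = model_lstm_atten(embedding_matrix)
    model.summary()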