-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathmixture_network.py
92 lines (71 loc) · 2.81 KB
/
mixture_network.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
from __future__ import absolute_import
from __future__ import print_function
from autograd import grad
from math import sqrt
import autograd.numpy.random as npr
import autograd.numpy as np
import tensorflow as tf
import os
def neural_net(x, weights, biases):
    """Forward pass of the mixture network.

    A single hidden layer followed by a linear output layer. Note that no
    activation function is applied anywhere — the whole network is affine.

    Args:
        x: input tensor/placeholder of shape [batch, num_input].
        weights: dict with weight variables under keys 'h1' and 'out'.
        biases: dict with bias variables under keys 'b1' and 'out'.

    Returns:
        Output tensor of shape [batch, num_output].
    """
    hidden = tf.matmul(x, weights['h1']) + biases['b1']
    return tf.add(tf.matmul(hidden, weights['out']), biases['out'])
def predict(nn, inputs, x, init, model_path):
    """Restore a trained model from disk and run inference on `inputs`.

    Args:
        nn: the network's output tensor (as built by neural_net).
        inputs: data to feed into placeholder `x`.
        x: the input placeholder the graph was built with.
        init: the variable-initializer op for the graph.
        model_path: checkpoint path written by tf.train.Saver.

    Returns:
        Predictions as a numpy array cast to int (truncation toward zero —
        presumably the targets are integer-valued; confirm with callers).
    """
    saver = tf.train.Saver()
    # Open a fresh session: initialise variables, then overwrite them
    # with the trained values from the checkpoint.
    with tf.Session() as session:
        session.run(init)
        saver.restore(session, model_path)
        outputs = session.run(nn, feed_dict={x: inputs})
        return outputs.astype(int)
def train_mixture_network(inputs, targets, num_input, num_output):
    """Build and train a one-hidden-layer network with full-batch SGD.

    Trains for a fixed number of epochs on the whole dataset each step,
    saves the model to a checkpoint, and runs one restore-and-predict
    pass as a sanity check.

    Args:
        inputs: training inputs, shape [n_samples, num_input].
        targets: training targets, shape [n_samples, num_output].
        num_input: input dimensionality.
        num_output: output dimensionality.

    Returns:
        (pred, x, init, model_path): output tensor, input placeholder,
        initializer op, and checkpoint path — everything `predict` needs
        to restore and run the model later.
    """
    learning_rate = 0.0001
    epochs = 5000
    model_path = "/tmp/model.ckpt"
    num_hidden1 = 40
    print("Num input:", num_input)

    # Placeholders: any number of rows, fixed feature/target width.
    x = tf.placeholder("float32", [None, num_input], name='x')
    y = tf.placeholder("float32", [None, num_output], name='y')

    # Randomly initialised weights and biases for the single hidden layer.
    weights = {
        'h1': tf.Variable(tf.random_normal([num_input, num_hidden1])),
        'out': tf.Variable(tf.random_normal([num_hidden1, num_output])),
    }
    biases = {
        'b1': tf.Variable(tf.random_normal([num_hidden1])),
        'out': tf.Variable(tf.random_normal([num_output])),
    }

    pred = neural_net(x, weights, biases)

    # BUG FIX: the loss was previously built as
    # tf.losses.mean_squared_error(pred, targets), embedding the raw numpy
    # `targets` as a constant in the graph — the `y: targets` entry in
    # feed_dict was silently ignored and the graph could never be fed
    # different targets. Use the placeholder, with the API's named
    # arguments (labels first) to avoid any future positional mix-up.
    mse = tf.losses.mean_squared_error(labels=y, predictions=pred)
    optimizer = tf.train.GradientDescentOptimizer(
        learning_rate=learning_rate).minimize(mse)

    init = tf.global_variables_initializer()
    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(init)
        # Full-batch gradient descent: every epoch sees the whole dataset.
        for epoch in range(epochs):
            _, c = sess.run([optimizer, mse],
                            feed_dict={x: inputs, y: targets})
        # Persist the trained variables for later restoration.
        save_path = saver.save(sess, model_path)

    # Sanity check: restore from the checkpoint and run inference once
    # (result intentionally discarded, matching the original behaviour).
    predict(pred, inputs, x, init, save_path)
    return pred, x, init, model_path