# Evaluate-LSTM-on-test.py
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input
architecture_ID = "O-R"

# Load the test-set latent embeddings produced by the trained encoder.
Z_test = np.load('C:/Users/MrLin/Documents/Experiments/Deep Video Embedding/Results Z2V/Turf Valley/Embedding data/Z_test_last-' + architecture_ID + '.npy')
T_test = len(Z_test)

l = 75           # LSTM window length (number of past frames per input segment)
latent_dim = 3   # dimensionality of the latent embedding
batch_size = 1

start_frame_test = l - 1     # first frame index in the valid range
end_frame_test = T_test - 2  # last frame index in the valid range, assuming frames run from t=0 to t=T-1
samples_test = end_frame_test - start_frame_test + 1
print(samples_test)
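# Note (added for clarity): samples_test simplifies to T_test - l, i.e. one
# prediction per frame that has a full l-frame history and a computable true
# velocity at the next step.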
def inputs_generator_inference_latent_LSTM(t_start, t_end, Z):
    # Yields sliding windows of l consecutive latent vectors, one per frame t.
    # We only predict up to the second-to-last frame, since that is the last
    # frame at which the true velocity v is computable.
    t = t_start
    while t <= t_end:
        ZSegment = Z[t - l + 1:t + 1, :]
        yield ZSegment
        t += 1
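# Sanity check (illustrative addition, not in the original script): the first
# window should span frames [start_frame_test - l + 1, start_frame_test] and
# have shape (l, latent_dim).
first_segment = next(inputs_generator_inference_latent_LSTM(start_frame_test, end_frame_test, Z_test))
assert first_segment.shape == (l, latent_dim)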
# Build a tf.data pipeline from the generator; each element is one
# (l, latent_dim) window of latent vectors.
ds_test = tf.data.Dataset.from_generator(
    inputs_generator_inference_latent_LSTM,
    args=[start_frame_test, end_frame_test, Z_test],
    output_types=tf.float32,
    output_shapes=(l, latent_dim))
ds_test = ds_test.batch(batch_size, drop_remainder=False)
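# Illustrative check (added): each batched element should have shape
# (batch_size, l, latent_dim).
for batch in ds_test.take(1):
    print(batch.shape)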
# True one-step velocities: v[t] = Z[t+1] - Z[t] for t in [start_frame_test, T_test - 2].
v = Z_test[start_frame_test + 1:T_test, :] - Z_test[start_frame_test:T_test - 1, :]
# Load the full trained model, then detach the LSTM and its output head
# (layers 6 and 8 of the saved architecture) so they can be run directly
# on latent windows without the image branch.
full_model = keras.models.load_model('C:/Users/MrLin/Documents/Experiments/Deep Video Embedding/saved models/Turf Valley Z2V/DVE/last-' + architecture_ID)  # full model
detached_LSTM_input = Input((l, latent_dim), name='detached_LSTM_input')
w = full_model.layers[6](detached_LSTM_input)  # LSTM over the latent window
vhat_out = full_model.layers[8](w)             # head mapping LSTM output to a velocity estimate
model = Model(detached_LSTM_input, vhat_out, name='joint_LSTM')
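# Illustrative check (added): the detached sub-model should map an
# (l, latent_dim) window to a single latent_dim velocity vector.
model.summary()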
vhat = model.predict(ds_test)
print(f"v shape {v.shape}, vhat shape {vhat.shape}")

# Mean squared Euclidean error between true and predicted velocities.
MSE = np.mean(np.linalg.norm(v - vhat, axis=1) ** 2)
# tf.keras.losses.cosine_similarity returns the *negative* cosine similarity
# (it is a loss), so negate its mean to report the similarity itself.
CosineSim = -np.mean(tf.keras.losses.cosine_similarity(vhat, v, axis=-1).numpy())
print(f"MSE {MSE} Cos {CosineSim}")