Commit
Merge branch 'merge_flameletai' of https://github.com/EvertBunschoten…
EvertBunschoten committed Aug 1, 2024
2 parents bcc7224 + 750b5a6 commit 7a1fa11
Showing 2 changed files with 94 additions and 20 deletions.
16 changes: 4 additions & 12 deletions Manifold_Generation/MLP/Trainer_Base.py
@@ -898,9 +898,10 @@ def LoopEpochs(self):
         t_start = time.time()
         worst_error = 1e32
         i = 0
+        train_batches = tf.data.Dataset.from_tensor_slices((self._X_train_norm, self._Y_train_norm)).batch(2**self._batch_expo)
         while (i < self._n_epochs) and self.__keep_training:
-            train_batches_shuffled = self.SetTrainBatches()
-            self.LoopBatches(train_batches=train_batches_shuffled)
+            #train_batches_shuffled = self.SetTrainBatches()
+            self.LoopBatches(train_batches=train_batches)
 
             val_loss = self.ValidationLoss()

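The batched training set is now constructed once, ahead of the epoch loop, rather than rebuilt through SetTrainBatches() every epoch; the trade-off is that batch composition stays fixed across epochs. A minimal sketch of how the same one-time setup could keep per-epoch reshuffling via tf.data (array names and sizes are illustrative, not from the commit):

import numpy as np
import tensorflow as tf

# Illustrative stand-ins for self._X_train_norm / self._Y_train_norm.
X_train_norm = np.random.rand(1024, 2).astype(np.float32)
Y_train_norm = np.random.rand(1024, 1).astype(np.float32)
batch_expo = 5

# shuffle() placed before batch() re-randomizes the sample order on every
# pass over the dataset, so each epoch sees differently composed batches
# while the pipeline is still built only once.
train_batches = (tf.data.Dataset.from_tensor_slices((X_train_norm, Y_train_norm))
                 .shuffle(buffer_size=1024, reshuffle_each_iteration=True)
                 .batch(2**batch_expo))

for epoch in range(3):
    for x_batch, y_batch in train_batches:
        pass  # self.Train_Step(x_batch, y_batch) would run here.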
@@ -911,7 +912,6 @@ def LoopEpochs(self):
             worst_error = self.__CheckEarlyStopping(val_loss, worst_error)
 
             self.PrintEpochInfo(i, val_loss)
-
             i += 1
         t_end = time.time()
         self._train_time = (t_end - t_start)/60
@@ -922,17 +922,9 @@ def PrintEpochInfo(self, i_epoch, val_loss):
         print("Epoch: ", str(i_epoch), " Validation loss: ", str(val_loss.numpy()))
         return
 
     #@tf.function
     def LoopBatches(self, train_batches):
-        i_batch=0
         for x_norm_batch, y_norm_batch in train_batches:
-            indices = tf.range(start=0,limit=tf.shape(x_norm_batch)[0],dtype=tf.int32)
-            shuffled_indices = tf.random.shuffle(indices)
-            x_batch_shuffled = tf.gather(x_norm_batch, shuffled_indices)
-            y_batch_shuffled = tf.gather(y_norm_batch, shuffled_indices)
-
-            self.Train_Step(x_batch_shuffled, y_batch_shuffled)
-            i_batch += 1
+            self.Train_Step(x_norm_batch, y_norm_batch)
         return
 
     def ValidationLoss(self):
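The block deleted from LoopBatches permuted the samples inside each batch before every training step. For a loss that is mean-reduced over the batch, a row permutation changes neither the loss value nor its gradient, so removing the shuffle saves two gathers per step without affecting the optimization; only the batch composition across epochs (handled above) matters for SGD. A small eager-mode check of that invariance, with an illustrative mean-squared loss:

import tensorflow as tf

x = tf.random.uniform((32, 2))
y = tf.random.uniform((32, 1))
w = tf.Variable(tf.random.uniform((2, 1)))

def batch_loss(x_b, y_b):
    # Mean reduction makes the result independent of row order.
    return tf.reduce_mean(tf.square(tf.matmul(x_b, w) - y_b))

perm = tf.random.shuffle(tf.range(32))
print(batch_loss(x, y).numpy())
print(batch_loss(tf.gather(x, perm), tf.gather(y, perm)).numpy())  # same value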
98 changes: 90 additions & 8 deletions Manifold_Generation/MLP/Trainers_NICFD/Trainers.py
@@ -113,6 +113,86 @@ def Plot_and_Save_History(self):
         self._train_vars = self.__Entropy_var
         return

+    def write_SU2_MLP(self, file_out: str):
+        """Write the network to ASCII format readable by the MLPCpp module in SU2.
+        :param file_out: MLP output path and file name.
+        :type file_out: str
+        """
+
+        n_layers = len(self._weights)+1
+
+        # Select trimmed weight matrices for output.
+        weights_for_output = self._weights
+        biases_for_output = self._biases
+
+        # Opening output file
+        fid = open(file_out+'.mlp', 'w+')
+        fid.write("<header>\n\n")
+
+        # Writing number of neurons per layer
+        fid.write('[number of layers]\n%i\n\n' % n_layers)
+        fid.write('[neurons per layer]\n')
+        activation_functions = []
+
+        for iLayer in range(n_layers-1):
+            if iLayer == 0:
+                activation_functions.append('linear')
+            else:
+                activation_functions.append(self._activation_function_name)
+            n_neurons = np.shape(weights_for_output[iLayer])[0]
+            fid.write('%i\n' % n_neurons)
+        fid.write('%i\n' % len(self._train_vars))
+
+        activation_functions.append('linear')
+
+        # Writing the activation function for each layer
+        fid.write('\n[activation function]\n')
+        for iLayer in range(n_layers):
+            fid.write(activation_functions[iLayer] + '\n')
+
+        # Writing the input and output names
+        fid.write('\n[input names]\n')
+        for input_var in self._controlling_vars:
+            fid.write(input_var + '\n')
+
+        fid.write('\n[input normalization]\n')
+        for i in range(len(self._controlling_vars)):
+            fid.write('%+.16e\t%+.16e\n' % (self._X_min[i], self._X_max[i]))
+
+        fid.write('\n[output names]\n')
+        for output in self.__Entropy_var:
+            fid.write(output+'\n')
+
+        fid.write('\n[output normalization]\n')
+        fid.write('%+.16e\t%+.16e\n' % (self.__s_min, self.__s_max))
+
+        fid.write("\n</header>\n")
+
+        # Writing the weights of each layer
+        fid.write('\n[weights per layer]\n')
+        for W in weights_for_output:
+            fid.write("<layer>\n")
+            for i in range(np.shape(W)[0]):
+                fid.write("\t".join("%+.16e" % float(w) for w in W[i, :]) + "\n")
+            fid.write("</layer>\n")
+
+        # Writing the biases of each layer
+        fid.write('\n[biases per layer]\n')
+
+        # Input layer biases are set to zero
+        fid.write("\t".join("%+.16e" % 0 for _ in self._controlling_vars) + "\n")
+        #for B in self.biases:
+        for B in biases_for_output:
+            try:
+                fid.write("\t".join("%+.16e" % float(b) for b in B.numpy()) + "\n")
+            except TypeError:
+                # A scalar (0-d) bias cannot be iterated; write it directly.
+                fid.write("%+.16e\n" % float(B.numpy()))
+
+        fid.close()
+        return

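Judging purely from the write calls above, a call such as trainer.write_SU2_MLP("entropy_mlp") (object name hypothetical) would emit entropy_mlp.mlp with a layout along these lines, here for an illustrative 2-input network with two hidden layers of 30 neurons; all names and numbers are placeholders:

<header>

[number of layers]
4

[neurons per layer]
2
30
30
1

[activation function]
linear
exponential
exponential
linear

[input names]
Density
Energy

...

</header>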
     @tf.function
     def CollectVariables(self):
         """Define weights and biases as trainable hyper-parameters.
@@ -181,7 +261,7 @@ def __EntropicEOS(self,rho, dsdrhoe, d2sdrho2e2):
         return T, P, c2
 
     @tf.function
-    def __TD_Evaluation(self, rhoe_norm:tf.Tensor):
+    def TD_Evaluation(self, rhoe_norm:tf.Tensor):
         s, dsdrhoe, d2sdrho2e2 = self.__ComputeEntropyGradients(rhoe_norm)
         rho_norm = tf.gather(rhoe_norm, indices=self.__idx_rho, axis=1)
         rho = (self.__rho_max - self.__rho_min)*rho_norm + self.__rho_min
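The rename from __TD_Evaluation to TD_Evaluation (also applied to every call site below) lifts Python's name mangling: a double-underscore method is stored internally as _ClassName__TD_Evaluation, which makes it awkward to reach from subclasses or from an orchestrating trainer object. A minimal illustration of the mechanism (class name hypothetical):

class Trainer:
    def __evaluate(self):       # stored as _Trainer__evaluate
        return 42

    def evaluate(self):         # public: reachable by normal lookup
        return 42

t = Trainer()
print(t.evaluate())             # 42
print(t._Trainer__evaluate())   # 42, but only via the mangled name
# t.__evaluate() raises AttributeError: no attribute '__evaluate'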
Expand Down Expand Up @@ -223,7 +303,7 @@ def __Compute_T_error(self, T_label_norm:tf.Tensor, x_var:tf.constant):
"""

# Evaluate thermodynamic state.
_, T, _, _ = self.__TD_Evaluation(x_var)
_, T, _, _ = self.TD_Evaluation(x_var)

# Normalize reference and predicted temperature.
T_pred_norm = (T - self._Y_min[self.__idx_T]) / (self._Y_max[self.__idx_T] - self._Y_min[self.__idx_T])
@@ -246,7 +326,7 @@ def __Compute_P_error(self, P_label_norm:tf.Tensor,x_var:tf.constant):
         """
 
         # Evaluate thermodynamic state.
-        _, _, P, _ = self.__TD_Evaluation(x_var)
+        _, _, P, _ = self.TD_Evaluation(x_var)
 
         # Normalize reference and predicted pressure.
         P_pred_norm = (P - self._Y_min[self.__idx_p]) / (self._Y_max[self.__idx_p] - self._Y_min[self.__idx_p])
@@ -269,7 +349,7 @@ def __Compute_C2_error(self, C2_label_norm,x_var:tf.constant):
         """
 
         # Evaluate thermodynamic state.
-        _, _, _, C2 = self.__TD_Evaluation(x_var)
+        _, _, _, C2 = self.TD_Evaluation(x_var)
 
         # Normalize reference and predicted squared SoS.
         C2_pred_norm = (C2 - self._Y_min[self.__idx_c2]) / (self._Y_max[self.__idx_c2] - self._Y_min[self.__idx_c2])
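All three error terms share one min-max scheme: a physical prediction Q is mapped to (Q - Q_min)/(Q_max - Q_min) so that temperature, pressure, and squared speed-of-sound residuals enter the loss at comparable magnitudes. A sketch of that shared pattern as a helper (name and signature illustrative, not part of the commit):

import tensorflow as tf

def normalized_mse(pred, label_norm, q_min, q_max):
    # Scale the physical-space prediction with the same bounds used for
    # the training labels, then compare in normalized space.
    pred_norm = (pred - q_min) / (q_max - q_min)
    return tf.reduce_mean(tf.square(pred_norm - label_norm))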
@@ -368,7 +448,7 @@ def PrintEpochInfo(self, i_epoch, val_loss):
         T_val_loss = val_loss[0]
         P_val_loss = val_loss[1]
         C2_val_loss = val_loss[2]
-        print("Epoch %i Validation loss Temperature: %.4e, Pressure: %.4e, Speed of sound: %.4e" % (i_epoch, T_val_loss.numpy(), P_val_loss.numpy(), C2_val_loss.numpy()))
+        print("Epoch %i Validation loss Temperature: %.4e, Pressure: %.4e, Speed of sound: %.4e" % (i_epoch, T_val_loss, P_val_loss, C2_val_loss))
 
         return

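Dropping the .numpy() calls still prints correctly in eager mode because the %.4e conversion coerces a scalar tf.Tensor through float(); inside a @tf.function the same line would fail, since symbolic tensors carry no concrete value. A quick eager-mode check:

import tensorflow as tf

t = tf.constant(3.14159)
print("%.4e" % t)          # tensor coerced via float() in eager mode
print("%.4e" % t.numpy())  # equivalent result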
@@ -377,7 +457,7 @@ def __ValidationLoss(self):
         # T_val_error = self.__Compute_T_error(self._Y_val_norm[:,self.__idx_T], rhoe_val_norm)
         # p_val_error = self.__Compute_P_error(self._Y_val_norm[:,self.__idx_p], rhoe_val_norm)
         # c2_val_error = self.__Compute_C2_error(self._Y_val_norm[:,self.__idx_c2], rhoe_val_norm)
-        _, T_pred_val, P_pred_val, C2_pred_val = self.__TD_Evaluation(rhoe_val_norm)
+        _, T_pred_val, P_pred_val, C2_pred_val = self.TD_Evaluation(rhoe_val_norm)
         T_pred_val_norm = (T_pred_val - self.Temperature_min)/(self.Temperature_max - self.Temperature_min)
         P_pred_val_norm = (P_pred_val - self.Pressure_min)/(self.Pressure_max - self.Pressure_min)
         C2_pred_val_norm = (C2_pred_val - self.C2_min)/(self.C2_max - self.C2_min)
@@ -395,7 +475,7 @@ def TestLoss(self):
 
         rhoe_test_norm = tf.constant(self._X_test_norm, self._dt)
 
-        _, T_pred_test, P_pred_test, C2_pred_test = self.__TD_Evaluation(rhoe_test_norm)
+        _, T_pred_test, P_pred_test, C2_pred_test = self.TD_Evaluation(rhoe_test_norm)
         T_pred_test_norm = (T_pred_test - self.Temperature_min)/(self.Temperature_max - self.Temperature_min)
         P_pred_test_norm = (P_pred_test - self.Pressure_min)/(self.Pressure_max - self.Pressure_min)
         C2_pred_test_norm = (C2_pred_test - self.C2_min)/(self.C2_max - self.C2_min)
Expand Down Expand Up @@ -424,7 +504,7 @@ def __Generate_Error_Plots(self):
"""Make nice plots of the interpolated test data.
"""

s_test_pred, T_test_pred, P_test_pred, C2_test_pred = self.__TD_Evaluation(self._X_test_norm)
s_test_pred, T_test_pred, P_test_pred, C2_test_pred = self.TD_Evaluation(self._X_test_norm)

figformat = "png"
plot_fontsize = 20
@@ -583,12 +663,14 @@ def CommenceTraining(self):
         self.__trainer_PINN.SetWeights(weights_entropy)
         self.__trainer_PINN.SetBiases(biases_entropy)
         self.__trainer_PINN.Train_MLP()
+        self.__trainer_PINN.PostProcessing()
 
         fid = open(self.main_save_dir + "/current_iter.txt", "w+")
         fid.write(str(self.current_iter) + "\n")
         fid.close()
         self._test_score = self.__trainer_PINN.GetTestScore()
         self._cost_parameter = self.__trainer_PINN.GetCostParameter()
+        self.__trainer_PINN.Save_Relevant_Data()
 
         return

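CommenceTraining now finishes each iteration with PostProcessing() and Save_Relevant_Data(), and persists the iteration counter to current_iter.txt. A sketch of how an interrupted run could read that counter back to resume (the resume logic itself is an assumption, not part of this commit):

import os

def read_last_iter(save_dir: str) -> int:
    # current_iter.txt holds the single integer written by CommenceTraining.
    path = os.path.join(save_dir, "current_iter.txt")
    if not os.path.isfile(path):
        return 0
    with open(path) as fid:
        return int(fid.read().strip())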