
Commit

…into main
EvertBunschoten committed Aug 21, 2024

2 parents 5b8c65d + 6bfc071 commit f593795
Showing 4 changed files with 137 additions and 20 deletions.
Data_Processing/OptimizeProgressVariable.py (7 changes: 3 additions & 4 deletions)
@@ -395,7 +395,6 @@ def RunOptimizer(self):

bounds = Bounds(lb=lb, ub=ub)

print(bounds)
# Generate convergence output file.
self.__convergence = []
self.__convergence_history_filename = self.__output_dir + "/" + self.__Config.GetConfigName() + "_PV_convergence_history.csv"
@@ -601,12 +600,12 @@ def __FilterFlameletData(self):
delta_Y_mon = Y_mon[1:, :] - Y_mon[:-1, :]

# Normalize mass fraction increment vector to generate progress vector components.
range_Y = np.max(Y_filtered,axis=0) - np.min(Y_filtered,axis=0)
range_Y = np.max(Y_filtered,axis=0) - np.min(Y_filtered,axis=0) + 1e-32
delta_Y_norm = delta_Y_mon / range_Y

# Add additional data as progress vector components if defined.
if any(self.__idx_additional_vars):
range_otherdata = np.max(otherdata,axis=0) - np.min(otherdata,axis=0)
range_otherdata = np.max(otherdata,axis=0) - np.min(otherdata,axis=0) + 1e-32
otherdata_mon = otherdata[idx_mon, :]
delta_otherdata_mon = otherdata_mon[1:, :] - otherdata_mon[:-1,:]
delta_Y_norm = np.hstack((delta_Y_norm, delta_otherdata_mon / range_otherdata))
@@ -640,7 +639,7 @@ def __MakeMonotonic(self, Y_flamelet:np.ndarray, AdditionalData:np.ndarray=None)
range_Y = np.max(flamedata,axis=0) - np.min(flamedata,axis=0)

# Compute normalized flamelet data increment vector.
delta_Y_scaled = (flamedata[1:, :]-flamedata[:-1, :]) / range_Y[np.newaxis, :]
delta_Y_scaled = (flamedata[1:, :]-flamedata[:-1, :]) / (range_Y[np.newaxis, :]+1e-32)

# Compute flamelet progress vector and accumulation.
abs_delta_Y_scaled = np.linalg.norm(delta_Y_scaled, axis=1)
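
The three "+ 1e-32" changes in this file guard the normalization against division by zero when a progress-variable species (or an additional data column) is constant along the flamelet, so that its range is exactly zero. A minimal sketch of the guarded normalization; the array values are illustrative, not taken from the module:

import numpy as np

Y_filtered = np.array([[0.10, 0.50],
                       [0.25, 0.50],
                       [0.40, 0.50]])   # second column is constant along the flamelet

# Per-species range; the small offset keeps the constant column from producing 0.
range_Y = np.max(Y_filtered, axis=0) - np.min(Y_filtered, axis=0) + 1e-32

# Increments between consecutive flamelet points, normalized per species.
delta_Y = Y_filtered[1:, :] - Y_filtered[:-1, :]
delta_Y_norm = delta_Y / range_Y        # finite everywhere instead of inf/nan
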
Manifold_Generation/MLP/Trainer_Base.py (2 changes: 1 addition & 1 deletion)
@@ -752,7 +752,7 @@ class CustomTrainer(MLPTrainer):
_train_name:str = ""
_figformat:str="pdf"
__keep_training:bool = True

__stagnation_iter:int = 0
def __init__(self):
MLPTrainer.__init__(self)
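
The new __stagnation_iter attribute on CustomTrainer suggests counting epochs without improvement, presumably to drive the existing __keep_training flag. A minimal sketch of how such a counter is commonly used; the patience value and method names below are assumptions for illustration, not part of this commit:

class StagnationExample:
    def __init__(self, patience: int = 10):
        self.__keep_training = True
        self.__stagnation_iter = 0
        self.__best_loss = float("inf")
        self.__patience = patience

    def update(self, val_loss: float):
        # Reset the counter on improvement, otherwise count stagnating epochs.
        if val_loss < self.__best_loss:
            self.__best_loss = val_loss
            self.__stagnation_iter = 0
        else:
            self.__stagnation_iter += 1
        # Stop training once the loss has stagnated for 'patience' epochs.
        if self.__stagnation_iter >= self.__patience:
            self.__keep_training = False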

Manifold_Generation/MLP/Trainers_NICFD/Trainers.py (10 changes: 5 additions & 5 deletions)
@@ -133,11 +133,11 @@ def TD_Evaluation(self, rhoe_norm:tf.Tensor):
s, dsdrhoe, d2sdrho2e2 = self.__ComputeEntropyGradients(rhoe_norm)
rho_norm = tf.gather(rhoe_norm, indices=0, axis=1)
rho = (self.__rho_max - self.__rho_min)*rho_norm + self.__rho_min
T, P, c2 = self.__EntropicEOS(rho, dsdrhoe, d2sdrho2e2)
T, P, c2 = self.EntropicEOS(rho, dsdrhoe, d2sdrho2e2)
return s, T, P, c2

@tf.function
def __EntropicEOS(self,rho, dsdrhoe, d2sdrho2e2):
def EntropicEOS(self,rho, dsdrhoe, d2sdrho2e2):
dsdrho_e = dsdrhoe[0]
dsde_rho = dsdrhoe[1]
d2sdrho2 = d2sdrho2e2[0][0]
@@ -680,7 +680,7 @@ def __ComputeEntropyGradients(self, rhoe_norm:tf.Tensor):
return s_dim, dsdrhoe, d2sdrho2e2

@tf.function
def __EntropicEOS(self,rho, dsdrhoe, d2sdrho2e2):
def EntropicEOS(self,rho, dsdrhoe, d2sdrho2e2):
dsdrho_e = dsdrhoe[0]
dsde_rho = dsdrhoe[1]
d2sdrho2 = d2sdrho2e2[0][0]
@@ -699,15 +699,15 @@ def TD_Evaluation(self, rhoe_norm:tf.Tensor):
s, dsdrhoe, d2sdrho2e2 = self.__ComputeEntropyGradients(rhoe_norm)
rho_norm = tf.gather(rhoe_norm, indices=self.__idx_rho, axis=1)
rho = (self.__rho_max - self.__rho_min)*rho_norm + self.__rho_min
T, P, c2 = self.__EntropicEOS(rho, dsdrhoe, d2sdrho2e2)
T, P, c2 = self.EntropicEOS(rho, dsdrhoe, d2sdrho2e2)
return s, T, P, c2

@tf.function
def EvaluateState(self, X_norm:tf.Tensor):
_, dsdrhoe, d2sdrho2e2 = self.__ComputeEntropyGradients(X_norm)
rho_norm = tf.gather(X_norm, indices=self.__idx_rho, axis=1)
rho = (self.__rho_max - self.__rho_min)*rho_norm + self.__rho_min
T, P, c2 = self.__EntropicEOS(rho, dsdrhoe, d2sdrho2e2)
T, P, c2 = self.EntropicEOS(rho, dsdrhoe, d2sdrho2e2)
Y_state_pred = tf.stack((T, P, c2),axis=1)
return Y_state_pred
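
The changes in this file drop the double-underscore prefix from EntropicEOS. Python mangles names with two leading underscores (inside a class body, __EntropicEOS is rewritten to _ClassName__EntropicEOS), which makes such a method awkward to reach from subclasses or to override; removing the prefix is presumably meant to avoid exactly that. A small stand-alone illustration of the mangling behaviour:

class Base:
    def __EntropicEOS(self):            # stored as _Base__EntropicEOS
        return "base EOS"

    def EntropicEOS(self):              # no mangling, visible to subclasses
        return "base EOS"

class Derived(Base):
    def evaluate(self):
        # self.__EntropicEOS() would look up _Derived__EntropicEOS here and
        # raise AttributeError; the un-mangled method resolves normally.
        return self.EntropicEOS()

print(Derived().evaluate())             # prints "base EOS"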

Manifold_Generation/MLP/optimizeHP.py (138 changes: 128 additions & 10 deletions)
@@ -30,7 +30,8 @@
import csv
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize, Bounds
from scipy.optimize import minimize, Bounds, differential_evolution
from multiprocessing import current_process

from Common.Properties import DefaultProperties
from Common.Config_base import Config
@@ -74,9 +75,12 @@ class MLPOptimizer:
__NN_max:int = 100
__architecture:list[int]=[30]

__activation_function:str = "exponential"

# Optimization history.
__population_history:list = []
__fitness_history:list = []
__generation_number:int = 0

def __init__(self, Config_in:Config=None, load_file:str=None):
"""Class constructor
@@ -183,6 +187,10 @@ def SetBatch_Expo(self, batch_expo:int=6):

return

def SetActivationFunction(self, activation_function:str="exponential"):
self.__activation_function = activation_function
return

def SetBounds_Batch_Expo(self, batch_expo_min:int=3, batch_expo_max:int=7):
"""Set minimum and maximum values for the training batch exponent (base 2) during optimization.
@@ -274,6 +282,49 @@ def __prepareBounds(self):
upperbound.append(self.__NN_max)

return gene_trainparams, lowerbound, upperbound
def __prepareBounds_DE(self):
lowerbound = []
upperbound = []
integrality = []
if self.__optimize_batch:
integrality.append(True)
lowerbound.append(self.__batch_expo_min)
upperbound.append(self.__batch_expo_max)
if self.__optimize_LR:
integrality.append(False)
integrality.append(False)
lowerbound.append(self.__alpha_expo_min)
lowerbound.append(self.__lr_decay_min)
upperbound.append(self.__alpha_expo_max)
upperbound.append(self.__lr_decay_max)
if self.__optimize_NN:
integrality.append(True)
lowerbound.append(int(self.__NN_min))
upperbound.append(int(self.__NN_max))
return np.array(integrality), np.array(lowerbound), np.array(upperbound)

def __setOptimizer_DE(self):
integrality, lowerbound, upperbound = self.__prepareBounds_DE()
N_genes = len(integrality)
bounds = Bounds(lb=lowerbound, ub=upperbound)
# Determine update strategy based on whether parallel processing is used.
if self.__n_workers > 1:
update_strategy = "deferred"
else:
update_strategy = "immediate"
self.__generation_number = 0
result = differential_evolution(func = self.DE_fitnessFunction,\
callback=self.saveGenerationInfo_DE,\
maxiter=10*N_genes,\
popsize=10,\
bounds=bounds,\
workers=self.__n_workers,\
updating=update_strategy,\
integrality=integrality,\
strategy='best1exp',\
seed=1,\
tol=1e-6)
return
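
The new __setOptimizer_DE method switches the hyper-parameter search from PyGaD to SciPy's differential evolution. The integrality array keeps the batch-size exponent and hidden-layer width integer-valued (this keyword requires SciPy 1.9 or newer), and updating is set to 'deferred' when more than one worker is used, which SciPy requires for parallel population evaluation. A stand-alone sketch of the same call pattern on a toy objective; the bounds and objective are made up for illustration:

import numpy as np
from scipy.optimize import differential_evolution, Bounds

def toy_objective(x):
    # x = [batch_expo, alpha_expo, lr_decay]; lower is better.
    return (x[0] - 5.0) ** 2 + (x[1] + 2.0) ** 2 + (x[2] - 0.9) ** 2

bounds = Bounds(lb=[3, -4.0, 0.85], ub=[7, -1.0, 1.0])
integrality = np.array([True, False, False])   # only the batch exponent is integer

if __name__ == "__main__":
    result = differential_evolution(toy_objective,
                                    bounds=bounds,
                                    integrality=integrality,
                                    strategy="best1exp",
                                    popsize=10,
                                    updating="deferred",
                                    workers=2,
                                    seed=1,
                                    tol=1e-6)
    print(result.x, result.fun)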

def __setOptimizer(self):
"""Prepare PyGaD optimization routine.
@@ -286,6 +337,7 @@ def __setOptimizer(self):
for lb, ub in zip(lowerbound, upperbound):
gene_space.append({'low':lb,'high':ub})


# Initiate PyGaD instance with
self.__optimizer = pygad.GA(num_generations=10*N_genes,\
fitness_func=self.fitnessFunction,\
@@ -351,16 +403,47 @@ def optimizeHP(self):
os.mkdir(self.save_dir)

# Prepare bounds and set optimizer.
if self.__optimize_NN or self.__optimize_batch:
self.__setOptimizer()
self.__setOptimizer_DE()
# if self.__optimize_NN or self.__optimize_batch:
# #self.__setOptimizer()

# Initiate HP optimization.
self.__optimizer.run()
else:
self.__setSimplexOptimizer()
# # Initiate HP optimization.
# self.__optimizer.run()
# else:
# self.__setSimplexOptimizer()

return

def saveGenerationInfo_DE(self, x, convergence):
#x = ga_result.x
f_best = convergence
idx_x = 0
batch_expo = self.__batch_expo
alpha_expo = self.__alpha_expo
lr_decay = self.__lr_decay
architecture = self.__architecture
if self.__optimize_batch:
batch_expo = int(x[idx_x])
idx_x += 1
if self.__optimize_LR:
alpha_expo = x[idx_x]
idx_x += 1
lr_decay = x[idx_x]
idx_x += 1
if self.__optimize_NN:
architecture = [int(x[idx_x])]
idx_x += 1

line_to_write = ("%i,%i,%+.5e,%+.5e," % (self.__generation_number, batch_expo, alpha_expo, lr_decay))
line_to_write += ",".join(("%i" % n) for n in architecture)
line_to_write += ",%+.6e" % f_best
line_to_write += "\n"
with open(self.opt_history_filepath, 'a+') as fid:
fid.write(line_to_write)
print("HELLO")
self.__generation_number += 1
return
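
saveGenerationInfo_DE follows SciPy's callback interface for differential_evolution in its (xk, convergence) form: it is invoked once per generation with the best population member so far and a scalar convergence measure (the run stops early once that value exceeds one). The method decodes the vector with the same gene ordering used in __prepareBounds_DE and appends one CSV row per generation. A minimal example of the same callback pattern:

from scipy.optimize import differential_evolution

history = []

def log_generation(xk, convergence):
    # Called once per generation: xk is the best member so far, convergence
    # the fractional population convergence (values above 1 end the run).
    history.append((xk.copy(), convergence))

result = differential_evolution(lambda x: (x[0] - 1.0) ** 2,
                                bounds=[(-5.0, 5.0)],
                                callback=log_generation,
                                maxiter=20,
                                seed=1)
print(len(history), "generations logged, best x =", result.x)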

def saveGenerationInfo(self, ga_instance:pygad.GA):
"""Save population information per completed generation.
"""
@@ -399,11 +482,11 @@ def __translateGene(self, x:np.ndarray[float], Evaluator:EvaluateArchitecture):
Evaluator.SetAlphaExpo(self.__alpha_expo)
Evaluator.SetLRDecay(self.__lr_decay)
Evaluator.SetHiddenLayers(self.__architecture)

Evaluator.SetActivationFunction(self.__activation_function)
# Set hyper-parameter according to gene.
idx_x = 0
if self.__optimize_batch:
batch_expo = x[idx_x]
batch_expo = int(x[idx_x])
Evaluator.SetBatchExpo(batch_expo)
idx_x += 1
if self.__optimize_LR:
@@ -414,12 +497,47 @@ def __translateGene(self, x:np.ndarray[float], Evaluator:EvaluateArchitecture):
Evaluator.SetLRDecay(lr_decay)
idx_x += 1
if self.__optimize_NN:
architecture = [x[idx_x]]
architecture = [int(x[idx_x])]
Evaluator.SetHiddenLayers(architecture)
idx_x += 1

return

def DE_fitnessFunction(self, x:np.ndarray):
if self.__n_workers > 1:
p = current_process()
worker_idx = p._identity[0]
else:
worker_idx = 0
Evaluator:EvaluateArchitecture = EvaluateArchitecture_NICFD(self._Config)

# Set CPU index.
Evaluator.SetTrainHardware("CPU", worker_idx)

Evaluator.SetVerbose(0)

# Translate gene and update hyper-parameters.
self.__translateGene(x, Evaluator=Evaluator)

# Set output directory for MLP training callbacks.
Evaluator.SetSaveDir(self.save_dir)

# Train for 1000 epochs direct and physics-informed.
Evaluator.SetNEpochs(1000)
Evaluator.CommenceTraining()

# Extract test set evaluation score.
Evaluator.TrainPostprocessing()
test_score = Evaluator.GetTestScore()

# Scale test set loss to fitness.
fitness_test_score = np.log10(test_score)

# Free up memory
del Evaluator

return fitness_test_score
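
DE_fitnessFunction runs once per candidate; when several workers are active it derives a per-worker CPU index from multiprocessing.current_process()._identity, a 1-based tuple for pool workers that is empty in the main process (hence the fallback to 0). Note that _identity is an internal attribute rather than documented API. Because differential_evolution minimizes its objective, the base-10 logarithm of the test-set loss is returned directly as the fitness. A small stand-alone sketch of the worker-index pattern:

from multiprocessing import Pool, current_process

def evaluate(x):
    # Pool workers carry a 1-based identity tuple; the main process does not.
    ident = current_process()._identity
    worker_idx = ident[0] if ident else 0
    # A real fitness function would pin training to CPU 'worker_idx' here and
    # return a loss; the index is returned just to show the pattern.
    return worker_idx

if __name__ == "__main__":
    with Pool(processes=2) as pool:
        print(pool.map(evaluate, [0.1, 0.2, 0.3, 0.4]))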

def fitnessFunction(self, ga_instance:pygad.GA, x:np.ndarray, x_idx:int):
""" Fitness function evaluated during GA routine.
"""
