Skip to content

Commit

Permalink
Merge pull request #103 from perib/hyperparameter_demo
Browse files Browse the repository at this point in the history
Hyperparameter mutation probabilities and gradual changes
  • Loading branch information
nickotto authored Oct 20, 2023
2 parents d7ff57f + 3ea1729 commit 662553c
Show file tree
Hide file tree
Showing 4 changed files with 250 additions and 85 deletions.
245 changes: 173 additions & 72 deletions tpot2/config/hyperparametersuggestor.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,88 +2,189 @@
from scipy.stats import loguniform, logser #TODO: remove this dependency?
import numpy as np #TODO: remove this dependency and use scipy instead?

#select items from a list, each kept independently with probability p; never returns an empty list
def select(items, p):
    """Return a random subset of *items*, each element kept with independent probability *p*.

    If the random draw keeps nothing, falls back to a single element chosen
    uniformly at random, so the result is always non-empty.
    """
    kept = [entry for entry in items if random.random() < p]
    if kept:
        return kept
    # Nothing survived the independent coin flips -- pick one uniformly instead.
    return [random.choice(items)]


class Trial():
    """Minimal stand-in replicating part of optuna's Trial API.

    https://optuna.readthedocs.io/en/stable/reference/generated/optuna.trial.Trial.html
    (some code copy-pasted from optuna)

    When ``old_params`` is supplied, only a randomly selected subset of those
    parameters (each chosen independently with ``hyperparameter_probability``)
    is re-sampled by the suggest_* methods; the rest keep their previous
    values.  Numeric re-samples are blended toward the new draw:
    new = alpha * sampled + (1 - alpha) * old.
    """

    def __init__(self, old_params=None, alpha=1, hyperparameter_probability=1):
        # name -> value for every parameter suggested during this trial
        self._params = dict()

        self.old_params = old_params
        self.alpha = alpha
        self.hyperparameter_probability = hyperparameter_probability

        if old_params is not None and len(old_params) > 0:
            # Each old parameter is independently selected for mutation;
            # `select` guarantees at least one parameter is chosen.
            self.params_to_update = select(list(old_params.keys()), self.hyperparameter_probability)
        else:
            # No history: every suggest_* call samples a fresh value.
            self.params_to_update = None


def suggest_categorical(self, name, choices):
    """Suggest a categorical value for *name*, reusing history when possible.

    Replicates the optuna Trial API:
    https://optuna.readthedocs.io/en/stable/reference/generated/optuna.trial.Trial.html

    Re-samples when this parameter was selected for mutation or has no
    recorded old value; otherwise keeps the old value, unless that value is
    no longer present in ``choices``.
    """
    if self.params_to_update is None or name in self.params_to_update or name not in self.old_params: #If this parameter is selected to be changed
        choice = self.suggest_categorical_(name, choices)
    else: #if this parameter is not selected to be changed
        choice = self.old_params[name]
        if choice not in choices: #if the old value is not in the choices, then we need to choose a value for it
            choice = self.suggest_categorical_(name, choices)

    self._params[name] = choice
    return choice

def suggest_float(self,
        name: str,
        low: float,
        high: float,
        *,
        step = None,
        log = False,
    ):
    """Suggest a float for *name*, blending toward the old value when mutating.

    If the parameter is selected for mutation (or has no history), a fresh
    value is drawn via ``suggest_float_`` and then moved only ``alpha`` of
    the way from the old value toward the new draw (gradual change).
    Otherwise the previous value is kept unchanged.
    """
    if self.params_to_update is None or name in self.params_to_update or name not in self.old_params: #If this parameter is selected to be changed
        choice = self.suggest_float_(name, low=low, high=high, step=step, log=log)
        if self.old_params is not None and name in self.old_params:
            # Gradual change: interpolate between old value and fresh sample.
            choice = self.alpha*choice + (1-self.alpha)*self.old_params[name]
    else: #if this parameter is not selected to be changed
        choice = self.old_params[name]

    self._params[name] = choice
    return choice



def suggest_discrete_uniform(self, name, low, high, q):
    """Suggest a value from a discrete uniform grid [low, high) with spacing *q*.

    Same mutation/blending policy as ``suggest_float``: re-sample only when
    this parameter was selected for mutation or has no history, otherwise
    keep the previous value.
    """
    if self.params_to_update is None or name in self.params_to_update or name not in self.old_params:
        choice = self.suggest_discrete_uniform_(name, low=low, high=high, q=q)
        if self.old_params is not None and name in self.old_params:
            # Gradual change toward the fresh sample.
            choice = self.alpha*choice + (1-self.alpha)*self.old_params[name]
    else:
        choice = self.old_params[name]

    self._params[name] = choice
    return choice


def suggest_int(self, name, low, high, step=1, log=False):
    """Suggest an integer for *name*, blending toward the old value when mutating.

    If the parameter is selected for mutation (or has no history), a fresh
    value is drawn via ``suggest_int_`` and blended with the old value
    (result truncated back to int).  Otherwise the previous value is kept.
    """
    if self.params_to_update is None or name in self.params_to_update or name not in self.old_params:
        choice = self.suggest_int_(name, low=low, high=high, step=step, log=log)
        if self.old_params is not None and name in self.old_params:
            # Gradual change, truncated so the result stays an int.
            choice = int(self.alpha*choice + (1-self.alpha)*self.old_params[name])
    else:
        choice = self.old_params[name]

    self._params[name] = choice
    return choice

def suggest_uniform(self, name, low, high):
    """Suggest a value drawn uniformly from [low, high).

    Same mutation/blending policy as ``suggest_float``: re-sample only when
    this parameter was selected for mutation or has no history, otherwise
    keep the previous value.
    """
    if self.params_to_update is None or name in self.params_to_update or name not in self.old_params:
        choice = self.suggest_uniform_(name, low=low, high=high)
        if self.old_params is not None and name in self.old_params:
            # Gradual change toward the fresh sample.
            choice = self.alpha*choice + (1-self.alpha)*self.old_params[name]
    else:
        choice = self.old_params[name]

    self._params[name] = choice
    return choice



####################################
#Replicating the API found in optuna: https://optuna.readthedocs.io/en/stable/reference/generated/optuna.trial.Trial.html
#copy-pasted some code
def suggest_categorical_(self, name, choices):
    """Raw sampler: pick one element of ``choices`` uniformly at random.

    ``name`` is accepted for API symmetry with optuna but is not used here.
    """
    return random.choice(choices)

def suggest_float_(self,
        name: str,
        low: float,
        high: float,
        *,
        step = None,
        log = False,
    ):
    """Raw sampler mirroring optuna's ``Trial.suggest_float`` validation.

    Raises ValueError for invalid combinations (log with step, low > high,
    non-positive low with log, non-positive step).  Samples log-uniformly
    when ``log``; from the grid ``arange(low, high, step)`` when ``step`` is
    given (NOTE: ``high`` itself is excluded); otherwise uniformly.
    """
    if log and step is not None:
        raise ValueError("The parameter `step` is not supported when `log` is true.")

    if low > high:
        raise ValueError(
            "The `low` value must be smaller than or equal to the `high` value "
            "(low={}, high={}).".format(low, high)
        )

    if log and low <= 0.0:
        raise ValueError(
            "The `low` value must be larger than 0 for a log distribution "
            "(low={}, high={}).".format(low, high)
        )

    if step is not None and step <= 0:
        raise ValueError(
            "The `step` value must be non-zero positive value, " "but step={}.".format(step)
        )

    #TODO check this produces correct output
    if log:
        # Sample uniformly in log space, then map back.
        value = np.random.uniform(np.log(low), np.log(high))
        return np.e**value
    if step is not None:
        return np.random.choice(np.arange(low, high, step))
    return np.random.uniform(low, high)


def suggest_discrete_uniform_(self, name, low, high, q):
    """Raw sampler: uniform draw from the grid [low, high) with spacing *q*.

    Delegates to the raw ``suggest_float_`` sampler.  (Previously this
    called the public ``suggest_float``, which re-applied the mutation
    gating/alpha blending and re-recorded ``_params`` — a double blend when
    invoked through ``suggest_discrete_uniform``.)
    """
    return self.suggest_float_(name, low, high, step=q)


def suggest_int_(self, name, low, high, step=1, log=False):
    """Raw integer sampler mirroring optuna's ``Trial.suggest_int`` validation.

    Returns ``low`` immediately when the range is degenerate.  Samples
    log-uniformly (truncated to int) when ``log``; otherwise uniformly from
    ``range(low, high, step)`` (``high`` excluded).
    """
    # Degenerate range: nothing to sample.
    if low == high: #TODO check that this matches optuna's behaviour
        return low

    if log and step > 1:
        raise ValueError("The parameter `step`>1 is not supported when `log` is true.")

    if low > high:
        raise ValueError(
            "The `low` value must be smaller than or equal to the `high` value "
            "(low={}, high={}).".format(low, high)
        )

    if log and low <= 0.0:
        raise ValueError(
            "The `low` value must be larger than 0 for a log distribution "
            "(low={}, high={}).".format(low, high)
        )

    if step is not None and step <= 0:
        raise ValueError(
            "The `step` value must be non-zero positive value, " "but step={}.".format(step)
        )

    if not log:
        return np.random.choice(list(range(low, high, step)))

    # Log-uniform: draw in log space, map back, truncate to an int.
    drawn = np.random.uniform(np.log(low), np.log(high))
    return int(np.e**drawn)

def suggest_uniform_(self, name, low, high):
    """Raw sampler: uniform draw from [low, high).

    Delegates to the raw ``suggest_float_`` sampler.  (Previously this
    called the public ``suggest_float``, which re-applied the mutation
    gating/alpha blending and re-recorded ``_params`` — a double blend when
    invoked through ``suggest_uniform``.)
    """
    return self.suggest_float_(name, low, high)
Loading

0 comments on commit 662553c

Please sign in to comment.