Ensemble of PAL classes
Fixes #228
Kevin Maik Jablonka committed May 20, 2022
1 parent 1c8791e commit ee2a4ed
Showing 4 changed files with 125 additions and 18 deletions.
10 changes: 5 additions & 5 deletions src/pyepal/pal/core.py
@@ -275,7 +275,7 @@ def _get_max_wt( # pylint:disable=too-many-arguments
     sampled: np.array,
     pooling_method: str = "fro",
     use_coef_var: bool = True,
-) -> int:
+) -> Tuple[int, float]:
     """Returns the index in design space with the maximum size of the hyperrectangle
     (scaled by the mean predictions, i.e., effectively,
     we use the coefficient of variation).
@@ -320,7 +320,7 @@ def _get_max_wt( # pylint:disable=too-many-arguments
             max_uncertainty = uncertainty
             maxid = i
 
-    return maxid
+    return maxid, max_uncertainty
 
 
 @jit(nopython=True)
@@ -331,7 +331,7 @@ def _get_max_wt_all( # pylint:disable=too-many-arguments
     sampled: np.array,
     pooling_method: str = "fro",
     use_coef_var: bool = True,
-) -> int:
+) -> Tuple[int, float]:
     """Returns the index in design space with the maximum size of the hyperrectangle
     (scaled by the mean predictions, i.e., effectively,
     we use the coefficient of variation).
@@ -351,7 +351,7 @@ def _get_max_wt_all( # pylint:disable=too-many-arguments
         the unscaled rectangle sizes
     Returns:
-        int: index with maximum size of hyperrectangle
+        Tuple[int, float]: index with maximum size of hyperrectangle
+            and the corresponding maximum uncertainty
     """
     max_uncertainty = -np.inf
     maxid = 0
@@ -374,7 +374,7 @@ def _get_max_wt_all( # pylint:disable=too-many-arguments
             max_uncertainty = uncertainty
             maxid = i
 
-    return maxid
+    return maxid, max_uncertainty
 
 
 @jit(nopython=True)
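Both helpers now return an (index, uncertainty) pair instead of a bare index. A minimal calling sketch with invented toy values; it assumes the first three positional arguments are the rectangle bounds and predicted means, in the same order as the sample() call further below:

import numpy as np
from pyepal.pal.core import _get_max_wt

# two design points, two objectives (made-up numbers)
lows = np.array([[0.0, 0.1], [0.2, 0.3]])
ups = np.array([[1.0, 0.9], [0.4, 0.5]])
means = np.array([[0.5, 0.5], [0.3, 0.4]])
sampled = np.array([False, False])  # nothing measured yet

idx, uncertainty = _get_max_wt(lows, ups, means, sampled)  # unpack the new pair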
39 changes: 26 additions & 13 deletions src/pyepal/pal/pal_base.py
@@ -20,7 +20,7 @@
 import logging
 import warnings
 from copy import deepcopy
-from typing import List, Union
+from typing import Any, Iterable, List, Union
 
 import numpy as np
 from sklearn.metrics import mean_absolute_error
@@ -69,6 +69,7 @@ def __init__( # pylint:disable=too-many-arguments
         goals: List[str] = None,
         coef_var_threshold: float = 3,
         ranges: Union[np.ndarray, None] = None,
+        pooling_method: str = "fro",
     ):
         r"""Initialize the PAL instance
@@ -95,6 +96,10 @@
                 If this is provided, we will use :math:`\epsilon \cdot ranges`
                 to compute the uncertainties of the hyperrectangles instead
                 of the default behavior :math:`\epsilon \cdot |\mu|`
+            pooling_method (str): Method that is used to aggregate
+                the uncertainty in the different objectives into one scalar.
+                Available options are: "fro" (Frobenius/Euclidean norm), "mean",
+                "median". Defaults to "fro".
         """
         self.cross_val_points = 10  # maybe we make it an argument at some point
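To make the pooling options concrete, here is a standalone sketch of what the three aggregations compute for the per-objective uncertainties of a single design point (invented values, not the library's internal code):

import numpy as np

per_objective = np.array([0.2, 0.5, 0.1])  # made-up uncertainties, 3 objectives

fro = np.linalg.norm(per_objective)    # Frobenius/Euclidean norm, ~ 0.548
mean = np.mean(per_objective)          # ~ 0.267
median = np.median(per_objective)      # 0.2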
@@ -441,28 +446,31 @@ def _replace_by_measurements(self, replace_mean: bool = True, replace_std: bool
     def run_one_step( # pylint:disable=too-many-arguments
         self,
         batch_size: int = 1,
-        pooling_method: str = "fro",
         sample_discarded: bool = False,
         use_coef_var: bool = True,
         replace_mean: bool = True,
         replace_std: bool = True,
+        replacement_models: Iterable[Any] = None,
     ) -> Union[np.array, None]:
"""[summary]
"""Run one iteration of the PAL algorithm. That is, train the models,
get the predictions for all the design points and then classify them.
After classification, return the samples. We do not update the "sampled"
attrobute here.
         Args:
             batch_size (int, optional): Number of indices that will be returned.
                 If >1 then we use a greedy approximation.
                 Defaults to 1.
-            pooling_method (str): Method that is used to aggregate
-                the uncertainty in different objectives into one scalar.
-                Available options are: "fro" (Frobenius/Euclidean norm), "mean",
-                "median". Defaults to "fro".
             sample_discarded (bool): If True, sample from all points,
                 not only from the unclassified and Pareto-optimal ones
             use_coef_var (bool): If True, uses the coefficient of variation instead of
                 the unscaled rectangle sizes
             replace_mean (bool): If True, uses the measured means for the sampled points
             replace_std (bool): If True, uses the measured standard deviations for the
                 sampled points
+            replacement_models: A list of models that will be used to replace
+                the current models. If these are provided, we skip the hyperparameter
+                optimization and the training. This is useful if, for some reason, the
+                same models are trained somewhere else in parallel. Providing this
+                argument takes precedence over the hyperparameter-optimization and
+                training schedules. Defaults to None.
         Raises:
             ValueError: In case the PAL instance was not initialized with
@@ -482,10 +490,15 @@ def run_one_step( # pylint:disable=too-many-arguments
         if self.should_cross_validate():
             self._compare_mae_variance()
 
-        if self._should_optimize_hyperparameters():
-            self._set_hyperparameters()
+        if replacement_models is None:
+            if self._should_optimize_hyperparameters():
+                self._set_hyperparameters()
+
+            self._train()
+        else:
+            PAL_LOGGER.debug("Replacing models with provided ones.")
+            self.models = replacement_models
 
-        self._train()
         self._predict()
 
         self._update_beta()
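A usage sketch for the new keyword (train_models_elsewhere is a hypothetical helper; it assumes the replacement models follow the same order and interface as pal.models):

# models trained in a parallel process (hypothetical helper)
external_models = train_models_elsewhere()

# skips the in-process hyperparameter optimization and training
next_idx = pal.run_one_step(batch_size=1, replacement_models=external_models)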
@@ -500,7 +513,7 @@ def run_one_step( # pylint:disable=too-many-arguments
         for _ in range(batch_size):
             sampled_idx = self.sample(
                 exclude_idx=samples,
-                pooling_method=pooling_method,
+                pooling_method=self.pooling_method,
                 sample_discarded=sample_discarded,
                 use_coef_var=use_coef_var,
             )
@@ -736,7 +749,7 @@ def sample(
         sampled_mask += exclude_mask
 
         if sample_discarded:
-            sampled_idx = _get_max_wt_all(
+            sampled_idx, _uncertainty = _get_max_wt_all(
                 self.rectangle_lows,
                 self.rectangle_ups,
                 self._means,
@@ -745,7 +758,7 @@ def sample(
                 use_coef_var,
             )
         else:
-            sampled_idx = _get_max_wt(
+            sampled_idx, _uncertainty = _get_max_wt(
                 self.rectangle_lows,
                 self.rectangle_ups,
                 self._means,
94 changes: 94 additions & 0 deletions src/pyepal/pal/pal_ensemble.py
@@ -0,0 +1,94 @@
import numpy as np


class PALEnsemble:
    def __init__(self, pal_list):
        self.pal_list = pal_list

        # we just pick one instance where we will update the models
        self.head_pal = pal_list[0]

    @classmethod
    def from_class_and_kwarg_lists(cls, pal_class, **kwargs):
        pal_list = []
        iterable_keys = []
        for key, value in kwargs.items():
            if isinstance(value, (list, tuple)):
                iterable_keys.append(key)

        if len(iterable_keys) == 0:
            raise ValueError(
                "No iterable keys found in kwargs. If you do not provide iterable keys, please use a single PAL instance."
            )

        num_values = len(kwargs[iterable_keys[0]])

        for key in iterable_keys:
            if len(kwargs[key]) != num_values:
                raise ValueError(
                    "All iterable keys must have the same length. Please check the length of your iterable keys."
                )

        for i in range(num_values):
            this_kwargs = {}
            for key, value in kwargs.items():
                if key in iterable_keys:
                    this_kwargs[key] = value[i]
                else:
                    this_kwargs[key] = value
            pal_list.append(pal_class(**this_kwargs))
        return cls(pal_list)

    def run_one_step(
        self,
        batch_size: int = 1,
        sample_discarded: bool = False,
        use_coef_var: bool = True,
        replace_mean: bool = True,
        replace_std: bool = True,
    ):
        # pooling_method is no longer passed here; it is now set on each
        # PAL instance in its __init__
        samples = []
        uncertainties = []
        head_samples, head_uncertainties = self.head_pal.run_one_step(
            batch_size=batch_size,
            sample_discarded=sample_discarded,
            use_coef_var=use_coef_var,
            replace_mean=replace_mean,
            replace_std=replace_std,
        )
        samples.extend(head_samples)
        uncertainties.extend(head_uncertainties)

        for pal in self.pal_list[1:]:
            this_samples, this_uncertainties = pal.run_one_step(
                batch_size=batch_size,
                sample_discarded=sample_discarded,
                use_coef_var=use_coef_var,
                replace_mean=replace_mean,
                replace_std=replace_std,
                replacement_models=self.head_pal.models,
            )
            samples.extend(this_samples)
            uncertainties.extend(this_uncertainties)

        uncertainties_sorted, indices_sorted = zip(*sorted(zip(uncertainties, samples)))
        uncertainties_sorted = np.array(uncertainties_sorted)
        indices_sorted = np.array(indices_sorted)
        # deduplicate: keep, for every design point, only its
        # highest-uncertainty occurrence, then take the top batch_size
        _, unique_positions = np.unique(indices_sorted[::-1], return_index=True)
        unique_positions = np.sort(len(indices_sorted) - 1 - unique_positions)
        return (
            indices_sorted[unique_positions][-batch_size:],
            uncertainties_sorted[unique_positions][-batch_size:],
        )

    def augment_design_space(  # pylint: disable=invalid-name
        self, X_design: np.ndarray, classify: bool = False, clean_classify: bool = True
    ) -> None:
        for pal in self.pal_list:
            pal.augment_design_space(X_design, classify, clean_classify)

    def update_train_set(
        self,
        indices: np.ndarray,
        measurements: np.ndarray,
        measurement_uncertainty: np.ndarray = None,
    ) -> None:
        for pal in self.pal_list:
            pal.update_train_set(indices, measurements, measurement_uncertainty)
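For context, a construction-and-loop sketch under stated assumptions: PALGPy stands in for any concrete PAL class, X is the design matrix, models_a/models_b are pre-built per-member model lists, and run_experiment is a hypothetical oracle returning measurements. List- or tuple-valued kwargs are split across ensemble members; everything else is shared:

import numpy as np

# hypothetical setup: PALGPy, X, models_a, models_b defined elsewhere
ensemble = PALEnsemble.from_class_and_kwarg_lists(
    PALGPy,
    X_design=X,                   # shared: passed unchanged to every member
    models=[models_a, models_b],  # list-valued: one entry per member
    ndim=2,                       # shared scalar
    beta_scale=[1 / 9, 1 / 16],   # list-valued: member-specific
)

for _ in range(10):
    idx, _uncert = ensemble.run_one_step(batch_size=1)
    y_new = run_experiment(idx)   # hypothetical measurement step
    ensemble.update_train_set(np.asarray(idx), y_new)

And a worked toy example of the deduplicated top-batch selection in run_one_step (values invented; it keeps each design point's highest uncertainty, then takes the top batch_size):

import numpy as np

samples = np.array([7, 3, 7, 5])                # proposals from the members
uncertainties = np.array([0.9, 0.4, 0.2, 0.6])

order = np.argsort(uncertainties)               # ascending by uncertainty
s, u = samples[order], uncertainties[order]     # s = [7, 3, 5, 7]
_, pos = np.unique(s[::-1], return_index=True)
pos = np.sort(len(s) - 1 - pos)                 # last (= highest) occurrence
print(s[pos][-2:], u[pos][-2:])                 # -> [5 7] [0.6 0.9]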
Empty file added tests/test_pal_ensemble.py