Commit

Update tests except UQ

mjwen committed Jul 30, 2023
1 parent 3775a9d commit f17be91

Showing 7 changed files with 180 additions and 166 deletions.
96 changes: 47 additions & 49 deletions tests/calculators/test_calculator.py
@@ -34,52 +34,50 @@
]


class TestCalculator:
def test_compute(self):
test_file_path = Path(__file__).parents[1].joinpath("configs_extxyz")
tset = Dataset(test_file_path.joinpath("Si_4"))
configs = tset.get_configs()

modelname = "SW_StillingerWeber_1985_Si__MO_405512056662_006"
model = KIMModel(modelname)

# calculator
calc = Calculator(model)
compute_arguments = calc.create(configs)

for i, ca in enumerate(compute_arguments):
calc.compute(ca)
energy = calc.get_energy(ca)
forces = calc.get_forces(ca)[:3]

assert energy == pytest.approx(ref_energies[i], 1e-6)
assert np.allclose(forces, ref_forces[i])

def test_parameter(self):
modelname = "SW_StillingerWeber_1985_Si__MO_405512056662_006"
model = KIMModel(modelname)

# parameters
params = model.get_model_params()
sigma = params["sigma"][0]
A = params["A"][0]

# optimizing parameters
# B will not be optimized, only providing initial guess
model.set_opt_params(
sigma=[["default"]], B=[["default", "fix"]], A=[["default"]]
)

calc = Calculator(model)

x0 = calc.get_opt_params()
assert x0[0] == sigma
assert x0[1] == A
assert len(x0) == 2
assert model.get_num_opt_params() == 2

x1 = [i + 0.1 for i in x0]
calc.update_model_params(x1)

assert params["sigma"][0] == sigma + 0.1
assert params["A"][0] == A + 0.1
def test_compute(test_data_dir):
test_file_dir = test_data_dir.joinpath("configs/Si_4")
tset = Dataset(test_file_dir)
configs = tset.get_configs()

modelname = "SW_StillingerWeber_1985_Si__MO_405512056662_006"
model = KIMModel(modelname)

# calculator
calc = Calculator(model)
compute_arguments = calc.create(configs)

for i, ca in enumerate(compute_arguments):
calc.compute(ca)
energy = calc.get_energy(ca)
forces = calc.get_forces(ca)[:3]

assert energy == pytest.approx(ref_energies[i], 1e-6)
assert np.allclose(forces, ref_forces[i])


def test_parameter():
modelname = "SW_StillingerWeber_1985_Si__MO_405512056662_006"
model = KIMModel(modelname)

# parameters
params = model.get_model_params()
sigma = params["sigma"][0]
A = params["A"][0]

# optimizing parameters
# B will not be optimized, only providing initial guess
model.set_opt_params(sigma=[["default"]], B=[["default", "fix"]], A=[["default"]])

calc = Calculator(model)

x0 = calc.get_opt_params()
assert x0[0] == sigma
assert x0[1] == A
assert len(x0) == 2
assert model.get_num_opt_params() == 2

x1 = [i + 0.1 for i in x0]
calc.update_model_params(x1)

assert params["sigma"][0] == sigma + 0.1
assert params["A"][0] == A + 0.1
152 changes: 91 additions & 61 deletions tests/calculators/test_calculator_torch.py
@@ -1,6 +1,7 @@
from pathlib import Path

import numpy as np
import pytest
import torch
from torch import Tensor

@@ -10,56 +11,90 @@
from kliff.descriptors import SymmetryFunction
from kliff.models import NeuralNetwork

# model
descriptor = SymmetryFunction(
cut_name="cos", cut_dists={"Si-Si": 5.0}, hyperparams="set30", normalize=True
)

N1 = np.random.randint(5, 10)
N2 = np.random.randint(5, 10)
model = NeuralNetwork(descriptor)
model.add_layers(
# first hidden layer
nn.Linear(descriptor.get_size(), N1),
nn.Tanh(),
# second hidden layer
nn.Linear(N1, N2),
nn.Tanh(),
# output layer
nn.Linear(N2, 1),
)

# training set
path = Path(__file__).absolute().parents[1].joinpath("configs_extxyz/Si_4")
data = Dataset(path)
configs = data.get_configs()

# calculator
calc = CalculatorTorch(model, gpu=False)
_ = calc.create(configs, reuse=False)
loader = calc.get_compute_arguments(batch_size=100)

# data on parameter sizes
exp_sizes = [
torch.Size([N1, 30]),
torch.Size([N1]),
torch.Size([N2, N1]),
torch.Size([N2]),
torch.Size([1, N2]),
torch.Size([1]),
]
exp_nparams_per_layer = [N1 * 30, N1, N2 * N1, N2, N2, 1]
exp_nparams_total = np.sum(exp_nparams_per_layer)

# parameters to try
p0 = np.zeros(exp_nparams_total)
p1 = np.ones(exp_nparams_total)


# Test if the functions to update parameters work


def test_get_parameters_sizes():

@pytest.fixture(scope="module")
def N1():
return np.random.randint(5, 10)


@pytest.fixture(scope="module")
def N2():
return np.random.randint(5, 10)


@pytest.fixture(scope="module")
def calc(test_data_dir, N1, N2):
# model
descriptor = SymmetryFunction(
cut_name="cos", cut_dists={"Si-Si": 5.0}, hyperparams="set30", normalize=True
)

model = NeuralNetwork(descriptor)
model.add_layers(
# first hidden layer
nn.Linear(descriptor.get_size(), N1),
nn.Tanh(),
# second hidden layer
nn.Linear(N1, N2),
nn.Tanh(),
# output layer
nn.Linear(N2, 1),
)

# training set
data = Dataset(test_data_dir / "configs" / "Si_4")
configs = data.get_configs()

# calculator
calc = CalculatorTorch(model, gpu=False)
_ = calc.create(configs, reuse=False)

return calc


@pytest.fixture(scope="module")
def loader(calc, N1, N2):
return calc.get_compute_arguments(batch_size=100)


@pytest.fixture(scope="module")
def exp_sizes(N1, N2):
# data on parameter sizes
exp_sizes = [
torch.Size([N1, 30]),
torch.Size([N1]),
torch.Size([N2, N1]),
torch.Size([N2]),
torch.Size([1, N2]),
torch.Size([1]),
]

return exp_sizes


@pytest.fixture(scope="module")
def exp_nparams_per_layer(N1, N2):
return [N1 * 30, N1, N2 * N1, N2, N2, 1]


@pytest.fixture(scope="module")
def exp_nparams_total(exp_nparams_per_layer):
return np.sum(exp_nparams_per_layer)


@pytest.fixture(scope="module")
def p0(exp_nparams_total):
return np.zeros(exp_nparams_total)


@pytest.fixture(scope="module")
def p1(exp_nparams_total):
return np.ones(exp_nparams_total)


def test_get_parameters_sizes(
calc, exp_sizes, exp_nparams_per_layer, exp_nparams_total
):
"""
Test if the function to get parameters sizes works.
@@ -74,7 +109,7 @@ def test_get_parameters_sizes():
assert nparams_total == exp_nparams_total, "Total number of parameters is incorrect"


def test_parameter_values():
def test_parameter_values(calc, p0, p1):
"""
Test if the parameter values are updated.
@@ -89,19 +124,20 @@ def test_parameter_values():
), "Either `update_model_params` or `get_opt_params` not working"


def test_predictions_change():
def test_predictions_change(calc, loader, p0, p1):
"""
Test if changing parameters affect the predictions.
There are two steps of this test. The first one, if we set all the parameters to be
zero, then the (forces) predictions should also be zero.
Then, if we change the parameters to some other values, the predictions should change
and they should not be zero, unless there is something special with the
Then, if we change the parameters to some other values, the predictions should
change and they should not be zero, unless there is something special with the
configurations.
"""
# Test if predictions are zeros when all parameters are zeros
calc.update_model_params(p0)

for batch in loader:
calc.compute(batch)
# We will only look at the forces
@@ -110,7 +146,7 @@ def test_predictions_change():
for f0 in forces0:
all_zeros.append(Tensor.all(f0 == 0.0))
assert np.all(all_zeros), (
"Problem in predicitons calculation: "
"Problem in prediction calculation: "
+ "there are non-zero forces when all parameters are zero"
)

@@ -124,9 +160,3 @@
change.append(not Tensor.all(f0 - f1 == 0.0))
# Use any since there might be special configurations
assert np.any(change), "Changing parameters doesn't change predictions"


if __name__ == "__main__":
test_get_parameters_sizes()
test_parameter_values()
test_predictions_change()
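
Note: this refactor turns the former module-level globals (N1, N2, the model, calculator, loader, and expected parameter counts) into scope="module" fixtures, so the network is built lazily, once per module, and every test sees the same random N1/N2 draw. A minimal illustration of that caching behavior, with made-up names:

import numpy as np
import pytest


@pytest.fixture(scope="module")
def n_hidden():
    # Evaluated once per test module; later tests reuse the cached value.
    return np.random.randint(5, 10)


def test_first(n_hidden):
    assert 5 <= n_hidden < 10


def test_second(n_hidden):
    # Same cached draw as in test_first, not a fresh one.
    assert 5 <= n_hidden < 10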
36 changes: 19 additions & 17 deletions tests/dataset/test_extxyz.py
@@ -1,18 +1,28 @@
from pathlib import Path

import numpy as np
import pytest

from kliff.dataset.dataset import Configuration, Dataset


def test_configuration(e=True, f=False, s=False, order=False):
path = Path(__file__).parents[1].joinpath("configs_extxyz/MoS2")
@pytest.mark.parametrize(
"f,s,order",
(
[False, False, False],
[True, False, False],
[True, True, False],
[True, True, True],
),
)
def test_configuration(test_data_dir, f, s, order, e=True):
path = test_data_dir.joinpath("configs/MoS2")

if e:
fname = path.joinpath("MoS2_energy.xyz")
fname = path / "MoS2_energy.xyz"
if f:
fname = path.joinpath("MoS2_energy_forces.xyz")
fname = path / "MoS2_energy_forces.xyz"
if s:
fname = path.joinpath("MoS2_energy_forces_stress.xyz")
fname = path / "MoS2_energy_forces_stress.xyz"

fname = fname.as_posix()

config = Configuration.from_file(fname, file_format="xyz")
@@ -71,15 +81,7 @@ def test_configuration(e=True, f=False, s=False, order=False):
assert natoms_by_species["S"] == 192


def test_config():
test_configuration(True, False, False)
test_configuration(True, True, False)
test_configuration(True, True, True)
test_configuration(True, True, True, order=True)


def test_dataset():
directory = Path(__file__).parents[1].joinpath("configs_extxyz/MoS2").as_posix()
tset = Dataset(directory)
def test_dataset(test_data_dir):
tset = Dataset(test_data_dir / "configs/MoS2")
configs = tset.get_configs()
assert len(configs) == 3
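
Note: the parametrization above replaces the old test_config driver one-for-one: its four manual calls map to the four (f, s, order) rows, with e=True fixed as a default argument, and each row is now collected as a separate test case with its own pass/fail status. An illustrative, stand-alone sketch of the pattern:

import pytest


# Each tuple becomes its own collected test, e.g.
# test_configuration[True-True-False], so one failing combination
# no longer hides the others.
@pytest.mark.parametrize(
    "f,s,order",
    [
        (False, False, False),
        (True, False, False),
        (True, True, False),
        (True, True, True),
    ],
)
def test_configuration(f, s, order):
    # Stand-in body; the real test reads the matching extxyz file.
    assert isinstance(f, bool)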
12 changes: 0 additions & 12 deletions tests/uq/test_bootstrap_empirical.py
@@ -186,15 +186,3 @@ def test_multi_calc_cas_generator():
assert (
len(bootstrap_cas[0][0]) + len(bootstrap_cas[0][1]) == ncas_energy + ncas_forces
), "For each sample, generator should generate the same number of cas in total as the original"


if __name__ == "__main__":
test_wrapper()
test_error()
test_bootstrap_cas_generator()
test_callback()
test_run()
test_appending_cas()
test_save_load_cas()
test_reset()
test_multi_calc_cas_generator()
11 changes: 0 additions & 11 deletions tests/uq/test_bootstrap_nn.py
@@ -146,14 +146,3 @@ def test_reset():
# Check reset bootstrap samples
assert BS._nsamples_prepared == 0, "Reset bootstrap cas failed"
assert BS._nsamples_done == 0, "Reset ensembles failed"


if __name__ == "__main__":
test_wrapper()
test_error()
test_original_state()
test_bootstrap_cas_generator()
test_run()
test_appending_cas()
test_save_load_cas()
test_reset()
6 changes: 0 additions & 6 deletions tests/uq/test_bootstrap_nn_separate_species.py
@@ -101,9 +101,3 @@ def test_run():
shape == exp_shape
), f"Samples doesn't have the right shape; expected {exp_shape}, got {shape}"
assert BS._nsamples_done == nsamples


if __name__ == "__main__":
test_model()
test_original_state()
test_run()
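
Note: all three UQ test files lose their if __name__ == "__main__": driver blocks in the same way. Under pytest the drivers are redundant, since collection picks up every test_* function automatically, and calling test functions by hand would bypass any fixtures they request. If a file must remain runnable as a script, delegating to pytest keeps fixture support intact; a sketch:

if __name__ == "__main__":
    import sys

    import pytest

    # Run just this file through pytest instead of calling tests directly.
    sys.exit(pytest.main([__file__]))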