Showing 8 changed files with 302 additions and 32 deletions.
.travis.yml
@@ -1,12 +1,14 @@
-dist: trusty
+dist: xenial
 language: python
 python:
   - "3.5"
   - "3.6"
+  - "3.7"
+env:
+  - TF_VERSION=2.0.0
 install:
   - pip install -U pip
-  - pip install -U -e .[cpu,develop,examples]
+  - pip install -U --force-reinstall -e .[development,examples] tensorflow==$TF_VERSION
 script:
   - PYTHONPATH=$PWD:$PYTHONPATH py.test
-  - jupyter-nbconvert --ExecutePreprocessor.timeout=86400 --to notebook --execute --stdout examples/attention.ipynb > /dev/null
-  - jupyter-nbconvert --ExecutePreprocessor.timeout=86400 --to notebook --execute --stdout examples/activation_maximization.ipynb > /dev/null
+  - jupyter-nbconvert --ExecutePreprocessor.timeout=86400 --to notebook --execute examples/attention.ipynb
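For reference, the notebook check in the script step can also be reproduced outside Travis. This is a minimal sketch using nbconvert's Python API; the notebook path comes from the config above, and the 86400-second timeout mirrors the --ExecutePreprocessor.timeout flag:

import nbformat
from nbconvert.preprocessors import ExecutePreprocessor

# Read and execute examples/attention.ipynb, as the CI script does.
nb = nbformat.read("examples/attention.ipynb", as_version=4)
ep = ExecutePreprocessor(timeout=86400)
ep.preprocess(nb, {"metadata": {"path": "examples"}})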
New file (ModelVisualization tests):

import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential

from tf_keras_vis import ModelVisualization


@pytest.fixture(scope="function", autouse=True)
def model():
    return Sequential([Dense(5, input_shape=(3, )), Dense(2, activation='softmax')])


class MockVisualizer(ModelVisualization):
    def __call__(self):
        pass


def change_activation(model):
    model.layers[-1].activation = tf.keras.activations.linear


def test__init__(model):
    mock = MockVisualizer(model)
    assert mock.model != model
    assert np.array_equal(mock.model.get_weights()[0], model.get_weights()[0])

    mock = MockVisualizer(model, change_activation)
    assert mock.model != model
    assert mock.model.layers[-1].activation == tf.keras.activations.linear
    assert model.layers[-1].activation == tf.keras.activations.softmax

    another_model = Sequential([Dense(5, input_shape=(3, ))])
    mock = MockVisualizer(model, lambda m: another_model)
    assert mock.model != model
    assert mock.model == another_model
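These tests pin down the model_modifier contract of ModelVisualization: the visualizer operates on a clone (so mock.model != model while the weights are equal), a modifier may mutate that clone in place and return nothing, or it may return a replacement model that is used instead. A minimal usage sketch, assuming an existing trained model with a softmax head; the linear_head name is illustrative:

import tensorflow as tf
from tf_keras_vis.activation_maximization import ActivationMaximization

def linear_head(m):
    # Swap softmax for linear on the cloned model so gradients flow
    # through raw scores; returning None keeps the mutated clone.
    m.layers[-1].activation = tf.keras.activations.linear

visualizer = ActivationMaximization(model, linear_head)  # `model` assumed to exist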
New file (ActivationMaximization tests):

import numpy as np
import pytest
from tensorflow.keras.layers import Conv2D, Dense, Flatten, Concatenate, Input
from tensorflow.keras.models import Sequential, Model

from tf_keras_vis.activation_maximization import ActivationMaximization
from tf_keras_vis.utils.losses import SmoothedLoss
from tf_keras_vis.utils.callbacks import OptimizerCallback


class MockCallback(OptimizerCallback):
    def on_begin(self):
        self.on_begin_was_called = True

    def __call__(self, i, values, grads, losses, model_outputs, **kwargs):
        self.on_call_was_called = True

    def on_end(self):
        self.on_end_was_called = True


@pytest.fixture(scope="function", autouse=True)
def multiple_inputs_model():
    a = Input(shape=(8, 8, 3))
    b = Input(shape=(8, 8, 3))
    c = Input(shape=(8, 8, 3))
    x1 = Conv2D(5, 3, activation='relu')(a)
    x2 = Conv2D(5, 3, activation='relu')(b)
    x3 = Conv2D(5, 3, activation='relu')(c)
    x = Concatenate()([x1, x2, x3])
    x = Dense(3)(x)
    return Model([a, b, c], [x])


@pytest.fixture(scope="function", autouse=True)
def cnn_model():
    return _cnn_model()


def _cnn_model():
    return Sequential([
        Input(shape=(8, 8, 3)),
        Conv2D(5, 3, activation='relu'),
        Flatten(),
        Dense(2, activation='softmax')
    ])


def test__call__if_loss_is_None(cnn_model):
    activation_maximization = ActivationMaximization(cnn_model)
    with pytest.raises(ValueError):
        activation_maximization(None, steps=1)


def test__call__(cnn_model):
    activation_maximization = ActivationMaximization(cnn_model)
    result = activation_maximization(SmoothedLoss(1), steps=1)
    assert result.shape == (1, 8, 8, 3)


def test__call__if_loss_is_list(cnn_model):
    activation_maximization = ActivationMaximization(cnn_model)
    result = activation_maximization([SmoothedLoss(1)], steps=1)
    assert result.shape == (1, 8, 8, 3)


def test__call__with_seed_input(cnn_model):
    activation_maximization = ActivationMaximization(cnn_model)
    result = activation_maximization(SmoothedLoss(1),
                                     seed_input=np.random.sample((8, 8, 3)),
                                     steps=1)
    assert result.shape == (1, 8, 8, 3)


def test__call__with_callback(cnn_model):
    activation_maximization = ActivationMaximization(cnn_model)
    mock = MockCallback()
    result = activation_maximization(SmoothedLoss(1), steps=1, callbacks=mock)
    assert result.shape == (1, 8, 8, 3)
    assert mock.on_begin_was_called
    assert mock.on_call_was_called
    assert mock.on_end_was_called


def test__call__with_gradient_modifier(cnn_model):
    activation_maximization = ActivationMaximization(cnn_model)
    result = activation_maximization(SmoothedLoss(1), steps=1, gradient_modifier=lambda x: x)
    assert result.shape == (1, 8, 8, 3)


def test__call__with_multiple_inputs_model(multiple_inputs_model):
    activation_maximization = ActivationMaximization(multiple_inputs_model)
    result = activation_maximization(SmoothedLoss(1), steps=1, input_modifiers=None)
    assert result[0].shape == (1, 8, 8, 3)
    assert result[1].shape == (1, 8, 8, 3)
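MockCallback only records that each hook fired, but it shows the full OptimizerCallback protocol: on_begin before optimization, __call__ once per step, on_end afterwards. A hypothetical loss-logging callback, sketched on the same protocol:

from tf_keras_vis.utils.callbacks import OptimizerCallback

class LossLogger(OptimizerCallback):
    # Hypothetical callback: accumulate per-step losses for later inspection.
    def on_begin(self):
        self.history = []

    def __call__(self, i, values, grads, losses, model_outputs, **kwargs):
        self.history.append(losses)

    def on_end(self):
        pass

Passing an instance via callbacks= (as test__call__with_callback does) leaves its history populated after the run.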
New file (Gradcam tests):

import numpy as np
import pytest
from tensorflow.keras.layers import Conv2D, Dense, Flatten
from tensorflow.keras.models import Sequential

from tf_keras_vis.gradcam import Gradcam
from tf_keras_vis.utils.losses import SmoothedLoss


@pytest.fixture(scope="function", autouse=True)
def dense_model():
    return Sequential(
        [Dense(5, input_shape=(3, ), activation='relu'),
         Dense(2, activation='softmax')])


@pytest.fixture(scope="function", autouse=True)
def cnn_model():
    return Sequential([
        Conv2D(5, 3, input_shape=(8, 8, 3), activation='relu'),
        Flatten(),
        Dense(2, activation='softmax')
    ])


def test__call__if_loss_is_None(cnn_model):
    gradcam = Gradcam(cnn_model)
    with pytest.raises(ValueError):
        gradcam(None, None)


def test__call__if_seed_input_is_None(cnn_model):
    gradcam = Gradcam(cnn_model)
    with pytest.raises(ValueError):
        gradcam(SmoothedLoss(1), None)


def test__call__if_seed_input_has_no_batch_dim(cnn_model):
    gradcam = Gradcam(cnn_model)
    result = gradcam(SmoothedLoss(1), np.random.sample((8, 8, 3)))
    assert result.shape == (1, 8, 8)


def test__call__(cnn_model):
    gradcam = Gradcam(cnn_model)
    result = gradcam(SmoothedLoss(1), np.random.sample((1, 8, 8, 3)))
    assert result.shape == (1, 8, 8)


def test__call__if_penultimate_layer_is_None(cnn_model):
    gradcam = Gradcam(cnn_model)
    result = gradcam(SmoothedLoss(1), np.random.sample((1, 8, 8, 3)), penultimate_layer=None)
    assert result.shape == (1, 8, 8)


def test__call__if_penultimate_layer_is_nonexistent_index(cnn_model):
    gradcam = Gradcam(cnn_model)
    with pytest.raises(ValueError):
        gradcam(SmoothedLoss(1), np.random.sample((1, 8, 8, 3)), penultimate_layer=100000)


def test__call__if_penultimate_layer_is_nonexistent_name(cnn_model):
    gradcam = Gradcam(cnn_model)
    with pytest.raises(ValueError):
        gradcam(SmoothedLoss(1), np.random.sample((1, 8, 8, 3)), penultimate_layer='hoge')
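Taken together, these tests document Gradcam's calling convention: loss and seed_input are required, a missing batch dimension on the input is added automatically, the output heatmap is always batched, and penultimate_layer accepts an index or layer name, raising ValueError for values that do not resolve. A minimal usage sketch under those assumptions, with cnn_model as in the fixture above:

import numpy as np
from tf_keras_vis.gradcam import Gradcam
from tf_keras_vis.utils.losses import SmoothedLoss

gradcam = Gradcam(cnn_model)                # cnn_model: an existing CNN, as above
image = np.random.sample((8, 8, 3))         # batch dimension may be omitted
heatmap = gradcam(SmoothedLoss(1), image)   # heatmap.shape == (1, 8, 8)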