From d812c821612fd3d554c6f074f28f293c4579d78b Mon Sep 17 00:00:00 2001
From: Thomas Constum
Date: Tue, 16 Jun 2020 08:47:11 +0000
Subject: [PATCH 01/57] correcting a bug where the read method was ignoring
 the "mode" argument causing errors when loading a text file

---
 lucid/misc/io/reading.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lucid/misc/io/reading.py b/lucid/misc/io/reading.py
index 94279257..52fed129 100644
--- a/lucid/misc/io/reading.py
+++ b/lucid/misc/io/reading.py
@@ -177,7 +177,7 @@ def _read_and_cache(url, mode="rb"):
     with lock:
         if os.path.exists(local_path):
             log.debug("Found cached file '%s'.", local_path)
-            return _handle_gfile(local_path)
+            return _handle_gfile(local_path, mode)
         log.debug("Caching URL '%s' locally at '%s'.", url, local_path)
         try:
             with write_handle(local_path, "wb") as output_handle, read_handle(

From 50d2d9d55c1a88aba121fe86e38b2054bf519c39 Mon Sep 17 00:00:00 2001
From: Thomas Constum
Date: Tue, 16 Jun 2020 08:50:22 +0000
Subject: [PATCH 02/57] correcting a bug that was adding a dot in filename for
 local paths in scope_url

---
 lucid/misc/io/scoping.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lucid/misc/io/scoping.py b/lucid/misc/io/scoping.py
index ea57effc..cc04fce4 100644
--- a/lucid/misc/io/scoping.py
+++ b/lucid/misc/io/scoping.py
@@ -43,7 +43,7 @@ def _normalize_url(url: str) -> str:
 
 def scope_url(url, io_scopes=None):
     io_scopes = io_scopes or current_io_scopes()
-    if "//" in url or url.startswith("/"):
+    if "//" in url or url.startswith("/") or url.startswith("./"):
         return url
     paths = io_scopes + [url]
     joined = os.path.join(*paths)

From 9e2c4720b3da2a66b56a0bbbca9644277d903585 Mon Sep 17 00:00:00 2001
From: Thomas Constum
Date: Tue, 16 Jun 2020 08:52:56 +0000
Subject: [PATCH 03/57] adding a condition in objective 'direction' to allow the argument vec to be a tensor.
It allow the notebook Activation Grid to work --- lucid/optvis/objectives.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lucid/optvis/objectives.py b/lucid/optvis/objectives.py index b66975ca..eebbe702 100644 --- a/lucid/optvis/objectives.py +++ b/lucid/optvis/objectives.py @@ -184,7 +184,9 @@ def inner(T): def direction(layer, vec, cossim_pow=0, batch=None): """Visualize a direction""" vec = vec[None, None, None] - vec = vec.astype("float32") + + if isinstance(vec,np.ndarray): + vec = vec.astype("float32") @handle_batch(batch) def inner(T): From 5275c859720c5a50b3d1b01ae532f1a827f3ce3e Mon Sep 17 00:00:00 2001 From: Thomas Constum Date: Tue, 16 Jun 2020 08:53:49 +0000 Subject: [PATCH 04/57] updating tutorial notebook --- notebooks/tutorial.ipynb | 1308 +++++++++++++++++++------------------- 1 file changed, 656 insertions(+), 652 deletions(-) diff --git a/notebooks/tutorial.ipynb b/notebooks/tutorial.ipynb index 9f754760..c36779d0 100644 --- a/notebooks/tutorial.ipynb +++ b/notebooks/tutorial.ipynb @@ -1,708 +1,712 @@ { - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "JndnmDMp66FL" + }, + "source": [ + "##### Copyright 2018 Google LLC.\n", + "\n", + "Licensed under the Apache License, Version 2.0 (the \"License\");" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "cellView": "both", "colab": { - "name": "Lucid Tutorial", - "version": "0.3.2", - "views": {}, - "default_view": {}, - "provenance": [] + "autoexec": { + "startup": false, + "wait_interval": 0 + } }, - "kernelspec": { - "name": "python2", - "display_name": "Python 2" + "colab_type": "code", + "id": "hMqWDc_m6rUC" + }, + "outputs": [], + "source": [ + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "_vAVmphMywZR" + }, + "source": [ + "# Lucid: A Quick Tutorial\n", + "\n", + "This tutorial quickly introduces [**Lucid**](https://github.com/tensorflow/lucid), a network for visualizing neural networks. Lucid is a kind of spiritual successor to DeepDream, but provides flexible abstractions so that it can be used for a wide range of interpretability research.\n", + "\n", + "**Note**: The easiest way to use this tutorial is [as a colab notebook](https://colab.sandbox.google.com/github/tensorflow/lucid/blob/master/notebooks/tutorial.ipynb), which allows you to dive in with no setup. 
We recommend you enable a free GPU by going:\n", + "\n", + "> **Runtime**   →   **Change runtime type**   →   **Hardware Accelerator: GPU**\n", + "\n", + "Thanks for trying Lucid!\n", + "\n", + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "FsFc1mE51tCd" + }, + "source": [ + "## Install, Import, Load Model" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } }, - "accelerator": "GPU" + "colab_type": "code", + "id": "tavMPe3KQ8Cs" + }, + "outputs": [], + "source": [ + "# Install Lucid\n", + "# !pip install --quiet --upgrade-strategy=only-if-needed git+https://github.com/tensorflow/lucid.git\n", + "\n", + "# %tensorflow_version only works on colab\n", + "# %tensorflow_version 1.x\n" + ] }, - "cells": [ - { - "metadata": { - "id": "JndnmDMp66FL", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "##### Copyright 2018 Google LLC.\n", - "\n", - "Licensed under the Apache License, Version 2.0 (the \"License\");" - ] + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } }, - { - "metadata": { - "id": "hMqWDc_m6rUC", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - } - }, - "cellView": "both" - }, - "cell_type": "code", - "source": [ - "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", - "# you may not use this file except in compliance with the License.\n", - "# You may obtain a copy of the License at\n", - "#\n", - "# https://www.apache.org/licenses/LICENSE-2.0\n", - "#\n", - "# Unless required by applicable law or agreed to in writing, software\n", - "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", - "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", - "# See the License for the specific language governing permissions and\n", - "# limitations under the License." - ], - "execution_count": 0, - "outputs": [] + "colab_type": "code", + "id": "RBr8QbboRAdU" + }, + "outputs": [], + "source": [ + "# Imports\n", + "\n", + "import numpy as np\n", + "import tensorflow as tf\n", + "assert tf.__version__.startswith('1')\n", + "\n", + "import lucid.modelzoo.vision_models as models\n", + "from lucid.misc.io import show\n", + "import lucid.optvis.objectives as objectives\n", + "import lucid.optvis.param as param\n", + "import lucid.optvis.render as render\n", + "import lucid.optvis.transform as transform" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } }, - { - "metadata": { - "id": "_vAVmphMywZR", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "# Lucid: A Quick Tutorial\n", - "\n", - "This tutorial quickly introduces [**Lucid**](https://github.com/tensorflow/lucid), a network for visualizing neural networks. Lucid is a kind of spiritual successor to DeepDream, but provides flexible abstractions so that it can be used for a wide range of interpretability research.\n", - "\n", - "**Note**: The easiest way to use this tutorial is [as a colab notebook](https://colab.sandbox.google.com/github/tensorflow/lucid/blob/master/notebooks/tutorial.ipynb), which allows you to dive in with no setup. 
We recommend you enable a free GPU by going:\n", - "\n", - "> **Runtime**   →   **Change runtime type**   →   **Hardware Accelerator: GPU**\n", - "\n", - "Thanks for trying Lucid!\n", - "\n", - "\n", - "\n" - ] + "colab_type": "code", + "id": "yNALaA0QRJVT" + }, + "outputs": [], + "source": [ + "# Let's import a model from the Lucid modelzoo!\n", + "\n", + "model = models.InceptionV1()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "1l31v18X42gc" + }, + "source": [ + "In this tutorial, we will be visualizing InceptionV1, also known as GoogLeNet.\n", + "\n", + "While we will focus on a few neurons, you may wish to experiment with visualizing others. If you'd like, you can try any of the following layers: `conv2d0, maxpool0, conv2d1, conv2d2, maxpool1, mixed3a, mixed3b, maxpool4, mixed4a, mixed4b, mixed4c, mixed4d, mixed4e, maxpool10, mixed5a, mixed5b`.\n", + "\n", + "You can learn more about GoogLeNet in the [paper](https://www.cs.unc.edu/~wliu/papers/GoogLeNet.pdf). You can also find visualizations of all neurons in mixed3a-mixed5b [here](https://distill.pub/2017/feature-visualization/appendix/)." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "VcUL29K612SI" + }, + "source": [ + "## Visualize a Neuron" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 166 }, - { - "metadata": { - "id": "FsFc1mE51tCd", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "## Install, Import, Load Model" - ] + "colab_type": "code", + "executionInfo": { + "elapsed": 9883, + "status": "ok", + "timestamp": 1520528085592, + "user": { + "displayName": "", + "photoUrl": "", + "userId": "" + }, + "user_tz": 480 }, + "id": "CLDYzkKoRQtw", + "outputId": "47739b06-c868-4627-924c-dc28ada359d2" + }, + "outputs": [ { - "metadata": { - "id": "tavMPe3KQ8Cs", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - } - } - }, - "cell_type": "code", - "source": [ - "# Install Lucid\n", - "\n", - "!pip install --quiet lucid==0.2.3\n", - "#!pip install --quiet --upgrade-strategy=only-if-needed git+https://github.com/tensorflow/lucid.git\n", - "# %tensorflow_version only works on colab\n", - "%tensorflow_version 1.x\n" - ], - "execution_count": 0, - "outputs": [] + "name": "stdout", + "output_type": "stream", + "text": [ + "512 1234.6516\n" + ] }, { - "metadata": { - "id": "RBr8QbboRAdU", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - } - } - }, - "cell_type": "code", - "source": [ - "# Imports\n", - "\n", - "import numpy as np\n", - "import tensorflow as tf\n", - "assert tf.__version__.startswith('1')\n", - "\n", - "import lucid.modelzoo.vision_models as models\n", - "from lucid.misc.io import show\n", - "import lucid.optvis.objectives as objectives\n", - "import lucid.optvis.param as param\n", - "import lucid.optvis.render as render\n", - "import lucid.optvis.transform as transform" + "data": { + "text/html": [ + "" ], - "execution_count": 0, - "outputs": [] + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Visualizing a neuron is easy!\n", + "# Let's visualize the channel 476 from layer mixed4a_pre_relu\n", + "_ = render.render_vis(model, \"mixed4a_pre_relu:476\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + 
"colab_type": "text", + "id": "6gmaVSej19us" + }, + "source": [ + "## Getting a bit deeper\n", + "\n", + "Lucid splits visualizations into a few components which you can fiddle with completely indpendently:\n", + "\n", + "* **objectives** -- What do you want the model to visualize?\n", + "* **parameterization** -- How do you describe the image?\n", + "* **transforms** -- What transformations do you want your visualization to be robust to?\n", + "\n", + "In this section, we'll experiment with each one." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "6zO3np2D2tGh" + }, + "source": [ + "**Experimenting with objectives**" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 166 }, + "colab_type": "code", + "executionInfo": { + "elapsed": 102011, + "status": "ok", + "timestamp": 1518144808685, + "user": { + "displayName": "Christopher Olah", + "photoUrl": "//lh6.googleusercontent.com/-BDHAgNAk34E/AAAAAAAAAAI/AAAAAAAAAMw/gTWZ3IeP8dY/s50-c-k-no/photo.jpg", + "userId": "104989755527098071788" + }, + "user_tz": 480 + }, + "id": "YyexdOXIcH2i", + "outputId": "79641344-277d-4c33-993b-da8930e569c7" + }, + "outputs": [ { - "metadata": { - "id": "yNALaA0QRJVT", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - } - } - }, - "cell_type": "code", - "source": [ - "# Let's import a model from the Lucid modelzoo!\n", - "\n", - "model = models.InceptionV1()\n", - "model.load_graphdef()" - ], - "execution_count": 0, - "outputs": [] + "name": "stdout", + "output_type": "stream", + "text": [ + "512 1661.9348\n" + ] }, { - "metadata": { - "id": "1l31v18X42gc", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "In this tutorial, we will be visualizing InceptionV1, also known as GoogLeNet.\n", - "\n", - "While we will focus on a few neurons, you may wish to experiment with visualizing others. If you'd like, you can try any of the following layers: `conv2d0, maxpool0, conv2d1, conv2d2, maxpool1, mixed3a, mixed3b, maxpool4, mixed4a, mixed4b, mixed4c, mixed4d, mixed4e, maxpool10, mixed5a, mixed5b`.\n", - "\n", - "You can learn more about GoogLeNet in the [paper](https://www.cs.unc.edu/~wliu/papers/GoogLeNet.pdf). You can also find visualizations of all neurons in mixed3a-mixed5b [here](https://distill.pub/2017/feature-visualization/appendix/)." 
+ "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Let's visualize another channel using a more explicit objective:\n", + "\n", + "obj = objectives.channel(\"mixed4a_pre_relu\", 465)\n", + "_ = render.render_vis(model, obj)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 166 }, + "colab_type": "code", + "executionInfo": { + "elapsed": 101841, + "status": "ok", + "timestamp": 1518144939030, + "user": { + "displayName": "Christopher Olah", + "photoUrl": "//lh6.googleusercontent.com/-BDHAgNAk34E/AAAAAAAAAAI/AAAAAAAAAMw/gTWZ3IeP8dY/s50-c-k-no/photo.jpg", + "userId": "104989755527098071788" + }, + "user_tz": 480 + }, + "id": "YdERJ3_7cLdy", + "outputId": "8201d3be-9487-4ca7-819f-b332b459ab6f" + }, + "outputs": [ { - "metadata": { - "id": "VcUL29K612SI", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "## Visualize a Neuron" - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "512 1932.1196\n" + ] }, { - "metadata": { - "id": "CLDYzkKoRQtw", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - }, - "base_uri": "https://localhost:8080/", - "height": 166 - }, - "outputId": "47739b06-c868-4627-924c-dc28ada359d2", - "executionInfo": { - "status": "ok", - "timestamp": 1520528085592, - "user_tz": 480, - "elapsed": 9883, - "user": { - "displayName": "", - "photoUrl": "", - "userId": "" - } - } - }, - "cell_type": "code", - "source": [ - "# Visualizing a neuron is easy!\n", - "\n", - "_ = render.render_vis(model, \"mixed4a_pre_relu:476\")" + "data": { + "text/html": [ + "" ], - "execution_count": 0, - "outputs": [ - { - "output_type": "stream", - "text": [ - "512 1150.7921\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "" - ] - }, - "metadata": { - "tags": [] - } - } + "text/plain": [ + "" ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Or we could do something weirder:\n", + "# (Technically, objectives are a class that implements addition.)\n", + "channel = lambda n: objectives.channel(\"mixed4a_pre_relu\", n)\n", + "obj = channel(476) + channel(465)\n", + "_ = render.render_vis(model, obj)\n", + "\n", + "# The addition of two channel objectives gives an objective that will optimize jointly\n", + "# the activation of both channels" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "9Rvhpqtn3XXm" + }, + "source": [ + "**Transformation Robustness**\n", + "\n", + "Recommended reading: The Feature Visualization article's section titled [The Enemy of Feature Visualization](https://distill.pub/2017/feature-visualization/#enemy-of-feature-vis) discusion of \"Transformation Robustness.\" In particular, there's an interactive diagram that allows you to easily explore how different kinds of transformation robustness effects visualizations." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 166 }, - { - "metadata": { - "id": "6gmaVSej19us", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "## Getting a bit deeper\n", - "\n", - "Lucid splits visualizations into a few components which you can fiddle with completely indpendently:\n", - "\n", - "* **objectives** -- What do you want the model to visualize?\n", - "* **parameterization** -- How do you describe the image?\n", - "* **transforms** -- What transformations do you want your visualization to be robust to?\n", - "\n", - "In this section, we'll experiment with each one." - ] + "colab_type": "code", + "executionInfo": { + "elapsed": 77826, + "status": "ok", + "timestamp": 1518145016924, + "user": { + "displayName": "Christopher Olah", + "photoUrl": "//lh6.googleusercontent.com/-BDHAgNAk34E/AAAAAAAAAAI/AAAAAAAAAMw/gTWZ3IeP8dY/s50-c-k-no/photo.jpg", + "userId": "104989755527098071788" + }, + "user_tz": 480 }, + "id": "DDBH4gpD3X2O", + "outputId": "4eb10b16-79e6-4b86-daa8-8e52d258fe69" + }, + "outputs": [ { - "metadata": { - "id": "6zO3np2D2tGh", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "**Experimenting with objectives**" - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "512 2294.2246\n" + ] }, { - "metadata": { - "id": "YyexdOXIcH2i", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - }, - "base_uri": "https://localhost:8080/", - "height": 166 - }, - "outputId": "79641344-277d-4c33-993b-da8930e569c7", - "executionInfo": { - "status": "ok", - "timestamp": 1518144808685, - "user_tz": 480, - "elapsed": 102011, - "user": { - "displayName": "Christopher Olah", - "photoUrl": "//lh6.googleusercontent.com/-BDHAgNAk34E/AAAAAAAAAAI/AAAAAAAAAMw/gTWZ3IeP8dY/s50-c-k-no/photo.jpg", - "userId": "104989755527098071788" - } - } - }, - "cell_type": "code", - "source": [ - "# Let's visualize another neuron using a more explicit objective:\n", - "\n", - "obj = objectives.channel(\"mixed4a_pre_relu\", 465)\n", - "_ = render.render_vis(model, obj)" + "data": { + "text/html": [ + "" ], - "execution_count": 0, - "outputs": [ - { - "output_type": "stream", - "text": [ - "512 1785.2615\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "" - ] - }, - "metadata": { - "tags": [] - } - } + "text/plain": [ + "" ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# No transformation robustness\n", + "\n", + "transforms = []\n", + "_ = render.render_vis(model, \"mixed4a_pre_relu:476\", transforms=transforms)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 166 }, - { - "metadata": { - "id": "YdERJ3_7cLdy", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - }, - "base_uri": "https://localhost:8080/", - "height": 166 - }, - "outputId": "8201d3be-9487-4ca7-819f-b332b459ab6f", - "executionInfo": { - "status": "ok", - "timestamp": 1518144939030, - "user_tz": 480, - "elapsed": 101841, - "user": { - "displayName": "Christopher Olah", - "photoUrl": "//lh6.googleusercontent.com/-BDHAgNAk34E/AAAAAAAAAAI/AAAAAAAAAMw/gTWZ3IeP8dY/s50-c-k-no/photo.jpg", - 
"userId": "104989755527098071788" - } - } - }, - "cell_type": "code", - "source": [ - "# Or we could do something weirder:\n", - "# (Technically, objectives are a class that implements addition.)\n", - "\n", - "channel = lambda n: objectives.channel(\"mixed4a_pre_relu\", n)\n", - "obj = channel(476) + channel(465)\n", - "_ = render.render_vis(model, obj)" - ], - "execution_count": 0, - "outputs": [ - { - "output_type": "stream", - "text": [ - "512 2312.0425\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "" - ] - }, - "metadata": { - "tags": [] - } - } - ] + "colab_type": "code", + "executionInfo": { + "elapsed": 78302, + "status": "ok", + "timestamp": 1518145095337, + "user": { + "displayName": "Christopher Olah", + "photoUrl": "//lh6.googleusercontent.com/-BDHAgNAk34E/AAAAAAAAAAI/AAAAAAAAAMw/gTWZ3IeP8dY/s50-c-k-no/photo.jpg", + "userId": "104989755527098071788" + }, + "user_tz": 480 }, + "id": "mDOviz8d4n4c", + "outputId": "89be067a-b356-443d-d589-ab55016190a1" + }, + "outputs": [ { - "metadata": { - "id": "9Rvhpqtn3XXm", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "**Transformation Robustness**\n", - "\n", - "Recomended reading: The Feature Visualization article's section titled [The Enemy of Feature Visualization](https://distill.pub/2017/feature-visualization/#enemy-of-feature-vis) discusion of \"Transformation Robustness.\" In particular, there's an interactive diagram that allows you to easily explore how different kinds of transformation robustness effects visualizations." - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "512 1851.3445\n" + ] }, { - "metadata": { - "id": "DDBH4gpD3X2O", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - }, - "base_uri": "https://localhost:8080/", - "height": 166 - }, - "outputId": "4eb10b16-79e6-4b86-daa8-8e52d258fe69", - "executionInfo": { - "status": "ok", - "timestamp": 1518145016924, - "user_tz": 480, - "elapsed": 77826, - "user": { - "displayName": "Christopher Olah", - "photoUrl": "//lh6.googleusercontent.com/-BDHAgNAk34E/AAAAAAAAAAI/AAAAAAAAAMw/gTWZ3IeP8dY/s50-c-k-no/photo.jpg", - "userId": "104989755527098071788" - } - } - }, - "cell_type": "code", - "source": [ - "# No transformation robustness\n", - "\n", - "transforms = []\n", - "_ = render.render_vis(model, \"mixed4a_pre_relu:476\", transforms=transforms)" + "data": { + "text/html": [ + "" ], - "execution_count": 0, - "outputs": [ - { - "output_type": "stream", - "text": [ - "512 2420.1245\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "" - ] - }, - "metadata": { - "tags": [] - } - } + "text/plain": [ + "" ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Jitter 2\n", + "\n", + "transforms = [\n", + " transform.jitter(2)\n", + "]\n", + "_ = render.render_vis(model, \"mixed4a_pre_relu:476\", transforms=transforms)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 166 + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 113882, + "status": "ok", + "timestamp": 1518145209289, + "user": { + "displayName": "Christopher Olah", + "photoUrl": "//lh6.googleusercontent.com/-BDHAgNAk34E/AAAAAAAAAAI/AAAAAAAAAMw/gTWZ3IeP8dY/s50-c-k-no/photo.jpg", + "userId": 
"104989755527098071788" + }, + "user_tz": 480 }, + "id": "325tbTiE5GpJ", + "outputId": "780d820d-643d-4e99-ff26-ca7b0ebf2453" + }, + "outputs": [ { - "metadata": { - "id": "mDOviz8d4n4c", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - }, - "base_uri": "https://localhost:8080/", - "height": 166 - }, - "outputId": "89be067a-b356-443d-d589-ab55016190a1", - "executionInfo": { - "status": "ok", - "timestamp": 1518145095337, - "user_tz": 480, - "elapsed": 78302, - "user": { - "displayName": "Christopher Olah", - "photoUrl": "//lh6.googleusercontent.com/-BDHAgNAk34E/AAAAAAAAAAI/AAAAAAAAAMw/gTWZ3IeP8dY/s50-c-k-no/photo.jpg", - "userId": "104989755527098071788" - } - } - }, - "cell_type": "code", - "source": [ - "# Jitter 2\n", - "\n", - "transforms = [\n", - " transform.jitter(2)\n", - "]\n", - "_ = render.render_vis(model, \"mixed4a_pre_relu:476\", transforms=transforms)" - ], - "execution_count": 0, - "outputs": [ - { - "output_type": "stream", - "text": [ - "512 1853.4551\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "" - ] - }, - "metadata": { - "tags": [] - } - } - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "512 1286.6077\n" + ] }, { - "metadata": { - "id": "325tbTiE5GpJ", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - }, - "base_uri": "https://localhost:8080/", - "height": 166 - }, - "outputId": "780d820d-643d-4e99-ff26-ca7b0ebf2453", - "executionInfo": { - "status": "ok", - "timestamp": 1518145209289, - "user_tz": 480, - "elapsed": 113882, - "user": { - "displayName": "Christopher Olah", - "photoUrl": "//lh6.googleusercontent.com/-BDHAgNAk34E/AAAAAAAAAAI/AAAAAAAAAMw/gTWZ3IeP8dY/s50-c-k-no/photo.jpg", - "userId": "104989755527098071788" - } - } - }, - "cell_type": "code", - "source": [ - "# Breaking out all the stops\n", - "\n", - "transforms = [\n", - " transform.pad(16),\n", - " transform.jitter(8),\n", - " transform.random_scale([n/100. for n in range(80, 120)]),\n", - " transform.random_rotate(range(-10,10) + range(-5,5) + 10*range(-2,2)),\n", - " transform.jitter(2)\n", - "]\n", - "_ = render.render_vis(model, \"mixed4a_pre_relu:476\", transforms=transforms)" + "data": { + "text/html": [ + "" ], - "execution_count": 0, - "outputs": [ - { - "output_type": "stream", - "text": [ - "512 1195.9929\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "" - ] - }, - "metadata": { - "tags": [] - } - } + "text/plain": [ + "" ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Breaking out all the stops\n", + "\n", + "transforms = [\n", + " transform.pad(16),\n", + " transform.jitter(8),\n", + " transform.random_scale([n/100. 
for n in range(80, 120)]),\n", + " transform.random_rotate(list(range(-10,10)) + list(range(-5,5)) + 10*list(range(-2,2))),\n", + " transform.jitter(2)\n", + "]\n", + "_ = render.render_vis(model, \"mixed4a_pre_relu:476\", transforms=transforms)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "lW2Fmtv124Bo" + }, + "source": [ + "**Experimenting with parameterization**\n", + "\n", + "Recomended reading: The Feature Visualization article's section on [Preconditioning and Parameterization](https://distill.pub/2017/feature-visualization/#preconditioning)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 166 + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 91118, + "status": "ok", + "timestamp": 1518145357388, + "user": { + "displayName": "Christopher Olah", + "photoUrl": "//lh6.googleusercontent.com/-BDHAgNAk34E/AAAAAAAAAAI/AAAAAAAAAMw/gTWZ3IeP8dY/s50-c-k-no/photo.jpg", + "userId": "104989755527098071788" + }, + "user_tz": 480 }, + "id": "N-BTF_W0fHZh", + "outputId": "e3c92004-c89e-4c7b-da4f-21d364f30906" + }, + "outputs": [ { - "metadata": { - "id": "lW2Fmtv124Bo", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "**Experimenting with parameterization**\n", - "\n", - "Recomended reading: The Feature Visualization article's section on [Preconditioning and Parameterization](https://distill.pub/2017/feature-visualization/#preconditioning)" - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "512 1015.43964\n" + ] }, { - "metadata": { - "id": "N-BTF_W0fHZh", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - }, - "base_uri": "https://localhost:8080/", - "height": 166 - }, - "outputId": "e3c92004-c89e-4c7b-da4f-21d364f30906", - "executionInfo": { - "status": "ok", - "timestamp": 1518145357388, - "user_tz": 480, - "elapsed": 91118, - "user": { - "displayName": "Christopher Olah", - "photoUrl": "//lh6.googleusercontent.com/-BDHAgNAk34E/AAAAAAAAAAI/AAAAAAAAAMw/gTWZ3IeP8dY/s50-c-k-no/photo.jpg", - "userId": "104989755527098071788" - } - } - }, - "cell_type": "code", - "source": [ - "# Using alternate parameterizations is one of the primary ingredients for\n", - "# effective visualization\n", - "\n", - "param_f = lambda: param.image(128, fft=False, decorrelate=False)\n", - "_ = render.render_vis(model, \"mixed4a_pre_relu:2\", param_f)" + "data": { + "text/html": [ + "" ], - "execution_count": 0, - "outputs": [ - { - "output_type": "stream", - "text": [ - "512 808.84076\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "" - ] - }, - "metadata": { - "tags": [] - } - } + "text/plain": [ + "" ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Using alternate parameterizations is one of the primary ingredients for\n", + "# effective visualization\n", + "\n", + "param_f = lambda: param.image(128, fft=False, decorrelate=False)\n", + "_ = render.render_vis(model, \"mixed4a_pre_relu:2\", param_f)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 166 + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 6988, + "status": "ok", + "timestamp": 1517984295016, 
+ "user": { + "displayName": "Christopher Olah", + "photoUrl": "//lh6.googleusercontent.com/-BDHAgNAk34E/AAAAAAAAAAI/AAAAAAAAAMw/gTWZ3IeP8dY/s50-c-k-no/photo.jpg", + "userId": "104989755527098071788" + }, + "user_tz": 480 + }, + "id": "8hrCwdxhcUHn", + "outputId": "9e0fd16a-55b2-43e3-fcd8-c29164d35fcc" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "512 855.3585\n" + ] }, { - "metadata": { - "id": "8hrCwdxhcUHn", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - }, - "base_uri": "https://localhost:8080/", - "height": 166 - }, - "outputId": "9e0fd16a-55b2-43e3-fcd8-c29164d35fcc", - "executionInfo": { - "status": "ok", - "timestamp": 1517984295016, - "user_tz": 480, - "elapsed": 6988, - "user": { - "displayName": "Christopher Olah", - "photoUrl": "//lh6.googleusercontent.com/-BDHAgNAk34E/AAAAAAAAAAI/AAAAAAAAAMw/gTWZ3IeP8dY/s50-c-k-no/photo.jpg", - "userId": "104989755527098071788" - } - } - }, - "cell_type": "code", - "source": [ - "# Using alternate parameterizations is one of the primary ingredients for\n", - "# effective visualization\n", - "\n", - "param_f = lambda: param.image(128, fft=True, decorrelate=True)\n", - "_ = render.render_vis(model, \"mixed4a_pre_relu:2\", param_f)" + "data": { + "text/html": [ + "" ], - "execution_count": 0, - "outputs": [ - { - "output_type": "stream", - "text": [ - "512 1191.0022\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "" - ] - }, - "metadata": { - "tags": [] - } - } + "text/plain": [ + "" ] + }, + "metadata": {}, + "output_type": "display_data" } - ] + ], + "source": [ + "# Using alternate parameterizations is one of the primary ingredients for\n", + "# effective visualization\n", + "\n", + "param_f = lambda: param.image(128, fft=True, decorrelate=True)\n", + "_ = render.render_vis(model, \"mixed4a_pre_relu:2\", param_f)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "default_view": {}, + "name": "Lucid Tutorial", + "provenance": [], + "version": "0.3.2", + "views": {} + }, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.9" + } + }, + "nbformat": 4, + "nbformat_minor": 4 } From dbda67d3074cc287f3354458e8f4f247c56c9486 Mon Sep 17 00:00:00 2001 From: Thomas Constum Date: Tue, 16 Jun 2020 08:55:00 +0000 Subject: [PATCH 05/57] updating notebook of modelzoo --- notebooks/modelzoo.ipynb | 514 +++++++++++++++++++++++++++++---------- 1 file changed, 385 insertions(+), 129 deletions(-) diff --git a/notebooks/modelzoo.ipynb b/notebooks/modelzoo.ipynb index 71b54027..532f9349 100644 --- a/notebooks/modelzoo.ipynb +++ b/notebooks/modelzoo.ipynb @@ -28,7 +28,7 @@ }, { "cell_type": "code", - "execution_count": 0, + "execution_count": 1, "metadata": { "colab": {}, "colab_type": "code", @@ -36,10 +36,10 @@ }, "outputs": [], "source": [ - "# Expanded modelzoo is only available as of lucid v0.3\n", - "!pip install --quiet lucid==0.3\n", + "# !pip install --quiet --upgrade-strategy=only-if-needed git+https://github.com/tensorflow/lucid.git\n", + "\n", "#tensorflow_version only works in 
colab\n", - "%tensorflow_version 1.x\n", + "# %tensorflow_version 1.x\n", "\n", "import numpy as np\n", "import tensorflow as tf\n", @@ -64,7 +64,7 @@ }, { "cell_type": "code", - "execution_count": 0, + "execution_count": 2, "metadata": { "colab": {}, "colab_type": "code", @@ -111,42 +111,44 @@ "\n", "Model Dataset\n", "\n", - "PnasnetMobile_slim ImageNet\n", - "PnasnetLarge_slim ImageNet\n", - "InceptionV1_caffe_Places365 Places365\n", - "MobilenetV1_025_slim ImageNet\n", + "AlexNet ImageNet\n", "AlexNet_caffe_Places365 Places365\n", - "NasnetMobile_slim ImageNet\n", - "InceptionV2_slim ImageNet\n", - "ResnetV1_101_slim ImageNet\n", - "InceptionV1_caffe ImageNet\n", + "CaffeNet_caffe ImageNet\n", "InceptionResnetV2_slim ImageNet\n", - "InceptionV3_slim ImageNet\n", + "InceptionV1 ImageNet\n", + "InceptionV1_caffe ImageNet\n", "InceptionV1_caffe_Places205 Places205\n", + "InceptionV1_caffe_Places365 Places365\n", "InceptionV1_slim ImageNet\n", - "ResnetV1_50_slim ImageNet\n", - "CaffeNet_caffe ImageNet\n", + "InceptionV2_slim ImageNet\n", + "InceptionV3_slim ImageNet\n", "InceptionV4_slim ImageNet\n", - "VGG19_caffe ImageNet\n", - "ResnetV1_152_slim ImageNet\n", + "MobilenetV1_025_slim ImageNet\n", "MobilenetV1_050_slim ImageNet\n", + "MobilenetV1_slim ImageNet\n", + "MobilenetV2_10_slim ImageNet\n", + "MobilenetV2_14_slim ImageNet\n", "NasnetLarge_slim ImageNet\n", - "AlexNet ImageNet\n", + "NasnetMobile_slim ImageNet\n", + "PnasnetLarge_slim ImageNet\n", + "PnasnetMobile_slim ImageNet\n", + "ResnetV1_101_slim ImageNet\n", + "ResnetV1_152_slim ImageNet\n", + "ResnetV1_50_slim ImageNet\n", "ResnetV2_101_slim ImageNet\n", "ResnetV2_152_slim ImageNet\n", - "MobilenetV1_slim ImageNet\n", + "ResnetV2_50_slim ImageNet\n", "VGG16_caffe ImageNet\n", - "InceptionV1 ImageNet\n", - "ResnetV2_50_slim ImageNet\n" + "VGG19_caffe ImageNet\n" ] } ], "source": [ - "print \"\"\n", - "print \"Model\".ljust(27), \" \", \"Dataset\"\n", - "print \"\"\n", - "for name, Model in nets.models_map.iteritems():\n", - " print name.ljust(27), \" \", Model.dataset" + "print(\"\")\n", + "print(\"Model\".ljust(27), \" \", \"Dataset\")\n", + "print(\"\")\n", + "for name, Model in nets.models_map.items():\n", + " print(name.ljust(27), \" \", Model.dataset)" ] }, { @@ -175,79 +177,33 @@ { "data": { "text/plain": [ - "[{'name': 'InceptionV4/InceptionV4/Conv2d_1a_3x3/Relu',\n", - " 'size': 32,\n", - " 'type': 'conv'},\n", - " {'name': 'InceptionV4/InceptionV4/Conv2d_2a_3x3/Relu',\n", - " 'size': 32,\n", - " 'type': 'conv'},\n", - " {'name': 'InceptionV4/InceptionV4/Conv2d_2b_3x3/Relu',\n", - " 'size': 64,\n", - " 'type': 'conv'},\n", - " {'name': 'InceptionV4/InceptionV4/Mixed_3a/concat',\n", - " 'size': 160,\n", - " 'type': 'conv'},\n", - " {'name': 'InceptionV4/InceptionV4/Mixed_4a/concat',\n", - " 'size': 192,\n", - " 'type': 'conv'},\n", - " {'name': 'InceptionV4/InceptionV4/Mixed_5a/concat',\n", - " 'size': 384,\n", - " 'type': 'conv'},\n", - " {'name': 'InceptionV4/InceptionV4/Mixed_5b/concat',\n", - " 'size': 384,\n", - " 'type': 'conv'},\n", - " {'name': 'InceptionV4/InceptionV4/Mixed_5c/concat',\n", - " 'size': 384,\n", - " 'type': 'conv'},\n", - " {'name': 'InceptionV4/InceptionV4/Mixed_5d/concat',\n", - " 'size': 384,\n", - " 'type': 'conv'},\n", - " {'name': 'InceptionV4/InceptionV4/Mixed_5e/concat',\n", - " 'size': 384,\n", - " 'type': 'conv'},\n", - " {'name': 'InceptionV4/InceptionV4/Mixed_6a/concat',\n", - " 'size': 1024,\n", - " 'type': 'conv'},\n", - " {'name': 'InceptionV4/InceptionV4/Mixed_6b/concat',\n", - 
" 'size': 1024,\n", - " 'type': 'conv'},\n", - " {'name': 'InceptionV4/InceptionV4/Mixed_6c/concat',\n", - " 'size': 1024,\n", - " 'type': 'conv'},\n", - " {'name': 'InceptionV4/InceptionV4/Mixed_6d/concat',\n", - " 'size': 1024,\n", - " 'type': 'conv'},\n", - " {'name': 'InceptionV4/InceptionV4/Mixed_6e/concat',\n", - " 'size': 1024,\n", - " 'type': 'conv'},\n", - " {'name': 'InceptionV4/InceptionV4/Mixed_6f/concat',\n", - " 'size': 1024,\n", - " 'type': 'conv'},\n", - " {'name': 'InceptionV4/InceptionV4/Mixed_6g/concat',\n", - " 'size': 1024,\n", - " 'type': 'conv'},\n", - " {'name': 'InceptionV4/InceptionV4/Mixed_6h/concat',\n", - " 'size': 1024,\n", - " 'type': 'conv'},\n", - " {'name': 'InceptionV4/InceptionV4/Mixed_7a/concat',\n", - " 'size': 1536,\n", - " 'type': 'conv'},\n", - " {'name': 'InceptionV4/InceptionV4/Mixed_7b/concat',\n", - " 'size': 1536,\n", - " 'type': 'conv'},\n", - " {'name': 'InceptionV4/InceptionV4/Mixed_7c/concat',\n", - " 'size': 1536,\n", - " 'type': 'conv'},\n", - " {'name': 'InceptionV4/InceptionV4/Mixed_7d/concat',\n", - " 'size': 1536,\n", - " 'type': 'conv'},\n", - " {'name': 'InceptionV4/Logits/Predictions', 'size': 1001, 'type': 'dense'}]" + "(Layer (belonging to InceptionV4_slim) ([{'conv'}]),\n", + " Layer (belonging to InceptionV4_slim) ([{'conv'}]),\n", + " Layer (belonging to InceptionV4_slim) ([{'conv'}]),\n", + " Layer (belonging to InceptionV4_slim) ([{'conv'}]),\n", + " Layer (belonging to InceptionV4_slim) ([{'conv'}]),\n", + " Layer (belonging to InceptionV4_slim) ([{'conv'}]),\n", + " Layer (belonging to InceptionV4_slim) ([{'conv'}]),\n", + " Layer (belonging to InceptionV4_slim) ([{'conv'}]),\n", + " Layer (belonging to InceptionV4_slim) ([{'conv'}]),\n", + " Layer (belonging to InceptionV4_slim) ([{'conv'}]),\n", + " Layer (belonging to InceptionV4_slim) ([{'conv'}]),\n", + " Layer (belonging to InceptionV4_slim) ([{'conv'}]),\n", + " Layer (belonging to InceptionV4_slim) ([{'conv'}]),\n", + " Layer (belonging to InceptionV4_slim) ([{'conv'}]),\n", + " Layer (belonging to InceptionV4_slim) ([{'conv'}]),\n", + " Layer (belonging to InceptionV4_slim) ([{'conv'}]),\n", + " Layer (belonging to InceptionV4_slim) ([{'conv'}]),\n", + " Layer (belonging to InceptionV4_slim) ([{'conv'}]),\n", + " Layer (belonging to InceptionV4_slim) ([{'conv'}]),\n", + " Layer (belonging to InceptionV4_slim) ([{'conv'}]),\n", + " Layer (belonging to InceptionV4_slim) ([{'conv'}]),\n", + " Layer (belonging to InceptionV4_slim) ([{'conv'}]),\n", + " Layer (belonging to InceptionV4_slim) ([{'dense'}]))" ] }, "execution_count": 4, - "metadata": { - "tags": [] - }, + "metadata": {}, "output_type": "execute_result" } ], @@ -278,6 +234,54 @@ "outputId": "dc9c2ea0-1e81-453f-f92f-2eaf1782ac31" }, "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/io/writing.py:69: The name tf.gfile.MakeDirs is deprecated. Please use tf.io.gfile.makedirs instead.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/io/writing.py:69: The name tf.gfile.MakeDirs is deprecated. Please use tf.io.gfile.makedirs instead.\n", + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/io/writing.py:77: The name tf.gfile.Open is deprecated. 
Please use tf.io.gfile.GFile instead.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/io/writing.py:77: The name tf.gfile.Open is deprecated. Please use tf.io.gfile.GFile instead.\n", + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/io/loading.py:129: The name tf.GraphDef is deprecated. Please use tf.compat.v1.GraphDef instead.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/io/loading.py:129: The name tf.GraphDef is deprecated. Please use tf.compat.v1.GraphDef instead.\n", + "\n" + ] + }, { "data": { "text/html": [ @@ -285,12 +289,12 @@ " \n", " " @@ -299,15 +303,12 @@ "" ] }, - "metadata": { - "tags": [] - }, + "metadata": {}, "output_type": "display_data" } ], "source": [ "model = models.InceptionV4_slim()\n", - "model.load_graphdef()\n", "\n", "model.show_graph()" ] @@ -328,7 +329,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 6, "metadata": { "colab": { "base_uri": "https://localhost:8080/", @@ -343,27 +344,246 @@ "name": "stdout", "output_type": "stream", "text": [ - "512 12.305752\n" + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/render.py:89: The name tf.Session is deprecated. Please use tf.compat.v1.Session instead.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/render.py:89: The name tf.Session is deprecated. Please use tf.compat.v1.Session instead.\n", + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/param/spatial.py:82: The name tf.spectral.irfft2d is deprecated. Please use tf.signal.irfft2d instead.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/param/spatial.py:82: The name tf.spectral.irfft2d is deprecated. Please use tf.signal.irfft2d instead.\n", + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/render.py:224: The name tf.get_default_graph is deprecated. Please use tf.compat.v1.get_default_graph instead.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/render.py:224: The name tf.get_default_graph is deprecated. Please use tf.compat.v1.get_default_graph instead.\n", + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/render.py:242: The name tf.train.AdamOptimizer is deprecated. Please use tf.compat.v1.train.AdamOptimizer instead.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/render.py:242: The name tf.train.AdamOptimizer is deprecated. 
Please use tf.compat.v1.train.AdamOptimizer instead.\n", + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/render.py:170: The name tf.train.get_or_create_global_step is deprecated. Please use tf.compat.v1.train.get_or_create_global_step instead.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/render.py:170: The name tf.train.get_or_create_global_step is deprecated. Please use tf.compat.v1.train.get_or_create_global_step instead.\n", + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/render.py:171: The name tf.variables_initializer is deprecated. Please use tf.compat.v1.variables_initializer instead.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/render.py:171: The name tf.variables_initializer is deprecated. Please use tf.compat.v1.variables_initializer instead.\n", + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/transform.py:38: The name tf.random_crop is deprecated. Please use tf.image.random_crop instead.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/transform.py:38: The name tf.random_crop is deprecated. Please use tf.image.random_crop instead.\n", + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/transform.py:141: The name tf.random_uniform is deprecated. Please use tf.random.uniform instead.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/transform.py:141: The name tf.random_uniform is deprecated. Please use tf.random.uniform instead.\n", + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/transform.py:83: The name tf.image.resize_bilinear is deprecated. Please use tf.compat.v1.image.resize_bilinear instead.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/transform.py:83: The name tf.image.resize_bilinear is deprecated. 
Please use tf.compat.v1.image.resize_bilinear instead.\n", + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:\n", + "The TensorFlow contrib module will not be included in TensorFlow 2.0.\n", + "For more information, please see:\n", + " * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md\n", + " * https://github.com/tensorflow/addons\n", + " * https://github.com/tensorflow/io (for I/O related ops)\n", + "If you depend on functionality not listed there, please file an issue.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:\n", + "The TensorFlow contrib module will not be included in TensorFlow 2.0.\n", + "For more information, please see:\n", + " * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md\n", + " * https://github.com/tensorflow/addons\n", + " * https://github.com/tensorflow/io (for I/O related ops)\n", + "If you depend on functionality not listed there, please file an issue.\n", + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/modelzoo/util.py:58: The name tf.placeholder_with_default is deprecated. Please use tf.compat.v1.placeholder_with_default instead.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/modelzoo/util.py:58: The name tf.placeholder_with_default is deprecated. Please use tf.compat.v1.placeholder_with_default instead.\n", + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/redirected_relu_grad.py:95: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use tf.where in 2.0, which has the same broadcast rule as np.where\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/redirected_relu_grad.py:95: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use tf.where in 2.0, which has the same broadcast rule as np.where\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/render.py:98: The name tf.global_variables_initializer is deprecated. Please use tf.compat.v1.global_variables_initializer instead.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/render.py:98: The name tf.global_variables_initializer is deprecated. 
Please use tf.compat.v1.global_variables_initializer instead.\n", + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "512 15.12645\n" ] }, { "data": { "text/html": [ - "" + "" ], "text/plain": [ "" ] }, - "metadata": { - "tags": [] - }, + "metadata": {}, "output_type": "display_data" } ], "source": [ "model = models.InceptionV4_slim()\n", - "model.load_graphdef()\n", "\n", "_ = render.render_vis(model, \"InceptionV4/InceptionV4/Mixed_6b/concat:0\")" ] @@ -382,7 +602,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 7, "metadata": { "colab": { "base_uri": "https://localhost:8080/", @@ -394,32 +614,69 @@ }, "outputs": [ { - "data": { - "text/html": [ - "" - ], - "text/plain": [ - "" - ] - }, - "metadata": { - "tags": [] - }, - "output_type": "display_data" + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/recipes/caricature.py:53: The name tf.placeholder is deprecated. Please use tf.compat.v1.placeholder instead.\n", + "\n" + ] }, { "name": "stderr", "output_type": "stream", "text": [ - "WARNING:lucid.misc.io.showing:Show only supports numpy arrays so far. Using repr().\n" + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/recipes/caricature.py:53: The name tf.placeholder is deprecated. Please use tf.compat.v1.placeholder instead.\n", + "\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "None\n" + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/transform.py:160: The name tf.image.resize_image_with_crop_or_pad is deprecated. Please use tf.image.resize_with_crop_or_pad instead.\n", + "\n" ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/transform.py:160: The name tf.image.resize_image_with_crop_or_pad is deprecated. Please use tf.image.resize_with_crop_or_pad instead.\n", + "\n" + ] + }, + { + "data": { + "text/html": [ + "
\n", + " InceptionV4/InceptionV4/Mixed_6b/concat
\n", + " \n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "
\n", + " 0
\n", + " \n", + "
\n", + " 1
\n", + " \n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" } ], "source": [ @@ -428,7 +685,6 @@ "img = load(\"https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png\")\n", "\n", "model = models.InceptionV4_slim()\n", - "model.load_graphdef()\n", "\n", "result = feature_inversion(img, model, \"InceptionV4/InceptionV4/Mixed_6b/concat\", n_steps=512, cossim_pow=0.0)\n", "show(result)" @@ -457,9 +713,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.2" + "version": "3.6.9" } }, "nbformat": 4, - "nbformat_minor": 1 -} \ No newline at end of file + "nbformat_minor": 4 +} From 0db3c3da272b9e7ddb0b37cc72675f56c7d9eb77 Mon Sep 17 00:00:00 2001 From: Thomas Constum Date: Tue, 16 Jun 2020 08:57:20 +0000 Subject: [PATCH 06/57] updating notebook of Semantidc Dictionnary --- .../building-blocks/SemanticDictionary.ipynb | 3125 +++++++---------- 1 file changed, 1224 insertions(+), 1901 deletions(-) diff --git a/notebooks/building-blocks/SemanticDictionary.ipynb b/notebooks/building-blocks/SemanticDictionary.ipynb index ce5e9669..5ba44fa8 100644 --- a/notebooks/building-blocks/SemanticDictionary.ipynb +++ b/notebooks/building-blocks/SemanticDictionary.ipynb @@ -1,1922 +1,1245 @@ { - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "JndnmDMp66FL" + }, + "source": [ + "##### Copyright 2018 Google LLC.\n", + "\n", + "Licensed under the Apache License, Version 2.0 (the \"License\");" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "cellView": "both", "colab": { - "name": "Semantic Dictionaries - Building Blocks of Interpretability", - "version": "0.3.2", - "views": {}, - "default_view": {}, - "provenance": [ - { - "file_id": "1lPRYcRiLweDFSMwTl7CUwboCo0YHkAYB", - "timestamp": 1518586294543 - } - ], - "collapsed_sections": [] + "autoexec": { + "startup": false, + "wait_interval": 0 + } }, - "kernelspec": { - "name": "python2", - "display_name": "Python 2" - }, - "accelerator": "GPU" + "colab_type": "code", + "id": "hMqWDc_m6rUC" + }, + "outputs": [], + "source": [ + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." 
+ ] }, - "cells": [ - { - "metadata": { - "id": "JndnmDMp66FL", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "##### Copyright 2018 Google LLC.\n", - "\n", - "Licensed under the Apache License, Version 2.0 (the \"License\");" - ] - }, - { - "metadata": { - "id": "hMqWDc_m6rUC", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - } - }, - "cellView": "both" - }, - "cell_type": "code", - "source": [ - "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", - "# you may not use this file except in compliance with the License.\n", - "# You may obtain a copy of the License at\n", - "#\n", - "# https://www.apache.org/licenses/LICENSE-2.0\n", - "#\n", - "# Unless required by applicable law or agreed to in writing, software\n", - "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", - "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", - "# See the License for the specific language governing permissions and\n", - "# limitations under the License." - ], - "execution_count": 0, - "outputs": [] - }, - { - "metadata": { - "id": "pNqKk1MmrakH", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "# Semantic Dictionaries -- Building Blocks of Interpretability\n", - "\n", - "This colab notebook is part of our **Building Blocks of Intepretability** series exploring how intepretability techniques combine together to explain neural networks. If you haven't already, make sure to look at the [**corresponding paper**](https://distill.pub/2018/building-blocks) as well!\n", - "\n", - "This notebook studies **semantic dictionaries**. The basic idea of semantic dictionaries is to marry neuron activations to visualizations of those neurons, transforming them from abstract vectors to something more meaningful to humans. Semantic dictionaries can also be applied to other bases, such as rotated versions of activations space that try to disentangle neurons.\n", - "\n", - "
\n", - "\n", - "
\n", - "\n", - "This tutorial is based on [**Lucid**](https://github.com/tensorflow/lucid), a network for visualizing neural networks. Lucid is a kind of spiritual successor to DeepDream, but provides flexible abstractions so that it can be used for a wide range of interpretability research.\n", - "\n", - "**Note**: The easiest way to use this tutorial is [as a colab notebook](), which allows you to dive in with no setup. We recommend you enable a free GPU by going:\n", - "\n", - "> **Runtime**   →   **Change runtime type**   →   **Hardware Accelerator: GPU**\n", - "\n", - "Thanks for trying Lucid!\n" - ] - }, - { - "metadata": { - "id": "hOBBuzMaxU37", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "# Install / Import / Load" - ] - }, - { - "metadata": { - "id": "UL1yOZtjqkcj", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "This code depends on [Lucid](https://github.com/tensorflow/lucid) (our visualization library), and [svelte](https://svelte.technology/) (a web framework). The following cell will install both of them, and dependancies such as TensorFlow. And then import them as appropriate." - ] + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "pNqKk1MmrakH" + }, + "source": [ + "# Semantic Dictionaries -- Building Blocks of Interpretability\n", + "\n", + "This colab notebook is part of our **Building Blocks of Intepretability** series exploring how intepretability techniques combine together to explain neural networks. If you haven't already, make sure to look at the [**corresponding paper**](https://distill.pub/2018/building-blocks) as well!\n", + "\n", + "This notebook studies **semantic dictionaries**. The basic idea of semantic dictionaries is to marry neuron activations to visualizations of those neurons, transforming them from abstract vectors to something more meaningful to humans. Semantic dictionaries can also be applied to other bases, such as rotated versions of activations space that try to disentangle neurons.\n", + "\n", + "
\n", + "\n", + "
\n", + "\n", + "This tutorial is based on [**Lucid**](https://github.com/tensorflow/lucid), a network for visualizing neural networks. Lucid is a kind of spiritual successor to DeepDream, but provides flexible abstractions so that it can be used for a wide range of interpretability research.\n", + "\n", + "**Note**: The easiest way to use this tutorial is [as a colab notebook](), which allows you to dive in with no setup. We recommend you enable a free GPU by going:\n", + "\n", + "> **Runtime**   →   **Change runtime type**   →   **Hardware Accelerator: GPU**\n", + "\n", + "Thanks for trying Lucid!\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "hOBBuzMaxU37" + }, + "source": [ + "# Install / Import / Load" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "UL1yOZtjqkcj" + }, + "source": [ + "This code depends on [Lucid](https://github.com/tensorflow/lucid) (our visualization library), and [svelte](https://svelte.technology/) (a web framework). The following cell will install both of them, and dependancies such as TensorFlow. And then import them as appropriate." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 102, + "output_extras": [ + {} + ] }, - { - "metadata": { - "id": "AA17rJBLuyYH", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - }, - "output_extras": [ - {} - ], - "base_uri": "https://localhost:8080/", - "height": 102 - }, - "outputId": "3acd867e-4fc2-4369-8684-cbdcd3f70c7d", - "executionInfo": { - "status": "ok", - "timestamp": 1520312194763, - "user_tz": 480, - "elapsed": 15116, - "user": { - "displayName": "", - "photoUrl": "", - "userId": "" - } - } - }, - "cell_type": "code", - "source": [ - "!pip install --quiet lucid==0.0.5\n", - "!npm install -g svelte-cli@2.2.0\n", - "\n", - "import numpy as np\n", - "import tensorflow as tf\n", - "\n", - "import lucid.modelzoo.vision_models as models\n", - "import lucid.optvis.render as render\n", - "from lucid.misc.io import show, load\n", - "from lucid.misc.io.showing import _image_url\n", - "import lucid.scratch.web.svelte as lucid_svelte" - ], - "execution_count": 0, - "outputs": [ - { - "output_type": "stream", - "text": [ - "\u001b[K\u001b[?25h/tools/node/bin/svelte -> /tools/node/lib/node_modules/svelte-cli/bin.js\n", - "/tools/node/lib\n", - "└─┬ \u001b[40m\u001b[33msvelte-cli@2.2.0\u001b[39m\u001b[49m \n", - " └── \u001b[40m\u001b[33msvelte@1.56.2\u001b[39m\u001b[49m \n", - "\n" - ], - "name": "stdout" - } - ] + "colab_type": "code", + "executionInfo": { + "elapsed": 15116, + "status": "ok", + "timestamp": 1520312194763, + "user": { + "displayName": "", + "photoUrl": "", + "userId": "" + }, + "user_tz": 480 }, - { - "metadata": { - "id": "HfG1iu5KdmUV", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "# Semantic Dictionary Code" - ] + "id": "AA17rJBLuyYH", + "outputId": "3acd867e-4fc2-4369-8684-cbdcd3f70c7d" + }, + "outputs": [], + "source": [ + "# !pip install --quiet --upgrade-strategy=only-if-needed git+https://github.com/tensorflow/lucid.git\n", + "# !npm install -g svelte-cli@2.2.0\n", + "%tensorflow_version 1.x\n", + "\n", + "import numpy as np\n", + "import tensorflow as tf\n", + "assert tf.__version__.startswith('1')\n", + "\n", + "import lucid.modelzoo.vision_models as models\n", + "import lucid.optvis.render as 
render\n", + "from lucid.misc.io import show, load\n", + "from lucid.misc.io.showing import _image_url\n", + "import lucid.scratch.web.svelte as lucid_svelte" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "HfG1iu5KdmUV" + }, + "source": [ + "# Semantic Dictionary Code" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "GABn20jcn4MC" + }, + "source": [ + "## **Defining the interface**\n", + "\n", + "First, we define our \"semantic dictionary\" interface as a [svelte component](https://svelte.technology/). This makes it easy to manage state, like which position we're looking at." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 102, + "output_extras": [ + {} + ] }, - { - "metadata": { - "id": "GABn20jcn4MC", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "## **Defining the interface**\n", - "\n", - "First, we define our \"semantic dictionary\" interface as a [svelte component](https://svelte.technology/). This makes it easy to manage state, like which position we're looking at." - ] + "colab_type": "code", + "executionInfo": { + "elapsed": 542, + "status": "ok", + "timestamp": 1520312330316, + "user": { + "displayName": "", + "photoUrl": "", + "userId": "" + }, + "user_tz": 480 }, + "id": "ZhUtaAmcOIrw", + "outputId": "20678755-ab0c-43fc-fa6c-24bdc361fc96" + }, + "outputs": [ { - "metadata": { - "id": "ZhUtaAmcOIrw", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - }, - "output_extras": [ - {} - ], - "base_uri": "https://localhost:8080/", - "height": 102 - }, - "outputId": "20678755-ab0c-43fc-fa6c-24bdc361fc96", - "executionInfo": { - "status": "ok", - "timestamp": 1520312330316, - "user_tz": 480, - "elapsed": 542, - "user": { - "displayName": "", - "photoUrl": "", - "userId": "" - } - } - }, - "cell_type": "code", - "source": [ - "%%html_define_svelte SemanticDict\n", - "\n", - "
\n", - "
\n", - "
\n", - " \n", - "\n", - " {{#each xs as x}}\n", - " {{#each ys as y}}\n", - " \n", - " {{/each}}\n", - " {{/each}}\n", - " \n", - "
\n", - "
\n", - " {{#each present_acts as act, act_ind}}\n", - "
\n", - "
\n", - "
\n", - "
\n", - " {{/each}}\n", - "
\n", - "
\n", - "\n", - "\n", - "\n", - "\n", - "" - ], - "execution_count": 0, - "outputs": [ - { - "output_type": "stream", - "text": [ - "Trying to build svelte component from html...\n", - "svelte compile --format iife /tmp/svelte_plOnQK/SemanticDict_1ed5293.html > /tmp/svelte_plOnQK/SemanticDict_1ed5293.js\n", - "svelte version 1.56.2\n", - "compiling ../tmp/svelte_plOnQK/SemanticDict_1ed5293.html...\n", - "\n" - ], - "name": "stdout" - } - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "Trying to build svelte component from html...\n", + "svelte compile --format iife /tmp/svelte_wdjk7ptl/SemanticDict_66496726_d99e_4104_8df6_99fd6104c2e6.html > /tmp/svelte_wdjk7ptl/SemanticDict_66496726_d99e_4104_8df6_99fd6104c2e6.js\n", + "Svelte build failed! Output:\n", + "/bin/sh: 1: svelte: not found\n", + "\n" + ] + } + ], + "source": [ + "%%html_define_svelte SemanticDict\n", + "\n", + "
\n", + "
\n", + "
\n", + " \n", + "\n", + " {{#each xs as x}}\n", + " {{#each ys as y}}\n", + " \n", + " {{/each}}\n", + " {{/each}}\n", + " \n", + "
\n", + "
\n", + " {{#each present_acts as act, act_ind}}\n", + "
\n", + "
\n", + "
\n", + "
\n", + "
{{Math.round(act.v*100)/100}}
\n", + "
\n", + "
\n", + "
\n", + " {{/each}}\n", + "
\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "F4l9Ki-UoVko" + }, + "source": [ + "## **Spritemaps**\n", + "\n", + "In order to use the semantic dictionaries, we need \"spritemaps\" of channel visualizations.\n", + "These visualization spritemaps are large grids of images (such as [this one](https://storage.googleapis.com/lucid-static/building-blocks/googlenet_spritemaps/sprite_mixed4d_channel.jpeg)) that visualize every channel in a layer.\n", + "We provide spritemaps for GoogLeNet because making them takes a few hours of GPU time, but\n", + "you can make your own channel spritemaps to explore other models. [Check out other notebooks](https://github.com/tensorflow/lucid#notebooks) on how to\n", + "make your own neuron visualizations.\n", + "\n", + "It's also worth noting that GoogLeNet has unusually semantically meaningful neurons. We don't know why this is -- although it's an active area of research for us. More sophisticated interfaces, such as neuron groups, may work better for networks where meaningful ideas are more entangled or less aligned with the neuron directions." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } }, - { - "metadata": { - "id": "F4l9Ki-UoVko", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "## **Spritemaps**\n", - "\n", - "In order to use the semantic dictionaries, we need \"spritemaps\" of channel visualizations.\n", - "These visualization spritemaps are large grids of images (such as [this one](https://storage.googleapis.com/lucid-static/building-blocks/googlenet_spritemaps/sprite_mixed4d_channel.jpeg)) that visualize every channel in a layer.\n", - "We provide spritemaps for GoogLeNet because making them takes a few hours of GPU time, but\n", - "you can make your own channel spritemaps to explore other models. [Check out other notebooks](https://github.com/tensorflow/lucid#notebooks) on how to\n", - "make your own neuron visualizations.\n", - "\n", - "It's also worth noting that GoogLeNet has unusually semantically meaningful neurons. We don't know why this is -- although it's an active area of research for us. More sophisticated interfaces, such as neuron groups, may work better for networks where meaningful ideas are more entangled or less aligned with the neuron directions." - ] + "colab_type": "code", + "id": "BpGLiyEEoPfB" + }, + "outputs": [], + "source": [ + "layer_spritemap_sizes = {\n", + " 'mixed3a' : 16,\n", + " 'mixed3b' : 21,\n", + " 'mixed4a' : 22,\n", + " 'mixed4b' : 22,\n", + " 'mixed4c' : 22,\n", + " 'mixed4d' : 22,\n", + " 'mixed4e' : 28,\n", + " 'mixed5a' : 28,\n", + " }\n", + "\n", + "def googlenet_spritemap(layer):\n", + " assert layer in layer_spritemap_sizes\n", + " size = layer_spritemap_sizes[layer]\n", + " url = \"https://storage.googleapis.com/lucid-static/building-blocks/googlenet_spritemaps/sprite_%s_channel_alpha.jpeg\" % layer\n", + " return size, url" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "eP9jcxiLowkZ" + }, + "source": [ + "## **User facing constructor**\n", + "\n", + "Now we'll create a convenient API for creating semantic dictionary visualizations. It will compute the network activations for an image, grab an appropriate spritemap, and render the interface." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } }, - { - "metadata": { - "id": "BpGLiyEEoPfB", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - } - } - }, - "cell_type": "code", - "source": [ - "layer_spritemap_sizes = {\n", - " 'mixed3a' : 16,\n", - " 'mixed3b' : 21,\n", - " 'mixed4a' : 22,\n", - " 'mixed4b' : 22,\n", - " 'mixed4c' : 22,\n", - " 'mixed4d' : 22,\n", - " 'mixed4e' : 28,\n", - " 'mixed5a' : 28,\n", - " }\n", - "\n", - "def googlenet_spritemap(layer):\n", - " assert layer in layer_spritemap_sizes\n", - " size = layer_spritemap_sizes[layer]\n", - " url = \"https://storage.googleapis.com/lucid-static/building-blocks/googlenet_spritemaps/sprite_%s_channel_alpha.jpeg\" % layer\n", - " return size, url" - ], - "execution_count": 0, - "outputs": [] + "colab_type": "code", + "id": "9czK9sf1d1bU" + }, + "outputs": [], + "source": [ + "googlenet = models.InceptionV1()\n", + "googlenet.load_graphdef()\n", + "\n", + "\n", + "def googlenet_semantic_dict(layer, img_url):\n", + " img = load(img_url)\n", + " \n", + " # Compute the activations\n", + " with tf.Graph().as_default(), tf.Session():\n", + " t_input = tf.placeholder(tf.float32, [224, 224, 3])\n", + " T = render.import_model(googlenet, t_input, t_input)\n", + " acts = T(layer).eval({t_input: img})[0]\n", + " \n", + " # Find the most interesting position for our initial view\n", + " max_mag = acts.max(-1)\n", + " max_x = int(np.argmax(max_mag.max(-1)))\n", + " max_y = int(np.argmax(max_mag[max_x]))\n", + " \n", + " # Find appropriate spritemap\n", + " spritemap_n, spritemap_url = googlenet_spritemap(layer)\n", + " \n", + " # Actually construct the semantic dictionary interface\n", + " # using our *custom component*\n", + " lucid_svelte.SemanticDict({\n", + " \"spritemap_url\": spritemap_url,\n", + " \"sprite_size\": 110,\n", + " \"sprite_n_wrap\": spritemap_n, # row size\n", + " \"image_url\": _image_url(img), # generate a data:image/ url\n", + " \"activations\": [\n", + " [\n", + " [{\"n\": int(n), \"v\": float(act_vec[n])} for n in np.argsort(-act_vec)[:4]]\n", + " for act_vec in act_slice\n", + " ]\n", + " for act_slice in acts\n", + " ], # the 4 features with the highest activation for the tile\n", + " \"pos\": [max_y, max_x], # best position for initial view\n", + " })\n", + " \n", + " " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "k0-gZUpApZyz" + }, + "source": [ + "# Now let's make some semantic dictionaries!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 265, + "output_extras": [ + {} + ] }, - { - "metadata": { - "id": "eP9jcxiLowkZ", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "## **User facing constructor**\n", - "\n", - "Now we'll create a convenient API for creating semantic dictionary visualizations. It will compute the network activations for an image, grab an appropriate spritemap, and render the interface." 
- ] + "colab_type": "code", + "executionInfo": { + "elapsed": 2455, + "status": "ok", + "timestamp": 1520312336706, + "user": { + "displayName": "", + "photoUrl": "", + "userId": "" + }, + "user_tz": 480 }, - { - "metadata": { - "id": "9czK9sf1d1bU", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - } - } - }, - "cell_type": "code", - "source": [ - "googlenet = models.InceptionV1()\n", - "googlenet.load_graphdef()\n", - "\n", - "\n", - "def googlenet_semantic_dict(layer, img_url):\n", - " img = load(img_url)\n", - " \n", - " # Compute the activations\n", - " with tf.Graph().as_default(), tf.Session():\n", - " t_input = tf.placeholder(tf.float32, [224, 224, 3])\n", - " T = render.import_model(googlenet, t_input, t_input)\n", - " acts = T(layer).eval({t_input: img})[0]\n", - " \n", - " # Find the most interesting position for our initial view\n", - " max_mag = acts.max(-1)\n", - " max_x = np.argmax(max_mag.max(-1))\n", - " max_y = np.argmax(max_mag[max_x])\n", - " \n", - " # Find appropriate spritemap\n", - " spritemap_n, spritemap_url = googlenet_spritemap(layer)\n", - " \n", - " # Actually construct the semantic dictionary interface\n", - " # using our *custom component*\n", - " lucid_svelte.SemanticDict({\n", - " \"spritemap_url\": spritemap_url,\n", - " \"sprite_size\": 110,\n", - " \"sprite_n_wrap\": spritemap_n,\n", - " \"image_url\": _image_url(img),\n", - " \"activations\": [[[{\"n\": n, \"v\": float(act_vec[n])} for n in np.argsort(-act_vec)[:4]] for act_vec in act_slice] for act_slice in acts],\n", - " \"pos\" : [max_y, max_x]\n", - " })" - ], - "execution_count": 0, - "outputs": [] + "id": "MEWC-UKdqRGC", + "outputId": "096bc577-d5d9-4c48-a914-411b8e77b03a" + }, + "outputs": [], + "source": [ + "googlenet_semantic_dict(\"mixed4d\", \"https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 265, + "output_extras": [ + {} + ] }, - { - "metadata": { - "id": "k0-gZUpApZyz", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "# Now let's make some semantic dictionaries!" - ] + "colab_type": "code", + "executionInfo": { + "elapsed": 1259, + "status": "ok", + "timestamp": 1520312338047, + "user": { + "displayName": "", + "photoUrl": "", + "userId": "" + }, + "user_tz": 480 }, + "id": "Izf_YqCRe6E7", + "outputId": "ed2b0826-52b4-4714-e019-773162c2b170" + }, + "outputs": [ { - "metadata": { - "id": "MEWC-UKdqRGC", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - }, - "output_extras": [ - {} - ], - "base_uri": "https://localhost:8080/", - "height": 265 - }, - "outputId": "096bc577-d5d9-4c48-a914-411b8e77b03a", - "executionInfo": { - "status": "ok", - "timestamp": 1520312336706, - "user_tz": 480, - "elapsed": 2455, - "user": { - "displayName": "", - "photoUrl": "", - "userId": "" - } - } - }, - "cell_type": "code", - "source": [ - "googlenet_semantic_dict(\"mixed4d\", \"https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png\")" + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " ], - "execution_count": 0, - "outputs": [ - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "\n", - "
\n", - " \n", - " \n", - " " - ] - }, - "metadata": { - "tags": [] - } - } + "text/plain": [ + "" ] - }, + }, + "metadata": { + "tags": [] + }, + "output_type": "display_data" + } + ], + "source": [ + "googlenet_semantic_dict(\"mixed4d\", \"https://storage.googleapis.com/lucid-static/building-blocks/examples/flowers.png\")" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "collapsed_sections": [], + "default_view": {}, + "name": "Semantic Dictionaries - Building Blocks of Interpretability", + "provenance": [ { - "metadata": { - "id": "Izf_YqCRe6E7", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - }, - "output_extras": [ - {} - ], - "base_uri": "https://localhost:8080/", - "height": 265 - }, - "outputId": "ed2b0826-52b4-4714-e019-773162c2b170", - "executionInfo": { - "status": "ok", - "timestamp": 1520312338047, - "user_tz": 480, - "elapsed": 1259, - "user": { - "displayName": "", - "photoUrl": "", - "userId": "" - } - } - }, - "cell_type": "code", - "source": [ - "googlenet_semantic_dict(\"mixed4d\", \"https://storage.googleapis.com/lucid-static/building-blocks/examples/flowers.png\")" - ], - "execution_count": 0, - "outputs": [ - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "\n", - "
\n", - " \n", - " \n", - " " - ] - }, - "metadata": { - "tags": [] - } - } - ] + "file_id": "1lPRYcRiLweDFSMwTl7CUwboCo0YHkAYB", + "timestamp": 1518586294543 } - ] + ], + "version": "0.3.2", + "views": {} + }, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.9" + } + }, + "nbformat": 4, + "nbformat_minor": 4 } From 21793cb273186ca11f7d57c048300e683dd27c7b Mon Sep 17 00:00:00 2001 From: Thomas Constum Date: Tue, 16 Jun 2020 09:08:51 +0000 Subject: [PATCH 07/57] adding a jupyter version of the notebook Semantic Dictionnary --- .../SemanticDictionaryJupyter.ipynb | 1288 +++++++++++++++++ 1 file changed, 1288 insertions(+) create mode 100644 notebooks/building-blocks/jupyter_versions/SemanticDictionaryJupyter.ipynb diff --git a/notebooks/building-blocks/jupyter_versions/SemanticDictionaryJupyter.ipynb b/notebooks/building-blocks/jupyter_versions/SemanticDictionaryJupyter.ipynb new file mode 100644 index 00000000..8897c2d4 --- /dev/null +++ b/notebooks/building-blocks/jupyter_versions/SemanticDictionaryJupyter.ipynb @@ -0,0 +1,1288 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "JndnmDMp66FL" + }, + "source": [ + "##### Copyright 2018 Google LLC.\n", + "\n", + "Licensed under the Apache License, Version 2.0 (the \"License\");" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "cellView": "both", + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "hMqWDc_m6rUC" + }, + "outputs": [], + "source": [ + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "pNqKk1MmrakH" + }, + "source": [ + "# Semantic Dictionaries -- Building Blocks of Interpretability\n", + "\n", + "This colab notebook is part of our **Building Blocks of Intepretability** series exploring how intepretability techniques combine together to explain neural networks. If you haven't already, make sure to look at the [**corresponding paper**](https://distill.pub/2018/building-blocks) as well!\n", + "\n", + "This notebook studies **semantic dictionaries**. The basic idea of semantic dictionaries is to marry neuron activations to visualizations of those neurons, transforming them from abstract vectors to something more meaningful to humans. Semantic dictionaries can also be applied to other bases, such as rotated versions of activations space that try to disentangle neurons.\n", + "\n", + "
\n", + "\n", + "
\n", + "\n", + "This tutorial is based on [**Lucid**](https://github.com/tensorflow/lucid), a network for visualizing neural networks. Lucid is a kind of spiritual successor to DeepDream, but provides flexible abstractions so that it can be used for a wide range of interpretability research.\n", + "\n", + "#### **This notebook is a Jupyter version of the original Google Colab Notebook. This version adds widgets to facilitate the use of Lucid on your own images.**" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[33mWARNING: You are using pip version 19.3.1; however, version 20.1.1 is available.\n", + "You should consider upgrading via the 'pip install --upgrade pip' command.\u001b[0m\n" + ] + } + ], + "source": [ + "!pip install --quiet ipywidgets ipyfilechooser" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 102, + "output_extras": [ + {} + ] + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 15116, + "status": "ok", + "timestamp": 1520312194763, + "user": { + "displayName": "", + "photoUrl": "", + "userId": "" + }, + "user_tz": 480 + }, + "id": "AA17rJBLuyYH", + "outputId": "3acd867e-4fc2-4369-8684-cbdcd3f70c7d" + }, + "outputs": [], + "source": [ + "import os\n", + "\n", + "import numpy as np\n", + "from ipyfilechooser import FileChooser\n", + "import ipywidgets as widgets\n", + "from IPython.display import display\n", + "from pathlib import Path\n", + "import PIL.Image as Image\n", + "\n", + "import tensorflow as tf\n", + "assert tf.__version__.startswith('1')\n", + "# uncomment to avoid deprecation warnings :\n", + "from tensorflow.python.util import deprecation\n", + "deprecation._PRINT_DEPRECATION_WARNINGS = False\n", + "tf.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n", + "\n", + "import lucid.modelzoo.vision_models as models\n", + "import lucid.optvis.render as render\n", + "from lucid.misc.io import show, load\n", + "from lucid.misc.io.showing import _image_url\n", + "import lucid.scratch.web.svelte as lucid_svelte" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "HfG1iu5KdmUV" + }, + "source": [ + "# Semantic Dictionary Code" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "model = models.InceptionV1()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "GABn20jcn4MC" + }, + "source": [ + "## **Defining the interface**\n", + "\n", + "First, we define our \"semantic dictionary\" interface as a [svelte component](https://svelte.technology/). This makes it easy to manage state, like which position we're looking at.\n", + "NB : this svelte component uses a SVG image with transparency for the viewbox. This does not work on Jupyterlab which seems to convert the SVG file to PDF. Please use or classic jupyter notebook instead." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 102, + "output_extras": [ + {} + ] + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 542, + "status": "ok", + "timestamp": 1520312330316, + "user": { + "displayName": "", + "photoUrl": "", + "userId": "" + }, + "user_tz": 480 + }, + "id": "ZhUtaAmcOIrw", + "outputId": "20678755-ab0c-43fc-fa6c-24bdc361fc96" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Trying to build svelte component from html...\n", + "svelte compile --format iife /tmp/svelte_tnps1sft/SemanticDict_97ab18be_6a86_4b40_bd6e_2f3fe35bf6b4.html > /tmp/svelte_tnps1sft/SemanticDict_97ab18be_6a86_4b40_bd6e_2f3fe35bf6b4.js\n", + "b'svelte version 1.64.1\\ncompiling ../../../../../../../tmp/svelte_tnps1sft/SemanticDict_97ab18be_6a86_4b40_bd6e_2f3fe35bf6b4.html...\\n'\n" + ] + } + ], + "source": [ + "%%html_define_svelte SemanticDict\n", + "\n", + "
\n", + "
\n", + "
\n", + " \n", + "\n", + " {{#each xs as x}}\n", + " {{#each ys as y}}\n", + " \n", + " {{/each}}\n", + " {{/each}}\n", + " \n", + "
\n", + "
\n", + " {{#each present_acts as act, act_ind}}\n", + "
\n", + "
\n", + "
\n", + "
\n", + "
{{Math.round(act.v*10)/10}}
\n", + "
\n", + "
\n", + "
\n", + " {{/each}}\n", + "
\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "F4l9Ki-UoVko" + }, + "source": [ + "## **Spritemaps**\n", + "\n", + "In order to use the semantic dictionaries, we need \"spritemaps\" of channel visualizations.\n", + "These visualization spritemaps are large grids of images (such as [this one](https://storage.googleapis.com/lucid-static/building-blocks/googlenet_spritemaps/sprite_mixed4d_channel.jpeg)) that visualize every channel in a layer.\n", + "We provide spritemaps for GoogLeNet because making them takes a few hours of GPU time, but\n", + "you can make your own channel spritemaps to explore other models. [Check out other notebooks](https://github.com/tensorflow/lucid#notebooks) on how to\n", + "make your own neuron visualizations.\n", + "\n", + "It's also worth noting that GoogLeNet has unusually semantically meaningful neurons. We don't know why this is -- although it's an active area of research for us. More sophisticated interfaces, such as neuron groups, may work better for networks where meaningful ideas are more entangled or less aligned with the neuron directions." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "BpGLiyEEoPfB" + }, + "outputs": [], + "source": [ + "# adapt the column sizes to the chosen model :\n", + "spritemap_column_sizes = {\n", + " 'mixed3a' : 16,\n", + " 'mixed3b' : 21,\n", + " 'mixed4a' : 22,\n", + " 'mixed4b' : 22,\n", + " 'mixed4c' : 22,\n", + " 'mixed4d' : 22,\n", + " 'mixed4e' : 28,\n", + " 'mixed5a' : 28,\n", + " }\n", + "\n", + "def spritemap(layer):\n", + " assert layer in spritemap_column_sizes\n", + " column_size = spritemap_column_sizes[layer]\n", + " spritemap_filename = \"spritemaps/spritemap_channel_%s.jpeg\" % layer.split(\"/\")[0]\n", + "\n", + " return column_size, spritemap_filename" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "eP9jcxiLowkZ" + }, + "source": [ + "## **User facing constructor**\n", + "\n", + "Now we'll create a convenient API for creating semantic dictionary visualizations. It will compute the network activations for an image, grab an appropriate spritemap, and render the interface." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "9czK9sf1d1bU" + }, + "outputs": [], + "source": [ + "def semantic_dict(model, layer, img_path):\n", + " img = load(img_path)\n", + " # Compute the activations\n", + " with tf.Graph().as_default(), tf.Session():\n", + " t_input = tf.placeholder(tf.float32, model.image_shape)\n", + " T = render.import_model(model, t_input, t_input)\n", + " acts = T(layer).eval({t_input: img})[0]\n", + " \n", + " # Find the most interesting position for our initial view\n", + " max_mag = acts.max(-1)\n", + " max_x = int(np.argmax(max_mag.max(-1)))\n", + " max_y = int(np.argmax(max_mag[max_x]))\n", + " \n", + " # Find appropriate spritemap\n", + " spritemap_n, spritemap_url = spritemap(layer)\n", + " \n", + " # Actually construct the semantic dictionary interface\n", + " # using our *custom component*\n", + " lucid_svelte.SemanticDict({\n", + " \"spritemap_url\": spritemap_url,\n", + " \"sprite_size\": 110,\n", + " \"sprite_n_wrap\": spritemap_n, # row size\n", + " \"image_url\": _image_url(img), # generate a data:image/ url\n", + " \"activations\": [\n", + " [\n", + " [{\"n\": int(n), \"v\": float(act_vec[n])} for n in np.argsort(-act_vec)[:12]]\n", + " for act_vec in act_slice\n", + " ]\n", + " for act_slice in acts\n", + " ], # the 12 features with the highest activation for the tile\n", + " \"pos\": [max_y, max_x], # best position for initial view\n", + " })" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Upload file from local machine and select uploading path (A) or just select one file (B):\n", + "A1) Select a file to upload\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "ddb891ddc4e34821ad37659bcb50e64c", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "FileUpload(value={}, description='Upload')" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "A2) Select destination for uploaded file\n", + "B) Select file in this server\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "9f6deff3bc5e44fbb61fceed3127b9f3", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "FileChooser(path='.', filename='', show_hidden='False')" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Select the layer you want to use semantic dictionnary on : \n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "ef23fbb3edd64d639ee8a55b62222305", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Dropdown(description='Layers', options=('conv2d0', 'conv2d1', 'conv2d2', 'mixed3a', 'mixed3b', 'mixed4a', 'mix…" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "print(\n", + " \"Upload file from local machine and select uploading path (A) or just select one file (B):\"\n", + ")\n", + "print(\"A1) Select a file to upload\")\n", + "uploader = widgets.FileUpload(accept='', multiple=False)\n", + "display(uploader)\n", + "\n", + "print(\"\\nA2) Select destination for uploaded file\")\n", + "print(\"B) Select file in this server\")\n", + "notebooks_root_path = \".\"\n", + "fc = 
FileChooser(\".\",\n", + " use_dir_icons=True,\n", + " select_default=True)\n", + "\n", + "display(fc)\n", + "\n", + "layers_list = [layer.name for layer in model.layers]\n", + "print(\"\\nSelect the layer you want to use semantic dictionnary on : \")\n", + "layers_widget = widgets.Dropdown(\n", + " options=layers_list,\n", + " value=layers_list[0],\n", + " description='Layers'\n", + ")\n", + "display(layers_widget)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "if uploader.value: # upload local file to server\n", + " picture_name = uploader.value[0]\n", + " content = uploader.value[picture_name]['content'] # memoryview of the file\n", + " picture_path = os.path.join(fc.selected_path, picture_name)\n", + " with open(picture_name, 'wb') as f:\n", + " f.write(content)\n", + "else: # use file already on the server\n", + " picture_path = fc.selected\n", + " \n", + "layer_name = layers_widget.value # layers to use semantic dictionnary on" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "semantic_dict(model, layer_name, picture_path)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "collapsed_sections": [], + "default_view": {}, + "name": "Semantic Dictionaries - Building Blocks of Interpretability", + "provenance": [ + { + "file_id": "1lPRYcRiLweDFSMwTl7CUwboCo0YHkAYB", + "timestamp": 1518586294543 + } + ], + "version": "0.3.2", + "views": {} + }, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.9" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} From 0ee1e2d6ca125575917780e67f68e42147f3c2a4 Mon Sep 17 00:00:00 2001 From: Thomas Constum Date: Tue, 16 Jun 2020 09:10:07 +0000 Subject: [PATCH 08/57] updating notebook of Activation grid --- .../building-blocks/ActivationGrid.ipynb | 1062 +++++++++-------- 1 file changed, 541 insertions(+), 521 deletions(-) diff --git a/notebooks/building-blocks/ActivationGrid.ipynb b/notebooks/building-blocks/ActivationGrid.ipynb index be7280e6..caa22919 100644 --- a/notebooks/building-blocks/ActivationGrid.ipynb +++ b/notebooks/building-blocks/ActivationGrid.ipynb @@ -1,556 +1,576 @@ { - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "JndnmDMp66FL" + }, + "source": [ + "##### Copyright 2018 Google LLC.\n", + "\n", + "Licensed under the Apache License, Version 2.0 (the \"License\");" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "cellView": "both", + "colab": {}, + "colab_type": "code", + "id": "hMqWDc_m6rUC" + }, + "outputs": [], + "source": [ + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "2CYog8wPxbFY" + }, + "source": [ + "# Activation Grids -- Building Blocks of Interpretability\n", + "\n", + "This colab notebook is part of our **Building Blocks of Intepretability** series exploring how intepretability techniques combine together to explain neural networks. If you haven't already, make sure to look at the [**corresponding paper**](https://distill.pub/2018/building-blocks) as well!\n", + "\n", + "The notebook studies **activation grids** a technique for visualizing how a network \"understood\" an image at a particular layer.\n", + "\n", + "
\n", + "\n", + "
\n", + "\n", + "This tutorial is based on [**Lucid**](https://github.com/tensorflow/lucid), a network for visualizing neural networks. Lucid is a kind of spiritual successor to DeepDream, but provides flexible abstractions so that it can be used for a wide range of interpretability research.\n", + "\n", + "**Note**: The easiest way to use this tutorial is [as a colab notebook](), which allows you to dive in with no setup. We recommend you enable a free GPU by going:\n", + "\n", + "> **Runtime**   →   **Change runtime type**   →   **Hardware Accelerator: GPU**\n", + "\n", + "Thanks for trying Lucid!" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "oCzZKRcezFW0" + }, + "source": [ + "# Install / Import" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { "colab": { - "name": "Activation Grids - Building Blocks of Interpretability", - "version": "0.3.2", - "provenance": [], - "collapsed_sections": [], - "toc_visible": true - }, - "kernelspec": { - "name": "python3", - "display_name": "Python 3" + "base_uri": "https://localhost:8080/", + "height": 34 }, - "accelerator": "GPU" - }, - "cells": [ + "colab_type": "code", + "id": "yz43YMKEvRW1", + "outputId": "eebf5d52-d154-4505-9dac-4e7f7fa22862" + }, + "outputs": [ { - "cell_type": "markdown", - "metadata": { - "id": "JndnmDMp66FL", - "colab_type": "text" - }, - "source": [ - "##### Copyright 2018 Google LLC.\n", - "\n", - "Licensed under the Apache License, Version 2.0 (the \"License\");" - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "/bin/sh: 1: npm: not found\n" + ] + } + ], + "source": [ + "# !pip install --quiet --upgrade-strategy=only-if-needed git+https://github.com/tensorflow/lucid.git\n", + "%tensorflow_version 1.x\n", + "\n", + "import numpy as np\n", + "import tensorflow as tf\n", + "assert tf.__version__.startswith('1')\n", + "\n", + "import lucid.modelzoo.vision_models as models\n", + "from lucid.misc.io import show\n", + "import lucid.optvis.objectives as objectives\n", + "import lucid.optvis.param as param\n", + "import lucid.optvis.render as render\n", + "import lucid.optvis.transform as transform\n", + "from lucid.misc.channel_reducer import ChannelReducer\n", + "import sys\n", + "\n", + "from lucid.misc.io import show, load" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "Qr6WER41vlp6" + }, + "outputs": [], + "source": [ + "model = models.InceptionV1()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "0tZuQvKwL3ed" + }, + "source": [ + "# Very Naive Implementation" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "W7SoAxu9L9xq" + }, + "source": [ + "This first implementation is a simplified, instructive demonstration of how to make activation grids.\n", + "\n", + "Unfortunately, it has two problems:\n", + "\n", + "* **Coherence:** At high levels, activation vectors can be quite abstract and correspond to a multi-modal input distribution. If we visualize them completely independently, we often end up with things being kind of incoherent. For example, some dog snouts may face left and some right. This can be visually distracting.\n", + "* **Memory:** It can run out of memory when you try to visualize large grids (eg. on low level layers with high spatial resolution)\n", + "\n", + "We'll address these in more complicated implementations shortly." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "zvp-YTVjv-Tq" + }, + "outputs": [], + "source": [ + "def render_activation_grid_very_naive(img, model, layer=\"mixed4d\", W=42, n_steps=256):\n", + "\n", + " # Get the activations\n", + " with tf.Graph().as_default(), tf.Session() as sess:\n", + " t_input = tf.placeholder(\"float32\", [None, None, None, 3])\n", + " T = render.import_model(model, t_input, t_input)\n", + " acts = T(layer).eval({t_input: img[None]})[0]\n", + " acts_flat = acts.reshape([-1] + [acts.shape[2]])\n", + "\n", + " # Render an image for each activation vector\n", + " param_f = lambda: param.image(W, batch=acts_flat.shape[0])\n", + " obj = objectives.Objective.sum(\n", + " [objectives.direction(layer, v, batch=n) for n, v in enumerate(acts_flat)]\n", + " )\n", + " thresholds = (n_steps // 2, n_steps)\n", + " vis_imgs = render.render_vis(model, obj, param_f, thresholds=thresholds)[-1]\n", + "\n", + " # Combine the images and display the resulting grid\n", + " vis_imgs_ = vis_imgs.reshape(list(acts.shape[:2]) + [W, W, 3])\n", + " vis_imgs_cropped = vis_imgs_[:, :, 2:-2, 2:-2, :]\n", + " show(np.hstack(np.hstack(vis_imgs_cropped)))\n", + " return vis_imgs_cropped" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 795 }, + "colab_type": "code", + "id": "LCVUAIDMLyaF", + "outputId": "b44bac97-dd40-4d4b-e01e-242b2dfd7bc9" + }, + "outputs": [ { - "cell_type": "code", - "metadata": { - "id": "hMqWDc_m6rUC", - "colab_type": "code", - "cellView": "both", - "colab": {} - }, - "source": [ - "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", - "# you may not use this file except in compliance with the License.\n", - "# You may obtain a copy of the License at\n", - "#\n", - "# https://www.apache.org/licenses/LICENSE-2.0\n", - "#\n", - "# Unless required by applicable law or agreed to in writing, software\n", - "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", - "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", - "# See the License for the specific language governing permissions and\n", - "# limitations under the License." - ], - "execution_count": 0, - "outputs": [] + "name": "stdout", + "output_type": "stream", + "text": [ + "512 338167300.0\n" + ] }, { - "cell_type": "markdown", - "metadata": { - "id": "2CYog8wPxbFY", - "colab_type": "text" - }, - "source": [ - "# Activation Grids -- Building Blocks of Interpretability\n", - "\n", - "This colab notebook is part of our **Building Blocks of Intepretability** series exploring how intepretability techniques combine together to explain neural networks. If you haven't already, make sure to look at the [**corresponding paper**](https://distill.pub/2018/building-blocks) as well!\n", - "\n", - "The notebook studies **activation grids** a technique for visualizing how a network \"understood\" an image at a particular layer.\n", - "\n", - "
\n", - "\n", - "
\n", - "\n", - "This tutorial is based on [**Lucid**](https://github.com/tensorflow/lucid), a network for visualizing neural networks. Lucid is a kind of spiritual successor to DeepDream, but provides flexible abstractions so that it can be used for a wide range of interpretability research.\n", - "\n", - "**Note**: The easiest way to use this tutorial is [as a colab notebook](), which allows you to dive in with no setup. We recommend you enable a free GPU by going:\n", - "\n", - "> **Runtime**   →   **Change runtime type**   →   **Hardware Accelerator: GPU**\n", - "\n", - "Thanks for trying Lucid!" + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" ] + }, + "metadata": { + "tags": [] + }, + "output_type": "display_data" }, { - "cell_type": "markdown", - "metadata": { - "id": "oCzZKRcezFW0", - "colab_type": "text" - }, - "source": [ - "# Install / Import" - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "1024 318903420.0\n" + ] }, { - "cell_type": "code", - "metadata": { - "id": "yz43YMKEvRW1", - "colab_type": "code", - "outputId": "eebf5d52-d154-4505-9dac-4e7f7fa22862", - "colab": { - "base_uri": "https://localhost:8080/", - "height": 34 - } - }, - "source": [ - "!pip install --quiet lucid==0.3.8\n", - "\n", - "import numpy as np\n", - "import tensorflow as tf\n", - "\n", - "import lucid.modelzoo.vision_models as models\n", - "from lucid.misc.io import show\n", - "import lucid.optvis.objectives as objectives\n", - "import lucid.optvis.param as param\n", - "import lucid.optvis.render as render\n", - "import lucid.optvis.transform as transform\n", - "from lucid.misc.channel_reducer import ChannelReducer\n", - "import sys\n", - "\n", - "from lucid.misc.io import show, load" + "data": { + "text/html": [ + "" ], - "execution_count": 5, - "outputs": [ - { - "output_type": "stream", - "text": [ - "/bin/sh: 1: npm: not found\r\n" - ], - "name": "stdout" - } + "text/plain": [ + "" ] + }, + "metadata": { + "tags": [] + }, + "output_type": "display_data" }, { - "cell_type": "code", - "metadata": { - "id": "Qr6WER41vlp6", - "colab_type": "code", - "colab": {} - }, - "source": [ - "model = models.InceptionV1()\n", - "model.load_graphdef()" + "data": { + "text/html": [ + "" ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "0tZuQvKwL3ed", - "colab_type": "text" - }, - "source": [ - "# Very Naive Implementation" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "W7SoAxu9L9xq", - "colab_type": "text" - }, - "source": [ - "This first implementation is a simplified, instructive demonstration of how to make activation grids.\n", - "\n", - "Unfortunately, it has two problems:\n", - "\n", - "* **Coherence:** At high levels, activation vectors can be quite abstract and correspond to a multi-modal input distribution. If we visualize them completely independently, we often end up with things being kind of incoherent. For example, some dog snouts may face left and some right. This can be visually distracting.\n", - "* **Memory:** It can run out of memory when you try to visualize large grids (eg. on low level layers with high spatial resolution)\n", - "\n", - "We'll address these in more complicated implementations shortly." 
+ "text/plain": [ + "" ] + }, + "metadata": { + "tags": [] + }, + "output_type": "display_data" + } + ], + "source": [ + "img = load(\"https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png\")\n", + "_ = render_activation_grid_very_naive(img, model, W=48, n_steps=1024)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "AXDEfsCKO7hR" + }, + "source": [ + "# Better Implementation" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "bJHQinvv_BUP" + }, + "source": [ + "Earlier, we noticed that the naive implementation has two weakenesses: decoherence and memory. This version will fix both those concerns." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "tIms6FS2O95R" + }, + "outputs": [], + "source": [ + "def render_activation_grid_less_naive(\n", + " img, model, layer=\"mixed4d\", W=42, n_groups=6, subsample_factor=1, n_steps=256\n", + "):\n", + "\n", + " # Get the activations\n", + " with tf.Graph().as_default(), tf.Session() as sess:\n", + " t_input = tf.placeholder(\"float32\", [None, None, None, 3])\n", + " T = render.import_model(model, t_input, t_input)\n", + " acts = T(layer).eval({t_input: img[None]})[0]\n", + " acts_flat = acts.reshape([-1] + [acts.shape[2]])\n", + " N = acts_flat.shape[0]\n", + "\n", + " # The trick to avoiding \"decoherence\" is to recognize images that are\n", + " # for similar activation vectors and\n", + " if n_groups > 0:\n", + " reducer = ChannelReducer(n_groups, \"NMF\")\n", + " groups = reducer.fit_transform(acts_flat)\n", + " groups /= groups.max(0)\n", + " else:\n", + " groups = np.zeros([])\n", + "\n", + " print(groups.shape)\n", + "\n", + " # The key trick to increasing memory efficiency is random sampling.\n", + " # Even though we're visualizing lots of images, we only run a small\n", + " # subset through the network at once. In order to do this, we'll need\n", + " # to hold tensors in a tensorflow graph around the visualization process.\n", + "\n", + " with tf.Graph().as_default() as graph, tf.Session() as sess:\n", + "\n", + " # Using the groups, create a paramaterization of images that\n", + " # partly shares paramters between the images for similar activation\n", + " # vectors. Each one still has a full set of unique parameters, and could\n", + " # optimize to any image. 
We're just making it easier to find solutions\n", + " # where things are the same.\n", + " group_imgs_raw = param.fft_image([n_groups, W, W, 3])\n", + " unique_imgs_raw = param.fft_image([N, W, W, 3])\n", + " opt_imgs = param.to_valid_rgb(\n", + " tf.stack(\n", + " [\n", + " 0.7 * unique_imgs_raw[i]\n", + " + 0.5\n", + " * sum(groups[i, j] * group_imgs_raw[j] for j in range(n_groups))\n", + " for i in range(N)\n", + " ]\n", + " ),\n", + " decorrelate=True,\n", + " )\n", + "\n", + " # Construct a random batch to optimize this step\n", + " batch_size = 64\n", + " rand_inds = tf.random_uniform([batch_size], 0, N, dtype=tf.int32)\n", + " pres_imgs = tf.gather(opt_imgs, rand_inds)\n", + " pres_acts = tf.gather(acts_flat, rand_inds)\n", + " obj = objectives.Objective.sum(\n", + " [\n", + " objectives.direction(layer, pres_acts[n], batch=n)\n", + " for n in range(batch_size)\n", + " ]\n", + " )\n", + "\n", + " # Actually do the optimization...\n", + " T = render.make_vis_T(model, obj, param_f=pres_imgs)\n", + " tf.global_variables_initializer().run()\n", + "\n", + " for i in range(n_steps):\n", + " T(\"vis_op\").run()\n", + " if (i + 1) % (n_steps // 2) == 0:\n", + " show(pres_imgs.eval()[::4])\n", + "\n", + " vis_imgs = opt_imgs.eval()\n", + "\n", + " # Combine the images and display the resulting grid\n", + " print(\"\")\n", + " vis_imgs_ = vis_imgs.reshape(list(acts.shape[:2]) + [W, W, 3])\n", + " vis_imgs_cropped = vis_imgs_[:, :, 2:-2, 2:-2, :]\n", + " show(np.hstack(np.hstack(vis_imgs_cropped)))\n", + " return vis_imgs_cropped" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 817 }, + "colab_type": "code", + "id": "EtGBGvCBRRhh", + "outputId": "574c3a7b-8e9d-4319-8320-fdfd59c2bdc0" + }, + "outputs": [ { - "cell_type": "code", - "metadata": { - "id": "zvp-YTVjv-Tq", - "colab_type": "code", - "colab": {} - }, - "source": [ - "def render_activation_grid_very_naive(img, model, layer=\"mixed4d\", W=42, n_steps=256):\n", - " \n", - " # Get the activations\n", - " with tf.Graph().as_default(), tf.Session() as sess:\n", - " t_input = tf.placeholder(\"float32\", [None, None, None, 3])\n", - " T = render.import_model(model, t_input, t_input)\n", - " acts = T(layer).eval({t_input: img[None]})[0]\n", - " acts_flat = acts.reshape([-1] + [acts.shape[2]])\n", - " \n", - " # Render an image for each activation vector\n", - " param_f = lambda: param.image(W, batch=acts_flat.shape[0])\n", - " obj = objectives.Objective.sum(\n", - " [objectives.direction(layer, v, batch=n)\n", - " for n,v in enumerate(acts_flat)\n", - " ])\n", - " thresholds=(n_steps//2, n_steps)\n", - " vis_imgs = render.render_vis(model, obj, param_f, thresholds=thresholds)[-1]\n", - " \n", - " # Combine the images and display the resulting grid\n", - " vis_imgs_ = vis_imgs.reshape(list(acts.shape[:2]) + [W, W, 3])\n", - " vis_imgs_cropped = vis_imgs_[:, :, 2:-2, 2:-2, :]\n", - " show(np.hstack(np.hstack(vis_imgs_cropped)))\n", - " return vis_imgs_cropped" - ], - "execution_count": 0, - "outputs": [] + "name": "stdout", + "output_type": "stream", + "text": [ + "(196, 6)\n" + ] }, { - "cell_type": "code", - "metadata": { - "id": "LCVUAIDMLyaF", - "colab_type": "code", - "outputId": "b44bac97-dd40-4d4b-e01e-242b2dfd7bc9", - "colab": { - "base_uri": "https://localhost:8080/", - "height": 795 - } - }, - "source": [ - "img = load(\"https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png\")\n", - "_ = 
render_activation_grid_very_naive(img, model, W=48, n_steps=1024)" + "data": { + "text/html": [ + "
\n", + " 0
\n", + " \n", + "
\n", + " 1
\n", + " \n", + "
\n", + " 2
\n", + " \n", + "
\n", + " 3
\n", + " \n", + "
\n", + " 4
\n", + " \n", + "
\n", + " 5
\n", + " \n", + "
\n", + " 6
\n", + " \n", + "
\n", + " 7
\n", + " \n", + "
\n", + " 8
\n", + " \n", + "
\n", + " 9
\n", + " \n", + "
\n", + " 10
\n", + " \n", + "
\n", + " 11
\n", + " \n", + "
\n", + " 12
\n", + " \n", + "
\n", + " 13
\n", + " \n", + "
\n", + " 14
\n", + " \n", + "
\n", + " 15
\n", + " \n", + "
" ], - "execution_count": 10, - "outputs": [ - { - "output_type": "stream", - "text": [ - "512 338167300.0\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "" - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "1024 318903420.0\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "" - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "" - ] - }, - "metadata": { - "tags": [] - } - } - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "AXDEfsCKO7hR", - "colab_type": "text" - }, - "source": [ - "# Better Implementation" + "text/plain": [ + "" ] + }, + "metadata": { + "tags": [] + }, + "output_type": "display_data" }, { - "cell_type": "markdown", - "metadata": { - "id": "bJHQinvv_BUP", - "colab_type": "text" - }, - "source": [ - "Earlier, we noticed that the naive implementation has two weakenesses: decoherence and memory. This version will fix both those concerns." + "data": { + "text/html": [ + "
\n", + " 0
\n", + " \n", + "
\n", + " 1
\n", + " \n", + "
\n", + " 2
\n", + " \n", + "
\n", + " 3
\n", + " \n", + "
\n", + " 4
\n", + " \n", + "
\n", + " 5
\n", + " \n", + "
\n", + " 6
\n", + " \n", + "
\n", + " 7
\n", + " \n", + "
\n", + " 8
\n", + " \n", + "
\n", + " 9
\n", + " \n", + "
\n", + " 10
\n", + " \n", + "
\n", + " 11
\n", + " \n", + "
\n", + " 12
\n", + " \n", + "
\n", + " 13
\n", + " \n", + "
\n", + " 14
\n", + " \n", + "
\n", + " 15
\n", + " \n", + "
" + ], + "text/plain": [ + "" ] + }, + "metadata": { + "tags": [] + }, + "output_type": "display_data" }, { - "cell_type": "code", - "metadata": { - "id": "tIms6FS2O95R", - "colab_type": "code", - "colab": {} - }, - "source": [ - "def render_activation_grid_less_naive(img, model, layer=\"mixed4d\", W=42,\n", - " n_groups=6, subsample_factor=1, n_steps=256):\n", - " \n", - " # Get the activations\n", - " with tf.Graph().as_default(), tf.Session() as sess:\n", - " t_input = tf.placeholder(\"float32\", [None, None, None, 3])\n", - " T = render.import_model(model, t_input, t_input)\n", - " acts = T(layer).eval({t_input: img[None]})[0]\n", - " acts_flat = acts.reshape([-1] + [acts.shape[2]])\n", - " N = acts_flat.shape[0]\n", - " \n", - " # The trick to avoiding \"decoherence\" is to recognize images that are\n", - " # for similar activation vectors and \n", - " if n_groups > 0:\n", - " reducer = ChannelReducer(n_groups, \"NMF\")\n", - " groups = reducer.fit_transform(acts_flat)\n", - " groups /= groups.max(0)\n", - " else:\n", - " groups = np.zeros([])\n", - " \n", - " print(groups.shape)\n", - "\n", - " \n", - " # The key trick to increasing memory efficiency is random sampling.\n", - " # Even though we're visualizing lots of images, we only run a small\n", - " # subset through the network at once. In order to do this, we'll need\n", - " # to hold tensors in a tensorflow graph around the visualization process.\n", - " \n", - " with tf.Graph().as_default() as graph, tf.Session() as sess:\n", - " \n", - " \n", - " # Using the groups, create a paramaterization of images that\n", - " # partly shares paramters between the images for similar activation\n", - " # vectors. Each one still has a full set of unique parameters, and could\n", - " # optimize to any image. 
We're just making it easier to find solutions\n", - " # where things are the same.\n", - " group_imgs_raw = param.fft_image([n_groups, W, W, 3])\n", - " unique_imgs_raw = param.fft_image([N, W, W, 3])\n", - " opt_imgs = param.to_valid_rgb(tf.stack([\n", - " 0.7*unique_imgs_raw[i] + \n", - " 0.5*sum(groups[i, j] * group_imgs_raw[j] for j in range(n_groups))\n", - " for i in range(N) ]),\n", - " decorrelate=True)\n", - " \n", - " # Construct a random batch to optimize this step\n", - " batch_size = 64\n", - " rand_inds = tf.random_uniform([batch_size], 0, N, dtype=tf.int32)\n", - " pres_imgs = tf.gather(opt_imgs, rand_inds)\n", - " pres_acts = tf.gather(acts_flat, rand_inds)\n", - " obj = objectives.Objective.sum(\n", - " [objectives.direction(layer, pres_acts[n], batch=n)\n", - " for n in range(batch_size)\n", - " ])\n", - " \n", - " # Actually do the optimization...\n", - " T = render.make_vis_T(model, obj, param_f=pres_imgs)\n", - " tf.global_variables_initializer().run()\n", - " \n", - " for i in range(n_steps):\n", - " T(\"vis_op\").run()\n", - " if (i+1) % (n_steps//2) == 0:\n", - " show(pres_imgs.eval()[::4])\n", - " \n", - " vis_imgs = opt_imgs.eval()\n", - " \n", - " # Combine the images and display the resulting grid\n", - " print(\"\")\n", - " vis_imgs_ = vis_imgs.reshape(list(acts.shape[:2]) + [W, W, 3])\n", - " vis_imgs_cropped = vis_imgs_[:, :, 2:-2, 2:-2, :]\n", - " show(np.hstack(np.hstack(vis_imgs_cropped)))\n", - " return vis_imgs_cropped" - ], - "execution_count": 0, - "outputs": [] + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] }, { - "cell_type": "code", - "metadata": { - "id": "EtGBGvCBRRhh", - "colab_type": "code", - "outputId": "574c3a7b-8e9d-4319-8320-fdfd59c2bdc0", - "colab": { - "base_uri": "https://localhost:8080/", - "height": 817 - } - }, - "source": [ - "img = load(\"https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png\")\n", - "_ = render_activation_grid_less_naive(img, model, W=48, n_steps=1024)" + "data": { + "text/html": [ + "" ], - "execution_count": 14, - "outputs": [ - { - "output_type": "stream", - "text": [ - "(196, 6)\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "
\n", - " 0
\n", - " \n", - "
\n", - " 1
\n", - " \n", - "
\n", - " 2
\n", - " \n", - "
\n", - " 3
\n", - " \n", - "
\n", - " 4
\n", - " \n", - "
\n", - " 5
\n", - " \n", - "
\n", - " 6
\n", - " \n", - "
\n", - " 7
\n", - " \n", - "
\n", - " 8
\n", - " \n", - "
\n", - " 9
\n", - " \n", - "
\n", - " 10
\n", - " \n", - "
\n", - " 11
\n", - " \n", - "
\n", - " 12
\n", - " \n", - "
\n", - " 13
\n", - " \n", - "
\n", - " 14
\n", - " \n", - "
\n", - " 15
\n", - " \n", - "
" - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "
\n", - " 0
\n", - " \n", - "
\n", - " 1
\n", - " \n", - "
\n", - " 2
\n", - " \n", - "
\n", - " 3
\n", - " \n", - "
\n", - " 4
\n", - " \n", - "
\n", - " 5
\n", - " \n", - "
\n", - " 6
\n", - " \n", - "
\n", - " 7
\n", - " \n", - "
\n", - " 8
\n", - " \n", - "
\n", - " 9
\n", - " \n", - "
\n", - " 10
\n", - " \n", - "
\n", - " 11
\n", - " \n", - "
\n", - " 12
\n", - " \n", - "
\n", - " 13
\n", - " \n", - "
\n", - " 14
\n", - " \n", - "
\n", - " 15
\n", - " \n", - "
" - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "" - ] - }, - "metadata": { - "tags": [] - } - } + "text/plain": [ + "" ] + }, + "metadata": { + "tags": [] + }, + "output_type": "display_data" } - ] -} \ No newline at end of file + ], + "source": [ + "img = load(\"https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png\")\n", + "_ = render_activation_grid_less_naive(img, model, W=48, n_steps=1024)" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "collapsed_sections": [], + "name": "Activation Grids - Building Blocks of Interpretability", + "provenance": [], + "toc_visible": true, + "version": "0.3.2" + }, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.9" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} From d620123e6c15785e95b7755c45a9794f82010186 Mon Sep 17 00:00:00 2001 From: Thomas Constum Date: Tue, 16 Jun 2020 09:17:29 +0000 Subject: [PATCH 09/57] adding a jupyter version of the notebook Activation grid --- .../ActivationGridJupyter.ipynb | 744 ++++++++++++++++++ 1 file changed, 744 insertions(+) create mode 100644 notebooks/building-blocks/jupyter_versions/ActivationGridJupyter.ipynb diff --git a/notebooks/building-blocks/jupyter_versions/ActivationGridJupyter.ipynb b/notebooks/building-blocks/jupyter_versions/ActivationGridJupyter.ipynb new file mode 100644 index 00000000..dbac28b2 --- /dev/null +++ b/notebooks/building-blocks/jupyter_versions/ActivationGridJupyter.ipynb @@ -0,0 +1,744 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "JndnmDMp66FL" + }, + "source": [ + "##### Copyright 2018 Google LLC.\n", + "\n", + "Licensed under the Apache License, Version 2.0 (the \"License\");" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "cellView": "both", + "colab": {}, + "colab_type": "code", + "id": "hMqWDc_m6rUC" + }, + "outputs": [], + "source": [ + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "2CYog8wPxbFY" + }, + "source": [ + "# Activation Grids -- Building Blocks of Interpretability\n", + "\n", + "This colab notebook is part of our **Building Blocks of Intepretability** series exploring how intepretability techniques combine together to explain neural networks. 
If you haven't already, make sure to look at the [**corresponding paper**](https://distill.pub/2018/building-blocks) as well!\n",
    "\n",
    "The notebook studies **activation grids**, a technique for visualizing how a network \"understood\" an image at a particular layer.\n",
    "\n",
    "
\n", + "\n", + "
\n", + "\n", + "This tutorial is based on [**Lucid**](https://github.com/tensorflow/lucid), a network for visualizing neural networks. Lucid is a kind of spiritual successor to DeepDream, but provides flexible abstractions so that it can be used for a wide range of interpretability research.\n", + "\n", + "**Note**: The easiest way to use this tutorial is [as a colab notebook](), which allows you to dive in with no setup. We recommend you enable a free GPU by going:\n", + "\n", + "> **Runtime**   →   **Change runtime type**   →   **Hardware Accelerator: GPU**\n", + "\n", + "Thanks for trying Lucid!" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "oCzZKRcezFW0" + }, + "source": [ + "# Install / Import" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 34 + }, + "colab_type": "code", + "id": "yz43YMKEvRW1", + "outputId": "eebf5d52-d154-4505-9dac-4e7f7fa22862" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/usr/local/lib/python3.6/dist-packages/sklearn/utils/deprecation.py:143: FutureWarning: The sklearn.decomposition.base module is deprecated in version 0.22 and will be removed in version 0.24. The corresponding classes / functions should instead be imported from sklearn.decomposition. Anything that cannot be imported from sklearn.decomposition is now part of the private API.\n", + " warnings.warn(message, FutureWarning)\n" + ] + } + ], + "source": [ + "# !pip install --quiet --upgrade-strategy=only-if-needed git+https://github.com/tensorflow/lucid.git\n", + "\n", + "import os\n", + "\n", + "import numpy as np\n", + "import tensorflow as tf\n", + "assert tf.__version__.startswith('1')\n", + "\n", + "import numpy as np\n", + "from ipyfilechooser import FileChooser\n", + "import ipywidgets as widgets\n", + "from IPython.display import display\n", + "\n", + "import lucid.modelzoo.vision_models as models\n", + "from lucid.misc.io import show\n", + "import lucid.optvis.objectives as objectives\n", + "import lucid.optvis.param as param\n", + "import lucid.optvis.render as render\n", + "import lucid.optvis.transform as transform\n", + "from lucid.misc.channel_reducer import ChannelReducer\n", + "\n", + "from lucid.misc.io import show, load, save" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "Qr6WER41vlp6" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "(Layer (belonging to InceptionV1) ([{'conv'}]),\n", + " Layer (belonging to InceptionV1) ([{'conv'}]),\n", + " Layer (belonging to InceptionV1) ([{'conv'}]),\n", + " Layer (belonging to InceptionV1) ([{'conv'}]),\n", + " Layer (belonging to InceptionV1) ([{'conv'}]),\n", + " Layer (belonging to InceptionV1) ([{'conv'}]),\n", + " Layer (belonging to InceptionV1) ([{'conv'}]),\n", + " Layer (belonging to InceptionV1) ([{'conv'}]),\n", + " Layer (belonging to InceptionV1) ([{'conv'}]),\n", + " Layer (belonging to InceptionV1) ([{'conv'}]),\n", + " Layer (belonging to InceptionV1) ([{'conv'}]),\n", + " Layer (belonging to InceptionV1) ([{'conv'}]),\n", + " Layer (belonging to InceptionV1) ([{'conv'}]),\n", + " Layer (belonging to InceptionV1) ([{'dense'}]),\n", + " Layer (belonging to InceptionV1) ([{'dense'}]),\n", + " Layer (belonging to InceptionV1) ([{'conv'}]),\n", + " Layer (belonging to InceptionV1) ([{'dense'}]),\n", + " Layer (belonging to InceptionV1) ([{'dense'}]),\n", + " Layer (belonging 
to InceptionV1) ([{'dense'}]))" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "model = models.InceptionV1()\n", + "model.layers" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "tIms6FS2O95R" + }, + "outputs": [], + "source": [ + "def render_activation_grid(\n", + " img, model, layer=\"mixed4d\", W=42, n_groups=6, subsample_factor=1, n_steps=256, verbose=True\n", + "):\n", + "\n", + " # Get the activations\n", + " with tf.Graph().as_default(), tf.Session() as sess:\n", + " t_input = tf.placeholder(\"float32\", [None, None, None, 3])\n", + " T = render.import_model(model, t_input, t_input)\n", + " acts = T(layer).eval({t_input: img[None]})[0]\n", + " acts_flat = acts.reshape([-1] + [acts.shape[2]])\n", + " N = acts_flat.shape[0]\n", + "\n", + " # The trick to avoiding \"decoherence\" is to recognize images that are\n", + " # for similar activation vectors and\n", + " if n_groups > 0:\n", + " reducer = ChannelReducer(n_groups, \"NMF\")\n", + " groups = reducer.fit_transform(acts_flat)\n", + " groups /= groups.max(0)\n", + " else:\n", + " groups = np.zeros([])\n", + "\n", + " print(groups.shape)\n", + "\n", + " # The key trick to increasing memory efficiency is random sampling.\n", + " # Even though we're visualizing lots of images, we only run a small\n", + " # subset through the network at once. In order to do this, we'll need\n", + " # to hold tensors in a tensorflow graph around the visualization process.\n", + "\n", + " with tf.Graph().as_default() as graph, tf.Session() as sess:\n", + "\n", + " # Using the groups, create a paramaterization of images that\n", + " # partly shares paramters between the images for similar activation\n", + " # vectors. Each one still has a full set of unique parameters, and could\n", + " # optimize to any image. 
We're just making it easier to find solutions\n", + " # where things are the same.\n", + " group_imgs_raw = param.fft_image([n_groups, W, W, 3])\n", + " unique_imgs_raw = param.fft_image([N, W, W, 3])\n", + " opt_imgs = param.to_valid_rgb(\n", + " tf.stack(\n", + " [\n", + " 0.7 * unique_imgs_raw[i]\n", + " + 0.5\n", + " * sum(groups[i, j] * group_imgs_raw[j] for j in range(n_groups))\n", + " for i in range(N)\n", + " ]\n", + " ),\n", + " decorrelate=True,\n", + " )\n", + "\n", + " # Construct a random batch to optimize this step\n", + " batch_size = 64\n", + " rand_inds = tf.random_uniform([batch_size], 0, N, dtype=tf.int32)\n", + " pres_imgs = tf.gather(opt_imgs, rand_inds)\n", + " pres_acts = tf.gather(acts_flat, rand_inds)\n", + " obj = objectives.Objective.sum(\n", + " [\n", + " objectives.direction(layer, pres_acts[n], batch=n)\n", + " for n in range(batch_size)\n", + " ]\n", + " )\n", + "\n", + " # Actually do the optimization...\n", + " T = render.make_vis_T(model, obj, param_f=pres_imgs)\n", + " tf.global_variables_initializer().run()\n", + "\n", + " for i in range(n_steps):\n", + " T(\"vis_op\").run()\n", + " if (i + 1) % (n_steps // 2) == 0 and verbose:\n", + " show(pres_imgs.eval()[::4])\n", + "\n", + " vis_imgs = opt_imgs.eval()\n", + "\n", + " # Combine the images and display the resulting grid\n", + " print(\"\")\n", + " vis_imgs_ = vis_imgs.reshape(list(acts.shape[:2]) + [W, W, 3])\n", + " vis_imgs_cropped = vis_imgs_[:, :, 2:-2, 2:-2, :]\n", + " if verbose:\n", + " show(np.hstack(np.hstack(vis_imgs_cropped)))\n", + " return vis_imgs_cropped" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Upload file from local machine and select uploading path (A) or just select one file (B):\n", + "A1) Select a file to upload\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "abf1085b362641ff976c107ad3d07cfe", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "FileUpload(value={}, description='Upload')" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "A2) Select destination for uploaded file\n", + "B) Select file in this server\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "e6899e5d06584531bc03feb2a15d7f72", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "FileChooser(path='.', filename='', show_hidden='False')" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Select the destination folder of the activation grids: \n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "0eb83b63262f4cc989e7112e8bfd8b69", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Text(value='.', description='Destination path:', layout=Layout(width='70%'), placeholder='destination/')" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Check the layers you want to apply activation grid on : \n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "65555b4859614780bb755d924f7bab9f", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "SelectMultiple(description='Layers', index=(0,), options=('mixed3a', 'mixed3b', 'mixed4a', 
'mixed4b', 'mixed4c…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "2da924d3894e426bb0a7682dde81f0a6", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Checkbox(value=True, description='Display results in notebook', indent=False)" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "8dc6cc97a3c5433daf64d922043404ef", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Checkbox(value=True, description='Save results as images in destination folder', indent=False)" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "print(\n", + " \"Upload file from local machine and select uploading path (A) or just select one file (B):\"\n", + ")\n", + "print(\"A1) Select a file to upload\")\n", + "uploader = widgets.FileUpload(accept='', multiple=False)\n", + "display(uploader)\n", + "\n", + "print(\"\\nA2) Select destination for uploaded file\")\n", + "print(\"B) Select file in this server\")\n", + "notebooks_root_path = \"\"\n", + "fc = FileChooser(\".\",\n", + " use_dir_icons=True,\n", + " select_default=True)\n", + "display(fc)\n", + "\n", + "\n", + "print(\"\\nSelect the destination folder of the activation grids: \")\n", + "dest_widget = widgets.Text(value='.',\n", + " placeholder='destination/',\n", + " description='Destination path:',\n", + " layout=widgets.Layout(width='70%'))\n", + "\n", + "display(dest_widget)\n", + "\n", + "\n", + "layers_list = [layer.name for layer in model.layers[3:]]\n", + "print(\"\\nCheck the layers you want to apply activation grid on : \")\n", + "layers_widget = widgets.SelectMultiple(\n", + " options=layers_list,\n", + " value=[layers_list[0]],\n", + " description='Layers'\n", + ")\n", + "display(layers_widget)\n", + "\n", + "\n", + "display_results_widget = widgets.Checkbox(\n", + " value=True,\n", + " description='Display results in notebook',\n", + " disabled=False,\n", + " indent=False\n", + ")\n", + "display(display_results_widget)\n", + "\n", + "\n", + "save_results_widget = widgets.Checkbox(\n", + " value=True,\n", + " description='Save results as images in destination folder',\n", + " disabled=False,\n", + " indent=False\n", + ")\n", + "display(save_results_widget)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/io/reading.py:124: The name tf.gfile.Open is deprecated. Please use tf.io.gfile.GFile instead.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/io/reading.py:124: The name tf.gfile.Open is deprecated. 
Please use tf.io.gfile.GFile instead.\n", + "\n" + ] + }, + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "save_images = save_results_widget.value\n", + "verbose = display_results_widget.value\n", + "\n", + "if uploader.value: # upload local file to server\n", + " picture_name = uploader.value[0]\n", + " content = uploader.value[picture_name]['content'] # memoryview of the file\n", + " picture_full_path = os.path.join(fc.selected_path, picture_name)\n", + " with open(picture_name, 'wb') as f:\n", + " f.write(content)\n", + "else: # use files already on the server\n", + " picture_full_path = os.path.join(notebooks_root_path,fc.selected)\n", + " \n", + "selected_layers = layers_widget.value # layers to apply activation grid on\n", + "\n", + "dest_path = dest_widget.value\n", + " \n", + "img = load(picture_full_path)\n", + "show(img)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Setting optimization parameters: (you should adapt these parameters to your own model)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "n_steps = 64#2048\n", + "n_groups = 12\n", + "grid_resolution = 672 # the total resolution of the activation grid\n", + "W = 12" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "mixed3a\n", + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/modelzoo/vision_base.py:192: The name tf.get_default_graph is deprecated. Please use tf.compat.v1.get_default_graph instead.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/modelzoo/vision_base.py:192: The name tf.get_default_graph is deprecated. Please use tf.compat.v1.get_default_graph instead.\n", + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/modelzoo/util.py:58: The name tf.placeholder_with_default is deprecated. Please use tf.compat.v1.placeholder_with_default instead.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/modelzoo/util.py:58: The name tf.placeholder_with_default is deprecated. Please use tf.compat.v1.placeholder_with_default instead.\n", + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/io/loading.py:129: The name tf.GraphDef is deprecated. Please use tf.compat.v1.GraphDef instead.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/io/loading.py:129: The name tf.GraphDef is deprecated. Please use tf.compat.v1.GraphDef instead.\n", + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(784, 12)\n", + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/param/spatial.py:82: The name tf.spectral.irfft2d is deprecated. 
Please use tf.signal.irfft2d instead.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/usr/local/lib/python3.6/dist-packages/sklearn/decomposition/_nmf.py:1077: ConvergenceWarning: Maximum number of iterations 200 reached. Increase it to improve convergence.\n", + " \" improve convergence.\" % max_iter, ConvergenceWarning)\n", + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/param/spatial.py:82: The name tf.spectral.irfft2d is deprecated. Please use tf.signal.irfft2d instead.\n", + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/render.py:242: The name tf.train.AdamOptimizer is deprecated. Please use tf.compat.v1.train.AdamOptimizer instead.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/render.py:242: The name tf.train.AdamOptimizer is deprecated. Please use tf.compat.v1.train.AdamOptimizer instead.\n", + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/render.py:170: The name tf.train.get_or_create_global_step is deprecated. Please use tf.compat.v1.train.get_or_create_global_step instead.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/render.py:170: The name tf.train.get_or_create_global_step is deprecated. Please use tf.compat.v1.train.get_or_create_global_step instead.\n", + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/render.py:171: The name tf.variables_initializer is deprecated. Please use tf.compat.v1.variables_initializer instead.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/render.py:171: The name tf.variables_initializer is deprecated. Please use tf.compat.v1.variables_initializer instead.\n", + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/transform.py:38: The name tf.random_crop is deprecated. Please use tf.image.random_crop instead.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/transform.py:38: The name tf.random_crop is deprecated. Please use tf.image.random_crop instead.\n", + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/transform.py:83: The name tf.image.resize_bilinear is deprecated. Please use tf.compat.v1.image.resize_bilinear instead.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/transform.py:83: The name tf.image.resize_bilinear is deprecated. 
Please use tf.compat.v1.image.resize_bilinear instead.\n", + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:\n", + "The TensorFlow contrib module will not be included in TensorFlow 2.0.\n", + "For more information, please see:\n", + " * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md\n", + " * https://github.com/tensorflow/addons\n", + " * https://github.com/tensorflow/io (for I/O related ops)\n", + "If you depend on functionality not listed there, please file an issue.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:\n", + "The TensorFlow contrib module will not be included in TensorFlow 2.0.\n", + "For more information, please see:\n", + " * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md\n", + " * https://github.com/tensorflow/addons\n", + " * https://github.com/tensorflow/io (for I/O related ops)\n", + "If you depend on functionality not listed there, please file an issue.\n", + "\n" + ] + } + ], + "source": [ + "for layer_name in selected_layers:\n", + " layer = model.get_layer(layer_name)\n", + " print(layer.name)\n", + " \n", + " # if you defined a custom Lucid Model class as in lucid/misc/custom_model.py\n", + " # you can define W depending on the spatial shape of the layer :\n", + " # W = grid_resolution // layer.shape[0] \n", + " # Setting W to obtain a grid of size grid_resolution\n", + "\n", + " result = render_activation_grid(img, model, layer=layer.name, W=W, n_groups=n_groups, n_steps=n_steps,verbose=verbose)\n", + " result = np.hstack(np.hstack(result))\n", + "\n", + " if save_images:\n", + " image_name = \"./spritemaps/activation_grid_%s.jpeg\" % layer_name.split(\"/\")[0]\n", + " save(result, image_name)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "collapsed_sections": [], + "name": "Activation Grids - Building Blocks of Interpretability", + "provenance": [], + "toc_visible": true, + "version": "0.3.2" + }, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.9" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} From c758cc226366c6ff083a7a8cc6b3b8732f6eb845 Mon Sep 17 00:00:00 2001 From: Thomas Constum Date: Tue, 16 Jun 2020 09:24:05 +0000 Subject: [PATCH 10/57] updating notebook of Spatial Attribution --- notebooks/building-blocks/AttrSpatial.ipynb | 15326 +++++++++--------- 1 file changed, 7588 insertions(+), 7738 deletions(-) diff --git a/notebooks/building-blocks/AttrSpatial.ipynb b/notebooks/building-blocks/AttrSpatial.ipynb index 53e98040..893d6e8b 100644 --- a/notebooks/building-blocks/AttrSpatial.ipynb +++ b/notebooks/building-blocks/AttrSpatial.ipynb @@ -1,7801 +1,7651 @@ { - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "JndnmDMp66FL" + }, + "source": [ + "##### Copyright 2018 Google LLC.\n", + "\n", + "Licensed under the Apache License, Version 2.0 (the \"License\");" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "cellView": "both", 
"colab": { - "name": "Spatial Attribution - Building Blocks of Interpretability", - "version": "0.3.2", - "views": {}, - "default_view": {}, - "provenance": [ - { - "file_id": "1uRqpBNPg-aW3tRU-uo-mWg6cQxuAquHW", - "timestamp": 1518822563463 - } - ], - "collapsed_sections": [] + "autoexec": { + "startup": false, + "wait_interval": 0 + } }, - "kernelspec": { - "name": "python2", - "display_name": "Python 2" - } + "colab_type": "code", + "id": "hMqWDc_m6rUC" + }, + "outputs": [], + "source": [ + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ] }, - "cells": [ - { - "metadata": { - "id": "JndnmDMp66FL", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "##### Copyright 2018 Google LLC.\n", - "\n", - "Licensed under the Apache License, Version 2.0 (the \"License\");" - ] + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "pNqKk1MmrakH" + }, + "source": [ + "# Spatial Attribution -- Building Blocks of Interpretability\n", + "\n", + "This colab notebook is part of our **Building Blocks of Intepretability** series exploring how intepretability techniques combine together to explain neural networks. If you haven't already, make sure to look at the [**corresponding paper**](https://distill.pub/2018/building-blocks) as well!\n", + "\n", + "This notebook demonstrates **Spatial Attribution**, a technique for exploring how detectors a different spatial positions in the network effected its output.\n", + "\n", + "
\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "This tutorial is based on [**Lucid**](https://github.com/tensorflow/lucid), a network for visualizing neural networks. Lucid is a kind of spiritual successor to DeepDream, but provides flexible abstractions so that it can be used for a wide range of interpretability research.\n", + "\n", + "**Note**: The easiest way to use this tutorial is [as a colab notebook](), which allows you to dive in with no setup. We recommend you enable a free GPU by going:\n", + "\n", + "> **Runtime**   →   **Change runtime type**   →   **Hardware Accelerator: GPU**\n", + "\n", + "Thanks for trying Lucid!\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "hOBBuzMaxU37" + }, + "source": [ + "# Install / Import / Load" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "UL1yOZtjqkcj" + }, + "source": [ + "This code depends on [Lucid](https://github.com/tensorflow/lucid) (our visualization library), and [svelte](https://svelte.technology/) (a web framework). The following cell will install both of them, and dependancies such as TensorFlow. And then import them as appropriate." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 83, + "output_extras": [ + {} + ] + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 6441, + "status": "ok", + "timestamp": 1520296700305, + "user": { + "displayName": "Christopher Olah", + "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", + "userId": "104171973056281402320" + }, + "user_tz": 480 + }, + "id": "AA17rJBLuyYH", + "outputId": "0e52d903-dbfa-4b20-ab3c-00a242061c63" + }, + "outputs": [], + "source": [ + "# !pip install --quiet --upgrade-strategy=only-if-needed git+https://github.com/tensorflow/lucid.git\n", + "# !npm install -g svelte-cli@2.2.0\n", + "# %tensorflow_version 1.x\n", + "\n", + "import numpy as np\n", + "import tensorflow as tf\n", + "\n", + "import lucid.modelzoo.vision_models as models\n", + "from lucid.misc.io import show\n", + "import lucid.optvis.render as render\n", + "from lucid.misc.io import show, load\n", + "from lucid.misc.io.reading import read\n", + "from lucid.misc.io.showing import _image_url\n", + "from lucid.misc.gradient_override import gradient_override_map\n", + "import lucid.scratch.web.svelte as lucid_svelte" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "0cUPBCRyG9xE" + }, + "source": [ + "# Attribution Code" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "FWHqimIqk2Bs" + }, + "outputs": [], + "source": [ + "model = models.InceptionV1()" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } }, + "colab_type": "code", + "id": "xIDcG0vjaDtk" + }, + "outputs": [ { - "metadata": { - "id": "hMqWDc_m6rUC", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - } - }, - "cellView": "both" - }, - "cell_type": "code", - "source": [ - "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", - "# you may not use this file except in compliance with the License.\n", - "# You may obtain a copy of the License at\n", 
- "#\n", - "# https://www.apache.org/licenses/LICENSE-2.0\n", - "#\n", - "# Unless required by applicable law or agreed to in writing, software\n", - "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", - "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", - "# See the License for the specific language governing permissions and\n", - "# limitations under the License." - ], - "execution_count": 0, - "outputs": [] + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/io/reading.py:124: The name tf.gfile.Open is deprecated. Please use tf.io.gfile.GFile instead.\n", + "\n" + ] }, { - "metadata": { - "id": "pNqKk1MmrakH", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "# Spatial Attribution -- Building Blocks of Interpretability\n", - "\n", - "This colab notebook is part of our **Building Blocks of Intepretability** series exploring how intepretability techniques combine together to explain neural networks. If you haven't already, make sure to look at the [**corresponding paper**](https://distill.pub/2018/building-blocks) as well!\n", - "\n", - "This notebook demonstrates **Spatial Attribution**, a technique for exploring how detectors a different spatial positions in the network effected its output.\n", - "\n", - "
\n", - "\n", - "\n", - "\n", - "
\n", - "\n", - "This tutorial is based on [**Lucid**](https://github.com/tensorflow/lucid), a network for visualizing neural networks. Lucid is a kind of spiritual successor to DeepDream, but provides flexible abstractions so that it can be used for a wide range of interpretability research.\n", - "\n", - "**Note**: The easiest way to use this tutorial is [as a colab notebook](), which allows you to dive in with no setup. We recommend you enable a free GPU by going:\n", - "\n", - "> **Runtime**   →   **Change runtime type**   →   **Hardware Accelerator: GPU**\n", - "\n", - "Thanks for trying Lucid!\n" - ] + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/io/reading.py:124: The name tf.gfile.Open is deprecated. Please use tf.io.gfile.GFile instead.\n", + "\n" + ] + } + ], + "source": [ + "labels_str = read(\"https://gist.githubusercontent.com/aaronpolhamus/964a4411c0906315deb9f4a3723aac57/raw/aa66dd9dbf6b56649fa3fab83659b2acbf3cbfd1/map_clsloc.txt\",mode='r')\n", + "labels = [line[line.find(\" \"):].strip() for line in labels_str.split(\"\\n\")]\n", + "labels = [label[label.find(\" \"):].strip().replace(\"_\", \" \") for label in labels]\n", + "labels = [\"dummy\"] + labels" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } }, + "colab_type": "code", + "id": "p1S73WcbKIdI" + }, + "outputs": [], + "source": [ + "def raw_class_spatial_attr(model, img, layer, label, override=None):\n", + " \"\"\"\n", + " How much did spatial positions at a given layer effect a output class?\n", + " Returns:\n", + " array containing attributions of layer 1 on layer2 where array[i,j] is\n", + " the influence of layer1 on spatial posittion (i,j) of layer 2.\n", + " \"\"\"\n", + "\n", + " # Set up a graph for doing attribution...\n", + " with tf.Graph().as_default(), tf.Session(), gradient_override_map(override or {}):\n", + " t_input = tf.placeholder_with_default(img, [None, None, 3])\n", + " T = render.import_model(model, t_input, t_input)\n", + "\n", + " # Compute activations\n", + " acts = T(layer).eval()\n", + "\n", + " if label is None: return np.zeros(acts.shape[1:-1])\n", + "\n", + " # Compute gradient between current layer and output score\n", + " score = T(\"softmax2_pre_activation\")[0, labels.index(label)]\n", + "\n", + " t_grad = tf.gradients([score], [T(layer)])[0] \n", + " grad = t_grad.eval({T(layer) : acts})\n", + "\n", + " # Linear approximation of effect of spatial position\n", + " return np.sum(acts * grad, -1)[0]" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "v_jkx5Niji4Q" + }, + "outputs": [], + "source": [ + "def raw_spatial_spatial_attr(model, img, layer1, layer2, override=None):\n", + " \"\"\"Attribution between spatial positions in two different layers.\n", + " \"\"\"\n", + " # Set up a graph for doing attribution...\n", + " with tf.Graph().as_default(), tf.Session(), gradient_override_map(override or {}):\n", + " t_input = tf.placeholder_with_default(img, [None, None, 3])\n", + " T = render.import_model(model, t_input, t_input)\n", + "\n", + " # Compute activations\n", + " acts1 = T(layer1).eval()\n", + " acts2 = T(layer2).eval({T(layer1) : acts1})\n", + "\n", + " # Construct gradient tensor\n", + " # Backprop from spatial position (n_x, n_y) in layer2 to 
layer1.\n", + " n_x, n_y = tf.placeholder(\"int32\", []), tf.placeholder(\"int32\", [])\n", + " # channelwise magnitude of layer2 activation for each spatial position:\n", + " layer2_mags = tf.sqrt(tf.reduce_sum(T(layer2)**2, -1))[0]\n", + " score = layer2_mags[n_x, n_y]\n", + " t_grad = tf.gradients([score], [T(layer1)])[0]\n", + "\n", + " # Compute attribution backwards from each position in layer2\n", + " attrs = [] #\n", + " for i in range(acts2.shape[1]):\n", + " attrs_ = []\n", + " for j in range(acts2.shape[2]):\n", + " grad = t_grad.eval({n_x : i, n_y : j, T(layer1) : acts1})\n", + " # linear approximation of impact (summed on channel dimension)\n", + " attr = np.sum(acts1 * grad, -1)[0]\n", + " attrs_.append(attr)\n", + " attrs.append(attrs_)\n", + " return np.asarray(attrs)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "OrlLGkWxKpmf" + }, + "outputs": [], + "source": [ + "def orange_blue(a,b,clip=False):\n", + " \"\"\"\n", + " Args:\n", + " a: spatial position - class gradients array of shape (y_dim, x_dim) of the current layer for class A\n", + " b: spatial position - class gradients array of shape (y_dim, x_dim) of the current layer for class B\n", + " clip: whether to clip negative gradients for a and b\n", + " \n", + " Returns:\n", + " Heatmap of the image for both classes A and B.\n", + " Red channel is for A, Green channel is the mean influence of both classes\n", + " and blue channel is for B.\n", + " \"\"\"\n", + " \n", + " if clip: # keeping positive values only\n", + " a,b = np.maximum(a,0), np.maximum(b,0)\n", + " arr = np.stack([a, (a + b)/2., b], -1)\n", + " arr /= 1e-2 + np.abs(arr).max()/1.5\n", + " arr += 0.3 \n", + " return arr" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "ku6hGbYmiQNI" + }, + "source": [ + "# Spatial Attribution Interface\n", + "\n", + "In this section, we build the *interface* for interacting with the different kinds of spatial attribution data that we can compute using the above functions. Feel free to skip over this if you aren't interested in that part. 
The main reason we're including it is so that you can change the interface if you want to.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 164, + "output_extras": [ + {} + ] + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 731, + "status": "ok", + "timestamp": 1520296735140, + "user": { + "displayName": "Christopher Olah", + "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", + "userId": "104171973056281402320" + }, + "user_tz": 480 + }, + "id": "X6TFCwbQhre2", + "outputId": "c53be0ec-3588-4282-d6ce-9bae16f939bb" + }, + "outputs": [ { - "metadata": { - "id": "hOBBuzMaxU37", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "# Install / Import / Load" - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "Trying to build svelte component from html...\n", + "svelte compile --format iife /tmp/svelte_7f5dd2kq/SpatialWidget_8726ca0f_ee10_45c4_aa24_99f35b762187.html > /tmp/svelte_7f5dd2kq/SpatialWidget_8726ca0f_ee10_45c4_aa24_99f35b762187.js\n", + "b'svelte version 1.64.1\\ncompiling ../../../../../../tmp/svelte_7f5dd2kq/SpatialWidget_8726ca0f_ee10_45c4_aa24_99f35b762187.html...\\n(4:4) \\xe2\\x80\\x93 A11y: element should have an alt attribute\\n(5:4) \\xe2\\x80\\x93 A11y: element should have an alt attribute\\n(21:4) \\xe2\\x80\\x93 A11y: element should have an alt attribute\\n(22:4) \\xe2\\x80\\x93 A11y: element should have an alt attribute\\n'\n" + ] + } + ], + "source": [ + "%%html_define_svelte SpatialWidget\n", + "\n", + "
\n", + "
\n", + " \n", + " \n", + "\n", + " \n", + " {{#each xs1 as x}}\n", + " {{#each ys1 as y}}\n", + " \n", + " {{/each}}\n", + " {{/each}}\n", + " \n", + "\n", + "
{{layer1}}
\n", + "
\n", + "\n", + "
\n", + " \n", + " \n", + "\n", + " \n", + " {{#each xs2 as x}}\n", + " {{#each ys2 as y}}\n", + " \n", + " {{/each}}\n", + " {{/each}}\n", + " \n", + "\n", + "
{{layer2}}
\n", + "
\n", + " \n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "zYaLZ6Kd2xGC" + }, + "outputs": [], + "source": [ + "def image_url_grid(grid):\n", + " return [[_image_url(img) for img in line] for line in grid ]\n", + "\n", + "\n", + "def spatial_spatial_attr(model, img, layer1, layer2, hint_label_1=None, hint_label_2=None, override=None):\n", + " hint1 = orange_blue(\n", + " raw_class_spatial_attr(model, img, layer1, hint_label_1, override=override),\n", + " raw_class_spatial_attr(model, img, layer1, hint_label_2, override=override),\n", + " clip=True\n", + " )\n", + " hint2 = orange_blue(\n", + " raw_class_spatial_attr(model, img, layer2, hint_label_1, override=override),\n", + " raw_class_spatial_attr(model, img, layer2, hint_label_2, override=override),\n", + " clip=True\n", + " )\n", + "\n", + " attrs = raw_spatial_spatial_attr(model, img, layer1, layer2, override=override)\n", + " attrs = attrs / attrs.max()\n", + "\n", + " lucid_svelte.SpatialWidget({\n", + " \"spritemap1\": image_url_grid(attrs),\n", + " \"spritemap2\": image_url_grid(attrs.transpose(2,3,0,1)),\n", + " \"size1\": attrs.shape[3],\n", + " \"layer1\": layer1,\n", + " \"size2\": attrs.shape[0],\n", + " \"layer2\": layer2,\n", + " \"img\" : _image_url(img),\n", + " \"hint1\": _image_url(hint1),\n", + " \"hint2\": _image_url(hint2)\n", + " })" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "gQ1bysFVHDnL" + }, + "source": [ + "# Simple Attribution Example" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 316, + "output_extras": [ + {}, + {} + ] }, + "colab_type": "code", + "executionInfo": { + "elapsed": 4795, + "status": "ok", + "timestamp": 1520296765544, + "user": { + "displayName": "Christopher Olah", + "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", + "userId": "104171973056281402320" + }, + "user_tz": 480 + }, + "id": "dIpwT7tMk-t9", + "outputId": "d049a5f8-5d1a-4d89-89da-1246d2da3a18" + }, + "outputs": [ { - "metadata": { - "id": "UL1yOZtjqkcj", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "This code depends on [Lucid](https://github.com/tensorflow/lucid) (our visualization library), and [svelte](https://svelte.technology/) (a web framework). The following cell will install both of them, and dependancies such as TensorFlow. And then import them as appropriate." - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/gradient_override.py:103: The name tf.get_default_graph is deprecated. 
Please use tf.compat.v1.get_default_graph instead.\n", + "\n" + ] }, { - "metadata": { - "id": "AA17rJBLuyYH", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - }, - "output_extras": [ - {} - ], - "base_uri": "https://localhost:8080/", - "height": 83 - }, - "outputId": "0e52d903-dbfa-4b20-ab3c-00a242061c63", - "executionInfo": { - "status": "ok", - "timestamp": 1520296700305, - "user_tz": 480, - "elapsed": 6441, - "user": { - "displayName": "Christopher Olah", - "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", - "userId": "104171973056281402320" - } - } - }, - "cell_type": "code", - "source": [ - "!pip install --quiet lucid==0.0.5\n", - "!npm install -g svelte-cli@2.2.0\n", - "\n", - "import numpy as np\n", - "import tensorflow as tf\n", - "\n", - "import lucid.modelzoo.vision_models as models\n", - "from lucid.misc.io import show\n", - "import lucid.optvis.render as render\n", - "from lucid.misc.io import show, load\n", - "from lucid.misc.io.reading import read\n", - "from lucid.misc.io.showing import _image_url\n", - "from lucid.misc.gradient_override import gradient_override_map\n", - "import lucid.scratch.web.svelte as lucid_svelte" - ], - "execution_count": 0, - "outputs": [ - { - "output_type": "stream", - "text": [ - "\u001b[K\u001b[?25h/tools/node/bin/svelte -> /tools/node/lib/node_modules/svelte-cli/bin.js\n", - "/tools/node/lib\n", - "└── \u001b[40m\u001b[33msvelte-cli@2.2.0\u001b[39m\u001b[49m \n", - "\n" - ], - "name": "stdout" - } - ] + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/gradient_override.py:103: The name tf.get_default_graph is deprecated. Please use tf.compat.v1.get_default_graph instead.\n", + "\n" + ] }, { - "metadata": { - "id": "0cUPBCRyG9xE", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "# Attribution Code" - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/io/loading.py:129: The name tf.GraphDef is deprecated. Please use tf.compat.v1.GraphDef instead.\n", + "\n" + ] }, { - "metadata": { - "id": "FWHqimIqk2Bs", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - } - } - }, - "cell_type": "code", - "source": [ - "model = models.InceptionV1()\n", - "model.load_graphdef()" - ], - "execution_count": 0, - "outputs": [] + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/io/loading.py:129: The name tf.GraphDef is deprecated. 
Please use tf.compat.v1.GraphDef instead.\n", + "\n" + ] }, { - "metadata": { - "id": "xIDcG0vjaDtk", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - } - } - }, - "cell_type": "code", - "source": [ - "labels_str = read(\"https://gist.githubusercontent.com/aaronpolhamus/964a4411c0906315deb9f4a3723aac57/raw/aa66dd9dbf6b56649fa3fab83659b2acbf3cbfd1/map_clsloc.txt\")\n", - "labels = [line[line.find(\" \"):].strip() for line in labels_str.split(\"\\n\")]\n", - "labels = [label[label.find(\" \"):].strip().replace(\"_\", \" \") for label in labels]\n", - "labels = [\"dummy\"] + labels" - ], - "execution_count": 0, - "outputs": [] + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/math_grad.py:1375: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use tf.where in 2.0, which has the same broadcast rule as np.where\n" + ] }, { - "metadata": { - "id": "p1S73WcbKIdI", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - } - } - }, - "cell_type": "code", - "source": [ - "def raw_class_spatial_attr(img, layer, label, override=None):\n", - " \"\"\"How much did spatial positions at a given layer effect a output class?\"\"\"\n", - "\n", - " # Set up a graph for doing attribution...\n", - " with tf.Graph().as_default(), tf.Session(), gradient_override_map(override or {}):\n", - " t_input = tf.placeholder_with_default(img, [None, None, 3])\n", - " T = render.import_model(model, t_input, t_input)\n", - " \n", - " # Compute activations\n", - " acts = T(layer).eval()\n", - " \n", - " if label is None: return np.zeros(acts.shape[1:-1])\n", - " \n", - " # Compute gradient\n", - " score = T(\"softmax2_pre_activation\")[0, labels.index(label)]\n", - " t_grad = tf.gradients([score], [T(layer)])[0] \n", - " grad = t_grad.eval({T(layer) : acts})\n", - " \n", - " # Linear approximation of effect of spatial position\n", - " return np.sum(acts * grad, -1)[0]" - ], - "execution_count": 0, - "outputs": [] + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/math_grad.py:1375: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use tf.where in 2.0, which has the same broadcast rule as np.where\n" + ] }, { - "metadata": { - "id": "v_jkx5Niji4Q", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - } - } - }, - "cell_type": "code", - "source": [ - "def raw_spatial_spatial_attr(img, layer1, layer2, override=None):\n", - " \"\"\"Attribution between spatial positions in two different layers.\"\"\"\n", - "\n", - " # Set up a graph for doing attribution...\n", - " with tf.Graph().as_default(), tf.Session(), gradient_override_map(override or {}):\n", - " t_input = tf.placeholder_with_default(img, [None, None, 3])\n", - " T = render.import_model(model, t_input, t_input)\n", - " \n", - " # Compute activations\n", - " acts1 = T(layer1).eval()\n", - " acts2 = T(layer2).eval({T(layer1) : acts1})\n", - " \n", - " # Construct gradient tensor\n", - " # Backprop from spatial position (n_x, n_y) in layer2 to layer1.\n", - " n_x, n_y = tf.placeholder(\"int32\", []), tf.placeholder(\"int32\", [])\n", - " layer2_mags = 
tf.sqrt(tf.reduce_sum(T(layer2)**2, -1))[0]\n", - " score = layer2_mags[n_x, n_y]\n", - " t_grad = tf.gradients([score], [T(layer1)])[0]\n", - " \n", - " # Compute attribution backwards from each positin in layer2\n", - " attrs = []\n", - " for i in range(acts2.shape[1]):\n", - " attrs_ = []\n", - " for j in range(acts2.shape[2]):\n", - " grad = t_grad.eval({n_x : i, n_y : j, T(layer1) : acts1})\n", - " # linear approximation of imapct\n", - " attr = np.sum(acts1 * grad, -1)[0]\n", - " attrs_.append(attr)\n", - " attrs.append(attrs_)\n", - " return np.asarray(attrs)" + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " ], - "execution_count": 0, - "outputs": [] + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "img = load(\"https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png\")\n", + "\n", + "spatial_spatial_attr(model, img, \"mixed4d\", \"mixed5a\", hint_label_1=\"Labrador retriever\", hint_label_2=\"tiger cat\")" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 316, + "output_extras": [ + {}, + {} + ] + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 12772, + "status": "ok", + "timestamp": 1520296778377, + "user": { + "displayName": "Christopher Olah", + "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", + "userId": "104171973056281402320" + }, + "user_tz": 480 }, + "id": "FUnCHkZPPjUH", + "outputId": "256b88b7-fd9b-4ec7-9200-4db37c413511" + }, + "outputs": [ { - "metadata": { - "id": "OrlLGkWxKpmf", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - } - } - }, - "cell_type": "code", - "source": [ - "def orange_blue(a,b,clip=False):\n", - " if clip:\n", - " a,b = np.maximum(a,0), np.maximum(b,0)\n", - " arr = np.stack([a, (a + b)/2., b], -1)\n", - " arr /= 1e-2 + np.abs(arr).max()/1.5\n", - " arr += 0.3\n", - " return arr" + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " ], - "execution_count": 0, - "outputs": [] + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "img = load(\"https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png\")\n", + "\n", + "spatial_spatial_attr(model, img, \"mixed4a\", \"mixed4d\", hint_label_1=\"Labrador retriever\", hint_label_2=\"tiger cat\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "67UCOiFUHJ8U" + }, + "source": [ + "# Attribution With GradPool override hack" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "t4_HNz7a9icq" + }, + "outputs": [], + "source": [ + "def blur(x, w1, w2):\n", + " \"\"\"Spatially blur a 4D tensor.\"\"\"\n", + " x_ = tf.pad(x, [(0,0), (1,1), (1,1), (0,0)], \"CONSTANT\")\n", + " x_jitter_hv = (x_[:, 2:, 1:-1] + x_[:, :-2, 1:-1] + x_[:, 1:-1, 2:] + x_[:, 1:-1, :-2])/4.\n", + " x_jitter_diag = (x_[:, 2:, 2:] + x_[:, 2:, :-2] + x_[:, :-2, 2:] + x_[:, :-2, :-2])/4.\n", + " return (1-w1-w2)*x + w1*x_jitter_hv + w2*x_jitter_diag\n", + "\n", + "def make_MaxSmoothPoolGrad(blur_hack=False):\n", + " \"\"\"Create a relaxed version of the MaxPool gradient.\n", + "\n", + " GoogLeNet's use of MaxPooling creates a lot of gradient artifacts. This\n", + " function creates a fake gradient that gets rid of them, reducing distractions\n", + " in our UI demos.\n", + "\n", + " Be very very careful about using this in real life. It hides model behavior\n", + " from you. This can help you see other things more clearly, but in most cases\n", + " you probably should do something else.\n", + "\n", + " We're actively researching what's going on here.\n", + "\n", + " Args:\n", + " blur_hack: If True, use the second less principled trick of slightly\n", + " blurring the gradient to get rid of checkerboard artifacts.\n", + "\n", + " Returns:\n", + " Gradient function.\n", + "\n", + " \"\"\"\n", + " def MaxPoolGrad(op, grad):\n", + " inp = op.inputs[0]\n", + "\n", + " # Hack 1 (moderately principled): use a relaxation of the MaxPool grad\n", + " # ---------------------------------------------------------------------\n", + " #\n", + " # Construct a pooling function where, if we backprop through it,\n", + " # gradients get allocated proportional to the input activation.\n", + " # Then backpropr through that instead.\n", + " #\n", + " # In some ways, this is kind of spiritually similar to SmoothGrad\n", + " # (Smilkov et al.). To see the connection, note that MaxPooling introduces\n", + " # a pretty arbitrary discontinuity to your gradient; with the right\n", + " # distribution of input noise to the MaxPool op, you'd probably smooth out\n", + " # to this. It seems like this is one of the most natural ways to smooth.\n", + " #\n", + " # We'll probably talk about this and related things in future work.\n", + "\n", + " op_args = [op.get_attr(\"ksize\"), op.get_attr(\"strides\"), op.get_attr(\"padding\")]\n", + " smooth_out = tf.nn.avg_pool(inp**2, *op_args)/ (1e-2+tf.nn.avg_pool(tf.abs(inp), *op_args))\n", + " inp_smooth_grad = tf.gradients(smooth_out, [inp], grad)[0]\n", + "\n", + " # Hack 2 (if argument is set; not very principled) \n", + " # -------------------------------------------------\n", + " #\n", + " # Slightly blur gradient to get rid of checkerboard artifacts.\n", + " # Note, this really isn't principled. 
We're working around / hiding a bad\n", + " # property of the model. It should really be fixed by better model design.\n", + " #\n", + " # We do this so that the artifacts don't distract from the UI demo, but we\n", + " # don't endorse people doing it in real applications.\n", + "\n", + " if blur_hack:\n", + " inp_smooth_grad = blur(inp_smooth_grad, 0.5, 0.25)\n", + "\n", + " return inp_smooth_grad\n", + " return MaxPoolGrad" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "4KLEe280v-_z" + }, + "outputs": [], + "source": [ + "def compare_attrs(model, img, layer1, layer2, hint_label_1, hint_label_2):\n", + " print(\"Normal gradient:\\n\")\n", + "\n", + " spatial_spatial_attr(model, img, layer1, layer2,\n", + " hint_label_1=hint_label_1, hint_label_2=hint_label_2)\n", + "\n", + " print(\"\\nSmooth MaxPool Grad:\")\n", + " print(\"note the subtle checkerboard patterns)\\n\")\n", + "\n", + " spatial_spatial_attr(model, img, layer1, layer2,\n", + " hint_label_1=hint_label_1, hint_label_2=hint_label_2,\n", + " override={\"MaxPool\": make_MaxSmoothPoolGrad()})\n", + "\n", + " print(\"\\nSmooth + Blur MaxPool Grad:\\n\")\n", + "\n", + " spatial_spatial_attr(model, img, layer1, layer2,\n", + " hint_label_1=hint_label_1, hint_label_2=hint_label_2,\n", + " override={\"MaxPool\": make_MaxSmoothPoolGrad(blur_hack=True)})" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 914, + "output_extras": [ + {}, + {}, + {}, + {}, + {}, + {} + ] }, + "colab_type": "code", + "executionInfo": { + "elapsed": 16731, + "status": "ok", + "timestamp": 1520296867993, + "user": { + "displayName": "Christopher Olah", + "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", + "userId": "104171973056281402320" + }, + "user_tz": 480 + }, + "id": "AR9LqXn0-Eh0", + "outputId": "ef2014c9-2598-4ad1-fc5c-9cbd8f463a7b" + }, + "outputs": [ { - "metadata": { - "id": "ku6hGbYmiQNI", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "# Spatial Attribution Interface\n", - "\n", - "In this section, we build the *interface* for interacting with the different kinds of spatial attribution data that we can compute using the above functions. Feel free to skip over this if you aren't interested in that part. The main reason we're including it is so that you can change the interface if you want to.\n" - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "Normal gradient:\n", + "\n" + ] }, { - "metadata": { - "id": "X6TFCwbQhre2", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - }, - "output_extras": [ - {} - ], - "base_uri": "https://localhost:8080/", - "height": 164 - }, - "outputId": "c53be0ec-3588-4282-d6ce-9bae16f939bb", - "executionInfo": { - "status": "ok", - "timestamp": 1520296735140, - "user_tz": 480, - "elapsed": 731, - "user": { - "displayName": "Christopher Olah", - "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", - "userId": "104171973056281402320" - } - } - }, - "cell_type": "code", - "source": [ - "%%html_define_svelte SpatialWidget\n", - "\n", - "
\n", - "
\n", - " \n", - " \n", - "\n", - " \n", - " {{#each xs1 as x}}\n", - " {{#each ys1 as y}}\n", - " \n", - " {{/each}}\n", - " {{/each}}\n", - " \n", - "\n", - "
{{layer1}}
\n", - "
\n", - "\n", - "
\n", - " \n", - " \n", - "\n", - " \n", - " {{#each xs2 as x}}\n", - " {{#each ys2 as y}}\n", - " \n", - " {{/each}}\n", - " {{/each}}\n", - " \n", - "\n", - "
{{layer2}}
\n", - "
\n", - " \n", - "
\n", - "\n", - "\n", - "\n", - "\n", - "" + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " ], - "execution_count": 0, - "outputs": [ - { - "output_type": "stream", - "text": [ - "Trying to build svelte component from html...\n", - "svelte compile --format iife /tmp/svelte_g3iLlo/SpatialWidget_3725625.html > /tmp/svelte_g3iLlo/SpatialWidget_3725625.js\n", - "svelte version 1.56.2\n", - "compiling ../tmp/svelte_g3iLlo/SpatialWidget_3725625.html...\n", - "(4:4) – A11y: element should have an alt attribute\n", - "(5:4) – A11y: element should have an alt attribute\n", - "(21:4) – A11y: element should have an alt attribute\n", - "(22:4) – A11y: element should have an alt attribute\n", - "\n" - ], - "name": "stdout" - } + "text/plain": [ + "" ] + }, + "metadata": {}, + "output_type": "display_data" }, { - "metadata": { - "id": "zYaLZ6Kd2xGC", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - } - } - }, - "cell_type": "code", - "source": [ - "def image_url_grid(grid):\n", - " return [[_image_url(img) for img in line] for line in grid ]\n", - "\n", - "def spatial_spatial_attr(img, layer1, layer2, hint_label_1=None, hint_label_2=None, override=None):\n", - " \n", - " hint1 = orange_blue(\n", - " raw_class_spatial_attr(img, layer1, hint_label_1, override=override),\n", - " raw_class_spatial_attr(img, layer1, hint_label_2, override=override),\n", - " clip=True\n", - " )\n", - " hint2 = orange_blue(\n", - " raw_class_spatial_attr(img, layer2, hint_label_1, override=override),\n", - " raw_class_spatial_attr(img, layer2, hint_label_2, override=override),\n", - " clip=True\n", - " )\n", - "\n", - " attrs = raw_spatial_spatial_attr(img, layer1, layer2, override=override)\n", - " attrs = attrs / attrs.max()\n", - " \n", - " lucid_svelte.SpatialWidget({\n", - " \"spritemap1\": image_url_grid(attrs),\n", - " \"spritemap2\": image_url_grid(attrs.transpose(2,3,0,1)),\n", - " \"size1\": attrs.shape[3],\n", - " \"layer1\": layer1,\n", - " \"size2\": attrs.shape[0],\n", - " \"layer2\": layer2,\n", - " \"img\" : _image_url(img),\n", - " \"hint1\": _image_url(hint1),\n", - " \"hint2\": _image_url(hint2)\n", - " })" - ], - "execution_count": 0, - "outputs": [] + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Smooth MaxPool Grad:\n", + "note the subtle checkerboard patterns)\n", + "\n" + ] }, { - "metadata": { - "id": "gQ1bysFVHDnL", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "# Simple Attribution Example" + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " + ], + "text/plain": [ + "" ] + }, + "metadata": {}, + "output_type": "display_data" }, { - "metadata": { - "id": "dIpwT7tMk-t9", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - }, - "output_extras": [ - {}, - {} - ], - "base_uri": "https://localhost:8080/", - "height": 316 - }, - "outputId": "d049a5f8-5d1a-4d89-89da-1246d2da3a18", - "executionInfo": { - "status": "ok", - "timestamp": 1520296765544, - "user_tz": 480, - "elapsed": 4795, - "user": { - "displayName": "Christopher Olah", - "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", - "userId": "104171973056281402320" - } - } - }, - "cell_type": "code", - "source": [ - "img = load(\"https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png\")\n", - "\n", - "spatial_spatial_attr(img, \"mixed4d\", \"mixed5a\", hint_label_1=\"Labrador retriever\", hint_label_2=\"tiger cat\")\n", - "\n", - "print \"\\nHover on images to interact! :D\\n\"" - ], - "execution_count": 0, - "outputs": [ - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "\n", - "
\n", - " \n", - " \n", - " " - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "\n", - "Hover on images to interact! :D\n", - "\n" - ], - "name": "stdout" - } - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Smooth + Blur MaxPool Grad:\n", + "\n" + ] }, { - "metadata": { - "id": "FUnCHkZPPjUH", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - }, - "output_extras": [ - {}, - {} - ], - "base_uri": "https://localhost:8080/", - "height": 316 - }, - "outputId": "256b88b7-fd9b-4ec7-9200-4db37c413511", - "executionInfo": { - "status": "ok", - "timestamp": 1520296778377, - "user_tz": 480, - "elapsed": 12772, - "user": { - "displayName": "Christopher Olah", - "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", - "userId": "104171973056281402320" - } - } - }, - "cell_type": "code", - "source": [ - "img = load(\"https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png\")\n", - "\n", - "spatial_spatial_attr(img, \"mixed4a\", \"mixed4d\", hint_label_1=\"Labrador retriever\", hint_label_2=\"tiger cat\")\n", - "\n", - "print \"\\nHover on images to interact! :D\\n\"" + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " ], - "execution_count": 0, - "outputs": [ - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "\n", - "
\n", - " \n", - " \n", - " " - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "\n", - "Hover on images to interact! :D\n", - "\n" - ], - "name": "stdout" - } + "text/plain": [ + "" ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "img = load(\"https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png\")\n", + "\n", + "compare_attrs(model, img, \"mixed4d\", \"mixed5a\", \"Labrador retriever\", \"tiger cat\")" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 914, + "output_extras": [ + {}, + {}, + {}, + {}, + {}, + {} + ] }, + "colab_type": "code", + "executionInfo": { + "elapsed": 16673, + "status": "ok", + "timestamp": 1520296894369, + "user": { + "displayName": "Christopher Olah", + "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", + "userId": "104171973056281402320" + }, + "user_tz": 480 + }, + "id": "ZprncSrY1FfQ", + "outputId": "43304031-eef4-4d44-b530-e74230cf47ef" + }, + "outputs": [ { - "metadata": { - "id": "67UCOiFUHJ8U", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "# Attribution With GradPool override hack" - ] + "name": "stdout", + "output_type": "stream", + "text": [ + "Normal gradient:\n", + "\n" + ] }, { - "metadata": { - "id": "t4_HNz7a9icq", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - } - } - }, - "cell_type": "code", - "source": [ - "def blur(x, w1, w2):\n", - " \"\"\"Spatially blur a 4D tensor.\"\"\"\n", - " x_ = tf.pad(x, [(0,0), (1,1), (1,1), (0,0)], \"CONSTANT\")\n", - " x_jitter_hv = (x_[:, 2:, 1:-1] + x_[:, :-2, 1:-1] + x_[:, 1:-1, 2:] + x_[:, 1:-1, :-2])/4.\n", - " x_jitter_diag = (x_[:, 2:, 2:] + x_[:, 2:, :-2] + x_[:, :-2, 2:] + x_[:, :-2, :-2])/4.\n", - " return (1-w1-w2)*x + w1*x_jitter_hv + w2*x_jitter_diag\n", - "\n", - "def make_MaxSmoothPoolGrad(blur_hack=False):\n", - " \"\"\"Create a relaxed version of the MaxPool gradient.\n", - " \n", - " GoogLeNet's use of MaxPooling creates a lot of gradient artifacts. This\n", - " function creates a fake gradient that gets rid of them, reducing distractions\n", - " in our UI demos.\n", - " \n", - " Be very very careful about using this in real life. It hides model behavior\n", - " from you. This can help you see other things more clearly, but in most cases\n", - " you probably should do something else.\n", - " \n", - " We're actively researching what's going on here.\n", - " \n", - " Args:\n", - " blur_hack: If True, use the second less principled trick of slightly\n", - " blurring the gradient to get rid of checkerboard artifacts.\n", - " \n", - " Returns:\n", - " Gradient function.\n", - " \n", - " \"\"\"\n", - " def MaxPoolGrad(op, grad):\n", - " inp = op.inputs[0]\n", - " \n", - " # Hack 1 (moderately principled): use a relaxation of the MaxPool grad\n", - " # ---------------------------------------------------------------------\n", - " #\n", - " # Construct a pooling function where, if we backprop through it,\n", - " # gradients get allocated proportional to the input activation.\n", - " # Then backpropr through that instead.\n", - " #\n", - " # In some ways, this is kind of spiritually similar to SmoothGrad\n", - " # (Smilkov et al.). 
To see the connection, note that MaxPooling introduces\n", - " # a pretty arbitrary discontinuity to your gradient; with the right\n", - " # distribution of input noise to the MaxPool op, you'd probably smooth out\n", - " # to this. It seems like this is one of the most natural ways to smooth.\n", - " #\n", - " # We'll probably talk about this and related things in future work.\n", - " \n", - " op_args = [op.get_attr(\"ksize\"), op.get_attr(\"strides\"), op.get_attr(\"padding\")]\n", - " smooth_out = tf.nn.avg_pool(inp**2, *op_args)/ (1e-2+tf.nn.avg_pool(tf.abs(inp), *op_args))\n", - " inp_smooth_grad = tf.gradients(smooth_out, [inp], grad)[0]\n", - " \n", - " # Hack 2 (if argument is set; not very principled) \n", - " # -------------------------------------------------\n", - " #\n", - " # Slightly blur gradient to get rid of checkerboard artifacts.\n", - " # Note, this really isn't principled. We're working around / hiding a bad\n", - " # property of the model. It should really be fixed by better model design.\n", - " #\n", - " # We do this so that the artifacts don't distract from the UI demo, but we\n", - " # don't endorse people doing it in real applications.\n", - " \n", - " if blur_hack:\n", - " inp_smooth_grad = blur(inp_smooth_grad, 0.5, 0.25)\n", - " \n", - " return inp_smooth_grad\n", - " return MaxPoolGrad" + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " ], - "execution_count": 0, - "outputs": [] + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" }, { - "metadata": { - "id": "4KLEe280v-_z", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - } - } - }, - "cell_type": "code", - "source": [ - "def compare_attrs(img, layer1, layer2, hint_label_1, hint_label_2):\n", - " print \"Normal gradient:\\n\"\n", - "\n", - " spatial_spatial_attr(img, layer1, layer2,\n", - " hint_label_1=hint_label_1, hint_label_2=hint_label_2)\n", - "\n", - " print \"\\nSmooth MaxPool Grad:\"\n", - " print \"(note the subtle checkerboard patterns)\\n\"\n", - "\n", - " spatial_spatial_attr(img, layer1, layer2,\n", - " hint_label_1=hint_label_1, hint_label_2=hint_label_2,\n", - " override={\"MaxPool\": make_MaxSmoothPoolGrad()})\n", - "\n", - " print \"\\nSmooth + Blur MaxPool Grad:\\n\"\n", - "\n", - " spatial_spatial_attr(img, layer1, layer2,\n", - " hint_label_1=hint_label_1, hint_label_2=hint_label_2,\n", - " override={\"MaxPool\": make_MaxSmoothPoolGrad(blur_hack=True)})" - ], - "execution_count": 0, - "outputs": [] + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Smooth MaxPool Grad:\n", + "note the subtle checkerboard patterns)\n", + "\n" + ] }, { - "metadata": { - "id": "AR9LqXn0-Eh0", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - }, - "output_extras": [ - {}, - {}, - {}, - {}, - {}, - {} - ], - "base_uri": "https://localhost:8080/", - "height": 914 - }, - "outputId": "ef2014c9-2598-4ad1-fc5c-9cbd8f463a7b", - "executionInfo": { - "status": "ok", - "timestamp": 1520296867993, - "user_tz": 480, - "elapsed": 16731, - "user": { - "displayName": "Christopher Olah", - "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", - "userId": "104171973056281402320" - } - } - }, - "cell_type": "code", - "source": [ - "img = load(\"https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png\")\n", - "\n", - "compare_attrs(img, \"mixed4d\", \"mixed5a\", \"Labrador retriever\", \"tiger cat\")" + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " ], - "execution_count": 0, - "outputs": [ - { - "output_type": "stream", - "text": [ - "Normal gradient:\n", - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "\n", - "
\n", - " \n", - " \n", - " " - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "\n", - "Smooth MaxPool Grad:\n", - "(note the subtle checkerboard patterns)\n", - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "\n", - "
\n", - " \n", - " \n", - " " - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "\n", - "Smooth + Blur MaxPool Grad:\n", - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "\n", - "
\n", - " \n", - " \n", - " " - ] - }, - "metadata": { - "tags": [] - } - } + "text/plain": [ + "" ] + }, + "metadata": {}, + "output_type": "display_data" }, { - "metadata": { - "id": "ZprncSrY1FfQ", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - }, - "output_extras": [ - {}, - {}, - {}, - {}, - {}, - {} - ], - "base_uri": "https://localhost:8080/", - "height": 914 - }, - "outputId": "43304031-eef4-4d44-b530-e74230cf47ef", - "executionInfo": { - "status": "ok", - "timestamp": 1520296894369, - "user_tz": 480, - "elapsed": 16673, - "user": { - "displayName": "Christopher Olah", - "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", - "userId": "104171973056281402320" - } - } - }, - "cell_type": "code", - "source": [ - "img = load(\"https://storage.googleapis.com/lucid-static/building-blocks/examples/flowers.png\")\n", - "\n", - "compare_attrs(img, \"mixed4d\", \"mixed5a\", \"lemon\", \"vase\")" + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Smooth + Blur MaxPool Grad:\n", + "\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " ], - "execution_count": 0, - "outputs": [ - { - "output_type": "stream", - "text": [ - "Normal gradient:\n", - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "\n", - "
\n", - " \n", - " \n", - " " - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "\n", - "Smooth MaxPool Grad:\n", - "(note the subtle checkerboard patterns)\n", - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "\n", - "
\n", - " \n", - " \n", - " " - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "\n", - "Smooth + Blur MaxPool Grad:\n", - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "\n", - "
\n", - " \n", - " \n", - " " - ] - }, - "metadata": { - "tags": [] - } - } + "text/plain": [ + "" ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "img = load(\"https://storage.googleapis.com/lucid-static/building-blocks/examples/flowers.png\")\n", + "\n", + "compare_attrs(model, img, \"mixed4d\", \"mixed5a\", \"lemon\", \"vase\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } }, + "colab_type": "code", + "id": "f47jydotwV5K" + }, + "outputs": [], + "source": [] + } + ], + "metadata": { + "colab": { + "collapsed_sections": [], + "default_view": {}, + "name": "Spatial Attribution - Building Blocks of Interpretability", + "provenance": [ { - "metadata": { - "id": "f47jydotwV5K", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - } - } - }, - "cell_type": "code", - "source": [ - "" - ], - "execution_count": 0, - "outputs": [] + "file_id": "1uRqpBNPg-aW3tRU-uo-mWg6cQxuAquHW", + "timestamp": 1518822563463 } - ] -} \ No newline at end of file + ], + "version": "0.3.2", + "views": {} + }, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.9" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} From 88607b5a8b0898d3b547cca5e331016200cecd83 Mon Sep 17 00:00:00 2001 From: Thomas Constum Date: Tue, 16 Jun 2020 09:50:56 +0000 Subject: [PATCH 11/57] adding a Jupyter version of the notebook Spatial Attribution --- .../jupyter_versions/AttrSpatialJupyter.ipynb | 982 ++++++++++++++++++ 1 file changed, 982 insertions(+) create mode 100644 notebooks/building-blocks/jupyter_versions/AttrSpatialJupyter.ipynb diff --git a/notebooks/building-blocks/jupyter_versions/AttrSpatialJupyter.ipynb b/notebooks/building-blocks/jupyter_versions/AttrSpatialJupyter.ipynb new file mode 100644 index 00000000..c4a8ce4c --- /dev/null +++ b/notebooks/building-blocks/jupyter_versions/AttrSpatialJupyter.ipynb @@ -0,0 +1,982 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "JndnmDMp66FL" + }, + "source": [ + "##### Copyright 2018 Google LLC.\n", + "\n", + "Licensed under the Apache License, Version 2.0 (the \"License\");" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "cellView": "both", + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "hMqWDc_m6rUC" + }, + "outputs": [], + "source": [ + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." 
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "pNqKk1MmrakH"
+   },
+   "source": [
+    "# Spatial Attribution -- Building Blocks of Interpretability\n",
+    "\n",
+    "This colab notebook is part of our **Building Blocks of Interpretability** series exploring how interpretability techniques combine together to explain neural networks. If you haven't already, make sure to look at the [**corresponding paper**](https://distill.pub/2018/building-blocks) as well!\n",
+    "\n",
+    "This notebook demonstrates **Spatial Attribution**, a technique for exploring how detectors at different spatial positions in the network affected its output.\n",
+    "\n",
+    "
\n", + "\n", + "\n", + "\n", + "
\n", + "\n", + "This tutorial is based on [**Lucid**](https://github.com/tensorflow/lucid), a network for visualizing neural networks. Lucid is a kind of spiritual successor to DeepDream, but provides flexible abstractions so that it can be used for a wide range of interpretability research.\n", + "\n", + "**Note**: The easiest way to use this tutorial is [as a colab notebook](), which allows you to dive in with no setup. We recommend you enable a free GPU by going:\n", + "\n", + "> **Runtime**   →   **Change runtime type**   →   **Hardware Accelerator: GPU**\n", + "\n", + "Thanks for trying Lucid!\n", + "\n", + "#### **This notebook is a Jupyter version of the original Google Colab Notebook. This version adds widgets to facilitate the use of Lucid on your own images.**" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "hOBBuzMaxU37" + }, + "source": [ + "# Install / Import / Load" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "UL1yOZtjqkcj" + }, + "source": [ + "This code depends on [Lucid](https://github.com/tensorflow/lucid) (our visualization library), and [svelte](https://svelte.technology/) (a web framework). The following cell will install both of them, and dependancies such as TensorFlow. And then import them as appropriate." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 83, + "output_extras": [ + {} + ] + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 6441, + "status": "ok", + "timestamp": 1520296700305, + "user": { + "displayName": "Christopher Olah", + "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", + "userId": "104171973056281402320" + }, + "user_tz": 480 + }, + "id": "AA17rJBLuyYH", + "outputId": "0e52d903-dbfa-4b20-ab3c-00a242061c63" + }, + "outputs": [], + "source": [ + "# !npm install -g svelte-cli@2.2.0\n", + "import os\n", + "\n", + "import numpy as np\n", + "from ipyfilechooser import FileChooser\n", + "import ipywidgets as widgets\n", + "from IPython.display import display\n", + "from IPython.core.display import display, HTML\n", + "\n", + "import tensorflow as tf\n", + "# uncomment to avoid deprecation warnings :\n", + "from tensorflow.python.util import deprecation\n", + "deprecation._PRINT_DEPRECATION_WARNINGS = False\n", + "tf.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n", + "\n", + "import lucid.modelzoo.vision_models as models\n", + "from lucid.misc.io import show\n", + "import lucid.optvis.render as render\n", + "from lucid.misc.io import show, load\n", + "from lucid.misc.io.reading import read\n", + "from lucid.misc.io.showing import _image_url\n", + "from lucid.misc.gradient_override import gradient_override_map\n", + "import lucid.scratch.web.svelte as lucid_svelte" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "0cUPBCRyG9xE" + }, + "source": [ + "# Attribution Code" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "FWHqimIqk2Bs" + }, + "outputs": [], + "source": [ + "model = models.InceptionV1()" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": 
"code", + "id": "xIDcG0vjaDtk" + }, + "outputs": [], + "source": [ + "labels_str = read(\"https://gist.githubusercontent.com/aaronpolhamus/964a4411c0906315deb9f4a3723aac57/raw/aa66dd9dbf6b56649fa3fab83659b2acbf3cbfd1/map_clsloc.txt\",mode='r')\n", + "labels = [line[line.find(\" \"):].strip() for line in labels_str.split(\"\\n\")]\n", + "labels = [label[label.find(\" \"):].strip().replace(\"_\", \" \") for label in labels]\n", + "labels = sorted([\"dummy\"] + labels)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "p1S73WcbKIdI" + }, + "outputs": [], + "source": [ + "def raw_class_spatial_attr(model, img, layer, label, override=None):\n", + " \"\"\"\n", + " How much did spatial positions at a given layer effect a output class?\n", + " Returns:\n", + " array containing attributions of layer 1 on layer2 where array[i,j] is\n", + " the influence of layer1 on spatial posittion (i,j) of layer 2.\n", + " \"\"\"\n", + "\n", + " # Set up a graph for doing attribution...\n", + " with tf.Graph().as_default(), tf.Session(), gradient_override_map(override or {}):\n", + " t_input = tf.placeholder_with_default(img, [None, None, 3])\n", + " T = render.import_model(model, t_input, t_input)\n", + "\n", + " # Compute activations\n", + " acts = T(layer).eval()\n", + "\n", + " if label is None: return np.zeros(acts.shape[1:-1])\n", + "\n", + " # Compute gradient between current layer and output score\n", + " score = T(\"softmax2_pre_activation\")[0, labels.index(label)]\n", + "\n", + " t_grad = tf.gradients([score], [T(layer)])[0] \n", + " grad = t_grad.eval({T(layer) : acts})\n", + "\n", + " # Linear approximation of effect of spatial position\n", + " return np.sum(acts * grad, -1)[0]" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "v_jkx5Niji4Q" + }, + "outputs": [], + "source": [ + "def raw_spatial_spatial_attr(model, img, layer1, layer2, override=None):\n", + " \"\"\"Attribution between spatial positions in two different layers.\n", + " \"\"\"\n", + " # Set up a graph for doing attribution...\n", + " with tf.Graph().as_default(), tf.Session(), gradient_override_map(override or {}):\n", + " t_input = tf.placeholder_with_default(img, [None, None, 3])\n", + " T = render.import_model(model, t_input, t_input)\n", + "\n", + " # Compute activations\n", + " acts1 = T(layer1).eval()\n", + " acts2 = T(layer2).eval({T(layer1) : acts1})\n", + "\n", + " # Construct gradient tensor\n", + " # Backprop from spatial position (n_x, n_y) in layer2 to layer1.\n", + " n_x, n_y = tf.placeholder(\"int32\", []), tf.placeholder(\"int32\", [])\n", + " # channelwise magnitude of layer2 activation for each spatial position:\n", + " layer2_mags = tf.sqrt(tf.reduce_sum(T(layer2)**2, -1))[0]\n", + " score = layer2_mags[n_x, n_y]\n", + " t_grad = tf.gradients([score], [T(layer1)])[0]\n", + "\n", + " # Compute attribution backwards from each position in layer2\n", + " attrs = [] #\n", + " for i in range(acts2.shape[1]):\n", + " attrs_ = []\n", + " for j in range(acts2.shape[2]):\n", + " grad = t_grad.eval({n_x : i, n_y : j, T(layer1) : acts1})\n", + " # linear approximation of impact (summed on channel dimension)\n", + " attr = np.sum(acts1 * grad, -1)[0]\n", + " attrs_.append(attr)\n", + " attrs.append(attrs_)\n", + " return np.asarray(attrs)" + ] + }, + { + 
"cell_type": "code", + "execution_count": 7, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "OrlLGkWxKpmf" + }, + "outputs": [], + "source": [ + "def orange_blue(a,b,clip=False):\n", + " \"\"\"\n", + " Args:\n", + " a: spatial position - class gradients array of shape (y_dim, x_dim) of the current layer for class A\n", + " b: spatial position - class gradients array of shape (y_dim, x_dim) of the current layer for class B\n", + " clip: whether to clip negative gradients for a and b\n", + " \n", + " Returns:\n", + " Heatmap of the image for both classes A and B.\n", + " Red channel is for A, Green channel is the mean influence of both classes\n", + " and blue channel is for B.\n", + " \"\"\"\n", + " \n", + " if clip: # keeping positive values only\n", + " a,b = np.maximum(a,0), np.maximum(b,0)\n", + " arr = np.stack([a, (a + b)/2., b], -1)\n", + " arr /= 1e-2 + np.abs(arr).max()/1.5\n", + " arr += 0.3 \n", + " return arr" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "ku6hGbYmiQNI" + }, + "source": [ + "# Spatial Attribution Interface\n", + "\n", + "In this section, we build the *interface* for interacting with the different kinds of spatial attribution data that we can compute using the above functions. Feel free to skip over this if you aren't interested in that part. The main reason we're including it is so that you can change the interface if you want to.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 164, + "output_extras": [ + {} + ] + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 731, + "status": "ok", + "timestamp": 1520296735140, + "user": { + "displayName": "Christopher Olah", + "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", + "userId": "104171973056281402320" + }, + "user_tz": 480 + }, + "id": "X6TFCwbQhre2", + "outputId": "c53be0ec-3588-4282-d6ce-9bae16f939bb" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Trying to build svelte component from html...\n", + "svelte compile --format iife /tmp/svelte_0f883yoj/SpatialWidget_a5fb8c7f_0cb5_45d2_912b_6f11532bd357.html > /tmp/svelte_0f883yoj/SpatialWidget_a5fb8c7f_0cb5_45d2_912b_6f11532bd357.js\n", + "Svelte build failed! Output:\n", + "svelte version 1.64.1\n", + "compiling ../../../../../../../tmp/svelte_0f883yoj/SpatialWidget_a5fb8c7f_0cb5_45d2_912b_6f11532bd357.html...\n", + "Identifier is expected\n", + "62: position: absolute;\n", + "63: left: 0px;\n", + "64: top: 0px; {{#replace with -14px for Jupyter Classic}}\n", + " ^\n", + "65: width: 224px;\n", + "66: }\n", + "\n" + ] + } + ], + "source": [ + "%%html_define_svelte SpatialWidget\n", + "\n", + "
\n", + "
\n", + " \n", + " \n", + "\n", + " \n", + " {{#each xs1 as x}}\n", + " {{#each ys1 as y}}\n", + " \n", + " {{/each}}\n", + " {{/each}}\n", + " \n", + "\n", + "
{{layer1}}
\n", + "
\n", + "\n", + "
\n", + " \n", + " \n", + "\n", + " \n", + " {{#each xs2 as x}}\n", + " {{#each ys2 as y}}\n", + " \n", + " {{/each}}\n", + " {{/each}}\n", + " \n", + "\n", + "
{{layer2}}
\n", + "
\n", + " \n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "zYaLZ6Kd2xGC" + }, + "outputs": [], + "source": [ + "def image_url_grid(grid):\n", + " return [[_image_url(img) for img in line] for line in grid ]\n", + "\n", + "\n", + "def spatial_spatial_attr(model, img, layer1, layer2, hint_label_1=None, hint_label_2=None, override=None):\n", + " hint1 = orange_blue(\n", + " raw_class_spatial_attr(model, img, layer1, hint_label_1, override=override),\n", + " raw_class_spatial_attr(model, img, layer1, hint_label_2, override=override),\n", + " clip=True\n", + " )\n", + " hint2 = orange_blue(\n", + " raw_class_spatial_attr(model, img, layer2, hint_label_1, override=override),\n", + " raw_class_spatial_attr(model, img, layer2, hint_label_2, override=override),\n", + " clip=True\n", + " )\n", + "\n", + " attrs = raw_spatial_spatial_attr(model, img, layer1, layer2, override=override)\n", + " attrs = attrs / attrs.max()\n", + "\n", + " lucid_svelte.SpatialWidget({\n", + " \"spritemap1\": image_url_grid(attrs),\n", + " \"spritemap2\": image_url_grid(attrs.transpose(2,3,0,1)),\n", + " \"size1\": attrs.shape[3],\n", + " \"layer1\": layer1,\n", + " \"size2\": attrs.shape[0],\n", + " \"layer2\": layer2,\n", + " \"img\" : _image_url(img),\n", + " \"hint1\": _image_url(hint1),\n", + " \"hint2\": _image_url(hint2)\n", + " })" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "67UCOiFUHJ8U" + }, + "source": [ + "# Attribution With GradPool override hack" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "t4_HNz7a9icq" + }, + "outputs": [], + "source": [ + "def blur(x, w1, w2):\n", + " \"\"\"Spatially blur a 4D tensor.\"\"\"\n", + " x_ = tf.pad(x, [(0,0), (1,1), (1,1), (0,0)], \"CONSTANT\")\n", + " x_jitter_hv = (x_[:, 2:, 1:-1] + x_[:, :-2, 1:-1] + x_[:, 1:-1, 2:] + x_[:, 1:-1, :-2])/4.\n", + " x_jitter_diag = (x_[:, 2:, 2:] + x_[:, 2:, :-2] + x_[:, :-2, 2:] + x_[:, :-2, :-2])/4.\n", + " return (1-w1-w2)*x + w1*x_jitter_hv + w2*x_jitter_diag\n", + "\n", + "def make_MaxSmoothPoolGrad(blur_hack=False):\n", + " \"\"\"Create a relaxed version of the MaxPool gradient.\n", + "\n", + " GoogLeNet's use of MaxPooling creates a lot of gradient artifacts. This\n", + " function creates a fake gradient that gets rid of them, reducing distractions\n", + " in our UI demos.\n", + "\n", + " Be very very careful about using this in real life. It hides model behavior\n", + " from you. 
This can help you see other things more clearly, but in most cases\n", + " you probably should do something else.\n", + "\n", + " We're actively researching what's going on here.\n", + "\n", + " Args:\n", + " blur_hack: If True, use the second less principled trick of slightly\n", + " blurring the gradient to get rid of checkerboard artifacts.\n", + "\n", + " Returns:\n", + " Gradient function.\n", + "\n", + " \"\"\"\n", + " def MaxPoolGrad(op, grad):\n", + " inp = op.inputs[0]\n", + "\n", + " # Hack 1 (moderately principled): use a relaxation of the MaxPool grad\n", + " # ---------------------------------------------------------------------\n", + " #\n", + " # Construct a pooling function where, if we backprop through it,\n", + " # gradients get allocated proportional to the input activation.\n", + " # Then backpropr through that instead.\n", + " #\n", + " # In some ways, this is kind of spiritually similar to SmoothGrad\n", + " # (Smilkov et al.). To see the connection, note that MaxPooling introduces\n", + " # a pretty arbitrary discontinuity to your gradient; with the right\n", + " # distribution of input noise to the MaxPool op, you'd probably smooth out\n", + " # to this. It seems like this is one of the most natural ways to smooth.\n", + " #\n", + " # We'll probably talk about this and related things in future work.\n", + "\n", + " op_args = [op.get_attr(\"ksize\"), op.get_attr(\"strides\"), op.get_attr(\"padding\")]\n", + " smooth_out = tf.nn.avg_pool(inp**2, *op_args)/ (1e-2+tf.nn.avg_pool(tf.abs(inp), *op_args))\n", + " inp_smooth_grad = tf.gradients(smooth_out, [inp], grad)[0]\n", + "\n", + " # Hack 2 (if argument is set; not very principled) \n", + " # -------------------------------------------------\n", + " #\n", + " # Slightly blur gradient to get rid of checkerboard artifacts.\n", + " # Note, this really isn't principled. We're working around / hiding a bad\n", + " # property of the model. 
It should really be fixed by better model design.\n", + " #\n", + " # We do this so that the artifacts don't distract from the UI demo, but we\n", + " # don't endorse people doing it in real applications.\n", + "\n", + " if blur_hack:\n", + " inp_smooth_grad = blur(inp_smooth_grad, 0.5, 0.25)\n", + "\n", + " return inp_smooth_grad\n", + " return MaxPoolGrad" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "4KLEe280v-_z" + }, + "outputs": [], + "source": [ + "def compare_attrs(model, img, layer1, layer2, hint_label_1, hint_label_2):\n", + " print(\"Normal gradient:\\n\")\n", + "\n", + " spatial_spatial_attr(model, img, layer1, layer2,\n", + " hint_label_1=hint_label_1, hint_label_2=hint_label_2)\n", + "\n", + " print(\"\\nSmooth MaxPool Grad:\")\n", + " print(\"note the subtle checkerboard patterns)\\n\")\n", + "\n", + " spatial_spatial_attr(model, img, layer1, layer2,\n", + " hint_label_1=hint_label_1, hint_label_2=hint_label_2,\n", + " override={\"MaxPool\": make_MaxSmoothPoolGrad()})\n", + "\n", + " print(\"\\nSmooth + Blur MaxPool Grad:\\n\")\n", + "\n", + " spatial_spatial_attr(model, img, layer1, layer2,\n", + " hint_label_1=hint_label_1, hint_label_2=hint_label_2,\n", + " override={\"MaxPool\": make_MaxSmoothPoolGrad(blur_hack=True)})" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Upload file from local machine and select uploading path (A) or just select one file (B):\n", + "A1) Select a file to upload\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "c9dfc900eb6243e6a73f637f86a8205c", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "FileUpload(value={}, description='Upload')" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "A2) Select destination for uploaded file\n", + "B) Select file in this server\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "513fc610459445edba069c4a2b433956", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "FileChooser(path='.', filename='', show_hidden='False')" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Select the shallowest layer whose influence is being studied: \n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "4807a0330dea4df1a28782779577d5c3", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Dropdown(description='Layers', options=('mixed3a', 'mixed3b', 'mixed4a', 'mixed4b', 'mixed4c', 'mixed4d', 'mix…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Select the deepest layer whose influence is being studied: \n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "101ac87a992b4ea58ed0024b07c39401", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Dropdown(description='Layers', index=1, options=('mixed3a', 'mixed3b', 'mixed4a', 'mixed4b', 'mixed4c', 'mixed…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Select 
the first class whose influence is being studied:: \n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "18c0662f132444d7ac0a5e189ca29912", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Dropdown(description='Classes of documents', options=('Afghan hound', 'African chameleon', 'African crocodile'…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Select the second class whose influence is being studied:: \n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "312c076c23ac4be7b08211e7fc2cb1b3", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Dropdown(description='Classes of documents', index=1, options=('Afghan hound', 'African chameleon', 'African c…" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "print(\n", + " \"Upload file from local machine and select uploading path (A) or just select one file (B):\"\n", + ")\n", + "print(\"A1) Select a file to upload\")\n", + "uploader = widgets.FileUpload(accept='', multiple=False)\n", + "display(uploader)\n", + "\n", + "print(\"\\nA2) Select destination for uploaded file\")\n", + "print(\"B) Select file in this server\")\n", + "notebooks_root_path = \"\"\n", + "fc = FileChooser(\".\",\n", + " use_dir_icons=True,\n", + " select_default=True)\n", + "display(fc)\n", + "\n", + "\n", + "layers_list = [layer.name for layer in model.layers[3:]]\n", + "print(\"\\nSelect the shallowest layer whose influence is being studied: \")\n", + "layers_widget = widgets.Dropdown(\n", + " options=layers_list,\n", + " value=layers_list[0],\n", + " description='Layers'\n", + ")\n", + "display(layers_widget)\n", + "\n", + "print(\"\\nSelect the deepest layer whose influence is being studied: \")\n", + "layers_widget_bis = widgets.Dropdown(\n", + " options=layers_list,\n", + " value=layers_list[1],\n", + " description='Layers'\n", + ")\n", + "display(layers_widget_bis)\n", + "\n", + "print(\"\\nSelect the first class whose influence is being studied:: \")\n", + "classes_widget = widgets.Dropdown(\n", + " options=labels,\n", + " value=labels[0],\n", + " description='Classes of documents'\n", + ")\n", + "display(classes_widget)\n", + "\n", + "print(\"\\nSelect the second class whose influence is being studied:: \")\n", + "classes_widget_bis = widgets.Dropdown(\n", + " options=labels,\n", + " value=labels[1],\n", + " description='Classes of documents'\n", + ")\n", + "display(classes_widget_bis)" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "if uploader.value: # upload local file to server\n", + " picture_name = uploader.value[0]\n", + " content = uploader.value[picture_name]['content'] # memoryview of the file\n", + " picture_path = os.path.join(fc.selected_path, picture_name)\n", + " with open(picture_name, 'wb') as f:\n", + " f.write(content)\n", + "else: # use files already on the server\n", + " picture_path = fc.default_filename\n", + " \n", + "layer_name_1 = layers_widget.value # layers to use semantic dictionnary on\n", + "layer_name_2 = layers_widget_bis.value # layers to use semantic dictionnary on\n", + "\n", + "class_name_1 = classes_widget.value # layers to use semantic dictionnary on\n", + "class_name_2 = classes_widget_bis.value # layers to use semantic dictionnary on" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": { + "colab": { + 
"base_uri": "https://localhost:8080/", + "height": 316 + }, + "colab_type": "code", + "id": "dIpwT7tMk-t9", + "outputId": "d049a5f8-5d1a-4d89-89da-1246d2da3a18" + }, + "outputs": [ + { + "data": { + "text/html": [ + "

Legend :

Afghan hound
African chameleon
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "ename": "RuntimeError", + "evalue": "No extension in URL: ", + "output_type": "error", + "traceback": [ + "\u001b[0;31m\u001b[0m", + "\u001b[0;31mRuntimeError\u001b[0mTraceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0mdisplay\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mHTML\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlegend\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 6\u001b[0;31m \u001b[0mimg\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mload\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpicture_path\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 7\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 8\u001b[0m \u001b[0mspatial_spatial_attr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mimg\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlayer_name_1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlayer_name_2\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhint_label_1\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mclass_name_1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhint_label_2\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mclass_name_2\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.local/lib/python3.6/site-packages/lucid/misc/io/loading.py\u001b[0m in \u001b[0;36mload\u001b[0;34m(url_or_handle, allow_unsafe_formats, cache, **kwargs)\u001b[0m\n\u001b[1;32m 193\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0m_load_urls\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0murl_or_handle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcache\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcache\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 194\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 195\u001b[0;31m \u001b[0mext\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdecompressor_ext\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_get_extension\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0murl_or_handle\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 196\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 197\u001b[0m \u001b[0mext\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mext\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlower\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.local/lib/python3.6/site-packages/lucid/misc/io/loading.py\u001b[0m in \u001b[0;36m_get_extension\u001b[0;34m(url_or_handle)\u001b[0m\n\u001b[1;32m 265\u001b[0m \u001b[0mdecompressor_ext\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 266\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mext\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 267\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mRuntimeError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"No extension in URL: \"\u001b[0m \u001b[0;34m+\u001b[0m 
\u001b[0murl_or_handle\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 268\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mext\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdecompressor_ext\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 269\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mRuntimeError\u001b[0m: No extension in URL: " + ] + } + ], + "source": [ + "legend = \"

Legend :

\"\n", + "legend += \"
%s
\" % class_name_1\n", + "legend += \"
%s
\" % class_name_2\n", + "display(HTML(legend))\n", + "\n", + "img = load(picture_path)\n", + "\n", + "spatial_spatial_attr(model, img, layer_name_1, layer_name_2, hint_label_1=class_name_1, hint_label_2=class_name_2)" + ] + } + ], + "metadata": { + "colab": { + "collapsed_sections": [], + "default_view": {}, + "name": "Spatial Attribution - Building Blocks of Interpretability", + "provenance": [ + { + "file_id": "1uRqpBNPg-aW3tRU-uo-mWg6cQxuAquHW", + "timestamp": 1518822563463 + } + ], + "version": "0.3.2", + "views": {} + }, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.9" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} From ea7ad4a3eedf33a6c655f650e7a40db94aa22241 Mon Sep 17 00:00:00 2001 From: Thomas Constum Date: Tue, 16 Jun 2020 09:51:58 +0000 Subject: [PATCH 12/57] updating notebook of Channel Attribution --- notebooks/building-blocks/AttrChannel.ipynb | 27626 +++++++++--------- 1 file changed, 13319 insertions(+), 14307 deletions(-) diff --git a/notebooks/building-blocks/AttrChannel.ipynb b/notebooks/building-blocks/AttrChannel.ipynb index 6eaf2cff..722caae0 100644 --- a/notebooks/building-blocks/AttrChannel.ipynb +++ b/notebooks/building-blocks/AttrChannel.ipynb @@ -1,14409 +1,13421 @@ { - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "JndnmDMp66FL" + }, + "source": [ + "##### Copyright 2018 Google LLC.\n", + "\n", + "Licensed under the Apache License, Version 2.0 (the \"License\");" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "cellView": "both", + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "hMqWDc_m6rUC" + }, + "outputs": [], + "source": [ + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "pNqKk1MmrakH" + }, + "source": [ + "# Channel Attribution -- Building Blocks of Interpretability\n", + "\n", + "This colab notebook is part of our **Building Blocks of Intepretability** series exploring how intepretability techniques combine together to explain neural networks. If you haven't already, make sure to look at the [**corresponding paper**](https://distill.pub/2018/building-blocks) as well!\n", + "\n", + "This notebook demonstrates **Channel Attribution**, a technique for exploring how different detectors in the network effected its output.\n", + "\n", + "
\n", + "\n", + "
\n", + "\n", + "This tutorial is based on [**Lucid**](https://github.com/tensorflow/lucid), a network for visualizing neural networks. Lucid is a kind of spiritual successor to DeepDream, but provides flexible abstractions so that it can be used for a wide range of interpretability research.\n", + "\n", + "**Note**: The easiest way to use this tutorial is [as a colab notebook](), which allows you to dive in with no setup. We recommend you enable a free GPU by going:\n", + "\n", + "> **Runtime**   →   **Change runtime type**   →   **Hardware Accelerator: GPU**\n", + "\n", + "Thanks for trying Lucid!\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "hOBBuzMaxU37" + }, + "source": [ + "# Install / Import / Load" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "UL1yOZtjqkcj" + }, + "source": [ + "This code depends on [Lucid](https://github.com/tensorflow/lucid) (our visualization library), and [svelte](https://svelte.technology/) (a web framework). The following cell will install both of them, and dependencies such as TensorFlow. And then import them as appropriate." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 99, + "output_extras": [ + {} + ] + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 13764, + "status": "ok", + "timestamp": 1520295348008, + "user": { + "displayName": "Christopher Olah", + "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", + "userId": "104171973056281402320" + }, + "user_tz": 480 + }, + "id": "AA17rJBLuyYH", + "outputId": "3ca56c02-9c8c-412e-bed4-49625c959af8" + }, + "outputs": [], + "source": [ + "# !pip install --quiet --upgrade-strategy=only-if-needed git+https://github.com/tensorflow/lucid.git\n", + "# !npm install -g svelte-cli@2.2.0\n", + "# %tensorflow_version 1.x\n", + "\n", + "import numpy as np\n", + "import tensorflow as tf\n", + "\n", + "import lucid.modelzoo.vision_models as models\n", + "from lucid.misc.io import show\n", + "import lucid.optvis.objectives as objectives\n", + "import lucid.optvis.param as param\n", + "import lucid.optvis.render as render\n", + "import lucid.optvis.transform as transform\n", + "from lucid.misc.io import show, load\n", + "from lucid.misc.io.reading import read\n", + "from lucid.misc.io.showing import _image_url, _display_html\n", + "import lucid.scratch.web.svelte as lucid_svelte" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "FWHqimIqk2Bs" + }, + "outputs": [], + "source": [ + "model = models.InceptionV1()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "f6TI0Rqz4P7z" + }, + "source": [ + "# Setup (feel free to skip)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "g6V3qvLk4nTq" + }, + "source": [ + "**ChannelAttrWidget**\n", + "\n", + "Let's make a little widget for showing all our channels and attribution values." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 99, + "output_extras": [ + {} + ] + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 690, + "status": "ok", + "timestamp": 1520295353087, + "user": { + "displayName": "Christopher Olah", + "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", + "userId": "104171973056281402320" + }, + "user_tz": 480 + }, + "id": "lZRgaVagH_pC", + "outputId": "3cf0f1c0-bebe-4fd6-9fe3-a17f787cd606" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Trying to build svelte component from html...\n", + "svelte compile --format iife /tmp/svelte_f7h4hxj8/ChannelAttrWidget_ee26ae16_8d13_43db_afeb_e99f067cdd68.html > /tmp/svelte_f7h4hxj8/ChannelAttrWidget_ee26ae16_8d13_43db_afeb_e99f067cdd68.js\n", + "b'svelte version 1.64.1\\ncompiling ../../../../../../tmp/svelte_f7h4hxj8/ChannelAttrWidget_ee26ae16_8d13_43db_afeb_e99f067cdd68.html...\\n'\n", + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/io/reading.py:124: The name tf.gfile.Open is deprecated. Please use tf.io.gfile.GFile instead.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/io/reading.py:124: The name tf.gfile.Open is deprecated. Please use tf.io.gfile.GFile instead.\n", + "\n" + ] + } + ], + "source": [ + "%%html_define_svelte ChannelAttrWidget\n", + "\n", + "
\n", + "
\n", + " {{#each attrsPos as attr}}\n", + "
\n", + "
\n", + "
0)? 210 : 0}}, {{100*Math.abs(attr.v)/1.8}}%, {{100-30*Math.abs(attr.v)/1.8}}%)\">{{attr.v}}
\n", + "
\n", + " {{/each}}\n", + " {{#if attrsPos.length > 5}}\n", + "
\n", + "
\n", + " {{/if}}\n", + "
...
\n", + " {{#each attrsNeg as attr}}\n", + "
\n", + "
\n", + "
0)? 210 : 0}}, {{100*Math.abs(attr.v)/1.8}}%, {{100-30*Math.abs(attr.v)/1.8}}%)\">{{attr.v}}
\n", + "
\n", + " {{/each}}\n", + "
\n", + "
\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "ku6hGbYmiQNI" + }, + "source": [ + "**BarsWidget**\n", + "\n", + "It would also be nice to see the distribution of attribution magnitudes. Let's make another widget for that." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { "colab": { - "name": "Channel Attribution - Building Blocks of Interpretability", - "version": "0.3.2", - "views": {}, - "default_view": {}, - "provenance": [], - "collapsed_sections": [] + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 99, + "output_extras": [ + {} + ] + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 604, + "status": "ok", + "timestamp": 1520295354181, + "user": { + "displayName": "Christopher Olah", + "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", + "userId": "104171973056281402320" + }, + "user_tz": 480 }, - "kernelspec": { - "name": "python2", - "display_name": "Python 2" + "id": "X6TFCwbQhre2", + "outputId": "c3bb1ea8-4027-4b4b-d72f-a152de346ec1" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Trying to build svelte component from html...\n", + "svelte compile --format iife /tmp/svelte_f7h4hxj8/BarsWidget_48d45968_8e97_4b68_8d07_e10b4b342693.html > /tmp/svelte_f7h4hxj8/BarsWidget_48d45968_8e97_4b68_8d07_e10b4b342693.js\n", + "b'svelte version 1.64.1\\ncompiling ../../../../../../tmp/svelte_f7h4hxj8/BarsWidget_48d45968_8e97_4b68_8d07_e10b4b342693.html...\\n'\n" + ] } + ], + "source": [ + "%%html_define_svelte BarsWidget\n", + "\n", + "
\n", + "
\n", + " {{#each vals as val}}\n", + "
0)? 210 : 0}}, {{Math.max(90, 110*Math.abs(val)/1.8)}}%, {{Math.min(80, 100-40*Math.abs(val)/1.8)}}%);\">\n", + "
\n", + " {{/each}}\n", + "
\n", + "
\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "F4l9Ki-UoVko" + }, + "source": [ + "## **Spritemaps**\n", + "\n", + "In order to show the channels, we need \"spritemaps\" of channel visualizations.\n", + "These visualization spritemaps are large grids of images (such as [this one](https://storage.googleapis.com/lucid-static/building-blocks/sprite_mixed4d_channel.jpeg)) that visualize every channel in a layer.\n", + "We provide spritemaps for GoogLeNet because making them takes a few hours of GPU time, but\n", + "you can make your own channel spritemaps to explore other models. Check out other notebooks on how to\n", + "make your own neuron visualizations.\n", + "\n", + "It's also worth noting that GoogLeNet has unusually semantically meaningful neurons. We don't know why this is -- although it's an active area of research for us. More sophisticated interfaces, such as neuron groups, may work better for networks where meaningful ideas are more entangled or less aligned with the neuron directions." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "BpGLiyEEoPfB" + }, + "outputs": [], + "source": [ + "layer_spritemap_sizes = {\n", + " 'mixed3a' : 16,\n", + " 'mixed3b' : 21,\n", + " 'mixed4a' : 22,\n", + " 'mixed4b' : 22,\n", + " 'mixed4c' : 22,\n", + " 'mixed4d' : 22,\n", + " 'mixed4e' : 28,\n", + " 'mixed5a' : 28,\n", + " }\n", + "\n", + "def spritemap(layer):\n", + " assert layer in layer_spritemap_sizes\n", + " size = layer_spritemap_sizes[layer]\n", + " url = \"https://storage.googleapis.com/lucid-static/building-blocks/googlenet_spritemaps/sprite_%s_channel_alpha.jpeg\" % layer\n", + " return size, url" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "5vifBdxqijXX" + }, + "source": [ + "**Attribution Code**" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "CGKiP5Pr2pci" + }, + "outputs": [], + "source": [ + "def score_f(model, logit, name):\n", + " if name is None:\n", + " return 0\n", + " elif name == \"logsumexp\":\n", + " base = tf.reduce_max(logit)\n", + " return base + tf.log(tf.reduce_sum(tf.exp(logit-base)))\n", + " elif name in model.labels:\n", + " return logit[model.labels.index(name)]\n", + " else:\n", + " raise RuntimeError(\"Unsupported\")\n", + "\n", + "def channel_attr_simple(model, img, layer, class1, class2, n_show=4):\n", + "\n", + " # Set up a graph for doing attribution...\n", + " with tf.Graph().as_default(), tf.Session() as sess:\n", + " t_input = tf.placeholder_with_default(img, [None, None, 3])\n", + " T = render.import_model(model, t_input, t_input)\n", + "\n", + " # Compute activations\n", + " acts = T(layer).eval()\n", + "\n", + " # Compute gradient\n", + " logit = T(\"softmax2_pre_activation\")[0]\n", + " score = score_f(model, logit, class1) - score_f(model, logit, class2)\n", + " t_grad = tf.gradients([score], [T(layer)])[0]\n", + " grad = t_grad.eval()\n", + "\n", + " # Let's do a very simple linear approximation attribution.\n", + " # That is, we say the attribution of y to x is \n", + " # the rate at which x changes y (grad of x on y) \n", + " # times the value of x. 
(activation of x)\n", + " attr = (grad*acts)[0]\n", + "\n", + " # Then we reduce down to channels.\n", + " channel_attr = attr.sum(0).sum(0)\n", + "\n", + " # Now we just need to present the results.\n", + "\n", + " # Get spritemaps\n", + "\n", + "\n", + " spritemap_n, spritemap_url = spritemap(layer)\n", + "\n", + " # Let's show the distribution of attributions\n", + " print(\"Distribution of attribution accross channels:\")\n", + " print(\"\")\n", + " lucid_svelte.BarsWidget({\"vals\" : [float(v) for v in np.sort(channel_attr)[::-1]]})\n", + "\n", + " # Let's pick the most extreme channels to show\n", + " ns_pos = list(np.argsort(-channel_attr)[:n_show])\n", + " ns_neg = list(np.argsort(channel_attr)[:n_show][::-1])\n", + "\n", + " # ... and show them with ChannelAttrWidget\n", + " print(\"\")\n", + " print(\"Top\", n_show, \"channels in each direction:\")\n", + " print(\"\")\n", + " lucid_svelte.ChannelAttrWidget({\n", + " \"spritemap_url\": spritemap_url,\n", + " \"sprite_size\": 110,\n", + " \"sprite_n_wrap\": spritemap_n,\n", + " \"attrsPos\": [{\"n\": int(n), \"v\": str(float(channel_attr[n]))[:5]} for n in ns_pos],\n", + " \"attrsNeg\": [{\"n\": int(n), \"v\": str(float(channel_attr[n]))[:5]} for n in ns_neg] \n", + " })\n" + ] }, - "cells": [ + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "GSeU39MD4Uqu" + }, + "source": [ + "# Channel attributions from article teaser" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 270, + "output_extras": [ + {}, + {}, + {}, + {} + ] + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 998, + "status": "ok", + "timestamp": 1520295392749, + "user": { + "displayName": "Christopher Olah", + "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", + "userId": "104171973056281402320" + }, + "user_tz": 480 + }, + "id": "xUIODkiA3Eg8", + "outputId": "1be77417-dad1-458a-a077-93467f874ed6" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/modelzoo/vision_base.py:192: The name tf.get_default_graph is deprecated. Please use tf.compat.v1.get_default_graph instead.\n", + "\n" + ] + }, { - "metadata": { - "id": "JndnmDMp66FL", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "##### Copyright 2018 Google LLC.\n", - "\n", - "Licensed under the Apache License, Version 2.0 (the \"License\");" + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/modelzoo/vision_base.py:192: The name tf.get_default_graph is deprecated. Please use tf.compat.v1.get_default_graph instead.\n", + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/io/loading.py:129: The name tf.GraphDef is deprecated. Please use tf.compat.v1.GraphDef instead.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/io/loading.py:129: The name tf.GraphDef is deprecated. 
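The updated `channel_attr_simple` above boils down to a grad-times-activation product that is then collapsed to one score per channel. As a minimal standalone sketch of that reduction (NumPy only; the array shapes and the 528-channel count for `mixed4d` are illustrative assumptions, and the random arrays stand in for `T(layer).eval()` and `t_grad.eval()` from the cell above):

```python
import numpy as np

# Assumed shapes: one image, H x W spatial positions, C channels (528 as in mixed4d).
H, W, C = 14, 14, 528
acts = np.random.rand(1, H, W, C).astype("float32")   # stand-in for T(layer).eval()
grad = np.random.rand(1, H, W, C).astype("float32")   # stand-in for t_grad.eval()

# Linear approximation: attribution = (rate at which x changes the score) * (value of x).
attr = (grad * acts)[0]                 # shape [H, W, C]

# Sum out the spatial dimensions so each channel gets a single attribution score.
channel_attr = attr.sum(axis=(0, 1))    # shape [C]

# The most positive / most negative channels are the ones the widgets display.
top_pos = np.argsort(-channel_attr)[:3]
top_neg = np.argsort(channel_attr)[:3]
print(top_pos, top_neg)
```

The sign convention follows the cell above: positive scores push the `class1` logit relative to `class2`, negative scores push the other way.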
Please use tf.compat.v1.GraphDef instead.\n", + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Distribution of attribution accross channels:\n", + "\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " + ], + "text/plain": [ + "" ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Top 3 channels in each direction:\n", + "\n" + ] }, { - "metadata": { - "id": "hMqWDc_m6rUC", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - } - }, - "cellView": "both" - }, - "cell_type": "code", - "source": [ - "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", - "# you may not use this file except in compliance with the License.\n", - "# You may obtain a copy of the License at\n", - "#\n", - "# https://www.apache.org/licenses/LICENSE-2.0\n", - "#\n", - "# Unless required by applicable law or agreed to in writing, software\n", - "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", - "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", - "# See the License for the specific language governing permissions and\n", - "# limitations under the License." + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " ], - "execution_count": 0, - "outputs": [] + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "img = load(\"https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png\")\n", + "channel_attr_simple(model, img, \"mixed4d\", \"Labrador retriever\", \"tiger cat\", n_show=3)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 295, + "output_extras": [ + {}, + {}, + {}, + {} + ] + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 1224, + "status": "ok", + "timestamp": 1520295364539, + "user": { + "displayName": "Christopher Olah", + "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", + "userId": "104171973056281402320" + }, + "user_tz": 480 }, + "id": "VZcAlV3O4vuQ", + "outputId": "9e567c9c-9c94-41f6-fd58-337ca2388db7" + }, + "outputs": [ { - "metadata": { - "id": "pNqKk1MmrakH", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "# Channel Attribution -- Building Blocks of Interpretability\n", - "\n", - "This colab notebook is part of our **Building Blocks of Intepretability** series exploring how intepretability techniques combine together to explain neural networks. If you haven't already, make sure to look at the [**corresponding paper**](https://distill.pub/2018/building-blocks) as well!\n", - "\n", - "This notebook demonstrates **Channel Attribution**, a technique for exploring how different detectors in the network effected its output.\n", - "\n", - "
\n", - "\n", - "
\n", - "\n", - "This tutorial is based on [**Lucid**](https://github.com/tensorflow/lucid), a network for visualizing neural networks. Lucid is a kind of spiritual successor to DeepDream, but provides flexible abstractions so that it can be used for a wide range of interpretability research.\n", - "\n", - "**Note**: The easiest way to use this tutorial is [as a colab notebook](), which allows you to dive in with no setup. We recommend you enable a free GPU by going:\n", - "\n", - "> **Runtime**   →   **Change runtime type**   →   **Hardware Accelerator: GPU**\n", - "\n", - "Thanks for trying Lucid!\n" + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/io/writing.py:69: The name tf.gfile.MakeDirs is deprecated. Please use tf.io.gfile.makedirs instead.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/io/writing.py:69: The name tf.gfile.MakeDirs is deprecated. Please use tf.io.gfile.makedirs instead.\n", + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Distribution of attribution accross channels:\n", + "\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " + ], + "text/plain": [ + "" ] + }, + "metadata": {}, + "output_type": "display_data" }, { - "metadata": { - "id": "hOBBuzMaxU37", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "# Install / Import / Load" + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Top 3 channels in each direction:\n", + "\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " + ], + "text/plain": [ + "" ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "img = load(\"https://storage.googleapis.com/lucid-static/building-blocks/examples/flowers.png\")\n", + "channel_attr_simple(model, img, \"mixed4d\", \"vase\", \"lemon\", n_show=3)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 356, + "output_extras": [ + {}, + {}, + {}, + {} + ] + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 1169, + "status": "ok", + "timestamp": 1519789269196, + "user": { + "displayName": "Christopher Olah", + "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", + "userId": "104171973056281402320" + }, + "user_tz": 480 + }, + "id": "bz3I78QH4sKd", + "outputId": "6616b624-d458-4ab4-c793-18dd935a5650" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Distribution of attribution accross channels:\n", + "\n" + ] }, { - "metadata": { - "id": "UL1yOZtjqkcj", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "This code depends on [Lucid](https://github.com/tensorflow/lucid) (our visualization library), and [svelte](https://svelte.technology/) (a web framework). The following cell will install both of them, and dependencies such as TensorFlow. And then import them as appropriate." + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " + ], + "text/plain": [ + "" ] + }, + "metadata": {}, + "output_type": "display_data" }, { - "metadata": { - "id": "AA17rJBLuyYH", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - }, - "output_extras": [ - {} - ], - "base_uri": "https://localhost:8080/", - "height": 99 - }, - "outputId": "3ca56c02-9c8c-412e-bed4-49625c959af8", - "executionInfo": { - "status": "ok", - "timestamp": 1520295348008, - "user_tz": 480, - "elapsed": 13764, - "user": { - "displayName": "Christopher Olah", - "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", - "userId": "104171973056281402320" - } - } - }, - "cell_type": "code", - "source": [ - "!pip install --quiet lucid==0.0.5\n", - "!npm install -g svelte-cli@2.2.0\n", - "\n", - "import numpy as np\n", - "import tensorflow as tf\n", - "\n", - "import lucid.modelzoo.vision_models as models\n", - "from lucid.misc.io import show\n", - "import lucid.optvis.objectives as objectives\n", - "import lucid.optvis.param as param\n", - "import lucid.optvis.render as render\n", - "import lucid.optvis.transform as transform\n", - "from lucid.misc.io import show, load\n", - "from lucid.misc.io.reading import read\n", - "from lucid.misc.io.showing import _image_url, _display_html\n", - "import lucid.scratch.web.svelte as lucid_svelte" + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Top 3 channels in each direction:\n", + "\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " ], - "execution_count": 0, - "outputs": [ - { - "output_type": "stream", - "text": [ - "\u001b[K\u001b[?25h/tools/node/bin/svelte -> /tools/node/lib/node_modules/svelte-cli/bin.js\n", - "/tools/node/lib\n", - "└─┬ \u001b[40m\u001b[33msvelte-cli@2.2.0\u001b[39m\u001b[49m \n", - " └── \u001b[40m\u001b[33msvelte@1.56.2\u001b[39m\u001b[49m \n", - "\n" - ], - "name": "stdout" - } + "text/plain": [ + "" ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "img = load(\"https://storage.googleapis.com/lucid-static/building-blocks/examples/sunglasses_tux.png\")\n", + "channel_attr_simple(model, img, \"mixed4d\", \"bow tie\", \"sunglasses\", n_show=3)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "uMXGSQ_M44wQ" + }, + "source": [ + "# Bigger channel attribution!!!" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 918, + "output_extras": [ + {}, + {}, + {}, + {} + ] + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 1150, + "status": "ok", + "timestamp": 1519789270381, + "user": { + "displayName": "Christopher Olah", + "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", + "userId": "104171973056281402320" + }, + "user_tz": 480 + }, + "id": "7ORVc1p_5SGX", + "outputId": "6ed872fc-18f7-44f9-bead-2fe594489536" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Distribution of attribution accross channels:\n", + "\n" + ] }, { - "metadata": { - "id": "FWHqimIqk2Bs", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - } - } - }, - "cell_type": "code", - "source": [ - "model = models.InceptionV1()\n", - "model.load_graphdef()" + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " ], - "execution_count": 0, - "outputs": [] + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Top 30 channels in each direction:\n", + "\n" + ] }, { - "metadata": { - "id": "f6TI0Rqz4P7z", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "# Setup (feel free to skip)" + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " + ], + "text/plain": [ + "" ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "img = load(\"https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png\")\n", + "channel_attr_simple(model, img, \"mixed4d\", \"Labrador retriever\", \"tiger cat\", n_show=30)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "RxHkhWLu5QBX" + }, + "source": [ + "# Channel Attribution - Path Integrated" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "8hCsSL134dPM" + }, + "outputs": [], + "source": [ + "def channel_attr_path(\n", + " model, img, layer, class1, class2, n_show=4, stochastic_path=False, N=100\n", + "):\n", + "\n", + " # Set up a graph for doing attribution\n", + " with tf.Graph().as_default(), tf.Session() as sess:\n", + " t_input = tf.placeholder_with_default(img, [None, None, 3])\n", + " T = render.import_model(model, t_input, t_input)\n", + "\n", + " # Compute activations\n", + " acts = T(layer).eval()\n", + "\n", + " # Compute gradient\n", + " logit = T(\"softmax2_pre_activation\")[0]\n", + " score = score_f(model, logit, class1) - score_f(model, logit, class2)\n", + " t_grad = tf.gradients([score], [T(layer)])[0]\n", + "\n", + " # Integrate on a path from acts=0 to acts=acts\n", + " attr = np.zeros(acts.shape[1:])\n", + " # acts_ = [acts * 0/N ; acts * 1/N; ... ; acts * (N-1)/N]\n", + " \n", + " n_channels = model.get_layer(layer).depth\n", + " \n", + " for n in range(N):\n", + " acts_ = acts * float(n) / N\n", + " if stochastic_path:\n", + " acts_ *= (\n", + " np.random.uniform(0, 1, [n_channels]) + np.random.uniform(0, 1, [n_channels])\n", + " ) / 1.5 # vector of dim n_channels containing values in [0, 1.33]\n", + " grad = t_grad.eval({T(layer): acts_})\n", + " attr += 1.0 / N * (grad * acts)[0] # mean of attributions with different value of n\n", + "\n", + " # Then we reduce down to channels.\n", + " channel_attr = attr.sum(0).sum(0)\n", + "\n", + " # Now we just need to present the results.\n", + "\n", + " # Get spritemaps\n", + "\n", + " spritemap_n, spritemap_url = spritemap(layer)\n", + "\n", + " # Let's show the distribution of attributions\n", + " print(\"Distribution of attribution accross channels:\")\n", + " print(\"\")\n", + " lucid_svelte.BarsWidget({\"vals\": [float(v) for v in np.sort(channel_attr)[::-1]]})\n", + "\n", + " # Let's pick the most extreme channels to show\n", + " ns_pos = list(np.argsort(-channel_attr)[:n_show])\n", + " ns_neg = list(np.argsort(channel_attr)[:n_show][::-1])\n", + "\n", + " # ... and show them with ChannelAttrWidget\n", + " print(\"\")\n", + " print(\"Top\", n_show, \"channels in each direction:\")\n", + " print(\"\")\n", + " lucid_svelte.ChannelAttrWidget(\n", + " {\n", + " \"spritemap_url\": spritemap_url,\n", + " \"sprite_size\": 110,\n", + " \"sprite_n_wrap\": spritemap_n,\n", + " \"attrsPos\": [{\"n\": int(n), \"v\": str(float(channel_attr[n]))[:5]} for n in ns_pos],\n", + " \"attrsNeg\": [{\"n\": int(n), \"v\": str(float(channel_attr[n]))[:5]} for n in ns_neg]\n", + " }\n", + " )\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "_GLvytGjhs2O" + }, + "outputs": [], + "source": [ + "def compare_attr_methods(model, img, layer_name, class1, class2, n_show):\n", + " _display_html(\"

Linear Attribution

\")\n", + " channel_attr_simple(model, img, layer_name, class1, class2, n_show=n_show)\n", + "\n", + " _display_html(\"

Path Integrated Attribution

\")\n", + " channel_attr_path(model, img, layer_name, class1, class2, n_show=n_show)\n", + "\n", + " _display_html(\"

Stochastic Path Integrated Attribution

\")\n", + " channel_attr_path(model, img, layer_name, class1, class2, n_show=n_show, stochastic_path=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 1466, + "output_extras": [ + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {} + ] + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 9286, + "status": "ok", + "timestamp": 1520295451852, + "user": { + "displayName": "Christopher Olah", + "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", + "userId": "104171973056281402320" + }, + "user_tz": 480 }, + "id": "iuBg_tyi6TFt", + "outputId": "d08427a4-3546-4a79-efff-6c14e38eef6f" + }, + "outputs": [ { - "metadata": { - "id": "g6V3qvLk4nTq", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "**ChannelAttrWidget**\n", - "\n", - "Let's make a little widget for showing all our channels and attribution values." + "data": { + "text/html": [ + "

Linear Attribution

" + ], + "text/plain": [ + "" ] + }, + "metadata": {}, + "output_type": "display_data" }, { - "metadata": { - "id": "lZRgaVagH_pC", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - }, - "output_extras": [ - {} - ], - "base_uri": "https://localhost:8080/", - "height": 99 - }, - "outputId": "3cf0f1c0-bebe-4fd6-9fe3-a17f787cd606", - "executionInfo": { - "status": "ok", - "timestamp": 1520295353087, - "user_tz": 480, - "elapsed": 690, - "user": { - "displayName": "Christopher Olah", - "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", - "userId": "104171973056281402320" - } - } - }, - "cell_type": "code", - "source": [ - "%%html_define_svelte ChannelAttrWidget\n", - "\n", - "
\n", - "
\n", - " {{#each attrsPos as attr}}\n", - "
\n", - "
\n", - "
0)? 210 : 0}}, {{100*Math.abs(attr.v)/1.8}}%, {{100-30*Math.abs(attr.v)/1.8}}%)\">{{attr.v}}
\n", - "
\n", - " {{/each}}\n", - " {{#if attrsPos.length > 5}}\n", - "
\n", - "
\n", - " {{/if}}\n", - "
...
\n", - " {{#each attrsNeg as attr}}\n", - "
\n", - "
\n", - "
0)? 210 : 0}}, {{100*Math.abs(attr.v)/1.8}}%, {{100-30*Math.abs(attr.v)/1.8}}%)\">{{attr.v}}
\n", - "
\n", - " {{/each}}\n", - "
\n", - "
\n", - "
\n", - "\n", - "\n", - "\n", - "\n", - "" + "name": "stdout", + "output_type": "stream", + "text": [ + "Distribution of attribution accross channels:\n", + "\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " ], - "execution_count": 0, - "outputs": [ - { - "output_type": "stream", - "text": [ - "Trying to build svelte component from html...\n", - "svelte compile --format iife /tmp/svelte_8NKFID/ChannelAttrWidget_105f5be.html > /tmp/svelte_8NKFID/ChannelAttrWidget_105f5be.js\n", - "svelte version 1.56.2\n", - "compiling ../tmp/svelte_8NKFID/ChannelAttrWidget_105f5be.html...\n", - "\n" - ], - "name": "stdout" - } + "text/plain": [ + "" ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Top 30 channels in each direction:\n", + "\n" + ] }, { - "metadata": { - "id": "ku6hGbYmiQNI", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "**BarsWidget**\n", - "\n", - "It would also be nice to see the distribution of attribution magnitudes. Let's make another widget for that." + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " + ], + "text/plain": [ + "" ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "

Path Integrated Attribution

" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Distribution of attribution accross channels:\n", + "\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Top 30 channels in each direction:\n", + "\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "

Stochastic Path Integrated Attribution

" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Distribution of attribution accross channels:\n", + "\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Top 30 channels in each direction:\n", + "\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "img = load(\"https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png\")\n", + "\n", + "compare_attr_methods(model, img, \"mixed4d\", \"Labrador retriever\", \"tiger cat\", n_show=30)" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 1576, + "output_extras": [ + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {} + ] + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 9472, + "status": "ok", + "timestamp": 1520295472485, + "user": { + "displayName": "Christopher Olah", + "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", + "userId": "104171973056281402320" + }, + "user_tz": 480 }, + "id": "Grb1kE1uJhJj", + "outputId": "cf43bdef-0090-4414-ef95-2de9d7db435c" + }, + "outputs": [ { - "metadata": { - "id": "X6TFCwbQhre2", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - }, - "output_extras": [ - {} - ], - "base_uri": "https://localhost:8080/", - "height": 99 - }, - "outputId": "c3bb1ea8-4027-4b4b-d72f-a152de346ec1", - "executionInfo": { - "status": "ok", - "timestamp": 1520295354181, - "user_tz": 480, - "elapsed": 604, - "user": { - "displayName": "Christopher Olah", - "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", - "userId": "104171973056281402320" - } - } - }, - "cell_type": "code", - "source": [ - "%%html_define_svelte BarsWidget\n", - "\n", - "
\n", - "
\n", - " {{#each vals as val}}\n", - "
0)? 210 : 0}}, {{Math.max(90, 110*Math.abs(val)/1.8)}}%, {{Math.min(80, 100-40*Math.abs(val)/1.8)}}%);\">\n", - "
\n", - " {{/each}}\n", - "
\n", - "
\n", - "
\n", - "\n", - "\n", - "\n", - "\n", - "" + "data": { + "text/html": [ + "

Linear Attribution

" ], - "execution_count": 0, - "outputs": [ - { - "output_type": "stream", - "text": [ - "Trying to build svelte component from html...\n", - "svelte compile --format iife /tmp/svelte_8NKFID/BarsWidget_282a79.html > /tmp/svelte_8NKFID/BarsWidget_282a79.js\n", - "svelte version 1.56.2\n", - "compiling ../tmp/svelte_8NKFID/BarsWidget_282a79.html...\n", - "\n" - ], - "name": "stdout" - } + "text/plain": [ + "" ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Distribution of attribution accross channels:\n", + "\n" + ] }, { - "metadata": { - "id": "F4l9Ki-UoVko", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "## **Spritemaps**\n", - "\n", - "In order to show the channels, we need \"spritemaps\" of channel visualizations.\n", - "These visualization spritemaps are large grids of images (such as [this one](https://storage.googleapis.com/lucid-static/building-blocks/sprite_mixed4d_channel.jpeg)) that visualize every channel in a layer.\n", - "We provide spritemaps for GoogLeNet because making them takes a few hours of GPU time, but\n", - "you can make your own channel spritemaps to explore other models. Check out other notebooks on how to\n", - "make your own neuron visualizations.\n", - "\n", - "It's also worth noting that GoogLeNet has unusually semantically meaningful neurons. We don't know why this is -- although it's an active area of research for us. More sophisticated interfaces, such as neuron groups, may work better for networks where meaningful ideas are more entangled or less aligned with the neuron directions." + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " + ], + "text/plain": [ + "" ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Top 30 channels in each direction:\n", + "\n" + ] }, { - "metadata": { - "id": "BpGLiyEEoPfB", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - } - } - }, - "cell_type": "code", - "source": [ - "layer_spritemap_sizes = {\n", - " 'mixed3a' : 16,\n", - " 'mixed3b' : 21,\n", - " 'mixed4a' : 22,\n", - " 'mixed4b' : 22,\n", - " 'mixed4c' : 22,\n", - " 'mixed4d' : 22,\n", - " 'mixed4e' : 28,\n", - " 'mixed5a' : 28,\n", - " }\n", - "\n", - "def googlenet_spritemap(layer):\n", - " assert layer in layer_spritemap_sizes\n", - " size = layer_spritemap_sizes[layer]\n", - " url = \"https://storage.googleapis.com/lucid-static/building-blocks/googlenet_spritemaps/sprite_%s_channel_alpha.jpeg\" % layer\n", - " return size, url" + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " ], - "execution_count": 0, - "outputs": [] + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" }, { - "metadata": { - "id": "5vifBdxqijXX", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "**Attribution Code**" + "data": { + "text/html": [ + "

Path Integrated Attribution

" + ], + "text/plain": [ + "" ] + }, + "metadata": {}, + "output_type": "display_data" }, { - "metadata": { - "id": "CGKiP5Pr2pci", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - } - } - }, - "cell_type": "code", - "source": [ - "def score_f(logit, name):\n", - " if name is None:\n", - " return 0\n", - " elif name == \"logsumexp\":\n", - " base = tf.reduce_max(logit)\n", - " return base + tf.log(tf.reduce_sum(tf.exp(logit-base)))\n", - " elif name in model.labels:\n", - " return logit[model.labels.index(name)]\n", - " else:\n", - " raise RuntimeError(\"Unsupported\")\n", - "\n", - "def channel_attr_simple(img, layer, class1, class2, n_show=4):\n", - "\n", - " # Set up a graph for doing attribution...\n", - " with tf.Graph().as_default(), tf.Session() as sess:\n", - " t_input = tf.placeholder_with_default(img, [None, None, 3])\n", - " T = render.import_model(model, t_input, t_input)\n", - " \n", - " # Compute activations\n", - " acts = T(layer).eval()\n", - " \n", - " # Compute gradient\n", - " logit = T(\"softmax2_pre_activation\")[0]\n", - " score = score_f(logit, class1) - score_f(logit, class2)\n", - " t_grad = tf.gradients([score], [T(layer)])[0]\n", - " grad = t_grad.eval()\n", - " \n", - " # Let's do a very simple linear approximation attribution.\n", - " # That is, we say the attribution of y to x is \n", - " # the rate at which x changes y times the value of x.\n", - " attr = (grad*acts)[0]\n", - " \n", - " # Then we reduce down to channels.\n", - " channel_attr = attr.sum(0).sum(0)\n", - "\n", - " # Now we just need to present the results.\n", - " \n", - " # Get spritemaps\n", - " \n", - " \n", - " spritemap_n, spritemap_url = googlenet_spritemap(layer)\n", - " \n", - " # Let's show the distribution of attributions\n", - " print \"Distribution of attribution accross channels:\"\n", - " print \"\"\n", - " lucid_svelte.BarsWidget({\"vals\" : [float(v) for v in np.sort(channel_attr)[::-1]]})\n", - "\n", - " # Let's pick the most extreme channels to show\n", - " ns_pos = list(np.argsort(-channel_attr)[:n_show])\n", - " ns_neg = list(np.argsort(channel_attr)[:n_show][::-1])\n", - " \n", - " # ... and show them with ChannelAttrWidget\n", - " print \"\"\n", - " print \"Top\", n_show, \"channels in each direction:\"\n", - " print \"\"\n", - " lucid_svelte.ChannelAttrWidget({\n", - " \"spritemap_url\": spritemap_url,\n", - " \"sprite_size\": 110,\n", - " \"sprite_n_wrap\": spritemap_n,\n", - " \"attrsPos\": [{\"n\": n, \"v\": str(float(channel_attr[n]))[:5]} for n in ns_pos],\n", - " \"attrsNeg\": [{\"n\": n, \"v\": str(float(channel_attr[n]))[:5]} for n in ns_neg] \n", - " })\n" + "name": "stdout", + "output_type": "stream", + "text": [ + "Distribution of attribution accross channels:\n", + "\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " ], - "execution_count": 0, - "outputs": [] + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" }, { - "metadata": { - "id": "GSeU39MD4Uqu", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "# Channel attributions from article teaser" + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Top 30 channels in each direction:\n", + "\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " + ], + "text/plain": [ + "" ] + }, + "metadata": {}, + "output_type": "display_data" }, { - "metadata": { - "id": "xUIODkiA3Eg8", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - }, - "output_extras": [ - {}, - {}, - {}, - {} - ], - "base_uri": "https://localhost:8080/", - "height": 270 - }, - "outputId": "1be77417-dad1-458a-a077-93467f874ed6", - "executionInfo": { - "status": "ok", - "timestamp": 1520295392749, - "user_tz": 480, - "elapsed": 998, - "user": { - "displayName": "Christopher Olah", - "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", - "userId": "104171973056281402320" - } - } - }, - "cell_type": "code", - "source": [ - "img = load(\"https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png\")\n", - "channel_attr_simple(img, \"mixed4d\", \"Labrador retriever\", \"tiger cat\", n_show=3)" + "data": { + "text/html": [ + "

Stochastic Path Integrated Attribution

" ], - "execution_count": 0, - "outputs": [ - { - "output_type": "stream", - "text": [ - "Distribution of attribution accross channels:\n", - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "\n", - "
\n", - " \n", - " \n", - " " - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "\n", - "Top 3 channels in each direction:\n", - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "\n", - "
\n", - " \n", - " \n", - " " - ] - }, - "metadata": { - "tags": [] - } - } + "text/plain": [ + "" ] + }, + "metadata": {}, + "output_type": "display_data" }, { - "metadata": { - "id": "VZcAlV3O4vuQ", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - }, - "output_extras": [ - {}, - {}, - {}, - {} - ], - "base_uri": "https://localhost:8080/", - "height": 295 - }, - "outputId": "9e567c9c-9c94-41f6-fd58-337ca2388db7", - "executionInfo": { - "status": "ok", - "timestamp": 1520295364539, - "user_tz": 480, - "elapsed": 1224, - "user": { - "displayName": "Christopher Olah", - "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", - "userId": "104171973056281402320" - } - } - }, - "cell_type": "code", - "source": [ - "img = load(\"https://storage.googleapis.com/lucid-static/building-blocks/examples/flowers.png\")\n", - "channel_attr_simple(img, \"mixed4d\", \"vase\", \"lemon\", n_show=3)" + "name": "stdout", + "output_type": "stream", + "text": [ + "Distribution of attribution accross channels:\n", + "\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " ], - "execution_count": 0, - "outputs": [ - { - "output_type": "stream", - "text": [ - "Distribution of attribution accross channels:\n", - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "\n", - "
\n", - " \n", - " \n", - " " - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "\n", - "Top 3 channels in each direction:\n", - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "\n", - "
\n", - " \n", - " \n", - " " - ] - }, - "metadata": { - "tags": [] - } - } + "text/plain": [ + "" ] + }, + "metadata": {}, + "output_type": "display_data" }, { - "metadata": { - "id": "bz3I78QH4sKd", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - }, - "output_extras": [ - {}, - {}, - {}, - {} - ], - "base_uri": "https://localhost:8080/", - "height": 356 - }, - "outputId": "6616b624-d458-4ab4-c793-18dd935a5650", - "executionInfo": { - "status": "ok", - "timestamp": 1519789269196, - "user_tz": 480, - "elapsed": 1169, - "user": { - "displayName": "Christopher Olah", - "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", - "userId": "104171973056281402320" - } - } - }, - "cell_type": "code", - "source": [ - "img = load(\"https://storage.googleapis.com/lucid-static/building-blocks/examples/sunglasses_tux.png\")\n", - "channel_attr_simple(img, \"mixed4d\", \"bow tie\", \"sunglasses\", n_show=3)" + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Top 30 channels in each direction:\n", + "\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " ], - "execution_count": 0, - "outputs": [ - { - "output_type": "stream", - "text": [ - "Distribution of attribution accross channels:\n", - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "\n", - "
\n", - " \n", - " \n", - " " - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "\n", - "Top 3 channels in each direction:\n", - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "\n", - "
\n", - " \n", - " \n", - " " - ] - }, - "metadata": { - "tags": [] - } - } + "text/plain": [ + "" ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "img = load(\"https://storage.googleapis.com/lucid-static/building-blocks/examples/flowers.png\")\n", + "\n", + "compare_attr_methods(model, img,\"mixed4d\", \"Labrador retriever\", \"tiger cat\", n_show=30)" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 1545, + "output_extras": [ + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {} + ] + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 9736, + "status": "ok", + "timestamp": 1520295615428, + "user": { + "displayName": "Christopher Olah", + "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", + "userId": "104171973056281402320" + }, + "user_tz": 480 }, + "id": "Zdz_B1nM-IgW", + "outputId": "4e0e59b2-1583-4e2d-dc80-24a996506e70" + }, + "outputs": [ { - "metadata": { - "id": "uMXGSQ_M44wQ", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "# Bigger channel attribution!!!" + "data": { + "text/html": [ + "

Linear Attribution

" + ], + "text/plain": [ + "" ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Distribution of attribution accross channels:\n", + "\n" + ] }, { - "metadata": { - "id": "7ORVc1p_5SGX", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - }, - "output_extras": [ - {}, - {}, - {}, - {} - ], - "base_uri": "https://localhost:8080/", - "height": 918 - }, - "outputId": "6ed872fc-18f7-44f9-bead-2fe594489536", - "executionInfo": { - "status": "ok", - "timestamp": 1519789270381, - "user_tz": 480, - "elapsed": 1150, - "user": { - "displayName": "Christopher Olah", - "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", - "userId": "104171973056281402320" - } - } - }, - "cell_type": "code", - "source": [ - "img = load(\"https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png\")\n", - "channel_attr_simple(img, \"mixed4d\", \"Labrador retriever\", \"tiger cat\", n_show=30)" + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " ], - "execution_count": 0, - "outputs": [ - { - "output_type": "stream", - "text": [ - "Distribution of attribution accross channels:\n", - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "\n", - "
\n", - " \n", - " \n", - " " - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "\n", - "Top 30 channels in each direction:\n", - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "\n", - "
\n", - " \n", - " \n", - " " - ] - }, - "metadata": { - "tags": [] - } - } + "text/plain": [ + "" ] + }, + "metadata": {}, + "output_type": "display_data" }, { - "metadata": { - "id": "RxHkhWLu5QBX", - "colab_type": "text" - }, - "cell_type": "markdown", - "source": [ - "# Channel Attribution - Path Integrated" + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Top 30 channels in each direction:\n", + "\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " + ], + "text/plain": [ + "" ] + }, + "metadata": {}, + "output_type": "display_data" }, { - "metadata": { - "id": "8hCsSL134dPM", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - } - } - }, - "cell_type": "code", - "source": [ - "def channel_attr_path(img, layer, class1, class2, n_show=4, stochastic_path=False, N = 100):\n", - "\n", - " # Set up a graph for doing attribution...\n", - " with tf.Graph().as_default(), tf.Session() as sess:\n", - " t_input = tf.placeholder_with_default(img, [None, None, 3])\n", - " T = render.import_model(model, t_input, t_input)\n", - " \n", - " # Compute activations\n", - " acts = T(layer).eval()\n", - " \n", - " # Compute gradient\n", - " logit = T(\"softmax2_pre_activation\")[0]\n", - " score = score_f(logit, class1) - score_f(logit, class2)\n", - " t_grad = tf.gradients([score], [T(layer)])[0]\n", - "\n", - " \n", - " # Inegrate on a path from acts=0 to acts=acts\n", - " attr = np.zeros(acts.shape[1:])\n", - " for n in range(N):\n", - " acts_ = acts * float(n) / N\n", - " if stochastic_path:\n", - " acts_ *= (np.random.uniform(0, 1, [528])+np.random.uniform(0, 1, [528]))/1.5\n", - " grad = t_grad.eval({T(layer): acts_})\n", - " attr += 1.0 / N * (grad*acts)[0]\n", - " \n", - " # Then we reduce down to channels.\n", - " channel_attr = attr.sum(0).sum(0)\n", - "\n", - " # Now we just need to present the results.\n", - " \n", - " # Get spritemaps\n", - " \n", - " \n", - " spritemap_n, spritemap_url = googlenet_spritemap(layer)\n", - " \n", - " # Let's show the distribution of attributions\n", - " print \"Distribution of attribution accross channels:\"\n", - " print \"\"\n", - " lucid_svelte.BarsWidget({\"vals\" : [float(v) for v in np.sort(channel_attr)[::-1]]})\n", - "\n", - " # Let's pick the most extreme channels to show\n", - " ns_pos = list(np.argsort(-channel_attr)[:n_show])\n", - " ns_neg = list(np.argsort(channel_attr)[:n_show][::-1])\n", - " \n", - " # ... and show them with ChannelAttrWidget\n", - " print \"\"\n", - " print \"Top\", n_show, \"channels in each direction:\"\n", - " print \"\"\n", - " lucid_svelte.ChannelAttrWidget({\n", - " \"spritemap_url\": spritemap_url,\n", - " \"sprite_size\": 110,\n", - " \"sprite_n_wrap\": spritemap_n,\n", - " \"attrsPos\": [{\"n\": n, \"v\": str(float(channel_attr[n]))[:5]} for n in ns_pos],\n", - " \"attrsNeg\": [{\"n\": n, \"v\": str(float(channel_attr[n]))[:5]} for n in ns_neg] \n", - " })\n" + "data": { + "text/html": [ + "

Path Integrated Attribution

" ], - "execution_count": 0, - "outputs": [] + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Distribution of attribution accross channels:\n", + "\n" + ] }, { - "metadata": { - "id": "_GLvytGjhs2O", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - } - } - }, - "cell_type": "code", - "source": [ - "def compare_attr_methods(img, class1, class2):\n", - " \n", - " _display_html(\"

Linear Attribution

\")\n", - " channel_attr_simple(img, \"mixed4d\", class1, class2, n_show=10)\n", - "\n", - " _display_html(\"

Path Integrated Attribution

\")\n", - " channel_attr_path(img, \"mixed4d\", class1, class2, n_show=10)\n", - " \n", - " _display_html(\"

Stochastic Path Integrated Attribution

\")\n", - " channel_attr_path(img, \"mixed4d\", class1, class2, n_show=10, stochastic_path=True)" + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " ], - "execution_count": 0, - "outputs": [] + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Top 30 channels in each direction:\n", + "\n" + ] }, { - "metadata": { - "id": "iuBg_tyi6TFt", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - }, - "output_extras": [ - {}, - {}, - {}, - {}, - {}, - {}, - {}, - {}, - {}, - {}, - {}, - {}, - {}, - {}, - {} - ], - "base_uri": "https://localhost:8080/", - "height": 1466 - }, - "outputId": "d08427a4-3546-4a79-efff-6c14e38eef6f", - "executionInfo": { - "status": "ok", - "timestamp": 1520295451852, - "user_tz": 480, - "elapsed": 9286, - "user": { - "displayName": "Christopher Olah", - "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", - "userId": "104171973056281402320" - } - } - }, - "cell_type": "code", - "source": [ - "img = load(\"https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png\")\n", - "\n", - "compare_attr_methods(img, \"Labrador retriever\", \"tiger cat\")" + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " ], - "execution_count": 0, - "outputs": [ - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "

Linear Attribution

" - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "Distribution of attribution accross channels:\n", - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "\n", - "
\n", - " \n", - " \n", - " " - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "\n", - "Top 10 channels in each direction:\n", - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "\n", - "
\n", - " \n", - " \n", - " " - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "

Path Integrated Attribution

" - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "Distribution of attribution accross channels:\n", - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "\n", - "
\n", - " \n", - " \n", - " " - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "\n", - "Top 10 channels in each direction:\n", - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "\n", - "
\n", - " \n", - " \n", - " " - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "

Stochastic Path Integrated Attribution

" - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "Distribution of attribution accross channels:\n", - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "\n", - "
\n", - " \n", - " \n", - " " - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "\n", - "Top 10 channels in each direction:\n", - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "\n", - "
\n", - " \n", - " \n", - " " - ] - }, - "metadata": { - "tags": [] - } - } + "text/plain": [ + "" ] + }, + "metadata": {}, + "output_type": "display_data" }, { - "metadata": { - "id": "Grb1kE1uJhJj", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - }, - "output_extras": [ - {}, - {}, - {}, - {}, - {}, - {}, - {}, - {}, - {}, - {}, - {}, - {}, - {}, - {}, - {} - ], - "base_uri": "https://localhost:8080/", - "height": 1576 - }, - "outputId": "cf43bdef-0090-4414-ef95-2de9d7db435c", - "executionInfo": { - "status": "ok", - "timestamp": 1520295472485, - "user_tz": 480, - "elapsed": 9472, - "user": { - "displayName": "Christopher Olah", - "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", - "userId": "104171973056281402320" - } - } - }, - "cell_type": "code", - "source": [ - "img = load(\"https://storage.googleapis.com/lucid-static/building-blocks/examples/flowers.png\")\n", - "\n", - "compare_attr_methods(img, \"vase\", \"lemon\")" + "data": { + "text/html": [ + "

Stochastic Path Integrated Attribution

" ], - "execution_count": 0, - "outputs": [ - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "

Linear Attribution

" - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "Distribution of attribution accross channels:\n", - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "\n", - "
\n", - " \n", - " \n", - " " - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "\n", - "Top 10 channels in each direction:\n", - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "\n", - "
\n", - " \n", - " \n", - " " - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "

Path Integrated Attribution

" - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "Distribution of attribution accross channels:\n", - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "\n", - "
\n", - " \n", - " \n", - " " - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "\n", - "Top 10 channels in each direction:\n", - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "\n", - "
\n", - " \n", - " \n", - " " - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "

Stochastic Path Integrated Attribution

" - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "Distribution of attribution accross channels:\n", - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "\n", - "
\n", - " \n", - " \n", - " " - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "\n", - "Top 10 channels in each direction:\n", - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "\n", - "
\n", - " \n", - " \n", - " " - ] - }, - "metadata": { - "tags": [] - } - } + "text/plain": [ + "" ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Distribution of attribution accross channels:\n", + "\n" + ] }, { - "metadata": { - "id": "Zdz_B1nM-IgW", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - }, - "output_extras": [ - {}, - {}, - {}, - {}, - {}, - {}, - {}, - {}, - {}, - {}, - {}, - {}, - {}, - {}, - {} - ], - "base_uri": "https://localhost:8080/", - "height": 1545 - }, - "outputId": "4e0e59b2-1583-4e2d-dc80-24a996506e70", - "executionInfo": { - "status": "ok", - "timestamp": 1520295615428, - "user_tz": 480, - "elapsed": 9736, - "user": { - "displayName": "Christopher Olah", - "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", - "userId": "104171973056281402320" - } - } - }, - "cell_type": "code", - "source": [ - "img = load(\"https://storage.googleapis.com/lucid-static/building-blocks/examples/pig.jpeg\")\n", - "\n", - "compare_attr_methods(img, \"hog\", \"dalmatian\")\n" + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " ], - "execution_count": 0, - "outputs": [ - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "

Linear Attribution

" - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "Distribution of attribution accross channels:\n", - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "\n", - "
\n", - " \n", - " \n", - " " - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "\n", - "Top 10 channels in each direction:\n", - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "\n", - "
\n", - " \n", - " \n", - " " - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "

Path Integrated Attribution

" - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "Distribution of attribution accross channels:\n", - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "\n", - "
\n", - " \n", - " \n", - " " - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "\n", - "Top 10 channels in each direction:\n", - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "\n", - "
\n", - " \n", - " \n", - " " - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "

Stochastic Path Integrated Attribution

" - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "Distribution of attribution accross channels:\n", - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "\n", - "
\n", - " \n", - " \n", - " " - ] - }, - "metadata": { - "tags": [] - } - }, - { - "output_type": "stream", - "text": [ - "\n", - "Top 10 channels in each direction:\n", - "\n" - ], - "name": "stdout" - }, - { - "output_type": "display_data", - "data": { - "text/plain": [ - "" - ], - "text/html": [ - "\n", - "
\n", - " \n", - " \n", - " " - ] - }, - "metadata": { - "tags": [] - } - } + "text/plain": [ + "" ] + }, + "metadata": {}, + "output_type": "display_data" }, { - "metadata": { - "id": "YzKpcGVPhcsD", - "colab_type": "code", - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - } - } - }, - "cell_type": "code", - "source": [ - "" + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Top 30 channels in each direction:\n", + "\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " ], - "execution_count": 0, - "outputs": [] + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" } - ] -} \ No newline at end of file + ], + "source": [ + "img = load(\"https://storage.googleapis.com/lucid-static/building-blocks/examples/pig.jpeg\")\n", + "\n", + "compare_attr_methods(model, img, \"mixed4d\", \"Labrador retriever\", \"tiger cat\", n_show=30)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "YzKpcGVPhcsD" + }, + "outputs": [], + "source": [] + } + ], + "metadata": { + "colab": { + "collapsed_sections": [], + "default_view": {}, + "name": "Channel Attribution - Building Blocks of Interpretability", + "provenance": [], + "version": "0.3.2", + "views": {} + }, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.9" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} From 94d9354c899eef0999e24d8f577e81471a4f496f Mon Sep 17 00:00:00 2001 From: Thomas Constum Date: Tue, 16 Jun 2020 09:54:26 +0000 Subject: [PATCH 13/57] adding a Jupyter version of the notebook Channel Attribution --- .../jupyter_versions/AttrChannelJupyter.ipynb | 4877 +++++++++++++++++ 1 file changed, 4877 insertions(+) create mode 100644 notebooks/building-blocks/jupyter_versions/AttrChannelJupyter.ipynb diff --git a/notebooks/building-blocks/jupyter_versions/AttrChannelJupyter.ipynb b/notebooks/building-blocks/jupyter_versions/AttrChannelJupyter.ipynb new file mode 100644 index 00000000..a7752e59 --- /dev/null +++ b/notebooks/building-blocks/jupyter_versions/AttrChannelJupyter.ipynb @@ -0,0 +1,4877 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "JndnmDMp66FL" + }, + "source": [ + "##### Copyright 2018 Google LLC.\n", + "\n", + "Licensed under the Apache License, Version 2.0 (the \"License\");" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "cellView": "both", + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "hMqWDc_m6rUC" + }, + "outputs": [], + "source": [ + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "pNqKk1MmrakH" + }, + "source": [ + "# Channel Attribution -- Building Blocks of Interpretability\n", + "\n", + "This colab notebook is part of our **Building Blocks of Intepretability** series exploring how intepretability techniques combine together to explain neural networks. 
If you haven't already, make sure to look at the [**corresponding paper**](https://distill.pub/2018/building-blocks) as well!\n",
+    "\n",
+    "This notebook demonstrates **Channel Attribution**, a technique for exploring how different detectors in the network affected its output.\n",
+    "\n",
+    "
\n", + "\n", + "
\n", + "\n", + "This tutorial is based on [**Lucid**](https://github.com/tensorflow/lucid), a network for visualizing neural networks. Lucid is a kind of spiritual successor to DeepDream, but provides flexible abstractions so that it can be used for a wide range of interpretability research.\n", + "\n", + "**Note**: The easiest way to use this tutorial is [as a colab notebook](), which allows you to dive in with no setup. We recommend you enable a free GPU by going:\n", + "\n", + "> **Runtime**   →   **Change runtime type**   →   **Hardware Accelerator: GPU**\n", + "\n", + "Thanks for trying Lucid!\n", + "\n", + "#### **This notebook is a Jupyter version of the original Google Colab Notebook. This version adds widgets to facilitate the use of Lucid on your own images.**" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 99, + "output_extras": [ + {} + ] + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 13764, + "status": "ok", + "timestamp": 1520295348008, + "user": { + "displayName": "Christopher Olah", + "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", + "userId": "104171973056281402320" + }, + "user_tz": 480 + }, + "id": "AA17rJBLuyYH", + "outputId": "3ca56c02-9c8c-412e-bed4-49625c959af8" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[33mWARNING: You are using pip version 19.3.1; however, version 20.1.1 is available.\n", + "You should consider upgrading via the 'pip install --upgrade pip' command.\u001b[0m\n" + ] + } + ], + "source": [ + "# !npm install -g svelte-cli@2.2.0\n", + "!pip install ipyfilechooser --quiet\n", + "\n", + "import numpy as np\n", + "import tensorflow as tf\n", + "\n", + "from ipyfilechooser import FileChooser\n", + "import ipywidgets as widgets\n", + "from IPython.core.display import display, HTML\n", + "from pathlib import Path\n", + "\n", + "import lucid.modelzoo.vision_models as models\n", + "from lucid.misc.io import show\n", + "import lucid.optvis.objectives as objectives\n", + "import lucid.optvis.param as param\n", + "import lucid.optvis.render as render\n", + "import lucid.optvis.transform as transform\n", + "from lucid.misc.io import show, load\n", + "from lucid.misc.io.reading import read\n", + "from lucid.misc.io.showing import _image_url, _display_html\n", + "import lucid.scratch.web.svelte as lucid_svelte" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "FWHqimIqk2Bs" + }, + "outputs": [], + "source": [ + "model = models.InceptionV1()" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "xIDcG0vjaDtk" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/io/reading.py:124: The name tf.gfile.Open is deprecated. Please use tf.io.gfile.GFile instead.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/io/reading.py:124: The name tf.gfile.Open is deprecated. 
Please use tf.io.gfile.GFile instead.\n", + "\n" + ] + } + ], + "source": [ + "labels_str = read(\"https://gist.githubusercontent.com/aaronpolhamus/964a4411c0906315deb9f4a3723aac57/raw/aa66dd9dbf6b56649fa3fab83659b2acbf3cbfd1/map_clsloc.txt\",mode='r')\n", + "labels = [line[line.find(\" \"):].strip() for line in labels_str.split(\"\\n\")]\n", + "labels = [label[label.find(\" \"):].strip().replace(\"_\", \" \") for label in labels]\n", + "labels = sorted([\"dummy\"] + labels)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "g6V3qvLk4nTq" + }, + "source": [ + "**ChannelAttrWidget**\n", + "\n", + "Let's make a little widget for showing all our channels and attribution values." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 99, + "output_extras": [ + {} + ] + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 690, + "status": "ok", + "timestamp": 1520295353087, + "user": { + "displayName": "Christopher Olah", + "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", + "userId": "104171973056281402320" + }, + "user_tz": 480 + }, + "id": "lZRgaVagH_pC", + "outputId": "3cf0f1c0-bebe-4fd6-9fe3-a17f787cd606" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Trying to build svelte component from html...\n", + "svelte compile --format iife /tmp/svelte_twnny2bj/ChannelAttrWidget_2c0e7508_1aff_491a_98dd_2044d7a4a6ee.html > /tmp/svelte_twnny2bj/ChannelAttrWidget_2c0e7508_1aff_491a_98dd_2044d7a4a6ee.js\n", + "b'svelte version 1.64.1\\ncompiling ../../../../../../../tmp/svelte_twnny2bj/ChannelAttrWidget_2c0e7508_1aff_491a_98dd_2044d7a4a6ee.html...\\n'\n" + ] + } + ], + "source": [ + "%%html_define_svelte ChannelAttrWidget\n", + "\n", + "
\n", + "
\n", + " {{#each attrsPos as attr}}\n", + "
\n", + "
\n", + "
0)? 210 : 0}}, {{100*Math.abs(attr.v)/1.8}}%, {{100-30*Math.abs(attr.v)/1.8}}%)\">{{attr.v}}
\n", + "
\n", + " {{/each}}\n", + " {{#if attrsPos.length > 5}}\n", + "
\n", + "
\n", + " {{/if}}\n", + "
...
\n", + " {{#each attrsNeg as attr}}\n", + "
\n", + "
\n", + "
0)? 210 : 0}}, {{100*Math.abs(attr.v)/1.8}}%, {{100-30*Math.abs(attr.v)/1.8}}%)\">{{attr.v}}
\n", + "
\n", + " {{/each}}\n", + "
\n", + "
\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "ku6hGbYmiQNI" + }, + "source": [ + "**BarsWidget**\n", + "\n", + "It would also be nice to see the distribution of attribution magnitudes. Let's make another widget for that." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 99, + "output_extras": [ + {} + ] + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 604, + "status": "ok", + "timestamp": 1520295354181, + "user": { + "displayName": "Christopher Olah", + "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", + "userId": "104171973056281402320" + }, + "user_tz": 480 + }, + "id": "X6TFCwbQhre2", + "outputId": "c3bb1ea8-4027-4b4b-d72f-a152de346ec1" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Trying to build svelte component from html...\n", + "svelte compile --format iife /tmp/svelte_twnny2bj/BarsWidget_2f4d23a9_8b29_42bf_b86f_fc6b0f5697d7.html > /tmp/svelte_twnny2bj/BarsWidget_2f4d23a9_8b29_42bf_b86f_fc6b0f5697d7.js\n", + "b'svelte version 1.64.1\\ncompiling ../../../../../../../tmp/svelte_twnny2bj/BarsWidget_2f4d23a9_8b29_42bf_b86f_fc6b0f5697d7.html...\\n'\n" + ] + } + ], + "source": [ + "%%html_define_svelte BarsWidget\n", + "\n", + "
\n", + "
\n", + " {{#each vals as val}}\n", + "
0)? 210 : 0}}, {{Math.max(90, 110*Math.abs(val)/1.8)}}%, {{Math.min(80, 100-40*Math.abs(val)/1.8)}}%);\">\n", + "
\n", + " {{/each}}\n", + "
\n", + "
\n", + "
\n", + "\n", + "\n", + "\n", + "\n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "F4l9Ki-UoVko" + }, + "source": [ + "## **Spritemaps**\n", + "\n", + "In order to show the channels, we need \"spritemaps\" of channel visualizations.\n", + "These visualization spritemaps are large grids of images (such as [this one](https://storage.googleapis.com/lucid-static/building-blocks/sprite_mixed4d_channel.jpeg)) that visualize every channel in a layer.\n", + "We provide spritemaps for GoogLeNet because making them takes a few hours of GPU time, but\n", + "you can make your own channel spritemaps to explore other models. Check out other notebooks on how to\n", + "make your own neuron visualizations.\n", + "\n", + "It's also worth noting that GoogLeNet has unusually semantically meaningful neurons. We don't know why this is -- although it's an active area of research for us. More sophisticated interfaces, such as neuron groups, may work better for networks where meaningful ideas are more entangled or less aligned with the neuron directions." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "BpGLiyEEoPfB" + }, + "outputs": [], + "source": [ + "layer_spritemap_sizes = {\n", + " 'mixed3a' : 16,\n", + " 'mixed3b' : 21,\n", + " 'mixed4a' : 22,\n", + " 'mixed4b' : 22,\n", + " 'mixed4c' : 22,\n", + " 'mixed4d' : 22,\n", + " 'mixed4e' : 28,\n", + " 'mixed5a' : 28,\n", + " }\n", + "\n", + "def spritemap(layer):\n", + " assert layer in layer_spritemap_sizes\n", + " size = layer_spritemap_sizes[layer]\n", + " url = \"https://storage.googleapis.com/lucid-static/building-blocks/googlenet_spritemaps/sprite_%s_channel_alpha.jpeg\" % layer\n", + " return size, url" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "5vifBdxqijXX" + }, + "source": [ + "**Attribution Code**" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "CGKiP5Pr2pci" + }, + "outputs": [], + "source": [ + "def score_f(model, logit, name):\n", + " if name is None:\n", + " return 0\n", + " elif name == \"logsumexp\":\n", + " base = tf.reduce_max(logit)\n", + " return base + tf.log(tf.reduce_sum(tf.exp(logit-base)))\n", + " elif name in model.labels:\n", + " return logit[model.labels.index(name)]\n", + " else:\n", + " raise RuntimeError(\"Unsupported\")\n", + "\n", + "def channel_attr_simple(model, img, layer, class1, class2, n_show=4):\n", + "\n", + " # Set up a graph for doing attribution...\n", + " with tf.Graph().as_default(), tf.Session() as sess:\n", + " t_input = tf.placeholder_with_default(img, [None, None, 3])\n", + " T = render.import_model(model, t_input, t_input)\n", + "\n", + " # Compute activations\n", + " acts = T(layer).eval()\n", + "\n", + " # Compute gradient\n", + " # (Adapt the softmax layer to to your model)\n", + " logit = T(\"softmax2_pre_activation\")[0]\n", + " score = score_f(model, logit, class1) - score_f(model, logit, class2)\n", + " t_grad = tf.gradients([score], [T(layer)])[0]\n", + " grad = t_grad.eval()\n", + "\n", + " # Let's do a very simple linear approximation attribution.\n", + " # That is, we say the attribution of y to x is \n", + " # the rate at which x changes y (grad of x on y) \n", + " # times the value of x. 
(activation of x)\n", + " attr = (grad*acts)[0]\n", + "\n", + " # Then we reduce down to channels.\n", + " channel_attr = attr.sum(0).sum(0)\n", + "\n", + " # Now we just need to present the results.\n", + "\n", + " # Get spritemaps\n", + "\n", + "\n", + " spritemap_n, spritemap_url = spritemap(layer)\n", + "\n", + " # Let's show the distribution of attributions\n", + " print(\"Distribution of attribution accross channels:\")\n", + " print(\"\")\n", + " lucid_svelte.BarsWidget({\"vals\" : [float(v) for v in np.sort(channel_attr)[::-1]]})\n", + "\n", + " # Let's pick the most extreme channels to show\n", + " ns_pos = list(np.argsort(-channel_attr)[:n_show])\n", + " ns_neg = list(np.argsort(channel_attr)[:n_show][::-1])\n", + "\n", + " # ... and show them with ChannelAttrWidget\n", + " print(\"\")\n", + " print(\"Top\", n_show, \"channels in each direction:\")\n", + " print(\"\")\n", + " lucid_svelte.ChannelAttrWidget({\n", + " \"spritemap_url\": spritemap_url,\n", + " \"sprite_size\": 110,\n", + " \"sprite_n_wrap\": spritemap_n,\n", + " \"attrsPos\": [{\"n\": int(n), \"v\": str(float(channel_attr[n]))[:5]} for n in ns_pos],\n", + " \"attrsNeg\": [{\"n\": int(n), \"v\": str(float(channel_attr[n]))[:5]} for n in ns_neg] \n", + " })\n" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Upload file from local machine and select uploading path (A) or just select one file (B):\n", + "A1) Select a file to upload\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "51ddf7efbd8748caa06b0128ef997fc7", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "FileUpload(value={}, description='Upload')" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "A2) Select destination for uploaded file\n", + "B) Select file in this server\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "d7a19b5bf12440f697f5badfd19d4d73", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "FileChooser(path='.', filename='', show_hidden='False')" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Select the layer \n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "c8c34f45157844d196a215be30b4805d", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Dropdown(description='Layers', index=3, options=('conv2d0', 'conv2d1', 'conv2d2', 'mixed3a', 'mixed3b', 'mixed…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Select the first class whose influence is being studied: \n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "cfbd61064df7424bbc8f0c336e7d214b", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Dropdown(description='Classes of documents', index=2, options=('Afghan hound', 'African chameleon', 'African c…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Select the second class whose influence is being studied: \n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "4b225755301349f0b0295024ee248273", + "version_major": 
2, + "version_minor": 0 + }, + "text/plain": [ + "Dropdown(description='Classes of documents', index=1, options=('Afghan hound', 'African chameleon', 'African c…" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "3f7d510febae44099b3cd0e401339b91", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "IntSlider(value=3, description='Number of features to display :', layout=Layout(width='70%'), max=30, min=1)" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "print(\n", + " \"Upload file from local machine and select uploading path (A) or just select one file (B):\"\n", + ")\n", + "print(\"A1) Select a file to upload\")\n", + "uploader = widgets.FileUpload(accept='', multiple=False)\n", + "display(uploader)\n", + "\n", + "print(\"\\nA2) Select destination for uploaded file\")\n", + "print(\"B) Select file in this server\")\n", + "notebooks_root_path = \"\"\n", + "fc = FileChooser(\".\",\n", + " use_dir_icons=True,\n", + " select_default=True)\n", + "display(fc)\n", + "\n", + "layers_list = [layer.name for layer in model.layers]\n", + "print(\"\\nSelect the layer \")\n", + "layers_widget = widgets.Dropdown(\n", + " options=layers_list,\n", + " value=layers_list[3],\n", + " description='Layers'\n", + ")\n", + "display(layers_widget)\n", + "\n", + "print(\"\\nSelect the first class whose influence is being studied: \")\n", + "classes_widget = widgets.Dropdown(\n", + " options=labels,\n", + " value=labels[2],\n", + " description='Classes of documents'\n", + ")\n", + "display(classes_widget)\n", + "\n", + "print(\"\\nSelect the second class whose influence is being studied: \")\n", + "classes_widget_bis = widgets.Dropdown(\n", + " options=labels,\n", + " value=labels[1],\n", + " description='Classes of documents'\n", + ")\n", + "display(classes_widget_bis)\n", + "\n", + "# Selection of number of images to display\n", + "slider = widgets.IntSlider(\n", + " value=3,\n", + " min=1,\n", + " max=30,\n", + " step=1,\n", + " description='Number of features to display :',\n", + " layout=widgets.Layout(width='70%')\n", + ")\n", + "display(slider)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "if uploader.value: # upload local file to server\n", + " picture_name = uploader.value[0]\n", + " content = uploader.value[picture_name]['content'] # memoryview of the file\n", + " picture_path = os.path.join(fc.selected_path, picture_name)\n", + " with open(picture_name, 'wb') as f:\n", + " f.write(content)\n", + "else: # use files already on the server\n", + " picture_path = fc.selected\n", + " \n", + "layer_name = layers_widget.value # layers to use semantic dictionnary on\n", + "\n", + "class_name_1 = classes_widget.value # layers to use semantic dictionnary on\n", + "class_name_2 = classes_widget_bis.value # layers to use semantic dictionnary on\n", + "\n", + "n_show = slider.value" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "GSeU39MD4Uqu" + }, + "source": [ + "# Channel attributions from article teaser" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 270, + "output_extras": [ + {}, + {}, + {}, + {} + ] + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 998, + "status": "ok", + "timestamp": 
1520295392749, + "user": { + "displayName": "Christopher Olah", + "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", + "userId": "104171973056281402320" + }, + "user_tz": 480 + }, + "id": "xUIODkiA3Eg8", + "outputId": "1be77417-dad1-458a-a077-93467f874ed6" + }, + "outputs": [ + { + "data": { + "text/html": [ + "

Legend:

African crocodile
African chameleon
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/modelzoo/vision_base.py:192: The name tf.get_default_graph is deprecated. Please use tf.compat.v1.get_default_graph instead.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/modelzoo/vision_base.py:192: The name tf.get_default_graph is deprecated. Please use tf.compat.v1.get_default_graph instead.\n", + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/io/loading.py:129: The name tf.GraphDef is deprecated. Please use tf.compat.v1.GraphDef instead.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/io/loading.py:129: The name tf.GraphDef is deprecated. Please use tf.compat.v1.GraphDef instead.\n", + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Distribution of attribution accross channels:\n", + "\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Top 3 channels in each direction:\n", + "\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "img = load(\"https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png\")\n", + "\n", + "legend = \"

Legend:

\"\n", + "legend += \"
%s
\" % class_name_1\n", + "legend += \"
%s
\" % class_name_2\n", + "display(HTML(legend))\n", + "\n", + "channel_attr_simple(model, img, layer_name, class_name_1, class_name_2, n_show=n_show)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "RxHkhWLu5QBX" + }, + "source": [ + "# Channel Attribution - Path Integrated" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "8hCsSL134dPM" + }, + "outputs": [], + "source": [ + "def channel_attr_path(\n", + " model, img, layer, class1, class2, n_show=4, stochastic_path=False, N=100\n", + "):\n", + "\n", + " # Set up a graph for doing attribution\n", + " with tf.Graph().as_default(), tf.Session() as sess:\n", + " t_input = tf.placeholder_with_default(img, [None, None, 3])\n", + " T = render.import_model(model, t_input, t_input)\n", + "\n", + " # Compute activations\n", + " acts = T(layer).eval()\n", + "\n", + " # Compute gradient\n", + " logit = T(\"softmax2_pre_activation\")[0]\n", + " score = score_f(model, logit, class1) - score_f(model, logit, class2)\n", + " t_grad = tf.gradients([score], [T(layer)])[0]\n", + "\n", + " # Integrate on a path from acts=0 to acts=acts\n", + " attr = np.zeros(acts.shape[1:])\n", + " # acts_ = [acts * 0/N ; acts * 1/N; ... ; acts * (N-1)/N]\n", + " \n", + " n_channels = model.get_layer(layer).depth\n", + " \n", + " for n in range(N):\n", + " acts_ = acts * float(n) / N\n", + " if stochastic_path:\n", + " acts_ *= (\n", + " np.random.uniform(0, 1, [n_channels]) + np.random.uniform(0, 1, [n_channels])\n", + " ) / 1.5 # vector of dim n_channels containing values in [0, 1.33]\n", + " grad = t_grad.eval({T(layer): acts_})\n", + " attr += 1.0 / N * (grad * acts)[0] # mean of attributions with different value of n\n", + "\n", + " # Then we reduce down to channels.\n", + " channel_attr = attr.sum(0).sum(0)\n", + "\n", + " # Now we just need to present the results.\n", + "\n", + " # Get spritemaps\n", + "\n", + " spritemap_n, spritemap_url = spritemap(layer)\n", + "\n", + " # Let's show the distribution of attributions\n", + " print(\"Distribution of attribution accross channels:\")\n", + " print(\"\")\n", + " lucid_svelte.BarsWidget({\"vals\": [float(v) for v in np.sort(channel_attr)[::-1]]})\n", + "\n", + " # Let's pick the most extreme channels to show\n", + " ns_pos = list(np.argsort(-channel_attr)[:n_show])\n", + " ns_neg = list(np.argsort(channel_attr)[:n_show][::-1])\n", + "\n", + " # ... and show them with ChannelAttrWidget\n", + " print(\"\")\n", + " print(\"Top\", n_show, \"channels in each direction:\")\n", + " print(\"\")\n", + " lucid_svelte.ChannelAttrWidget(\n", + " {\n", + " \"spritemap_url\": spritemap_url,\n", + " \"sprite_size\": 110,\n", + " \"sprite_n_wrap\": spritemap_n,\n", + " \"attrsPos\": [{\"n\": int(n), \"v\": str(float(channel_attr[n]))[:5]} for n in ns_pos],\n", + " \"attrsNeg\": [{\"n\": int(n), \"v\": str(float(channel_attr[n]))[:5]} for n in ns_neg]\n", + " }\n", + " )\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "_GLvytGjhs2O" + }, + "outputs": [], + "source": [ + "def compare_attr_methods(model, img, layer_name, class1, class2, n_show):\n", + " _display_html(\"

Linear Attribution

\")\n", + " channel_attr_simple(model, img, layer_name, class1, class2, n_show=n_show)\n", + "\n", + " _display_html(\"

Path Integrated Attribution

\")\n", + " channel_attr_path(model, img, layer_name, class1, class2, n_show=n_show)\n", + "\n", + " _display_html(\"

Stochastic Path Integrated Attribution

\")\n", + " channel_attr_path(model, img, layer_name, class1, class2, n_show=n_show, stochastic_path=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 1466, + "output_extras": [ + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {}, + {} + ] + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 9286, + "status": "ok", + "timestamp": 1520295451852, + "user": { + "displayName": "Christopher Olah", + "photoUrl": "//lh5.googleusercontent.com/-GhJP0RTFLEs/AAAAAAAAAAI/AAAAAAAAEZ8/wDVK-lwJYfA/s50-c-k-no/photo.jpg", + "userId": "104171973056281402320" + }, + "user_tz": 480 + }, + "id": "iuBg_tyi6TFt", + "outputId": "d08427a4-3546-4a79-efff-6c14e38eef6f" + }, + "outputs": [ + { + "data": { + "text/html": [ + "

Legend:

African crocodile
African chameleon
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "

Linear Attribution

" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Distribution of attribution accross channels:\n", + "\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Top 3 channels in each direction:\n", + "\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "

Path Integrated Attribution

" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Distribution of attribution accross channels:\n", + "\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Top 3 channels in each direction:\n", + "\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "

Stochastic Path Integrated Attribution

" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Distribution of attribution accross channels:\n", + "\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Top 3 channels in each direction:\n", + "\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "img = load(\"https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png\")\n", + "\n", + "legend = \"

Legend:

\"\n", + "legend += \"
%s
\" % class_name_1\n", + "legend += \"
%s
\" % class_name_2\n", + "display(HTML(legend))\n", + "\n", + "compare_attr_methods(model, img, layer_name, class_name_1, class_name_2, n_show=n_show)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "YzKpcGVPhcsD" + }, + "outputs": [], + "source": [] + } + ], + "metadata": { + "colab": { + "collapsed_sections": [], + "default_view": {}, + "name": "Channel Attribution - Building Blocks of Interpretability", + "provenance": [], + "version": "0.3.2", + "views": {} + }, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.9" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} From 266fb0bd22aa43dd9f624cbd51b875952913e45b Mon Sep 17 00:00:00 2001 From: Thomas Constum Date: Tue, 16 Jun 2020 09:55:43 +0000 Subject: [PATCH 14/57] adding a disclaimer at the beginning of the Jupyter version of ActivationGrid notebook --- .../jupyter_versions/ActivationGridJupyter.ipynb | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/notebooks/building-blocks/jupyter_versions/ActivationGridJupyter.ipynb b/notebooks/building-blocks/jupyter_versions/ActivationGridJupyter.ipynb index dbac28b2..0b797931 100644 --- a/notebooks/building-blocks/jupyter_versions/ActivationGridJupyter.ipynb +++ b/notebooks/building-blocks/jupyter_versions/ActivationGridJupyter.ipynb @@ -59,7 +59,9 @@ "\n", "> **Runtime**   →   **Change runtime type**   →   **Hardware Accelerator: GPU**\n", "\n", - "Thanks for trying Lucid!" + "Thanks for trying Lucid!\n", + "\n", + "#### **This notebook is a Jupyter version of the original Google Colab Notebook. This version adds widgets to facilitate the use of Lucid on your own images.**" ] }, { From c4bb3a9cfd3959ab2900a8b8b0a907aae3831c8f Mon Sep 17 00:00:00 2001 From: Thomas Constum Date: Tue, 16 Jun 2020 09:58:31 +0000 Subject: [PATCH 15/57] adding an example of a custom model defined like ModelZoo models --- lucid/misc/custom_model.py | 85 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 85 insertions(+) create mode 100644 lucid/misc/custom_model.py diff --git a/lucid/misc/custom_model.py b/lucid/misc/custom_model.py new file mode 100644 index 00000000..f959d46a --- /dev/null +++ b/lucid/misc/custom_model.py @@ -0,0 +1,85 @@ +from lucid.modelzoo.vision_base import Model, _layers_from_list_of_dicts + + +class CustomModel(Model): + """Example of custom Lucid Model class. This example is based on Mobilenet + from Keras Applications + """ + + model_path = "lucid_protobuf_file.pb" + dataset = "ImageNet" + image_shape = [224, 224, 3] + image_value_range = (-1, 1) + input_name = "input" + # Labels as a index-class name dictionnary : + # Of course if you really use a daset with 1000 classes you + # should consider loading them from a file. + _labels = { + 0: 'tench, Tinca tinca', + 1: 'goldfish, Carassius auratus', + 2: 'great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias', + 3: 'tiger shark, Galeocerdo cuvieri', + 4: 'hammerhead, hammerhead shark', + 5: 'electric ray, crampfish, numbfish, torpedo', + 6: 'stingray', + 7: 'cock', + 8: 'hen', + 9: 'ostrich, Struthio camelus', + # ... 
+ 999: 'toilet tissue, toilet paper, bathroom tissue'} + } + + @property + def labels(self): + return self._labels + + def label_index(self, label): + return list(self._labels.values()).index(label) + +CustomModel.layers = _layers_from_list_of_dicts( + CustomModel(), + [ + {"name": "conv1_relu/Relu6", "depth": 32, "tags": ["conv"]}, + {"name": "conv_pw_1_relu/Relu6", "depth": 64, "tags": ["conv"]}, + {"name": "conv_pw_2_relu/Relu6", "depth": 128, "tags": ["conv"]}, + {"name": "conv_pw_3_relu/Relu6", "depth": 128, "tags": ["conv"]}, + {"name": "conv_pw_4_relu/Relu6", "depth": 256, "tags": ["conv"]}, + {"name": "conv_pw_5_relu/Relu6", "depth": 256, "tags": ["conv"]}, + {"name": "conv_pw_6_relu/Relu6", "depth": 512, "tags": ["conv"]}, + {"name": "conv_pw_7_relu/Relu6", "depth": 512, "tags": ["conv"]}, + {"name": "conv_pw_8_relu/Relu6", "depth": 512, "tags": ["conv"]}, + {"name": "conv_pw_9_relu/Relu6", "depth": 512, "tags": ["conv"]}, + {"name": "conv_pw_10_relu/Relu6", "depth": 512, "tags": ["conv"]}, + {"name": "conv_pw_11_relu/Relu6", "depth": 512, "tags": ["conv"]}, + {"name": "conv_pw_12_relu/Relu6", "depth": 1024, "tags": ["conv"]}, + {"name": "conv_pw_13_relu/Relu6", "depth": 1024, "tags": ["conv"]}, + {"name": "dense/BiasAdd", "depth": 256, "tags": ["dense"]}, + {"name": "dense_1/BiasAdd", "depth": 256, "tags": ["dense"]}, + {"name": "dense_2/BiasAdd", "depth": 1000, "tags": ["dense"]}, + {"name": "softmax/Softmax", "depth": 1000, "tags": ["dense"]}, + ], +) + +output_shapes = { + "conv1_relu/Relu6": (112, 112, 32), + "conv_pw_1_relu/Relu6": (112, 112, 64), + "conv_pw_2_relu/Relu6": (56, 56, 128), + "conv_pw_3_relu/Relu6": (56, 56, 128), + "conv_pw_4_relu/Relu6": (28, 28, 256), + "conv_pw_5_relu/Relu6": (28, 28, 256), + "conv_pw_6_relu/Relu6": (14, 14, 512), + "conv_pw_7_relu/Relu6": (14, 14, 512), + "conv_pw_8_relu/Relu6": (14, 14, 512), + "conv_pw_9_relu/Relu6": (14, 14, 512), + "conv_pw_10_relu/Relu6": (14, 14, 512), + "conv_pw_11_relu/Relu6": (14, 14, 512), + "conv_pw_12_relu/Relu6": (7, 7, 1024), + "conv_pw_13_relu/Relu6": (7, 7, 1024), + "dense/BiasAdd": (256,), + "dense_1/BiasAdd": (256,), + "dense_2/BiasAdd": (1000,), + "softmax/Softmax": (1000,), +} + +for layer in CustomModel.layers: + layer.shape = output_shapes[layer.name] From 862070d52103282d4aeee138e88f067080c562ae Mon Sep 17 00:00:00 2001 From: Thomas Constum Date: Tue, 16 Jun 2020 10:02:16 +0000 Subject: [PATCH 16/57] adding a notebook containing examples of available objectives. It completes the tutorial notebook. 
--- .../misc_objectives.ipynb | 473 ++++++++++++++++++ 1 file changed, 473 insertions(+) create mode 100644 notebooks/feature-visualization/misc_objectives.ipynb diff --git a/notebooks/feature-visualization/misc_objectives.ipynb b/notebooks/feature-visualization/misc_objectives.ipynb new file mode 100644 index 00000000..7aa5381e --- /dev/null +++ b/notebooks/feature-visualization/misc_objectives.ipynb @@ -0,0 +1,473 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "JndnmDMp66FL" + }, + "source": [ + "Licensed under the Apache License, Version 2.0 (the \"License\");" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "cellView": "both", + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "hMqWDc_m6rUC" + }, + "outputs": [], + "source": [ + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "_vAVmphMywZR" + }, + "source": [ + "# Lucid: A Quick Tutorial\n", + "\n", + "This notebook quickly complete the tutorial notebook with further examples of objectives.\n", + "\n", + "**Note**: The easiest way to use this tutorial is [as a colab notebook](https://colab.sandbox.google.com/github/tensorflow/lucid/blob/master/notebooks/tutorial.ipynb), which allows you to dive in with no setup. 
We recommend you enable a free GPU by going:\n", + "\n", + "> **Runtime**   →   **Change runtime type**   →   **Hardware Accelerator: GPU**\n", + "\n", + "Thanks for trying Lucid!\n", + "\n", + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "FsFc1mE51tCd" + }, + "source": [ + "## Install, Import, Load Model" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "tavMPe3KQ8Cs" + }, + "outputs": [], + "source": [ + "# Install Lucid\n", + "# !pip install --quiet --upgrade-strategy=only-if-needed git+https://github.com/tensorflow/lucid.git\n", + "\n", + "# %tensorflow_version only works on colab\n", + "# %tensorflow_version 1.x" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "RBr8QbboRAdU" + }, + "outputs": [], + "source": [ + "# Imports\n", + "\n", + "import numpy as np\n", + "import tensorflow as tf\n", + "assert tf.__version__.startswith('1')\n", + "\n", + "# uncomment to avoid deprecation warnings :\n", + "from tensorflow.python.util import deprecation\n", + "deprecation._PRINT_DEPRECATION_WARNINGS = False\n", + "tf.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n", + "\n", + "import lucid.modelzoo.vision_models as models\n", + "from lucid.misc.io import show\n", + "import lucid.optvis.objectives as objectives\n", + "import lucid.optvis.param as param\n", + "import lucid.optvis.render as render\n", + "import lucid.optvis.transform as transform" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "yNALaA0QRJVT" + }, + "outputs": [], + "source": [ + "# Let's import a model from the Lucid modelzoo!\n", + "\n", + "model = models.InceptionV1()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "1l31v18X42gc" + }, + "source": [ + "In this tutorial, we will be visualizing InceptionV1, also known as GoogLeNet.\n", + "\n", + "While we will focus on a few neurons, you may wish to experiment with visualizing others. If you'd like, you can try any of the following layers: `conv2d0, maxpool0, conv2d1, conv2d2, maxpool1, mixed3a, mixed3b, maxpool4, mixed4a, mixed4b, mixed4c, mixed4d, mixed4e, maxpool10, mixed5a, mixed5b`.\n", + "\n", + "You can learn more about GoogLeNet in the [paper](https://www.cs.unc.edu/~wliu/papers/GoogLeNet.pdf). You can also find visualizations of all neurons in mixed3a-mixed5b [here](https://distill.pub/2017/feature-visualization/appendix/)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Deep dream\n", + "Maximize \"interestingness\" of a given layer. See Mordvintsev et al., 2015.\n", + "The deep dream objective in Lucid maximizes the squared activation of every channels in the layer jointly." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "512 93273.055\n" + ] + }, + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "obj = objectives.deepdream(\"mixed4a\")\n", + "_ = render.render_vis(model, obj)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Depending on the layer, the result of every channel optimized jointly may be more or less similar:" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 166 + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 9883, + "status": "ok", + "timestamp": 1520528085592, + "user": { + "displayName": "", + "photoUrl": "", + "userId": "" + }, + "user_tz": 480 + }, + "id": "CLDYzkKoRQtw", + "outputId": "47739b06-c868-4627-924c-dc28ada359d2" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "512 78209.99\n" + ] + }, + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "channel = lambda n: objectives.channel(\"mixed4a\", n)\n", + "layer_depth = model.get_layer(\"mixed4a\").depth\n", + "obj = sum([channel(k) for k in range(layer_depth)])\n", + "_ = render.render_vis(model, obj)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Logit visualisation\n", + "Just as we can visualize convolutional layers, it is possible to visualize what activates the most a logit of a given class.\n", + "\n", + "To do that we have to set the dimensions of the image to the standard input size of the nerwork, (229\\*229 for InceptionV1) because the default value is 128\\*128 and produces dimension errors. Moreover, the set of transformations used for transformation robustness has to keep the dimensions constant.\n", + "\n", + "By default, Lucid does 5 successive image transformation which can cause dimension errors. That's why we define a set of transformations that add at the end of these transformations a padding tp input dimensions to keep the dimensions constant." + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [], + "source": [ + "param_funct = lambda: param.image(299)\n", + "transforms_set = [transformation for transformation in transform.standard_transforms]\n", + "transforms_set.append(transform.crop_or_pad_to(224,224))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's optimize the activation of logit 208 corresponding to the class Labrador retriever. See https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a for index-class mapping. \n", + "\n", + "As explained in https://distill.pub/2017/feature-visualization/, optimizing pre-softmax logits produces images of better visual quality than optimizing softmax directly. We can verify this by first optimizing the softmax layer then the pre-softmax layer." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "512 0.9999856\n" + ] + }, + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "layer_name = \"softmax2\"\n", + "obj = objectives.class_logit(layer_name, 208) \n", + "_ = render.render_vis(model, obj, param_funct, transforms=transforms_set)" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "512 174.55798\n" + ] + }, + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "layer_name = \"softmax2_pre_activation\"\n", + "obj = objectives.class_logit(layer_name, 208) \n", + "_ = render.render_vis(model, obj, param_funct, transforms=transforms_set)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We barely see something in the first image but in the second image we can distinguish dog hair and some snout." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Directions\n", + "Let's consider that each channel can be seen as a basis vector of an activation space. Therefore every possible linear combinations of channels represent a vector in this activation space. That's why we can also visualize a random direction in this activation space and not just basis vectors." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Direction objective\n", + "The direction objective visualize a layer along the given direction. \n", + "\n", + "Let $nb\\_channels$ be the number of channels for the layer $layer\\_name$ and $n$ a unit index in $[0,nb\\_channels[$.\n", + "The method are equivalents for visualizing a single channel : \n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "512 822.14716\n" + ] + }, + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "512 807.95703\n" + ] + }, + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "layer_depth = model.get_layer(\"mixed4a\").depth\n", + "channel_index = 18\n", + "\n", + "# First method\n", + "obj_1 = objectives.direction(\"mixed4a\", np.eye(layer_depth)[channel_index])\n", + "_ = render.render_vis(model, obj_1)\n", + "\n", + "# Second method\n", + "obj_2 = objectives.channel(\"mixed4a\", channel_index)\n", + "_ = render.render_vis(model, obj_2)\n", + "\n", + "# Third method\n", + "_ = render.render_vis(model, \"mixed4a:\"+str(channel_index))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.9" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} From 
27fb86784b1bc1f03c6f1deef3feebbabb66f0b5 Mon Sep 17 00:00:00 2001 From: Thomas Constum Date: Tue, 16 Jun 2020 10:44:24 +0000 Subject: [PATCH 17/57] adding a module showing how to import a Keras Model --- lucid/recipes/keras_model_import_example.py | 29 +++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 lucid/recipes/keras_model_import_example.py diff --git a/lucid/recipes/keras_model_import_example.py b/lucid/recipes/keras_model_import_example.py new file mode 100644 index 00000000..fd7c5d6e --- /dev/null +++ b/lucid/recipes/keras_model_import_example.py @@ -0,0 +1,29 @@ +""" + +This module show how to import a Keras Model into Lucid. +In this case, it is the Mobilenet architecture available with Keras Applications + +""" + +import tensorflow as tf +from lucid.modelzoo.vision_models import Model as LucidModel + +with tf.keras.backend.get_session().as_default(): + tf.keras.backend.set_learning_phase(0) + + model = tf.keras.applications.MobileNet( + include_top=True, + weights='imagenet' + ) + + # You can use suggest_save_args() to get suggestions on the metadata + # you should use for your model. + # LucidModel.suggest_save_args() + + LucidModel.save( + "keras_mobilenet.pb", + image_shape=[224, 224, 3], + input_name='input', + output_names=['softmax/Softmax'], + image_value_range=[-1,1] + ) From 7f00d77753a1a30fc6e2eed0726c789ef0b572b5 Mon Sep 17 00:00:00 2001 From: Thomas Constum Date: Tue, 16 Jun 2020 10:45:34 +0000 Subject: [PATCH 18/57] adding a notebook for spritemaps generation --- .../jupyter_versions/SpritemapGenerator.ipynb | 294 ++++++++++++++++++ 1 file changed, 294 insertions(+) create mode 100644 notebooks/building-blocks/jupyter_versions/SpritemapGenerator.ipynb diff --git a/notebooks/building-blocks/jupyter_versions/SpritemapGenerator.ipynb b/notebooks/building-blocks/jupyter_versions/SpritemapGenerator.ipynb new file mode 100644 index 00000000..678dbccc --- /dev/null +++ b/notebooks/building-blocks/jupyter_versions/SpritemapGenerator.ipynb @@ -0,0 +1,294 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "JndnmDMp66FL" + }, + "source": [ + "Licensed under the Apache License, Version 2.0 (the \"License\");" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "cellView": "both", + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "hMqWDc_m6rUC" + }, + "outputs": [], + "source": [ + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "pNqKk1MmrakH" + }, + "source": [ + "# Spritemap generation -- Building Blocks of Interpretability\n", + "\n", + "This colab notebook is part of our **Building Blocks of Intepretability** series exploring how intepretability techniques combine together to explain neural networks. 
If you haven't already, make sure to look at the [**corresponding paper**](https://distill.pub/2018/building-blocks) as well!\n", + "\n", + "This notebook allow to generate spritemaps used in the notebooks *Semantic dictionnary* and *Channel attribution*. This notebook is meant to be used with Jupyter lab or Jupyter notebook only." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 102, + "output_extras": [ + {} + ] + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 15116, + "status": "ok", + "timestamp": 1520312194763, + "user": { + "displayName": "", + "photoUrl": "", + "userId": "" + }, + "user_tz": 480 + }, + "id": "AA17rJBLuyYH", + "outputId": "3acd867e-4fc2-4369-8684-cbdcd3f70c7d" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/usr/local/lib/python3.6/dist-packages/sklearn/utils/deprecation.py:143: FutureWarning: The sklearn.decomposition.base module is deprecated in version 0.22 and will be removed in version 0.24. The corresponding classes / functions should instead be imported from sklearn.decomposition. Anything that cannot be imported from sklearn.decomposition is now part of the private API.\n", + " warnings.warn(message, FutureWarning)\n" + ] + } + ], + "source": [ + "from math import ceil\n", + "\n", + "import numpy as np\n", + "\n", + "import tensorflow as tf\n", + "# uncomment to avoid deprecation warnings :\n", + "from tensorflow.python.util import deprecation\n", + "deprecation._PRINT_DEPRECATION_WARNINGS = False\n", + "tf.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n", + "\n", + "import lucid.modelzoo.vision_models as models\n", + "from lucid.optvis import render, objectives, transform, param\n", + "from lucid.misc.channel_reducer import ChannelReducer\n", + "from lucid.misc.io import show, load, save" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "data": { + "text/plain": [ + "(Layer (belonging to InceptionV1) ([{'conv'}]),\n", + " Layer (belonging to InceptionV1) ([{'conv'}]),\n", + " Layer (belonging to InceptionV1) ([{'conv'}]),\n", + " Layer (belonging to InceptionV1) ([{'conv'}]),\n", + " Layer (belonging to InceptionV1) ([{'conv'}]),\n", + " Layer (belonging to InceptionV1) ([{'conv'}]),\n", + " Layer (belonging to InceptionV1) ([{'conv'}]),\n", + " Layer (belonging to InceptionV1) ([{'conv'}]),\n", + " Layer (belonging to InceptionV1) ([{'conv'}]),\n", + " Layer (belonging to InceptionV1) ([{'conv'}]),\n", + " Layer (belonging to InceptionV1) ([{'conv'}]),\n", + " Layer (belonging to InceptionV1) ([{'conv'}]),\n", + " Layer (belonging to InceptionV1) ([{'conv'}]),\n", + " Layer (belonging to InceptionV1) ([{'dense'}]),\n", + " Layer (belonging to InceptionV1) ([{'dense'}]),\n", + " Layer (belonging to InceptionV1) ([{'conv'}]),\n", + " Layer (belonging to InceptionV1) ([{'dense'}]),\n", + " Layer (belonging to InceptionV1) ([{'dense'}]),\n", + " Layer (belonging to InceptionV1) ([{'dense'}]))" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "model = models.InceptionV1()\n", + "model.layers" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Layer 6 - Mixed 4d" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "def 
create_spritemap(model, layer_name, sprite_size, row_size, column_size=24):\n", + " param_funct = lambda : param.image(sprite_size)\n", + " \n", + " height = column_size * sprite_size\n", + " width = row_size * sprite_size\n", + " \n", + " neuron_sprite_map_array = np.full((height,width,3), 0.9, dtype=\"single\")\n", + " channel_sprite_map_array = np.full((height,width,3), 0.9, dtype=\"single\")\n", + " \n", + " for i in range(column_size):\n", + " top_left_y = sprite_size * i\n", + " for j in range(row_size):\n", + " top_left_x = sprite_size * j\n", + " unit_index = i*row_size+j\n", + " \n", + " neuron_objectif_f = objectives.neuron(layer_name, channel_n=unit_index)\n", + " neuron_feature_viz = render.render_vis(model, neuron_objectif_f, param_funct, verbose=False)\n", + " neuron_sprite_map_array[top_left_y : top_left_y+sprite_size,\n", + " top_left_x : top_left_x+sprite_size,:] = neuron_feature_viz[0][0]\n", + " \n", + " channel_objectif_f = layer_name + \":\" + str(unit_index) \n", + " channel_feature_viz = render.render_vis(model, channel_objectif_f, param_funct, verbose=False)\n", + " channel_sprite_map_array[top_left_y : top_left_y+sprite_size,\n", + " top_left_x : top_left_x+sprite_size,:] = channel_feature_viz[0][0]\n", + " \n", + " return neuron_sprite_map_array, channel_sprite_map_array" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "mixed3a 256\n" + ] + }, + { + "ename": "NameError", + "evalue": "name 'neuron_sprite_map_nam' is not defined", + "output_type": "error", + "traceback": [ + "\u001b[0;31m\u001b[0m", + "\u001b[0;31mNameError\u001b[0mTraceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0mneuron_sprite_map_array\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mchannel_sprite_map_array\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcreate_spritemap\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlayer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msprite_size\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrow_size\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcolumn_size\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 11\u001b[0;31m \u001b[0mneuron_sprite_map_nam\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 12\u001b[0m \u001b[0msave\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mneuron_sprite_map_array\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"./spritemaps/spritemap_neuron_\"\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0mlayer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msplit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"/\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;34m\".jpeg\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0msave\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mchannel_sprite_map_array\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"./spritemaps/spritemap_channel_\"\u001b[0m \u001b[0;34m+\u001b[0m 
\u001b[0mlayer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msplit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"/\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;34m\".jpeg\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mNameError\u001b[0m: name 'neuron_sprite_map_nam' is not defined" + ] + } + ], + "source": [ + "column_size = 24\n", + "sprite_size = 110\n", + "\n", + "for layer in model.layers[3:12]:\n", + " print(layer.name, layer.depth)\n", + " row_size = ceil(model.get_layer(layer.name).depth / column_size)\n", + " \n", + " neuron_sprite_map_array, channel_sprite_map_array = create_spritemap(model, layer.name, sprite_size, row_size, column_size)\n", + " \n", + " save(neuron_sprite_map_array, \"./spritemaps/spritemap_neuron_\" + layer.name.split(\"/\")[0] + \".jpeg\")\n", + " save(channel_sprite_map_array, \"./spritemaps/spritemap_channel_\" + layer.name.split(\"/\")[0] + \".jpeg\")\n", + " \n", + " # we use split(\"/\") to avoid path problems with layer names containing \"/\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "hide_input": false, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.9" + }, + "varInspector": { + "cols": { + "lenName": 16, + "lenType": 16, + "lenVar": 40 + }, + "kernels_config": { + "python": { + "delete_cmd_postfix": "", + "delete_cmd_prefix": "del ", + "library": "var_list.py", + "varRefreshCmd": "print(var_dic_list())" + }, + "r": { + "delete_cmd_postfix": ") ", + "delete_cmd_prefix": "rm(", + "library": "var_list.r", + "varRefreshCmd": "cat(var_dic_list()) " + } + }, + "types_to_exclude": [ + "module", + "function", + "builtin_function_or_method", + "instance", + "_Feature" + ], + "window_display": false + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} From 1fba450faf0f481f945342dd815e4a96e7d14eb1 Mon Sep 17 00:00:00 2001 From: Thomas Constum Date: Tue, 16 Jun 2020 10:56:00 +0000 Subject: [PATCH 19/57] adding a license to the created python files --- lucid/misc/custom_model.py | 15 +++++++++++++++ lucid/recipes/keras_model_import_example.py | 16 ++++++++++++++++ 2 files changed, 31 insertions(+) diff --git a/lucid/misc/custom_model.py b/lucid/misc/custom_model.py index f959d46a..0ff69536 100644 --- a/lucid/misc/custom_model.py +++ b/lucid/misc/custom_model.py @@ -1,3 +1,18 @@ +# Copyright 2018 The Lucid Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + from lucid.modelzoo.vision_base import Model, _layers_from_list_of_dicts diff --git a/lucid/recipes/keras_model_import_example.py b/lucid/recipes/keras_model_import_example.py index fd7c5d6e..0652b058 100644 --- a/lucid/recipes/keras_model_import_example.py +++ b/lucid/recipes/keras_model_import_example.py @@ -1,3 +1,19 @@ + +# Copyright 2018 The Lucid Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + """ This module show how to import a Keras Model into Lucid. From 9c34f4f3d306ed8d276379687919d006d326c5bb Mon Sep 17 00:00:00 2001 From: Thomas Constum Date: Tue, 16 Jun 2020 11:02:15 +0000 Subject: [PATCH 20/57] changing the path of uploaded file to a relative path --- .../jupyter_versions/ActivationGridJupyter.ipynb | 2 +- .../building-blocks/jupyter_versions/AttrSpatialJupyter.ipynb | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/notebooks/building-blocks/jupyter_versions/ActivationGridJupyter.ipynb b/notebooks/building-blocks/jupyter_versions/ActivationGridJupyter.ipynb index 0b797931..a1994ef1 100644 --- a/notebooks/building-blocks/jupyter_versions/ActivationGridJupyter.ipynb +++ b/notebooks/building-blocks/jupyter_versions/ActivationGridJupyter.ipynb @@ -477,7 +477,7 @@ " with open(picture_name, 'wb') as f:\n", " f.write(content)\n", "else: # use files already on the server\n", - " picture_full_path = os.path.join(notebooks_root_path,fc.selected)\n", + " picture_full_path = fc.selected\n", " \n", "selected_layers = layers_widget.value # layers to apply activation grid on\n", "\n", diff --git a/notebooks/building-blocks/jupyter_versions/AttrSpatialJupyter.ipynb b/notebooks/building-blocks/jupyter_versions/AttrSpatialJupyter.ipynb index c4a8ce4c..19d11ccc 100644 --- a/notebooks/building-blocks/jupyter_versions/AttrSpatialJupyter.ipynb +++ b/notebooks/building-blocks/jupyter_versions/AttrSpatialJupyter.ipynb @@ -885,7 +885,7 @@ " with open(picture_name, 'wb') as f:\n", " f.write(content)\n", "else: # use files already on the server\n", - " picture_path = fc.default_filename\n", + " picture_path = fc.selected\n", " \n", "layer_name_1 = layers_widget.value # layers to use semantic dictionnary on\n", "layer_name_2 = layers_widget_bis.value # layers to use semantic dictionnary on\n", From d74dd9321bcec6c9138e9dc917f89e444e7a9c60 Mon Sep 17 00:00:00 2001 From: Thomas Constum Date: Tue, 16 Jun 2020 15:56:41 +0000 Subject: [PATCH 21/57] refactoring of imports + reloading of every notebooks in order to contain every outputs --- .../building-blocks/SemanticDictionary.ipynb | 1048 ++++++++++++++--- .../ActivationGridJupyter.ipynb | 341 +++--- .../jupyter_versions/AttrChannelJupyter.ipynb | 236 ++-- .../jupyter_versions/AttrSpatialJupyter.ipynb | 905 +++++++++++++- .../SemanticDictionaryJupyter.ipynb | 65 +- .../jupyter_versions/SpritemapGenerator.ipynb | 79 +- 6 files changed, 2094 
insertions(+), 580 deletions(-) diff --git a/notebooks/building-blocks/SemanticDictionary.ipynb b/notebooks/building-blocks/SemanticDictionary.ipynb index 5ba44fa8..46bbfb5c 100644 --- a/notebooks/building-blocks/SemanticDictionary.ipynb +++ b/notebooks/building-blocks/SemanticDictionary.ipynb @@ -14,7 +14,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 1, "metadata": { "cellView": "both", "colab": { @@ -89,7 +89,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 2, "metadata": { "colab": { "autoexec": { @@ -121,7 +121,7 @@ "source": [ "# !pip install --quiet --upgrade-strategy=only-if-needed git+https://github.com/tensorflow/lucid.git\n", "# !npm install -g svelte-cli@2.2.0\n", - "%tensorflow_version 1.x\n", + "# %tensorflow_version 1.x\n", "\n", "import numpy as np\n", "import tensorflow as tf\n", @@ -158,7 +158,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 3, "metadata": { "colab": { "autoexec": { @@ -192,9 +192,17 @@ "output_type": "stream", "text": [ "Trying to build svelte component from html...\n", - "svelte compile --format iife /tmp/svelte_wdjk7ptl/SemanticDict_66496726_d99e_4104_8df6_99fd6104c2e6.html > /tmp/svelte_wdjk7ptl/SemanticDict_66496726_d99e_4104_8df6_99fd6104c2e6.js\n", - "Svelte build failed! Output:\n", - "/bin/sh: 1: svelte: not found\n", + "svelte compile --format iife /tmp/svelte_ae67hzla/SemanticDict_b3405b79_742c_4b9b_9aba_c16fbcd2ead8.html > /tmp/svelte_ae67hzla/SemanticDict_b3405b79_742c_4b9b_9aba_c16fbcd2ead8.js\n", + "b'svelte version 1.64.1\\ncompiling ../../../../../../tmp/svelte_ae67hzla/SemanticDict_b3405b79_742c_4b9b_9aba_c16fbcd2ead8.html...\\n'\n", + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/io/reading.py:124: The name tf.gfile.Open is deprecated. Please use tf.io.gfile.GFile instead.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/io/reading.py:124: The name tf.gfile.Open is deprecated. Please use tf.io.gfile.GFile instead.\n", "\n" ] } @@ -335,7 +343,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 4, "metadata": { "colab": { "autoexec": { @@ -380,7 +388,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 5, "metadata": { "colab": { "autoexec": { @@ -446,7 +454,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 6, "metadata": { "colab": { "autoexec": { @@ -474,50 +482,63 @@ "id": "MEWC-UKdqRGC", "outputId": "096bc577-d5d9-4c48-a914-411b8e77b03a" }, - "outputs": [], - "source": [ - "googlenet_semantic_dict(\"mixed4d\", \"https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "autoexec": { - "startup": false, - "wait_interval": 0 - }, - "base_uri": "https://localhost:8080/", - "height": 265, - "output_extras": [ - {} + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/modelzoo/vision_base.py:192: The name tf.get_default_graph is deprecated. 
Please use tf.compat.v1.get_default_graph instead.\n", + "\n" ] }, - "colab_type": "code", - "executionInfo": { - "elapsed": 1259, - "status": "ok", - "timestamp": 1520312338047, - "user": { - "displayName": "", - "photoUrl": "", - "userId": "" - }, - "user_tz": 480 + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/modelzoo/vision_base.py:192: The name tf.get_default_graph is deprecated. Please use tf.compat.v1.get_default_graph instead.\n", + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/modelzoo/util.py:58: The name tf.placeholder_with_default is deprecated. Please use tf.compat.v1.placeholder_with_default instead.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/modelzoo/util.py:58: The name tf.placeholder_with_default is deprecated. Please use tf.compat.v1.placeholder_with_default instead.\n", + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/io/loading.py:129: The name tf.GraphDef is deprecated. Please use tf.compat.v1.GraphDef instead.\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/io/loading.py:129: The name tf.GraphDef is deprecated. Please use tf.compat.v1.GraphDef instead.\n", + "\n" + ] }, - "id": "Izf_YqCRe6E7", - "outputId": "ed2b0826-52b4-4714-e019-773162c2b170" - }, - "outputs": [ { "data": { "text/html": [ "\n", - "
\n", + "
\n", " \n", " \n", " " @@ -1196,9 +1205,720 @@ "" ] }, - "metadata": { - "tags": [] + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "googlenet_semantic_dict(\"mixed4d\", \"https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png\")" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + }, + "base_uri": "https://localhost:8080/", + "height": 265, + "output_extras": [ + {} + ] + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 1259, + "status": "ok", + "timestamp": 1520312338047, + "user": { + "displayName": "", + "photoUrl": "", + "userId": "" + }, + "user_tz": 480 + }, + "id": "Izf_YqCRe6E7", + "outputId": "ed2b0826-52b4-4714-e019-773162c2b170" + }, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " + ], + "text/plain": [ + "" + ] }, + "metadata": {}, "output_type": "display_data" } ], diff --git a/notebooks/building-blocks/jupyter_versions/ActivationGridJupyter.ipynb b/notebooks/building-blocks/jupyter_versions/ActivationGridJupyter.ipynb index a1994ef1..17487446 100644 --- a/notebooks/building-blocks/jupyter_versions/ActivationGridJupyter.ipynb +++ b/notebooks/building-blocks/jupyter_versions/ActivationGridJupyter.ipynb @@ -87,6 +87,14 @@ "outputId": "eebf5d52-d154-4505-9dac-4e7f7fa22862" }, "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[33mWARNING: You are using pip version 19.3.1; however, version 20.1.1 is available.\r\n", + "You should consider upgrading via the 'pip install --upgrade pip' command.\u001b[0m\r\n" + ] + }, { "name": "stderr", "output_type": "stream", @@ -98,14 +106,17 @@ ], "source": [ "# !pip install --quiet --upgrade-strategy=only-if-needed git+https://github.com/tensorflow/lucid.git\n", + "!pip install ipyfilechooser ipywidgets --quiet\n", "\n", "import os\n", "\n", "import numpy as np\n", "import tensorflow as tf\n", - "assert tf.__version__.startswith('1')\n", + "# uncomment to avoid deprecation warnings :\n", + "from tensorflow.python.util import deprecation\n", + "deprecation._PRINT_DEPRECATION_WARNINGS = False\n", + "tf.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n", "\n", - "import numpy as np\n", "from ipyfilechooser import FileChooser\n", "import ipywidgets as widgets\n", "from IPython.display import display\n", @@ -117,7 +128,6 @@ "import lucid.optvis.render as render\n", "import lucid.optvis.transform as transform\n", "from lucid.misc.channel_reducer import ChannelReducer\n", - "\n", "from lucid.misc.io import show, load, save" ] }, @@ -271,7 +281,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "abf1085b362641ff976c107ad3d07cfe", + "model_id": "2c835bf8a1bb4dcbaabff952e21403c9", "version_major": 2, "version_minor": 0 }, @@ -294,7 +304,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "e6899e5d06584531bc03feb2a15d7f72", + "model_id": "06a397450add468ba16fa1c030230bf4", "version_major": 2, "version_minor": 0 }, @@ -316,7 +326,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "0eb83b63262f4cc989e7112e8bfd8b69", + "model_id": "e5b56c0036634af7bc3a0671c5e99424", "version_major": 2, "version_minor": 0 }, @@ -338,7 +348,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "65555b4859614780bb755d924f7bab9f", + "model_id": "7a770f831eb9406b84daf4292d1dfe5f", "version_major": 2, "version_minor": 0 }, @@ -352,7 +362,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "2da924d3894e426bb0a7682dde81f0a6", + "model_id": "aaa82a76294745c1bfcfbb046eecdc27", "version_major": 2, "version_minor": 0 }, @@ -366,7 +376,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "8dc6cc97a3c5433daf64d922043404ef", + "model_id": "e3c3f1849d9149e29dc8fefedd6cfa24", "version_major": 2, "version_minor": 0 }, @@ -434,25 +444,9 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 11, "metadata": {}, "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/io/reading.py:124: The name tf.gfile.Open is deprecated. 
Please use tf.io.gfile.GFile instead.\n", - "\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/io/reading.py:124: The name tf.gfile.Open is deprecated. Please use tf.io.gfile.GFile instead.\n", - "\n" - ] - }, { "data": { "text/html": [ @@ -496,196 +490,167 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 14, "metadata": {}, "outputs": [], "source": [ - "n_steps = 64#2048\n", - "n_groups = 12\n", + "n_steps = 512#2048\n", + "n_groups = 6\n", "grid_resolution = 672 # the total resolution of the activation grid\n", - "W = 12" + "W = 48" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 15, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "mixed3a\n", - "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/modelzoo/vision_base.py:192: The name tf.get_default_graph is deprecated. Please use tf.compat.v1.get_default_graph instead.\n", - "\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/modelzoo/vision_base.py:192: The name tf.get_default_graph is deprecated. Please use tf.compat.v1.get_default_graph instead.\n", - "\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/modelzoo/util.py:58: The name tf.placeholder_with_default is deprecated. Please use tf.compat.v1.placeholder_with_default instead.\n", - "\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/modelzoo/util.py:58: The name tf.placeholder_with_default is deprecated. Please use tf.compat.v1.placeholder_with_default instead.\n", - "\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/io/loading.py:129: The name tf.GraphDef is deprecated. Please use tf.compat.v1.GraphDef instead.\n", - "\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/io/loading.py:129: The name tf.GraphDef is deprecated. Please use tf.compat.v1.GraphDef instead.\n", - "\n" + "mixed4d\n", + "(196, 6)\n" ] }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "(784, 12)\n", - "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/param/spatial.py:82: The name tf.spectral.irfft2d is deprecated. Please use tf.signal.irfft2d instead.\n", - "\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/usr/local/lib/python3.6/dist-packages/sklearn/decomposition/_nmf.py:1077: ConvergenceWarning: Maximum number of iterations 200 reached. Increase it to improve convergence.\n", - " \" improve convergence.\" % max_iter, ConvergenceWarning)\n", - "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/param/spatial.py:82: The name tf.spectral.irfft2d is deprecated. Please use tf.signal.irfft2d instead.\n", - "\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/render.py:242: The name tf.train.AdamOptimizer is deprecated. 
Please use tf.compat.v1.train.AdamOptimizer instead.\n", - "\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/render.py:242: The name tf.train.AdamOptimizer is deprecated. Please use tf.compat.v1.train.AdamOptimizer instead.\n", - "\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/render.py:170: The name tf.train.get_or_create_global_step is deprecated. Please use tf.compat.v1.train.get_or_create_global_step instead.\n", - "\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/render.py:170: The name tf.train.get_or_create_global_step is deprecated. Please use tf.compat.v1.train.get_or_create_global_step instead.\n", - "\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/render.py:171: The name tf.variables_initializer is deprecated. Please use tf.compat.v1.variables_initializer instead.\n", - "\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/render.py:171: The name tf.variables_initializer is deprecated. Please use tf.compat.v1.variables_initializer instead.\n", - "\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/transform.py:38: The name tf.random_crop is deprecated. Please use tf.image.random_crop instead.\n", - "\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/transform.py:38: The name tf.random_crop is deprecated. Please use tf.image.random_crop instead.\n", - "\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/transform.py:83: The name tf.image.resize_bilinear is deprecated. Please use tf.compat.v1.image.resize_bilinear instead.\n", - "\n" - ] + "data": { + "text/html": [ + "
\n", + " 0
\n", + " \n", + "
\n", + " 1
\n", + " \n", + "
\n", + " 2
\n", + " \n", + "
\n", + " 3
\n", + " \n", + "
\n", + " 4
\n", + " \n", + "
\n", + " 5
\n", + " \n", + "
\n", + " 6
\n", + " \n", + "
\n", + " 7
\n", + " \n", + "
\n", + " 8
\n", + " \n", + "
\n", + " 9
\n", + " \n", + "
\n", + " 10
\n", + " \n", + "
\n", + " 11
\n", + " \n", + "
\n", + " 12
\n", + " \n", + "
\n", + " 13
\n", + " \n", + "
\n", + " 14
\n", + " \n", + "
\n", + " 15
\n", + " \n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" }, { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/optvis/transform.py:83: The name tf.image.resize_bilinear is deprecated. Please use tf.compat.v1.image.resize_bilinear instead.\n", - "\n" - ] + "data": { + "text/html": [ + "
\n", + " 0
\n", + " \n", + "
\n", + " 1
\n", + " \n", + "
\n", + " 2
\n", + " \n", + "
\n", + " 3
\n", + " \n", + "
\n", + " 4
\n", + " \n", + "
\n", + " 5
\n", + " \n", + "
\n", + " 6
\n", + " \n", + "
\n", + " 7
\n", + " \n", + "
\n", + " 8
\n", + " \n", + "
\n", + " 9
\n", + " \n", + "
\n", + " 10
\n", + " \n", + "
\n", + " 11
\n", + " \n", + "
\n", + " 12
\n", + " \n", + "
\n", + " 13
\n", + " \n", + "
\n", + " 14
\n", + " \n", + "
\n", + " 15
\n", + " \n", + "
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ - "WARNING:tensorflow:\n", - "The TensorFlow contrib module will not be included in TensorFlow 2.0.\n", - "For more information, please see:\n", - " * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md\n", - " * https://github.com/tensorflow/addons\n", - " * https://github.com/tensorflow/io (for I/O related ops)\n", - "If you depend on functionality not listed there, please file an issue.\n", "\n" ] }, { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING:tensorflow:\n", - "The TensorFlow contrib module will not be included in TensorFlow 2.0.\n", - "For more information, please see:\n", - " * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md\n", - " * https://github.com/tensorflow/addons\n", - " * https://github.com/tensorflow/io (for I/O related ops)\n", - "If you depend on functionality not listed there, please file an issue.\n", - "\n" - ] + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" } ], "source": [ diff --git a/notebooks/building-blocks/jupyter_versions/AttrChannelJupyter.ipynb b/notebooks/building-blocks/jupyter_versions/AttrChannelJupyter.ipynb index a7752e59..d35d6f18 100644 --- a/notebooks/building-blocks/jupyter_versions/AttrChannelJupyter.ipynb +++ b/notebooks/building-blocks/jupyter_versions/AttrChannelJupyter.ipynb @@ -104,22 +104,25 @@ "name": "stdout", "output_type": "stream", "text": [ - "\u001b[33mWARNING: You are using pip version 19.3.1; however, version 20.1.1 is available.\n", - "You should consider upgrading via the 'pip install --upgrade pip' command.\u001b[0m\n" + "\u001b[33mWARNING: You are using pip version 19.3.1; however, version 20.1.1 is available.\r\n", + "You should consider upgrading via the 'pip install --upgrade pip' command.\u001b[0m\r\n" ] } ], "source": [ "# !npm install -g svelte-cli@2.2.0\n", - "!pip install ipyfilechooser --quiet\n", + "!pip install ipyfilechooser ipywidgets --quiet\n", "\n", "import numpy as np\n", "import tensorflow as tf\n", + "# uncomment to avoid deprecation warnings :\n", + "from tensorflow.python.util import deprecation\n", + "deprecation._PRINT_DEPRECATION_WARNINGS = False\n", + "tf.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n", "\n", "from ipyfilechooser import FileChooser\n", "import ipywidgets as widgets\n", "from IPython.core.display import display, HTML\n", - "from pathlib import Path\n", "\n", "import lucid.modelzoo.vision_models as models\n", "from lucid.misc.io import show\n", @@ -164,24 +167,7 @@ "colab_type": "code", "id": "xIDcG0vjaDtk" }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/io/reading.py:124: The name tf.gfile.Open is deprecated. Please use tf.io.gfile.GFile instead.\n", - "\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/io/reading.py:124: The name tf.gfile.Open is deprecated. 
Please use tf.io.gfile.GFile instead.\n", - "\n" - ] - } - ], + "outputs": [], "source": [ "labels_str = read(\"https://gist.githubusercontent.com/aaronpolhamus/964a4411c0906315deb9f4a3723aac57/raw/aa66dd9dbf6b56649fa3fab83659b2acbf3cbfd1/map_clsloc.txt\",mode='r')\n", "labels = [line[line.find(\" \"):].strip() for line in labels_str.split(\"\\n\")]\n", @@ -237,8 +223,8 @@ "output_type": "stream", "text": [ "Trying to build svelte component from html...\n", - "svelte compile --format iife /tmp/svelte_twnny2bj/ChannelAttrWidget_2c0e7508_1aff_491a_98dd_2044d7a4a6ee.html > /tmp/svelte_twnny2bj/ChannelAttrWidget_2c0e7508_1aff_491a_98dd_2044d7a4a6ee.js\n", - "b'svelte version 1.64.1\\ncompiling ../../../../../../../tmp/svelte_twnny2bj/ChannelAttrWidget_2c0e7508_1aff_491a_98dd_2044d7a4a6ee.html...\\n'\n" + "svelte compile --format iife /tmp/svelte_kcad2_h2/ChannelAttrWidget_dc20ba62_58d4_4050_b0d2_0a567afed2fd.html > /tmp/svelte_kcad2_h2/ChannelAttrWidget_dc20ba62_58d4_4050_b0d2_0a567afed2fd.js\n", + "b'svelte version 1.64.1\\ncompiling ../../../../../../../tmp/svelte_kcad2_h2/ChannelAttrWidget_dc20ba62_58d4_4050_b0d2_0a567afed2fd.html...\\n'\n" ] } ], @@ -352,8 +338,8 @@ "output_type": "stream", "text": [ "Trying to build svelte component from html...\n", - "svelte compile --format iife /tmp/svelte_twnny2bj/BarsWidget_2f4d23a9_8b29_42bf_b86f_fc6b0f5697d7.html > /tmp/svelte_twnny2bj/BarsWidget_2f4d23a9_8b29_42bf_b86f_fc6b0f5697d7.js\n", - "b'svelte version 1.64.1\\ncompiling ../../../../../../../tmp/svelte_twnny2bj/BarsWidget_2f4d23a9_8b29_42bf_b86f_fc6b0f5697d7.html...\\n'\n" + "svelte compile --format iife /tmp/svelte_kcad2_h2/BarsWidget_9f40af41_fd2e_490a_bdc9_8f63f2c397b6.html > /tmp/svelte_kcad2_h2/BarsWidget_9f40af41_fd2e_490a_bdc9_8f63f2c397b6.js\n", + "b'svelte version 1.64.1\\ncompiling ../../../../../../../tmp/svelte_kcad2_h2/BarsWidget_9f40af41_fd2e_490a_bdc9_8f63f2c397b6.html...\\n'\n" ] } ], @@ -552,7 +538,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "51ddf7efbd8748caa06b0128ef997fc7", + "model_id": "ee940f09767d4f15910d050a7d0953d6", "version_major": 2, "version_minor": 0 }, @@ -575,7 +561,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "d7a19b5bf12440f697f5badfd19d4d73", + "model_id": "98c40181a1f94ece8a9fbb108cd06089", "version_major": 2, "version_minor": 0 }, @@ -597,7 +583,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "c8c34f45157844d196a215be30b4805d", + "model_id": "b38f5d1dd0a74ff486bd8b0135ccd01f", "version_major": 2, "version_minor": 0 }, @@ -619,7 +605,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "cfbd61064df7424bbc8f0c336e7d214b", + "model_id": "9e19d42a0d2b4c82a23ad67c905ce457", "version_major": 2, "version_minor": 0 }, @@ -641,7 +627,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "4b225755301349f0b0295024ee248273", + "model_id": "ab3344b68cad4bc8b921f3b02e6c9389", "version_major": 2, "version_minor": 0 }, @@ -655,7 +641,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "3f7d510febae44099b3cd0e401339b91", + "model_id": "dee1f1ad0cbd479182e461b622751d87", "version_major": 2, "version_minor": 0 }, @@ -799,38 +785,6 @@ "metadata": {}, "output_type": "display_data" }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/modelzoo/vision_base.py:192: The name tf.get_default_graph is deprecated. 
Please use tf.compat.v1.get_default_graph instead.\n", - "\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/modelzoo/vision_base.py:192: The name tf.get_default_graph is deprecated. Please use tf.compat.v1.get_default_graph instead.\n", - "\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/io/loading.py:129: The name tf.GraphDef is deprecated. Please use tf.compat.v1.GraphDef instead.\n", - "\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING:tensorflow:From /home/joyvan/.local/lib/python3.6/site-packages/lucid/misc/io/loading.py:129: The name tf.GraphDef is deprecated. Please use tf.compat.v1.GraphDef instead.\n", - "\n" - ] - }, { "name": "stdout", "output_type": "stream", @@ -843,10 +797,10 @@ "data": { "text/html": [ "\n", - "
\n", + "
\n", " \n", " \n", " " @@ -1205,10 +1159,10 @@ "data": { "text/html": [ "\n", - "
\n", + "
\n", " \n", " \n", @@ -1978,10 +1932,10 @@ "data": { "text/html": [ "\n", - "
\n", + "
\n", " \n", " \n", " " @@ -2340,10 +2294,10 @@ "data": { "text/html": [ "\n", - "
\n", + "
\n", " \n", " \n", @@ -2932,10 +2886,10 @@ "data": { "text/html": [ "\n", - "
\n", + "
\n", " \n", " \n", " " @@ -3294,10 +3248,10 @@ "data": { "text/html": [ "\n", - "
\n", + "
\n", " \n", " \n", @@ -3886,10 +3840,10 @@ "data": { "text/html": [ "\n", - "
\n", + "
\n", " \n", " \n", " " @@ -4248,10 +4202,10 @@ "data": { "text/html": [ "\n", - "
\n", + "
\n", " \n", " \n", " " diff --git a/notebooks/building-blocks/jupyter_versions/AttrSpatialJupyter.ipynb b/notebooks/building-blocks/jupyter_versions/AttrSpatialJupyter.ipynb index 19d11ccc..e9d605ec 100644 --- a/notebooks/building-blocks/jupyter_versions/AttrSpatialJupyter.ipynb +++ b/notebooks/building-blocks/jupyter_versions/AttrSpatialJupyter.ipynb @@ -121,23 +121,34 @@ "id": "AA17rJBLuyYH", "outputId": "0e52d903-dbfa-4b20-ab3c-00a242061c63" }, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[33mWARNING: You are using pip version 19.3.1; however, version 20.1.1 is available.\r\n", + "You should consider upgrading via the 'pip install --upgrade pip' command.\u001b[0m\r\n" + ] + } + ], "source": [ "# !npm install -g svelte-cli@2.2.0\n", + "!pip install ipyfilechooser ipywidgets --quiet\n", + "\n", "import os\n", "\n", "import numpy as np\n", - "from ipyfilechooser import FileChooser\n", - "import ipywidgets as widgets\n", - "from IPython.display import display\n", - "from IPython.core.display import display, HTML\n", - "\n", "import tensorflow as tf\n", "# uncomment to avoid deprecation warnings :\n", "from tensorflow.python.util import deprecation\n", "deprecation._PRINT_DEPRECATION_WARNINGS = False\n", "tf.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n", "\n", + "from ipyfilechooser import FileChooser\n", + "import ipywidgets as widgets\n", + "from IPython.display import display\n", + "from IPython.core.display import display, HTML\n", + "\n", "import lucid.modelzoo.vision_models as models\n", "from lucid.misc.io import show\n", "import lucid.optvis.render as render\n", @@ -372,18 +383,8 @@ "output_type": "stream", "text": [ "Trying to build svelte component from html...\n", - "svelte compile --format iife /tmp/svelte_0f883yoj/SpatialWidget_a5fb8c7f_0cb5_45d2_912b_6f11532bd357.html > /tmp/svelte_0f883yoj/SpatialWidget_a5fb8c7f_0cb5_45d2_912b_6f11532bd357.js\n", - "Svelte build failed! 
Output:\n", - "svelte version 1.64.1\n", - "compiling ../../../../../../../tmp/svelte_0f883yoj/SpatialWidget_a5fb8c7f_0cb5_45d2_912b_6f11532bd357.html...\n", - "Identifier is expected\n", - "62: position: absolute;\n", - "63: left: 0px;\n", - "64: top: 0px; {{#replace with -14px for Jupyter Classic}}\n", - " ^\n", - "65: width: 224px;\n", - "66: }\n", - "\n" + "svelte compile --format iife /tmp/svelte_en7a7hmz/SpatialWidget_2decdfab_b9d6_4fc0_9d6a_ceebf14056e1.html > /tmp/svelte_en7a7hmz/SpatialWidget_2decdfab_b9d6_4fc0_9d6a_ceebf14056e1.js\n", + "b'svelte version 1.64.1\\ncompiling ../../../../../../../tmp/svelte_en7a7hmz/SpatialWidget_2decdfab_b9d6_4fc0_9d6a_ceebf14056e1.html...\\n(4:4) \\xe2\\x80\\x93 A11y: element should have an alt attribute\\n(5:4) \\xe2\\x80\\x93 A11y: element should have an alt attribute\\n(21:4) \\xe2\\x80\\x93 A11y: element should have an alt attribute\\n(22:4) \\xe2\\x80\\x93 A11y: element should have an alt attribute\\n'\n" ] } ], @@ -452,7 +453,8 @@ " opacity: 0.6;\n", " position: absolute;\n", " left: 0px;\n", - " top: 0px; {{#replace with -14px for Jupyter Classic}}\n", + " top: -14px;\n", + " /* replace with \"top: 0px\" for Jupyter Lab} */\n", " width: 224px;\n", " }\n", " .outer .pointer_container {\n", @@ -698,7 +700,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "c9dfc900eb6243e6a73f637f86a8205c", + "model_id": "2cd5278703bf4478b4c785183de4df9c", "version_major": 2, "version_minor": 0 }, @@ -721,7 +723,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "513fc610459445edba069c4a2b433956", + "model_id": "f5067cdac7ab47bcad1678b764b2f812", "version_major": 2, "version_minor": 0 }, @@ -743,7 +745,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "4807a0330dea4df1a28782779577d5c3", + "model_id": "90e2803133894d5f91280c4ed353f0cc", "version_major": 2, "version_minor": 0 }, @@ -765,7 +767,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "101ac87a992b4ea58ed0024b07c39401", + "model_id": "101bb74d27134564b4a8edeb7fb536e0", "version_major": 2, "version_minor": 0 }, @@ -787,7 +789,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "18c0662f132444d7ac0a5e189ca29912", + "model_id": "02e21b60f82f4eeeaa3af9efa548bf33", "version_major": 2, "version_minor": 0 }, @@ -809,7 +811,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "312c076c23ac4be7b08211e7fc2cb1b3", + "model_id": "1e0c2de160424900a7c48b4fe6bfa1d7", "version_major": 2, "version_minor": 0 }, @@ -874,7 +876,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 15, "metadata": {}, "outputs": [], "source": [ @@ -896,7 +898,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 16, "metadata": { "colab": { "base_uri": "https://localhost:8080/", @@ -920,17 +922,835 @@ "output_type": "display_data" }, { - "ename": "RuntimeError", - "evalue": "No extension in URL: ", - "output_type": "error", - "traceback": [ - "\u001b[0;31m\u001b[0m", - "\u001b[0;31mRuntimeError\u001b[0mTraceback (most recent call last)", - "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0mdisplay\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mHTML\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlegend\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 6\u001b[0;31m \u001b[0mimg\u001b[0m 
\u001b[0;34m=\u001b[0m \u001b[0mload\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpicture_path\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 7\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 8\u001b[0m \u001b[0mspatial_spatial_attr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mimg\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlayer_name_1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlayer_name_2\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhint_label_1\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mclass_name_1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhint_label_2\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mclass_name_2\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/.local/lib/python3.6/site-packages/lucid/misc/io/loading.py\u001b[0m in \u001b[0;36mload\u001b[0;34m(url_or_handle, allow_unsafe_formats, cache, **kwargs)\u001b[0m\n\u001b[1;32m 193\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0m_load_urls\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0murl_or_handle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcache\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcache\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 194\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 195\u001b[0;31m \u001b[0mext\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdecompressor_ext\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_get_extension\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0murl_or_handle\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 196\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 197\u001b[0m \u001b[0mext\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mext\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlower\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/.local/lib/python3.6/site-packages/lucid/misc/io/loading.py\u001b[0m in \u001b[0;36m_get_extension\u001b[0;34m(url_or_handle)\u001b[0m\n\u001b[1;32m 265\u001b[0m \u001b[0mdecompressor_ext\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 266\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mext\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 267\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mRuntimeError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"No extension in URL: \"\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0murl_or_handle\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 268\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mext\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdecompressor_ext\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 269\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;31mRuntimeError\u001b[0m: No extension in URL: " - ] + "data": { + "text/html": [ + "\n", + "
\n", + " \n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" } ], "source": [ @@ -941,8 +1761,15 @@ "\n", "img = load(picture_path)\n", "\n", - "spatial_spatial_attr(model, img, layer_name_1, layer_name_2, hint_label_1=class_name_1, hint_label_2=class_name_2)" + "spatial_spatial_attr(model, img, layer_name_1, layer_name_2, class_name_1, class_name_2)" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { diff --git a/notebooks/building-blocks/jupyter_versions/SemanticDictionaryJupyter.ipynb b/notebooks/building-blocks/jupyter_versions/SemanticDictionaryJupyter.ipynb index 8897c2d4..469d41f7 100644 --- a/notebooks/building-blocks/jupyter_versions/SemanticDictionaryJupyter.ipynb +++ b/notebooks/building-blocks/jupyter_versions/SemanticDictionaryJupyter.ipynb @@ -65,15 +65,15 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 2, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "\u001b[33mWARNING: You are using pip version 19.3.1; however, version 20.1.1 is available.\n", - "You should consider upgrading via the 'pip install --upgrade pip' command.\u001b[0m\n" + "\u001b[33mWARNING: You are using pip version 19.3.1; however, version 20.1.1 is available.\r\n", + "You should consider upgrading via the 'pip install --upgrade pip' command.\u001b[0m\r\n" ] } ], @@ -83,7 +83,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 3, "metadata": { "colab": { "autoexec": { @@ -123,7 +123,6 @@ "import PIL.Image as Image\n", "\n", "import tensorflow as tf\n", - "assert tf.__version__.startswith('1')\n", "# uncomment to avoid deprecation warnings :\n", "from tensorflow.python.util import deprecation\n", "deprecation._PRINT_DEPRECATION_WARNINGS = False\n", @@ -148,7 +147,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ @@ -170,7 +169,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 5, "metadata": { "colab": { "autoexec": { @@ -204,8 +203,8 @@ "output_type": "stream", "text": [ "Trying to build svelte component from html...\n", - "svelte compile --format iife /tmp/svelte_tnps1sft/SemanticDict_97ab18be_6a86_4b40_bd6e_2f3fe35bf6b4.html > /tmp/svelte_tnps1sft/SemanticDict_97ab18be_6a86_4b40_bd6e_2f3fe35bf6b4.js\n", - "b'svelte version 1.64.1\\ncompiling ../../../../../../../tmp/svelte_tnps1sft/SemanticDict_97ab18be_6a86_4b40_bd6e_2f3fe35bf6b4.html...\\n'\n" + "svelte compile --format iife /tmp/svelte_f7ihgo11/SemanticDict_f80a5c51_8ca3_4726_bbab_d27848f6dd9d.html > /tmp/svelte_f7ihgo11/SemanticDict_f80a5c51_8ca3_4726_bbab_d27848f6dd9d.js\n", + "b'svelte version 1.64.1\\ncompiling ../../../../../../../tmp/svelte_f7ihgo11/SemanticDict_f80a5c51_8ca3_4726_bbab_d27848f6dd9d.html...\\n'\n" ] } ], @@ -274,7 +273,7 @@ " height: 128px;\n", " vertical-align: bottom;\n", " padding-bottom: 64px;\n", - " width: 85%;\n", + " width: 81%;\n", " margin: auto;\n", " }\n", " .entry {\n", @@ -342,7 +341,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 6, "metadata": { "colab": { "autoexec": { @@ -389,7 +388,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 7, "metadata": { "colab": { "autoexec": { @@ -438,7 +437,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 14, "metadata": {}, "outputs": [ { @@ -452,7 +451,7 @@ { "data": { 
"application/vnd.jupyter.widget-view+json": { - "model_id": "ddb891ddc4e34821ad37659bcb50e64c", + "model_id": "d3f363c8b50f4d70b4299d8fb946e3fd", "version_major": 2, "version_minor": 0 }, @@ -475,7 +474,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "9f6deff3bc5e44fbb61fceed3127b9f3", + "model_id": "fbd0d845408d424dbf370ffe12bd0599", "version_major": 2, "version_minor": 0 }, @@ -497,12 +496,12 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "ef23fbb3edd64d639ee8a55b62222305", + "model_id": "681d7a6c39b74bcd997d9196ee072628", "version_major": 2, "version_minor": 0 }, "text/plain": [ - "Dropdown(description='Layers', options=('conv2d0', 'conv2d1', 'conv2d2', 'mixed3a', 'mixed3b', 'mixed4a', 'mix…" + "Dropdown(description='Layers', index=3, options=('conv2d0', 'conv2d1', 'conv2d2', 'mixed3a', 'mixed3b', 'mixed…" ] }, "metadata": {}, @@ -530,7 +529,7 @@ "print(\"\\nSelect the layer you want to use semantic dictionnary on : \")\n", "layers_widget = widgets.Dropdown(\n", " options=layers_list,\n", - " value=layers_list[0],\n", + " value=layers_list[3],\n", " description='Layers'\n", ")\n", "display(layers_widget)" @@ -538,7 +537,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 15, "metadata": {}, "outputs": [], "source": [ @@ -556,17 +555,17 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 16, "metadata": {}, "outputs": [ { "data": { "text/html": [ "\n", - "
\n", + "
\n", " \n", " \n", diff --git a/notebooks/building-blocks/jupyter_versions/SpritemapGenerator.ipynb b/notebooks/building-blocks/jupyter_versions/SpritemapGenerator.ipynb index 678dbccc..8b969b06 100644 --- a/notebooks/building-blocks/jupyter_versions/SpritemapGenerator.ipynb +++ b/notebooks/building-blocks/jupyter_versions/SpritemapGenerator.ipynb @@ -56,6 +56,24 @@ { "cell_type": "code", "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[33mWARNING: You are using pip version 19.3.1; however, version 20.1.1 is available.\r\n", + "You should consider upgrading via the 'pip install --upgrade pip' command.\u001b[0m\r\n" + ] + } + ], + "source": [ + "!pip install ipyfilechooser ipywidgets --quiet" + ] + }, + { + "cell_type": "code", + "execution_count": 3, "metadata": { "colab": { "autoexec": { @@ -97,13 +115,15 @@ "from math import ceil\n", "\n", "import numpy as np\n", - "\n", "import tensorflow as tf\n", "# uncomment to avoid deprecation warnings :\n", "from tensorflow.python.util import deprecation\n", "deprecation._PRINT_DEPRECATION_WARNINGS = False\n", "tf.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n", "\n", + "import ipywidgets as widgets\n", + "from IPython.display import display\n", + "\n", "import lucid.modelzoo.vision_models as models\n", "from lucid.optvis import render, objectives, transform, param\n", "from lucid.misc.channel_reducer import ChannelReducer\n", @@ -112,7 +132,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 4, "metadata": { "scrolled": true }, @@ -141,7 +161,7 @@ " Layer (belonging to InceptionV1) ([{'dense'}]))" ] }, - "execution_count": 3, + "execution_count": 4, "metadata": {}, "output_type": "execute_result" } @@ -160,7 +180,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 5, "metadata": {}, "outputs": [], "source": [ @@ -194,33 +214,62 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 6, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "mixed3a 256\n" + "\n", + "Check the layers you want to apply activation grid on : \n" ] }, { - "ename": "NameError", - "evalue": "name 'neuron_sprite_map_nam' is not defined", - "output_type": "error", - "traceback": [ - "\u001b[0;31m\u001b[0m", - "\u001b[0;31mNameError\u001b[0mTraceback (most recent call last)", - "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0mneuron_sprite_map_array\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mchannel_sprite_map_array\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcreate_spritemap\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlayer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msprite_size\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrow_size\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcolumn_size\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 11\u001b[0;31m \u001b[0mneuron_sprite_map_nam\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 12\u001b[0m \u001b[0msave\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mneuron_sprite_map_array\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"./spritemaps/spritemap_neuron_\"\u001b[0m \u001b[0;34m+\u001b[0m 
\u001b[0mlayer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msplit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"/\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;34m\".jpeg\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0msave\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mchannel_sprite_map_array\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"./spritemaps/spritemap_channel_\"\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0mlayer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msplit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"/\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;34m\".jpeg\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;31mNameError\u001b[0m: name 'neuron_sprite_map_nam' is not defined" + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "d2250a4944ea455584bfe82b8a52141b", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "SelectMultiple(description='Layers', index=(3,), options=(('conv2d0', Layer (belonging to InceptionV1) Date: Sat, 11 Jul 2020 07:23:07 -0700 Subject: [PATCH 22/57] Add more ops to whitelist --- lucid/misc/graph_analysis/filter_overlay.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lucid/misc/graph_analysis/filter_overlay.py b/lucid/misc/graph_analysis/filter_overlay.py index 35cf9766..1aeff8b8 100644 --- a/lucid/misc/graph_analysis/filter_overlay.py +++ b/lucid/misc/graph_analysis/filter_overlay.py @@ -25,7 +25,7 @@ """ -standard_include_ops = ["Placeholder", "Relu", "Relu6", "Add", "Split", "Softmax", "Concat", "ConcatV2", "Conv2D", "MaxPool", "AvgPool", "MatMul"] # Conv2D +standard_include_ops = ["Placeholder", "Relu", "Relu6", "Add", "Split", "Softmax", "Concat", "ConcatV2", "Conv2D", "MaxPool", "AvgPool", "MatMul", "EwZXy"] # Conv2D def ops_whitelist(graph, include_ops=standard_include_ops): keep_nodes = [node.name for node in graph.nodes if node.op in include_ops] From aedd7c1fe2ff358c9848c788a48106884688b53a Mon Sep 17 00:00:00 2001 From: Chelsea Voss Date: Thu, 3 Sep 2020 11:01:26 -0700 Subject: [PATCH 23/57] Add cache to the docstring of load The actual text in this docstring line was taken from the docstring of read, since that appears to be the main place where the cache kwarg gets propagated down to in order to actually affect behavior. --- lucid/misc/io/loading.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lucid/misc/io/loading.py b/lucid/misc/io/loading.py index 95dfeb43..a233de35 100644 --- a/lucid/misc/io/loading.py +++ b/lucid/misc/io/loading.py @@ -183,6 +183,8 @@ def load(url_or_handle, allow_unsafe_formats=False, cache=None, **kwargs): Args: url_or_handle: a (reachable) URL, or an already open file handle allow_unsafe_formats: set to True to allow saving unsafe formats (eg. pickles) + cache: whether to attempt caching the resource. Defaults to True only if + the given URL specifies a remote resource. Raises: RuntimeError: If file extension or URL is not supported. 
From d7652b0ba26d2cba69b79c422e726b17ef9276a9 Mon Sep 17 00:00:00 2001 From: Stefan Sietzen Date: Mon, 21 Sep 2020 14:43:48 +0200 Subject: [PATCH 24/57] added adv fine-tuned InceptionV1 --- lucid/modelzoo/other_models/InceptionV1.py | 13 +++++++++++++ lucid/modelzoo/other_models/__init__.py | 2 +- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/lucid/modelzoo/other_models/InceptionV1.py b/lucid/modelzoo/other_models/InceptionV1.py index ce032105..1a47ba8c 100644 --- a/lucid/modelzoo/other_models/InceptionV1.py +++ b/lucid/modelzoo/other_models/InceptionV1.py @@ -81,3 +81,16 @@ def post_import(self, scope): {'tags': ['dense'], 'name': 'softmax1', 'depth': 1008}, {'tags': ['dense'], 'name': 'softmax2', 'depth': 1008}, ]) + + +class InceptionV1_adv_finetuned(InceptionV1): + """adversarially fine-tuned InceptionV1 + + This model is based on InceptionV1 and has been fine-tuned with + PGD-generated adversarial examples (https://arxiv.org/pdf/1706.06083.pdf). + The PGD-attack was L2-bounded with an epsilon of 255 (1.0 for normalized images). + After fine-tuning, this model achieves a robust top-5 accuracy of ~67% + for eps. 255 L2-bounded adversarial examples compared to ~4% before fine-tuning. + """ + model_path = 'gs://modelzoo/vision/other_models/InceptionV1_adv_finetuned.pb' + diff --git a/lucid/modelzoo/other_models/__init__.py b/lucid/modelzoo/other_models/__init__.py index 9515640c..74536dbb 100644 --- a/lucid/modelzoo/other_models/__init__.py +++ b/lucid/modelzoo/other_models/__init__.py @@ -10,7 +10,7 @@ from lucid.modelzoo.vision_base import Model as _Model from lucid.modelzoo.other_models.AlexNet import AlexNet -from lucid.modelzoo.other_models.InceptionV1 import InceptionV1 +from lucid.modelzoo.other_models.InceptionV1 import InceptionV1, InceptionV1_adv_finetuned __all__ = [_name for _name, _obj in list(globals().items()) From bccb9f77b8d7d1ad5b4adaf7591af5730cb8d906 Mon Sep 17 00:00:00 2001 From: Jacob Hilton Date: Fri, 25 Sep 2020 15:58:05 -0700 Subject: [PATCH 25/57] rl_util attribution bug fix --- lucid/scratch/rl_util/attribution.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lucid/scratch/rl_util/attribution.py b/lucid/scratch/rl_util/attribution.py index 4b0b203d..6b300983 100644 --- a/lucid/scratch/rl_util/attribution.py +++ b/lucid/scratch/rl_util/attribution.py @@ -40,7 +40,7 @@ def get_grad_or_attr( *, act_dir=None, act_poses=None, - score_fn=tf.reduce_sum, + score_fn=lambda t: tf.reduce_sum(t, axis=-1), grad_or_attr, override=None, integrate_steps=1 @@ -62,7 +62,7 @@ def get_grad_or_attr( t_acts, tf.concat([tf.range(obses.shape[0])[..., None], act_poses], axis=-1), ) - t_score = score_fn(t_acts) + t_score = tf.reduce_sum(score_fn(t_acts)) t_grad = tf.gradients(t_score, [t_acts_prev])[0] if integrate_steps > 1: acts_prev = t_acts_prev.eval() @@ -146,7 +146,7 @@ def get_multi_path_attr( *, act_dir=None, act_poses=None, - score_fn=tf.reduce_sum, + score_fn=lambda t: tf.reduce_sum(t, axis=-1), override=None, max_paths=50, integrate_steps=10 @@ -168,7 +168,7 @@ def get_multi_path_attr( t_acts, tf.concat([tf.range(obses.shape[0])[..., None], act_poses], axis=-1), ) - t_score = score_fn(t_acts) + t_score = tf.reduce_sum(score_fn(t_acts)) t_grad = tf.gradients(t_score, [t_acts_prev])[0] acts_prev = t_acts_prev.eval() path_acts = get_paths( From 34ed55fcd5c6b0cbfd329075e34fd09dfd944bd1 Mon Sep 17 00:00:00 2001 From: Jacob Hilton Date: Fri, 25 Sep 2020 18:16:27 -0700 Subject: [PATCH 26/57] default_score_fn --- 
lucid/scratch/rl_util/attribution.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/lucid/scratch/rl_util/attribution.py b/lucid/scratch/rl_util/attribution.py index 6b300983..c88202ae 100644 --- a/lucid/scratch/rl_util/attribution.py +++ b/lucid/scratch/rl_util/attribution.py @@ -32,6 +32,10 @@ def get_acts(model, layer_name, obses): return t_acts.eval() +def default_score_fn(t): + return tf.reduce_sum(t, axis=list(range(len(t.shape)))[1:]) + + def get_grad_or_attr( model, layer_name, @@ -40,7 +44,7 @@ def get_grad_or_attr( *, act_dir=None, act_poses=None, - score_fn=lambda t: tf.reduce_sum(t, axis=-1), + score_fn=default_score_fn, grad_or_attr, override=None, integrate_steps=1 @@ -62,7 +66,9 @@ def get_grad_or_attr( t_acts, tf.concat([tf.range(obses.shape[0])[..., None], act_poses], axis=-1), ) - t_score = tf.reduce_sum(score_fn(t_acts)) + t_scores = score_fn(t_acts) + assert len(t_scores.shape) >= 1, "score_fn should not reduce the batch dim" + t_score = tf.reduce_sum(t_scores) t_grad = tf.gradients(t_score, [t_acts_prev])[0] if integrate_steps > 1: acts_prev = t_acts_prev.eval() @@ -146,7 +152,7 @@ def get_multi_path_attr( *, act_dir=None, act_poses=None, - score_fn=lambda t: tf.reduce_sum(t, axis=-1), + score_fn=default_score_fn, override=None, max_paths=50, integrate_steps=10 @@ -168,7 +174,9 @@ def get_multi_path_attr( t_acts, tf.concat([tf.range(obses.shape[0])[..., None], act_poses], axis=-1), ) - t_score = tf.reduce_sum(score_fn(t_acts)) + t_scores = score_fn(t_acts) + assert len(t_scores.shape) >= 1, "score_fn should not reduce the batch dim" + t_score = tf.reduce_sum(t_scores) t_grad = tf.gradients(t_score, [t_acts_prev])[0] acts_prev = t_acts_prev.eval() path_acts = get_paths( From 6fd0fb5881cf160d27a75355f2a19dc43677a871 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 16 Jul 2020 01:53:38 +0000 Subject: [PATCH 27/57] Bump lodash from 4.17.15 to 4.17.19 in /lucid/scratch/js Bumps [lodash](https://github.com/lodash/lodash) from 4.17.15 to 4.17.19. - [Release notes](https://github.com/lodash/lodash/releases) - [Commits](https://github.com/lodash/lodash/compare/4.17.15...4.17.19) Signed-off-by: dependabot[bot] --- lucid/scratch/js/package-lock.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lucid/scratch/js/package-lock.json b/lucid/scratch/js/package-lock.json index 2164b445..6622bc99 100644 --- a/lucid/scratch/js/package-lock.json +++ b/lucid/scratch/js/package-lock.json @@ -697,9 +697,9 @@ } }, "lodash": { - "version": "4.17.15", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.15.tgz", - "integrity": "sha512-8xOcRHvCjnocdS5cpwXQXVzmmh5e5+saE2QGoeQmbKmRS6J3VQppPOIt0MnmE+4xlZoumy0GPG0D0MVIQbNA1A==", + "version": "4.17.19", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.19.tgz", + "integrity": "sha512-JNvd8XER9GQX0v2qJgsaN/mzFCNA5BRe/j8JN9d+tWyGLSodKQHKFicdwNYzWwI3wjRnaKPsGj1XkBjx/F96DQ==", "dev": true }, "lodash._objecttypes": { From 1de583542238be5451fbe38574d7070cda380af2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 10 Sep 2020 21:58:08 +0000 Subject: [PATCH 28/57] Bump node-fetch from 2.1.1 to 2.6.1 in /lucid/scratch/js Bumps [node-fetch](https://github.com/bitinn/node-fetch) from 2.1.1 to 2.6.1. 
- [Release notes](https://github.com/bitinn/node-fetch/releases) - [Changelog](https://github.com/node-fetch/node-fetch/blob/master/docs/CHANGELOG.md) - [Commits](https://github.com/bitinn/node-fetch/compare/v2.1.1...v2.6.1) Signed-off-by: dependabot[bot] --- lucid/scratch/js/package-lock.json | 6 +++--- lucid/scratch/js/package.json | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) mode change 100755 => 100644 lucid/scratch/js/package.json diff --git a/lucid/scratch/js/package-lock.json b/lucid/scratch/js/package-lock.json index 6622bc99..90cec748 100644 --- a/lucid/scratch/js/package-lock.json +++ b/lucid/scratch/js/package-lock.json @@ -871,9 +871,9 @@ } }, "node-fetch": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.1.1.tgz", - "integrity": "sha1-NpynC4L1DIZJYQSmx3bSdPTkotQ=", + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.1.tgz", + "integrity": "sha512-V4aYg89jEoVRxRb2fJdAg8FHvI7cEyYdVAh94HH0UIK8oJxUfkjlDQN9RbMx+bEjP7+ggMiFRprSti032Oipxw==", "dev": true }, "normalize-path": { diff --git a/lucid/scratch/js/package.json b/lucid/scratch/js/package.json old mode 100755 new mode 100644 index 01760cdf..00bbddfe --- a/lucid/scratch/js/package.json +++ b/lucid/scratch/js/package.json @@ -16,7 +16,7 @@ "fetch-mock": "^6.1.0", "mocha": "^5.0.4", "ndarray": "^1.0.18", - "node-fetch": "^2.1.1", + "node-fetch": "^2.6.1", "numpy-parser": "^1.0.2", "rollup": "^0.56.4", "rollup-plugin-commonjs": "^8.4.0", From 130cb44d8d7f426b4eee6114f3b9e0c2917f939a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Sep 2020 16:10:09 +0000 Subject: [PATCH 29/57] Bump serve from 9.4.0 to 10.1.2 in /lucid/scratch/js Bumps [serve](https://github.com/zeit/serve) from 9.4.0 to 10.1.2. 
- [Release notes](https://github.com/zeit/serve/releases) - [Commits](https://github.com/zeit/serve/compare/9.4.0...10.1.2) Signed-off-by: dependabot[bot] --- lucid/scratch/js/package-lock.json | 248 +++++++++++++++++------------ lucid/scratch/js/package.json | 2 +- 2 files changed, 146 insertions(+), 104 deletions(-) diff --git a/lucid/scratch/js/package-lock.json b/lucid/scratch/js/package-lock.json index 90cec748..5df24055 100644 --- a/lucid/scratch/js/package-lock.json +++ b/lucid/scratch/js/package-lock.json @@ -5,11 +5,21 @@ "requires": true, "dependencies": { "@zeit/schemas": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/@zeit/schemas/-/schemas-1.7.0.tgz", - "integrity": "sha512-Ma2HHFqwZZ5WOEMcd/8RJj70O9jy2esTvu9oaYLJSkenELKrv6vgkGeM5jB8xLRTYocpcnd2rCfpyKyhBqVphQ==", + "version": "2.6.0", + "resolved": "https://registry.npmjs.org/@zeit/schemas/-/schemas-2.6.0.tgz", + "integrity": "sha512-uUrgZ8AxS+Lio0fZKAipJjAh415JyrOZowliZAzmnJSsf7piVL5w+G0+gFJ0KSu3QRhvui/7zuvpLz03YjXAhg==", "dev": true }, + "accepts": { + "version": "1.3.7", + "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.7.tgz", + "integrity": "sha512-Il80Qs2WjYlJIBNzNkK6KYqlVMTbZLXgHx2oT0pU/fjRHyEp+PEfEPY0R3WCwAGVOtauxh1hOxNgIf5bv7dQpA==", + "dev": true, + "requires": { + "mime-types": "~2.1.24", + "negotiator": "0.6.2" + } + }, "acorn": { "version": "5.5.3", "resolved": "https://registry.npmjs.org/acorn/-/acorn-5.5.3.tgz", @@ -17,15 +27,15 @@ "dev": true }, "ajv": { - "version": "6.5.2", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.5.2.tgz", - "integrity": "sha512-hOs7GfvI6tUI1LfZddH82ky6mOMyTuY0mk7kE2pWpmhhUSkumzaTO5vbVwij39MdwPQWCV4Zv57Eo06NtL/GVA==", + "version": "6.5.3", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.5.3.tgz", + "integrity": "sha512-LqZ9wY+fx3UMiiPd741yB2pj3hhil+hQc8taf4o2QGRFpWgZ2V5C8HA165DY9sS3fJwsk7uT7ZlFEyC3Ig3lLg==", "dev": true, "requires": { "fast-deep-equal": "^2.0.1", "fast-json-stable-stringify": "^2.0.0", "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.1" + "uri-js": "^4.2.2" } }, "ansi-align": { @@ -53,9 +63,9 @@ } }, "arch": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/arch/-/arch-2.1.1.tgz", - "integrity": "sha512-BLM56aPo9vLLFVa8+/+pJLnrZ7QGGTVHWsCwieAWT9o9K8UeGaQbzZbGoabWLOo2ksBCztoXdqBZBplqLDDCSg==", + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/arch/-/arch-2.1.2.tgz", + "integrity": "sha512-NTBIIbAfkJeIletyABbVtdPgeKfDafR+1mZV/AyyfC1UkVkp9iUjV+wwmqtUgphHYajbI86jejBJp5e+jkGTiQ==", "dev": true }, "arg": { @@ -251,18 +261,18 @@ } }, "color-convert": { - "version": "1.9.2", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.2.tgz", - "integrity": "sha512-3NUJZdhMhcdPn8vJ9v2UQJoH0qqoGUkYTgFEPZaPjEtwmmKUfNV46zZmgB2M5M4DCEQHMaCfWHCxiBflLm04Tg==", + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", "dev": true, "requires": { - "color-name": "1.1.1" + "color-name": "1.1.3" } }, "color-name": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.1.tgz", - "integrity": "sha1-SxQVMEz1ACjqgWQ2Q72C6gWANok=", + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=", "dev": true }, "commander": { @@ -271,6 +281,30 @@ "integrity": 
"sha512-7B1ilBwtYSbetCgTY1NJFg+gVpestg0fdA1MhC1Vs4ssyfSXnCAjFr+QcQM9/RedXC0EaUx1sG8Smgw2VfgKEg==", "dev": true }, + "compressible": { + "version": "2.0.18", + "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.18.tgz", + "integrity": "sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==", + "dev": true, + "requires": { + "mime-db": ">= 1.43.0 < 2" + } + }, + "compression": { + "version": "1.7.3", + "resolved": "https://registry.npmjs.org/compression/-/compression-1.7.3.tgz", + "integrity": "sha512-HSjyBG5N1Nnz7tF2+O7A9XUhyjru71/fwgNb7oIsEVHR0WShfs2tIS/EySLgiTe98aOK18YDlMXpzjCXY/n9mg==", + "dev": true, + "requires": { + "accepts": "~1.3.5", + "bytes": "3.0.0", + "compressible": "~2.0.14", + "debug": "2.6.9", + "on-headers": "~1.0.1", + "safe-buffer": "5.1.2", + "vary": "~1.1.2" + } + }, "concat-map": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", @@ -305,6 +339,15 @@ "rw": "1" } }, + "debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dev": true, + "requires": { + "ms": "2.0.0" + } + }, "deep-eql": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-3.0.1.tgz", @@ -387,9 +430,9 @@ "dev": true }, "fast-json-stable-stringify": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.0.0.tgz", - "integrity": "sha1-1RQsDK7msRifh9OnYREGT4bIu/I=", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", "dev": true }, "fast-url-parser": { @@ -504,23 +547,6 @@ "is-glob": "^2.0.0" } }, - "glob-slash": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/glob-slash/-/glob-slash-1.0.0.tgz", - "integrity": "sha1-/lLvpDMjP3Si/mTHq7m8hIICq5U=", - "dev": true - }, - "glob-slasher": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/glob-slasher/-/glob-slasher-1.0.1.tgz", - "integrity": "sha1-dHoOW7IiZC7hDT4FRD4QlJPLD44=", - "dev": true, - "requires": { - "glob-slash": "^1.0.0", - "lodash.isobject": "^2.4.1", - "toxic": "^1.0.0" - } - }, "glob-to-regexp": { "version": "0.3.0", "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.3.0.tgz", @@ -696,31 +722,10 @@ "is-buffer": "^1.1.5" } }, - "lodash": { - "version": "4.17.19", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.19.tgz", - "integrity": "sha512-JNvd8XER9GQX0v2qJgsaN/mzFCNA5BRe/j8JN9d+tWyGLSodKQHKFicdwNYzWwI3wjRnaKPsGj1XkBjx/F96DQ==", - "dev": true - }, - "lodash._objecttypes": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/lodash._objecttypes/-/lodash._objecttypes-2.4.1.tgz", - "integrity": "sha1-fAt/admKH3ZSn4kLDNsbTf7BHBE=", - "dev": true - }, - "lodash.isobject": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/lodash.isobject/-/lodash.isobject-2.4.1.tgz", - "integrity": "sha1-Wi5H/mmVPx7mMafrof5k0tBlWPU=", - "dev": true, - "requires": { - "lodash._objecttypes": "~2.4.1" - } - }, "lru-cache": { - "version": "4.1.3", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-4.1.3.tgz", - "integrity": 
"sha512-fFEhvcgzuIoJVUF8fYr5KR0YqxD238zgObTps31YdADwPPAp82a4M8TrckkWyx7ekNlf9aBcVn81cFwwXngrJA==", + "version": "4.1.5", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-4.1.5.tgz", + "integrity": "sha512-sWZlbEP2OsHNkXrMl5GYk/jKk70MBng6UU4YI/qGDYbgf6YbP4EvmqISbXCoJiRKs+1bSpFHVgQxvJ17F2li5g==", "dev": true, "requires": { "pseudomap": "^1.0.2", @@ -764,18 +769,18 @@ } }, "mime-db": { - "version": "1.33.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.33.0.tgz", - "integrity": "sha512-BHJ/EKruNIqJf/QahvxwQZXKygOQ256myeN/Ew+THcAa5q+PjyTTMMeNQC4DZw5AwfvelsUrA6B67NKMqXDbzQ==", + "version": "1.44.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.44.0.tgz", + "integrity": "sha512-/NOTfLrsPBVeH7YtFPgsVWveuL+4SjjYxaQ1xtM1KMFj7HdxlBlxeyNLzhyJVx7r4rZGJAZ/6lkKCitSc/Nmpg==", "dev": true }, "mime-types": { - "version": "2.1.18", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.18.tgz", - "integrity": "sha512-lc/aahn+t4/SWV/qcmumYjymLsWfN3ELhpmVuUFjgsORruuZPVSwAQryq+HHGvO/SI2KVX26bx+En+zhM8g8hQ==", + "version": "2.1.27", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.27.tgz", + "integrity": "sha512-JIhqnCasI9yD+SsmkquHBxTSEuZdQX5BuQnS2Vc7puQQQ+8yiP5AY5uWhpdv4YL4VM5c6iliiYWPgJ/nJQLp7w==", "dev": true, "requires": { - "mime-db": "~1.33.0" + "mime-db": "1.44.0" } }, "minimatch": { @@ -788,9 +793,9 @@ } }, "minimist": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.0.tgz", - "integrity": "sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ=", + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", + "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==", "dev": true }, "mkdirp": { @@ -860,6 +865,12 @@ } } }, + "ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", + "dev": true + }, "ndarray": { "version": "1.0.18", "resolved": "https://registry.npmjs.org/ndarray/-/ndarray-1.0.18.tgz", @@ -870,6 +881,12 @@ "is-buffer": "^1.0.2" } }, + "negotiator": { + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.2.tgz", + "integrity": "sha512-hZXc7K2e+PgeI1eDBe/10Ard4ekbfrrqG8Ep+8Jmf4JID2bNg7NvCPOZN+kfF574pFQI7mum2AUqDidoKqcTOw==", + "dev": true + }, "node-fetch": { "version": "2.6.1", "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.1.tgz", @@ -910,6 +927,12 @@ "is-extendable": "^0.1.1" } }, + "on-headers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.2.tgz", + "integrity": "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==", + "dev": true + }, "once": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", @@ -1016,6 +1039,12 @@ } } }, + "range-parser": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.0.tgz", + "integrity": "sha1-9JvmtIeJTdxA3MlKMi9hEJLgDV4=", + "dev": true + }, "rc": { "version": "1.2.8", "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", @@ -1161,37 +1190,53 @@ "dev": true }, "serve": { - "version": "9.4.0", - "resolved": "https://registry.npmjs.org/serve/-/serve-9.4.0.tgz", - "integrity": "sha512-a5TpnFytY2r59g0M3L9g2HvlLBcTHeevR8gTnDkzMWECfV2c8tUCEGC9tl3YYWM7xucdkUmov+xyKjWamQQJ7Q==", + "version": "10.1.2", + "resolved": 
"https://registry.npmjs.org/serve/-/serve-10.1.2.tgz", + "integrity": "sha512-TVH35uwndRlCqSeX3grR3Ntrjx2aBTeu6sx+zTD2CzN2N/rHuEDTvxiBwWbrellJNyWiQFz2xZmoW+UxV+Zahg==", "dev": true, "requires": { - "@zeit/schemas": "1.7.0", - "ajv": "6.5.2", + "@zeit/schemas": "2.6.0", + "ajv": "6.5.3", "arg": "2.0.0", "boxen": "1.3.0", "chalk": "2.4.1", "clipboardy": "1.2.3", - "serve-handler": "3.6.0", + "compression": "1.7.3", + "serve-handler": "5.0.8", "update-check": "1.5.2" } }, "serve-handler": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/serve-handler/-/serve-handler-3.6.0.tgz", - "integrity": "sha512-YPMV1OCfOxub4OnGQQtcGEJNI6e49r0vfSid2U5xrcOB1l6TFWfvHmUhEbfrvU7sqhZgmicfVtVBiAAGRH7NTA==", + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/serve-handler/-/serve-handler-5.0.8.tgz", + "integrity": "sha512-pqk0SChbBLLHfMIxQ55czjdiW7tj2cFy53svvP8e5VqEN/uB/QpfiTJ8k1uIYeFTDVoi+FGi5aqXScuu88bymg==", "dev": true, "requires": { "bytes": "3.0.0", "content-disposition": "0.5.2", "fast-url-parser": "1.1.3", - "glob-slasher": "1.0.1", "mime-types": "2.1.18", "minimatch": "3.0.4", "path-is-inside": "1.0.2", - "path-to-regexp": "2.2.1" + "path-to-regexp": "2.2.1", + "range-parser": "1.2.0" }, "dependencies": { + "mime-db": { + "version": "1.33.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.33.0.tgz", + "integrity": "sha512-BHJ/EKruNIqJf/QahvxwQZXKygOQ256myeN/Ew+THcAa5q+PjyTTMMeNQC4DZw5AwfvelsUrA6B67NKMqXDbzQ==", + "dev": true + }, + "mime-types": { + "version": "2.1.18", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.18.tgz", + "integrity": "sha512-lc/aahn+t4/SWV/qcmumYjymLsWfN3ELhpmVuUFjgsORruuZPVSwAQryq+HHGvO/SI2KVX26bx+En+zhM8g8hQ==", + "dev": true, + "requires": { + "mime-db": "~1.33.0" + } + }, "path-to-regexp": { "version": "2.2.1", "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-2.2.1.tgz", @@ -1216,9 +1261,9 @@ "dev": true }, "signal-exit": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.2.tgz", - "integrity": "sha1-tf3AjxKH6hF4Yo5BXiUTK3NkbG0=", + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.3.tgz", + "integrity": "sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA==", "dev": true }, "sourcemap-codec": { @@ -1270,9 +1315,9 @@ "dev": true }, "supports-color": { - "version": "5.4.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz", - "integrity": "sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w==", + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", "dev": true, "requires": { "has-flag": "^3.0.0" @@ -1301,15 +1346,6 @@ "execa": "^0.7.0" } }, - "toxic": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/toxic/-/toxic-1.0.1.tgz", - "integrity": "sha512-WI3rIGdcaKULYg7KVoB0zcjikqvcYYvcuT6D89bFPz2rVR0Rl0PK6x8/X62rtdLtBKIE985NzVf/auTtGegIIg==", - "dev": true, - "requires": { - "lodash": "^4.17.10" - } - }, "type-detect": { "version": "4.0.8", "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz", @@ -1327,14 +1363,20 @@ } }, "uri-js": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.2.2.tgz", - "integrity": 
"sha512-KY9Frmirql91X2Qgjry0Wd4Y+YTdrdZheS8TFwvkbLWf/G5KNJDCh6pKL5OZctEW4+0Baa5idK2ZQuELRwPznQ==", + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.0.tgz", + "integrity": "sha512-B0yRTzYdUCCn9n+F4+Gh4yIDtMQcaJsmYBDsTSG8g/OejKBodLQ2IHfN3bM7jUsRXndopT7OIXWdYqc1fjmV6g==", "dev": true, "requires": { "punycode": "^2.1.0" } }, + "vary": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", + "integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw=", + "dev": true + }, "vlq": { "version": "0.2.3", "resolved": "https://registry.npmjs.org/vlq/-/vlq-0.2.3.tgz", @@ -1351,9 +1393,9 @@ } }, "widest-line": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-2.0.0.tgz", - "integrity": "sha1-AUKk6KJD+IgsAjOqDgKBqnYVInM=", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-2.0.1.tgz", + "integrity": "sha512-Ba5m9/Fa4Xt9eb2ELXt77JxVDV8w7qQrH0zS/TWSJdLyAwQjWoOzpzj5lwVftDz6n/EOu3tNACS84v509qwnJA==", "dev": true, "requires": { "string-width": "^2.1.1" diff --git a/lucid/scratch/js/package.json b/lucid/scratch/js/package.json index 00bbddfe..949c068f 100644 --- a/lucid/scratch/js/package.json +++ b/lucid/scratch/js/package.json @@ -22,7 +22,7 @@ "rollup-plugin-commonjs": "^8.4.0", "rollup-plugin-node-resolve": "^3.0.3", "rollup-plugin-svelte": "^4.0.0", - "serve": "^9.4.0", + "serve": "^10.1.2", "svelte": "^1.56.1" }, "scripts": { From 6f06ff1ef2e1313c3d9e0ddbed36af589e49697e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Sep 2020 16:13:53 +0000 Subject: [PATCH 30/57] Bump acorn from 5.5.3 to 5.7.4 in /lucid/scratch/js Bumps [acorn](https://github.com/acornjs/acorn) from 5.5.3 to 5.7.4. 
- [Release notes](https://github.com/acornjs/acorn/releases) - [Commits](https://github.com/acornjs/acorn/compare/5.5.3...5.7.4) Signed-off-by: dependabot[bot] --- lucid/scratch/js/package-lock.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lucid/scratch/js/package-lock.json b/lucid/scratch/js/package-lock.json index 5df24055..92acb0f7 100644 --- a/lucid/scratch/js/package-lock.json +++ b/lucid/scratch/js/package-lock.json @@ -21,9 +21,9 @@ } }, "acorn": { - "version": "5.5.3", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-5.5.3.tgz", - "integrity": "sha512-jd5MkIUlbbmb07nXH0DT3y7rDVtkzDi4XZOUVWAer8ajmF/DTSSbl5oNFyDOl/OXA33Bl79+ypHhl2pN20VeOQ==", + "version": "5.7.4", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-5.7.4.tgz", + "integrity": "sha512-1D++VG7BhrtvQpNbBzovKNc1FLGGEE/oGe7b9xJm/RFHMBeUaUGpluV9RLjZa47YFdPcDAenEYuq9pQPcMdLJg==", "dev": true }, "ajv": { From ab2ed714634fd23896f1db3d8e8d3a39ae0060a6 Mon Sep 17 00:00:00 2001 From: Jacob Hilton Date: Wed, 11 Nov 2020 16:47:22 -0800 Subject: [PATCH 31/57] rl_util notebook --- notebooks/misc/rl_util.ipynb | 1 + 1 file changed, 1 insertion(+) create mode 100644 notebooks/misc/rl_util.ipynb diff --git a/notebooks/misc/rl_util.ipynb b/notebooks/misc/rl_util.ipynb new file mode 100644 index 00000000..a7c75ed6 --- /dev/null +++ b/notebooks/misc/rl_util.ipynb @@ -0,0 +1 @@ +{"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"name":"rl_clarity_util.ipynb","provenance":[],"collapsed_sections":["CsQnKgi0pqXR"],"authorship_tag":"ABX9TyNtc6peJc18ZpquQhmSX7eZ"},"kernelspec":{"name":"python3","display_name":"Python 3"}},"cells":[{"cell_type":"markdown","metadata":{"id":"VAn2uCs-CoEX"},"source":["This notebook explains how to use the utilities provided by `lucid.scratch.rl_util` with the models from the paper [Understanding RL vision](https://distill.pub/2020/understanding-rl-vision)."]},{"cell_type":"markdown","metadata":{"id":"CsQnKgi0pqXR"},"source":["# Setup"]},{"cell_type":"code","metadata":{"id":"3wKHkh1vWnwa","executionInfo":{"status":"ok","timestamp":1605141734690,"user_tz":480,"elapsed":623,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"8e1baa59-85f6-4d91-f49e-04501183c75e","colab":{"base_uri":"https://localhost:8080/"}},"source":["# use tensorflow 1.x\n","%tensorflow_version 1.x"],"execution_count":1,"outputs":[{"output_type":"stream","text":["TensorFlow 1.x selected.\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.6/dist-packages/requests/__init__.py:91: RequestsDependencyWarning: urllib3 (1.26.0) or chardet (3.0.4) doesn't match a supported version!\n"," RequestsDependencyWarning)\n"],"name":"stderr"}]},{"cell_type":"code","metadata":{"id":"6QqbnIpxYMoT","executionInfo":{"status":"ok","timestamp":1605141747515,"user_tz":480,"elapsed":13441,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"09c392e2-9916-4678-96bb-af7071a8c549","colab":{"base_uri":"https://localhost:8080/"}},"source":["# install specific version of lucid\n","!pip install git+git://github.com/tensorflow/lucid.git@e421330f458093c2743d4c8aa49c9207eba6296d"],"execution_count":2,"outputs":[{"output_type":"stream","text":["Collecting git+git://github.com/tensorflow/lucid.git@e421330f458093c2743d4c8aa49c9207eba6296d\n"," Cloning 
git://github.com/tensorflow/lucid.git (to revision e421330f458093c2743d4c8aa49c9207eba6296d) to /tmp/pip-req-build-ik6v308v\n"," Running command git clone -q git://github.com/tensorflow/lucid.git /tmp/pip-req-build-ik6v308v\n"," Running command git checkout -q e421330f458093c2743d4c8aa49c9207eba6296d\n","Requirement already satisfied (use --upgrade to upgrade): lucid==0.3.9 from git+git://github.com/tensorflow/lucid.git@e421330f458093c2743d4c8aa49c9207eba6296d in /usr/local/lib/python3.6/dist-packages\n","Requirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from lucid==0.3.9) (1.18.5)\n","Requirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from lucid==0.3.9) (1.4.1)\n","Requirement already satisfied: scikit-learn in /usr/local/lib/python3.6/dist-packages (from lucid==0.3.9) (0.22.2.post1)\n","Requirement already satisfied: umap-learn in /usr/local/lib/python3.6/dist-packages (from lucid==0.3.9) (0.4.6)\n","Requirement already satisfied: nltk in /usr/local/lib/python3.6/dist-packages (from lucid==0.3.9) (3.2.5)\n","Requirement already satisfied: ipython in /usr/local/lib/python3.6/dist-packages (from lucid==0.3.9) (5.5.0)\n","Requirement already satisfied: pillow in /usr/local/lib/python3.6/dist-packages (from lucid==0.3.9) (7.0.0)\n","Requirement already satisfied: future in /usr/local/lib/python3.6/dist-packages (from lucid==0.3.9) (0.16.0)\n","Requirement already satisfied: decorator in /usr/local/lib/python3.6/dist-packages (from lucid==0.3.9) (4.4.2)\n","Requirement already satisfied: pyopengl in /usr/local/lib/python3.6/dist-packages (from lucid==0.3.9) (3.1.5)\n","Requirement already satisfied: filelock in /usr/local/lib/python3.6/dist-packages (from lucid==0.3.9) (3.0.12)\n","Requirement already satisfied: cachetools in /usr/local/lib/python3.6/dist-packages (from lucid==0.3.9) (4.1.1)\n","Requirement already satisfied: more-itertools in /usr/local/lib/python3.6/dist-packages (from lucid==0.3.9) (8.6.0)\n","Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.6/dist-packages (from scikit-learn->lucid==0.3.9) (0.17.0)\n","Requirement already satisfied: numba!=0.47,>=0.46 in /usr/local/lib/python3.6/dist-packages (from umap-learn->lucid==0.3.9) (0.48.0)\n","Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from nltk->lucid==0.3.9) (1.15.0)\n","Requirement already satisfied: setuptools>=18.5 in /usr/local/lib/python3.6/dist-packages (from ipython->lucid==0.3.9) (50.3.2)\n","Requirement already satisfied: traitlets>=4.2 in /usr/local/lib/python3.6/dist-packages (from ipython->lucid==0.3.9) (4.3.3)\n","Requirement already satisfied: simplegeneric>0.8 in /usr/local/lib/python3.6/dist-packages (from ipython->lucid==0.3.9) (0.8.1)\n","Requirement already satisfied: pickleshare in /usr/local/lib/python3.6/dist-packages (from ipython->lucid==0.3.9) (0.7.5)\n","Requirement already satisfied: pygments in /usr/local/lib/python3.6/dist-packages (from ipython->lucid==0.3.9) (2.6.1)\n","Requirement already satisfied: pexpect; sys_platform != \"win32\" in /usr/local/lib/python3.6/dist-packages (from ipython->lucid==0.3.9) (4.8.0)\n","Requirement already satisfied: prompt-toolkit<2.0.0,>=1.0.4 in /usr/local/lib/python3.6/dist-packages (from ipython->lucid==0.3.9) (1.0.18)\n","Requirement already satisfied: llvmlite<0.32.0,>=0.31.0dev0 in /usr/local/lib/python3.6/dist-packages (from numba!=0.47,>=0.46->umap-learn->lucid==0.3.9) (0.31.0)\n","Requirement already satisfied: ipython-genutils in 
/usr/local/lib/python3.6/dist-packages (from traitlets>=4.2->ipython->lucid==0.3.9) (0.2.0)\n","Requirement already satisfied: ptyprocess>=0.5 in /usr/local/lib/python3.6/dist-packages (from pexpect; sys_platform != \"win32\"->ipython->lucid==0.3.9) (0.6.0)\n","Requirement already satisfied: wcwidth in /usr/local/lib/python3.6/dist-packages (from prompt-toolkit<2.0.0,>=1.0.4->ipython->lucid==0.3.9) (0.2.5)\n","Building wheels for collected packages: lucid\n"," Building wheel for lucid (setup.py) ... \u001b[?25l\u001b[?25hdone\n"," Created wheel for lucid: filename=lucid-0.3.9-cp36-none-any.whl size=159169 sha256=fbb4f6cacac5513cefc246d628c46e244135b23683d4383e0618b55c18b1761f\n"," Stored in directory: /tmp/pip-ephem-wheel-cache-uire1cc4/wheels/3b/fc/32/337cd3f096d019fa6ae954f4268a37cba589f53b3f798f7e1e\n","Successfully built lucid\n"],"name":"stdout"}]},{"cell_type":"code","metadata":{"id":"IPNYCCROaNt_","executionInfo":{"status":"ok","timestamp":1605141747516,"user_tz":480,"elapsed":13437,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}}},"source":["def longest_common_prefix(l):\n"," l = set([s[: min(map(len, l))] for s in l])\n"," while len(l) > 1:\n"," l = set([s[:-1] for s in l])\n"," return list(l)[0]\n","\n","\n","def longest_common_suffix(l):\n"," l = set([s[-min(map(len, l)) :] for s in l])\n"," while len(l) > 1:\n"," l = set([s[1:] for s in l])\n"," return list(l)[0]\n","\n","\n","# small utility for abbreviating a list of names\n","def get_abbreviator(names):\n"," if len(names) <= 1:\n"," return slice(None, None)\n"," prefix = longest_common_prefix(names)\n"," prefix = prefix.rsplit(\"/\", 1)[0] + \"/\" if \"/\" in prefix else \"\"\n"," suffix = longest_common_suffix(names)\n"," suffix = \"/\" + suffix.split(\"/\", 1)[-1] if \"/\" in suffix else \"\"\n"," return slice(len(prefix), None if len(suffix) == 0 else -len(suffix))"],"execution_count":3,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"8v3WIvyWpxeX"},"source":["# Import utilities and load data"]},{"cell_type":"markdown","metadata":{"id":"5SECNXLL_OVd"},"source":["First let's import the utilities.\n"]},{"cell_type":"code","metadata":{"id":"m6VLpL0CZG3n","executionInfo":{"status":"ok","timestamp":1605141750051,"user_tz":480,"elapsed":15967,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"39ec1ded-d76d-469f-e418-0264dbf832fc","colab":{"base_uri":"https://localhost:8080/"}},"source":["import numpy as np\n","import tensorflow as tf\n","from collections import OrderedDict\n","from lucid.modelzoo.vision_base import Model\n","from lucid.scratch.rl_util import *\n","# get_abbreviator defined during setup\n","\n","# hide tensorflow deprecation warnings\n","tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)"],"execution_count":4,"outputs":[{"output_type":"stream","text":["/usr/local/lib/python3.6/dist-packages/sklearn/utils/deprecation.py:144: FutureWarning: The sklearn.decomposition.base module is deprecated in version 0.22 and will be removed in version 0.24. The corresponding classes / functions should instead be imported from sklearn.decomposition. 
Anything that cannot be imported from sklearn.decomposition is now part of the private API.\n"," warnings.warn(message, FutureWarning)\n"],"name":"stderr"}]},{"cell_type":"markdown","metadata":{"id":"E79AeCK0_F_B"},"source":["Here's a list of all the utilities we imported from `lucid.scratch.rl_util`."]},{"cell_type":"code","metadata":{"id":"7xiqVCRo2PeD","executionInfo":{"status":"ok","timestamp":1605141750052,"user_tz":480,"elapsed":15962,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"5d637ba0-2f00-449e-c670-a4358c4f19bd","colab":{"base_uri":"https://localhost:8080/"}},"source":["print(\", \".join(all_()))"],"execution_count":5,"outputs":[{"output_type":"stream","text":["np, tf, Model, ChannelReducer, param, objectives, render, transform, show, save, _image_url, _display_html, lucid_svelte, load_joblib, save_joblib, zoom_to, get_var, get_shape, concatenate_horizontally, hue_to_rgb, channels_to_rgb, conv2d, norm_filter, brightness_to_opacity, gradient_override_map, maxpool_override, get_acts, get_grad_or_attr, get_attr, get_grad, get_paths, get_multi_path_attr, argmax_nd, LayerNMF, rescale_opacity, all_, reload\n"],"name":"stdout"}]},{"cell_type":"markdown","metadata":{"id":"wQ58sOF-1-7Y"},"source":["Now let's load the data for the model we want to analyze. The available models are indexed [here](https://openaipublic.blob.core.windows.net/rl-clarity/attribution/models/index.html). In this example, we use the original CoinRun model, whose `/` is `coinrun`."]},{"cell_type":"code","metadata":{"id":"JbpTKYzqZalW","executionInfo":{"status":"ok","timestamp":1605141752384,"user_tz":480,"elapsed":18288,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}}},"source":["def load_data(relpath_name):\n"," basepath = \"https://openaipublic.blob.core.windows.net/rl-clarity/attribution/models/\"\n"," if \"/\" in relpath_name:\n"," relpath, name = relpath_name.rsplit(\"/\")\n"," relpath += \"/\"\n"," else:\n"," relpath = \"\"\n"," name = relpath_name\n"," dirpath = f\"{basepath}{relpath}rl-clarity/\"\n","\n"," result = {}\n"," result[\"model\"] = Model.load(f\"{dirpath}{name}.model.pb\")\n"," result.update(load_joblib(f\"{dirpath}{name}.metadata.jd\"))\n"," result.update(load_joblib(f\"{dirpath}{name}.observations.jd\"))\n"," result[\"trajectories\"] = load_joblib(f\"{dirpath}{name}.trajectories.jd\")\n","\n"," result[\"observations\"] = result[\"observations\"] / np.float(255)\n"," result[\"trajectories\"][\"observations\"] = result[\"trajectories\"][\"observations\"] / np.float(255)\n","\n"," layer_names = [\n"," node.name\n"," for node in result[\"model\"].graph_def.node\n"," if len(get_shape(result[\"model\"], node.name)) >= 4 and node.op.lower() == \"relu\"\n"," ]\n"," abbreviator = get_abbreviator(layer_names)\n"," result[\"layer_names\"] = OrderedDict(\n"," [(name[abbreviator], name) for name in layer_names]\n"," )\n","\n"," return result\n","\n","\n","data = load_data(\"coinrun\")"],"execution_count":6,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"l0eiu5S5_ewX"},"source":["Let's add all of the data we've loaded as local variables, printing their names."]},{"cell_type":"code","metadata":{"id":"p3laYhT36OB2","executionInfo":{"status":"ok","timestamp":1605141752387,"user_tz":480,"elapsed":18287,"user":{"displayName":"Jacob 
Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"a69a8b73-d718-4348-c7a4-0ceb2be1fab3","colab":{"base_uri":"https://localhost:8080/"}},"source":["locals().update(data)\n","print(\", \".join(data.keys()))"],"execution_count":7,"outputs":[{"output_type":"stream","text":["model, policy_logits_name, value_function_name, env_name, gae_gamma, gae_lambda, action_combos, observations, trajectories, layer_names\n"],"name":"stdout"}]},{"cell_type":"markdown","metadata":{"id":"YVUEqhE77W6H"},"source":["Let's explain each of these variables:\n","\n","- `model: lucid.modelzoo.vision_base.Model` – Lucid model\n","- `policy_logits_name: str` – name of the policy head tensor in the model\n","- `value_function_name: str` – name of the value head tensor in the model\n","- `env_name: str` – name of the environment\n","- `gae_gamma: float` – generalized advantage estimation hyperparameter $\\gamma$ used to train the model\n","- `gae_lambda: float` – generalized advantage estimation hyperparameter $\\lambda$ used to train the model\n","- `action_combos: List[Tuple[str]]` – which combination of keys each integer action corresponds to\n","- `observations: np.ndarray` – batch × height × width × channels float array of observations sampled infrequently from the agent playing the game, intended to be used for NMF\n","- `trajectories: Dict[str, np.ndarray]` – dictionary from contiguous trajectories of the agent playing the game with the following keys:\n"," - `observations` – trajectories × timesteps × height × width × channels float array of observations\n"," - `actions` – trajectories × timesteps array of integer actions\n"," - `rewards` – trajectories × timesteps array of float rewards\n"," - `firsts` – trajectories × timesteps array of booleans specifying whether the timestep was the first in the episode\n","- `layer_names: OrderedDict[str, str]` – mapping from the abbreviated names to the full names of the activation tensors of the convolutional layers in the model"]},{"cell_type":"markdown","metadata":{"id":"Kp3MjlPfOnRp"},"source":["Let's print the abbreviated layer names that were found."]},{"cell_type":"code","metadata":{"id":"EcSBLmhAOjgA","executionInfo":{"status":"ok","timestamp":1605141752388,"user_tz":480,"elapsed":18282,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"8973fd2d-e903-434b-cb36-4350bc30d38f","colab":{"base_uri":"https://localhost:8080/"}},"source":["print(\", \".join(layer_names.keys()))"],"execution_count":8,"outputs":[{"output_type":"stream","text":["1a, 2a, 2b, 3a, 4a\n"],"name":"stdout"}]},{"cell_type":"markdown","metadata":{"id":"3PmbPfycBC2G"},"source":["The `zoom_to` and `show` utilities are useful for displaying observations and visualizations."]},{"cell_type":"code","metadata":{"id":"1YItp5JEBBid","executionInfo":{"status":"ok","timestamp":1605141752388,"user_tz":480,"elapsed":18276,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"2717a99d-74b4-4edf-f49c-fd47f383c2b8","colab":{"base_uri":"https://localhost:8080/","height":241}},"source":["show(zoom_to(trajectories[\"observations\"][0, :8], 200))"],"execution_count":9,"outputs":[{"output_type":"display_data","data":{"text/html":["
\n"," 0
\n"," \n","
\n"," 1
\n"," \n","
\n"," 2
\n"," \n","
\n"," 3
\n"," \n","
\n"," 4
\n"," \n","
\n"," 5
\n"," \n","
\n"," 6
\n"," \n","
\n"," 7
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}}]},{"cell_type":"markdown","metadata":{"id":"YZgIsgsqIyK6"},"source":["# Dimensionality reduction and feature visualization"]},{"cell_type":"markdown","metadata":{"id":"w1s8GiR9JUkJ"},"source":["The `LayerNMF` utility can be used to apply NMF dimensionality reduction to obtain directions in activation space, and then to use feature visualization (either gradient-based or dataset example-based) to visualize those directions.\n","\n","Passing `attr_layer_name=value_function_name` causes NMF to be applied to value function attributions. If we did not pass this, NMF would instead be applied to activations.\n","\n","We use the infrequently-sampled `observations` rather than observations from `trajectories` in order to cover a broader distribution of observations."]},{"cell_type":"code","metadata":{"id":"rBDvl92SJTPu","executionInfo":{"status":"ok","timestamp":1605141861085,"user_tz":480,"elapsed":126967,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}}},"source":["# can take a couple of minutes\n","# for the paper, we use observations[:], but this requires more memory\n","nmf = LayerNMF(model, layer_names['2b'], observations[:1024], features=8, attr_layer_name=value_function_name)"],"execution_count":10,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"H5Z0pPQaKaHt"},"source":["The directions in activation space obtained are given by the `channel_dirs` property."]},{"cell_type":"code","metadata":{"id":"7ohXmx0SRJWl","executionInfo":{"status":"ok","timestamp":1605141861086,"user_tz":480,"elapsed":126964,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"5110563c-bbad-4ccc-ba7c-40c4d4427399","colab":{"base_uri":"https://localhost:8080/"}},"source":["nmf.channel_dirs.shape"],"execution_count":11,"outputs":[{"output_type":"execute_result","data":{"text/plain":["(8, 32)"]},"metadata":{"tags":[]},"execution_count":11}]},{"cell_type":"markdown","metadata":{"id":"dOUScXpSSOjP"},"source":["We can now visualize these directions in activation space in various ways.\n","\n","We can apply gradient-based feature visualization using the `vis_traditional` method."]},{"cell_type":"code","metadata":{"id":"KlmV_3IUKUwc","executionInfo":{"status":"ok","timestamp":1605141918169,"user_tz":480,"elapsed":184039,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"a502f31e-73f2-4b67-a935-c4c43c7a1f71","colab":{"base_uri":"https://localhost:8080/","height":325}},"source":["show(zoom_to(nmf.vis_traditional(), 200))"],"execution_count":12,"outputs":[{"output_type":"stream","text":["512 339.13757\n"],"name":"stdout"},{"output_type":"display_data","data":{"text/html":[""],"text/plain":[""]},"metadata":{"tags":[]}},{"output_type":"display_data","data":{"text/html":["
\n"," 0
\n"," \n","
\n"," 1
\n"," \n","
\n"," 2
\n"," \n","
\n"," 3
\n"," \n","
\n"," 4
\n"," \n","
\n"," 5
\n"," \n","
\n"," 6
\n"," \n","
\n"," 7
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}}]},{"cell_type":"markdown","metadata":{"id":"nOCMBvfFSsnF"},"source":["We can apply dataset example-based feature visualization using the `vis_dataset_thumbnail` method. `num_mult` gives the height and width of the grid of patches, and `expand_mult` is a multiplier on the size of each patch.\n","\n","The strength of the activation for the image from which the patch was taken is given in the alpha (opacity) channel of the visualization (scaled to be at most 1)."]},{"cell_type":"code","metadata":{"id":"JNy_Qi7RSlg8","executionInfo":{"status":"ok","timestamp":1605141918864,"user_tz":480,"elapsed":184728,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"1e56fe6c-6fa7-44e1-97b1-3b5ca7201bfd","colab":{"base_uri":"https://localhost:8080/","height":241}},"source":["show([zoom_to(nmf.vis_dataset_thumbnail(i, num_mult=4, expand_mult=4)[0], 200) for i in range(nmf.features)])"],"execution_count":13,"outputs":[{"output_type":"display_data","data":{"text/html":["
\n"," 0
\n"," \n","
\n"," 1
\n"," \n","
\n"," 2
\n"," \n","
\n"," 3
\n"," \n","
\n"," 4
\n"," \n","
\n"," 5
\n"," \n","
\n"," 6
\n"," \n","
\n"," 7
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}}]},{"cell_type":"markdown","metadata":{"id":"tUflo4qQT_Jv"},"source":["We can apply spatially-aware dataset example-based feature visualization using the `vis_dataset` method. `subdiv_mult` gives the height and width of the grid of patches per activation, and `expand_mult` is again a multiplier on the size of each patch.\n","\n","Activation strength is again given by opacity, so most of the top and left of the image is transparent since coins do not usually appear in those parts of observations."]},{"cell_type":"code","metadata":{"id":"GzBXlqXnSpMt","executionInfo":{"status":"ok","timestamp":1605141919157,"user_tz":480,"elapsed":185014,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"12e85c78-9aac-4c6a-e722-eeb3a6d0902a","colab":{"base_uri":"https://localhost:8080/","height":820}},"source":["show(zoom_to(nmf.vis_dataset(1, subdiv_mult=1, expand_mult=4)[0], 800))"],"execution_count":14,"outputs":[{"output_type":"display_data","data":{"text/html":[""],"text/plain":[""]},"metadata":{"tags":[]}}]},{"cell_type":"markdown","metadata":{"id":"lMMp7EOYYiCF"},"source":["# Attribution"]},{"cell_type":"markdown","metadata":{"id":"8dqOqp7AYrOL"},"source":["The `get_acts` utility can be used to get activations. We can use this to get the model's value function for some observations."]},{"cell_type":"code","metadata":{"id":"1-NxHuTtTjTi","executionInfo":{"status":"ok","timestamp":1605141919382,"user_tz":480,"elapsed":185232,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"bab1fe7c-46a2-4757-fe8f-298b8dc358d4","colab":{"base_uri":"https://localhost:8080/","height":241}},"source":["value_functions = get_acts(model, value_function_name, observations[:8])\n","show(zoom_to(observations[:8], 200), labels=[f\"{v:.3f}\" for v in value_functions])"],"execution_count":15,"outputs":[{"output_type":"display_data","data":{"text/html":["
\n"," 9.984
\n"," \n","
\n"," 9.622
\n"," \n","
\n"," 9.278
\n"," \n","
\n"," 9.500
\n"," \n","
\n"," 9.945
\n"," \n","
\n"," 9.668
\n"," \n","
\n"," 9.891
\n"," \n","
\n"," 9.975
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}}]},{"cell_type":"markdown","metadata":{"id":"6VvVyF5Ic8QZ"},"source":["The `get_attr` utility can be used to get attributions using the integrated gradients method. The number of steps used for the numerical integration is specified by `integrate_steps`. Here we apply this to some observations from a trajectory.\n","\n","We pass `value_function_name` in order to get value function attributions. If we passed the name of a tensor with more than one element (such as `policy_logits_name`), then we could use `score_fn` to specify how to reduce that tensor to a single element (note that `score_fn` should not reduce the batch dimension)."]},{"cell_type":"code","metadata":{"id":"UwGDu-4kcTG3","executionInfo":{"status":"ok","timestamp":1605141919557,"user_tz":480,"elapsed":185401,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"291e5d99-f5f7-43dd-914d-4446656a1d59","colab":{"base_uri":"https://localhost:8080/"}},"source":["traj = trajectories['observations'][0][76:84]\n","attr = get_attr(model, value_function_name, layer_names['2b'], traj, integrate_steps=10)\n","attr.shape"],"execution_count":16,"outputs":[{"output_type":"execute_result","data":{"text/plain":["(8, 32, 32, 32)"]},"metadata":{"tags":[]},"execution_count":16}]},{"cell_type":"markdown","metadata":{"id":"pOfsF2_EdwcF"},"source":["We can apply dimensionality reduction to these attributions using the `LayerNMF` object we generated earlier."]},{"cell_type":"code","metadata":{"id":"kjfVBYXxYpxf","executionInfo":{"status":"ok","timestamp":1605141919891,"user_tz":480,"elapsed":185729,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"23889619-c9ac-46b9-8e1f-c9bf5558cc2a","colab":{"base_uri":"https://localhost:8080/"}},"source":["attr_reduced = nmf.transform(np.maximum(attr, 0)) - nmf.transform(np.maximum(-attr, 0)) # transform the positive and negative parts separately\n","nmf_norms = nmf.channel_dirs.sum(-1)\n","attr_reduced *= nmf_norms[None, None, None] # multiply by the norms of the NMF directions, since the magnitudes of the NMF directions are not relevant\n","attr_reduced /= np.median(attr_reduced.max(axis=(-3, -2, -1))) # globally normalize by the median max value to make the visualization balanced (a bit of a hack)\n","attr_reduced.shape"],"execution_count":17,"outputs":[{"output_type":"execute_result","data":{"text/plain":["(8, 32, 32, 8)"]},"metadata":{"tags":[]},"execution_count":17}]},{"cell_type":"markdown","metadata":{"id":"0OO_eLvJiRhm"},"source":["Here are the observations along with the positive and negative parts of the attributions, which we visualize by assigning a different color to each of the post-NMF channels."]},{"cell_type":"code","metadata":{"id":"NmErsBUki-am","executionInfo":{"status":"ok","timestamp":1605141920257,"user_tz":480,"elapsed":186089,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"d7bea5dd-a74a-4aeb-f9ad-1549825ba8e2","colab":{"base_uri":"https://localhost:8080/","height":742}},"source":["attr_pos = np.maximum(attr_reduced, 0)\n","attr_neg = np.maximum(-attr_reduced, 0)\n","print(\"observation\")\n","show(zoom_to(traj, 200))\n","print(\"positive 
attribution\")\n","show(zoom_to(attr_pos, 200))\n","print(\"negative attribution\")\n","show(zoom_to(attr_neg, 200))"],"execution_count":18,"outputs":[{"output_type":"stream","text":["observation\n"],"name":"stdout"},{"output_type":"display_data","data":{"text/html":["
\n"," 0
\n"," \n","
\n"," 1
\n"," \n","
\n"," 2
\n"," \n","
\n"," 3
\n"," \n","
\n"," 4
\n"," \n","
\n"," 5
\n"," \n","
\n"," 6
\n"," \n","
\n"," 7
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}},{"output_type":"stream","text":["positive attribution\n"],"name":"stdout"},{"output_type":"display_data","data":{"text/html":["
\n"," 0
\n"," \n","
\n"," 1
\n"," \n","
\n"," 2
\n"," \n","
\n"," 3
\n"," \n","
\n"," 4
\n"," \n","
\n"," 5
\n"," \n","
\n"," 6
\n"," \n","
\n"," 7
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}},{"output_type":"stream","text":["negative attribution\n"],"name":"stdout"},{"output_type":"display_data","data":{"text/html":["
\n"," 0
\n"," \n","
\n"," 1
\n"," \n","
\n"," 2
\n"," \n","
\n"," 3
\n"," \n","
\n"," 4
\n"," \n","
\n"," 5
\n"," \n","
\n"," 6
\n"," \n","
\n"," 7
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}}]},{"cell_type":"markdown","metadata":{"id":"FbwL6c93jDMf"},"source":["We can use the `conv2d` and `norm_filter` utilities to smooth out attribution over nearby spatial positions, so that the amount of visual space taken up can be used to judge attribution strength."]},{"cell_type":"code","metadata":{"id":"mrlz1RlljCJ5","executionInfo":{"status":"ok","timestamp":1605141920840,"user_tz":480,"elapsed":186666,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"66f870cc-7777-4df1-b676-f300ca787771","colab":{"base_uri":"https://localhost:8080/","height":742}},"source":["print(\"observation\")\n","show(zoom_to(traj, 200))\n","print(\"positive attribution\")\n","show(zoom_to(conv2d(attr_pos, filter_=norm_filter(15)), 200))\n","print(\"negative attribution\")\n","show(zoom_to(conv2d(attr_neg, filter_=norm_filter(15)), 200))"],"execution_count":19,"outputs":[{"output_type":"stream","text":["observation\n"],"name":"stdout"},{"output_type":"display_data","data":{"text/html":["
\n"," 0
\n"," \n","
\n"," 1
\n"," \n","
\n"," 2
\n"," \n","
\n"," 3
\n"," \n","
\n"," 4
\n"," \n","
\n"," 5
\n"," \n","
\n"," 6
\n"," \n","
\n"," 7
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}},{"output_type":"stream","text":["positive attribution\n"],"name":"stdout"},{"output_type":"display_data","data":{"text/html":["
\n"," 0
\n"," \n","
\n"," 1
\n"," \n","
\n"," 2
\n"," \n","
\n"," 3
\n"," \n","
\n"," 4
\n"," \n","
\n"," 5
\n"," \n","
\n"," 6
\n"," \n","
\n"," 7
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}},{"output_type":"stream","text":["negative attribution\n"],"name":"stdout"},{"output_type":"display_data","data":{"text/html":["
\n"," 0
\n"," \n","
\n"," 1
\n"," \n","
\n"," 2
\n"," \n","
\n"," 3
\n"," \n","
\n"," 4
\n"," \n","
\n"," 5
\n"," \n","
\n"," 6
\n"," \n","
\n"," 7
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}}]},{"cell_type":"markdown","metadata":{"id":"RixyZCp1jwrr"},"source":["# Model editing"]},{"cell_type":"markdown","metadata":{"id":"0A8acH07sqpu"},"source":["To edit the model in-place, we can use Lucid's `ParameterEditor`."]},{"cell_type":"code","metadata":{"id":"SvL0GfJvsoGe","executionInfo":{"status":"ok","timestamp":1605141920841,"user_tz":480,"elapsed":186660,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}}},"source":["from copy import deepcopy\n","from lucid.scratch.parameter_editor import ParameterEditor\n","\n","edited_model = deepcopy(model)\n","editor = ParameterEditor(edited_model.graph_def)"],"execution_count":20,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"NF9s-yV_tqQC"},"source":["To make the model blind to saw obstacles, we can use the first NMF direction. We edit the convolutional kernel of the next layer to make it project out the NMF direction from activations before applying the original kernel."]},{"cell_type":"code","metadata":{"id":"OeIh47L7tB2g","executionInfo":{"status":"ok","timestamp":1605141920842,"user_tz":480,"elapsed":186658,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}}},"source":["kernel_name = layer_names[\"3a\"].replace(\"Relu\", \"conv2d/kernel\") # name of tensor of convolutional kernel of next layer\n","kernel = editor[kernel_name]\n","saw_dir = nmf.channel_dirs[0][None, None, :, None] # first NMF direction, corresponding to saw obstacle\n","saw_dir /= np.linalg.norm(saw_dir)\n","# the kernel is left-multiplied by the activations from the previous layer, so we left-multiply the kernel by the projection matrix\n","kernel = kernel - saw_dir * (saw_dir * kernel).sum(axis=-2, keepdims=True) # equivalently: kernel - saw_dir @ saw_dir.transpose((0, 1, 3, 2)) @ kernel\n","editor[kernel_name] = kernel\n","# note: this is not quite the same as the edit made for the paper, since we only used 1024 observations for the NMF calculation here"],"execution_count":21,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"0Yp8AMJBv9hz"},"source":["We can use the `get_var` utility to verify that the kernel has been updated."]},{"cell_type":"code","metadata":{"id":"O7jxxgmvgynF","executionInfo":{"status":"ok","timestamp":1605141920844,"user_tz":480,"elapsed":186656,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"165d44de-2231-4fca-a7b4-1940b2cb289d","colab":{"base_uri":"https://localhost:8080/"}},"source":["print(np.linalg.norm(get_var(model, kernel_name)))\n","print(np.linalg.norm(get_var(edited_model, kernel_name)))"],"execution_count":22,"outputs":[{"output_type":"stream","text":["31.603941\n","30.58285\n"],"name":"stdout"}]},{"cell_type":"markdown","metadata":{"id":"8Szlc5AtvNI_"},"source":["If we now repeat the attribution visualization from above with the edited model, we see that the red saw obstacle channel has disappeared, while the yellow coin channel remains present."]},{"cell_type":"code","metadata":{"id":"-j8AP7ZQouG_","executionInfo":{"status":"ok","timestamp":1605141922061,"user_tz":480,"elapsed":187866,"user":{"displayName":"Jacob 
Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"1b9d2701-0448-4176-a886-beb24cb0dbf8","colab":{"base_uri":"https://localhost:8080/","height":742}},"source":["traj = trajectories['observations'][0][76:84]\n","attr = get_attr(edited_model, value_function_name, layer_names['2b'], traj, integrate_steps=10)\n","attr_reduced = nmf.transform(np.maximum(attr, 0)) - nmf.transform(np.maximum(-attr, 0))\n","nmf_norms = nmf.channel_dirs.sum(-1)\n","attr_reduced *= nmf_norms[None, None, None]\n","attr_reduced /= np.median(attr_reduced.max(axis=(-3, -2, -1)))\n","attr_pos = np.maximum(attr_reduced, 0)\n","attr_neg = np.maximum(-attr_reduced, 0)\n","print(\"observation\")\n","show(zoom_to(traj, 200))\n","print(\"positive attribution\")\n","show(zoom_to(conv2d(attr_pos, filter_=norm_filter(15)), 200))\n","print(\"negative attribution\")\n","show(zoom_to(conv2d(attr_neg, filter_=norm_filter(15)), 200))"],"execution_count":23,"outputs":[{"output_type":"stream","text":["observation\n"],"name":"stdout"},{"output_type":"display_data","data":{"text/html":["
\n"," 0
\n"," \n","
\n"," 1
\n"," \n","
\n"," 2
\n"," \n","
\n"," 3
\n"," \n","
\n"," 4
\n"," \n","
\n"," 5
\n"," \n","
\n"," 6
\n"," \n","
\n"," 7
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}},{"output_type":"stream","text":["positive attribution\n"],"name":"stdout"},{"output_type":"display_data","data":{"text/html":["
\n"," 0
\n"," \n","
\n"," 1
\n"," \n","
\n"," 2
\n"," \n","
\n"," 3
\n"," \n","
\n"," 4
\n"," \n","
\n"," 5
\n"," \n","
\n"," 6
\n"," \n","
\n"," 7
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}},{"output_type":"stream","text":["negative attribution\n"],"name":"stdout"},{"output_type":"display_data","data":{"text/html":["
\n"," 0
\n"," \n","
\n"," 1
\n"," \n","
\n"," 2
\n"," \n","
\n"," 3
\n"," \n","
\n"," 4
\n"," \n","
\n"," 5
\n"," \n","
\n"," 6
\n"," \n","
\n"," 7
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}}]}]} \ No newline at end of file From 1b4714b05c329b3d287670cfbef4787a6ec11ed3 Mon Sep 17 00:00:00 2001 From: Jacob Hilton Date: Fri, 13 Nov 2020 12:11:41 -0800 Subject: [PATCH 32/57] rl_util.conv2d dtype bug fix --- lucid/scratch/rl_util/util.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lucid/scratch/rl_util/util.py b/lucid/scratch/rl_util/util.py index b3460444..7f498f80 100644 --- a/lucid/scratch/rl_util/util.py +++ b/lucid/scratch/rl_util/util.py @@ -58,7 +58,11 @@ def conv2d(input_, filter_): filter_.ndim == 2 ), "filter_ must have 2 dimensions and will be applied channelwise" with tf.Graph().as_default(), tf.Session(): - filter_ = tf.tensordot(filter_, np.eye(input_.shape[-1]), axes=[[], []]) + filter_ = tf.tensordot( + filter_.astype(input_.dtype), + np.eye(input_.shape[-1], dtype=input_.dtype), + axes=[[], []], + ) return tf.nn.conv2d( input_, filter=filter_, strides=[1, 1, 1, 1], padding="SAME" ).eval() From 1e9a98c79b338e446d88a2ec187ab1d33745872a Mon Sep 17 00:00:00 2001 From: Jacob Hilton Date: Fri, 13 Nov 2020 17:46:26 -0800 Subject: [PATCH 33/57] rl_util notebook lucid commit hash --- notebooks/misc/rl_util.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/notebooks/misc/rl_util.ipynb b/notebooks/misc/rl_util.ipynb index a7c75ed6..2c61e902 100644 --- a/notebooks/misc/rl_util.ipynb +++ b/notebooks/misc/rl_util.ipynb @@ -1 +1 @@ -{"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"name":"rl_clarity_util.ipynb","provenance":[],"collapsed_sections":["CsQnKgi0pqXR"],"authorship_tag":"ABX9TyNtc6peJc18ZpquQhmSX7eZ"},"kernelspec":{"name":"python3","display_name":"Python 3"}},"cells":[{"cell_type":"markdown","metadata":{"id":"VAn2uCs-CoEX"},"source":["This notebook explains how to use the utilities provided by `lucid.scratch.rl_util` with the models from the paper [Understanding RL vision](https://distill.pub/2020/understanding-rl-vision)."]},{"cell_type":"markdown","metadata":{"id":"CsQnKgi0pqXR"},"source":["# Setup"]},{"cell_type":"code","metadata":{"id":"3wKHkh1vWnwa","executionInfo":{"status":"ok","timestamp":1605141734690,"user_tz":480,"elapsed":623,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"8e1baa59-85f6-4d91-f49e-04501183c75e","colab":{"base_uri":"https://localhost:8080/"}},"source":["# use tensorflow 1.x\n","%tensorflow_version 1.x"],"execution_count":1,"outputs":[{"output_type":"stream","text":["TensorFlow 1.x selected.\n"],"name":"stdout"},{"output_type":"stream","text":["/usr/local/lib/python3.6/dist-packages/requests/__init__.py:91: RequestsDependencyWarning: urllib3 (1.26.0) or chardet (3.0.4) doesn't match a supported version!\n"," RequestsDependencyWarning)\n"],"name":"stderr"}]},{"cell_type":"code","metadata":{"id":"6QqbnIpxYMoT","executionInfo":{"status":"ok","timestamp":1605141747515,"user_tz":480,"elapsed":13441,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"09c392e2-9916-4678-96bb-af7071a8c549","colab":{"base_uri":"https://localhost:8080/"}},"source":["# install specific version of lucid\n","!pip install git+git://github.com/tensorflow/lucid.git@e421330f458093c2743d4c8aa49c9207eba6296d"],"execution_count":2,"outputs":[{"output_type":"stream","text":["Collecting 
git+git://github.com/tensorflow/lucid.git@e421330f458093c2743d4c8aa49c9207eba6296d\n"," Cloning git://github.com/tensorflow/lucid.git (to revision e421330f458093c2743d4c8aa49c9207eba6296d) to /tmp/pip-req-build-ik6v308v\n"," Running command git clone -q git://github.com/tensorflow/lucid.git /tmp/pip-req-build-ik6v308v\n"," Running command git checkout -q e421330f458093c2743d4c8aa49c9207eba6296d\n","Requirement already satisfied (use --upgrade to upgrade): lucid==0.3.9 from git+git://github.com/tensorflow/lucid.git@e421330f458093c2743d4c8aa49c9207eba6296d in /usr/local/lib/python3.6/dist-packages\n","Requirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from lucid==0.3.9) (1.18.5)\n","Requirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from lucid==0.3.9) (1.4.1)\n","Requirement already satisfied: scikit-learn in /usr/local/lib/python3.6/dist-packages (from lucid==0.3.9) (0.22.2.post1)\n","Requirement already satisfied: umap-learn in /usr/local/lib/python3.6/dist-packages (from lucid==0.3.9) (0.4.6)\n","Requirement already satisfied: nltk in /usr/local/lib/python3.6/dist-packages (from lucid==0.3.9) (3.2.5)\n","Requirement already satisfied: ipython in /usr/local/lib/python3.6/dist-packages (from lucid==0.3.9) (5.5.0)\n","Requirement already satisfied: pillow in /usr/local/lib/python3.6/dist-packages (from lucid==0.3.9) (7.0.0)\n","Requirement already satisfied: future in /usr/local/lib/python3.6/dist-packages (from lucid==0.3.9) (0.16.0)\n","Requirement already satisfied: decorator in /usr/local/lib/python3.6/dist-packages (from lucid==0.3.9) (4.4.2)\n","Requirement already satisfied: pyopengl in /usr/local/lib/python3.6/dist-packages (from lucid==0.3.9) (3.1.5)\n","Requirement already satisfied: filelock in /usr/local/lib/python3.6/dist-packages (from lucid==0.3.9) (3.0.12)\n","Requirement already satisfied: cachetools in /usr/local/lib/python3.6/dist-packages (from lucid==0.3.9) (4.1.1)\n","Requirement already satisfied: more-itertools in /usr/local/lib/python3.6/dist-packages (from lucid==0.3.9) (8.6.0)\n","Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.6/dist-packages (from scikit-learn->lucid==0.3.9) (0.17.0)\n","Requirement already satisfied: numba!=0.47,>=0.46 in /usr/local/lib/python3.6/dist-packages (from umap-learn->lucid==0.3.9) (0.48.0)\n","Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from nltk->lucid==0.3.9) (1.15.0)\n","Requirement already satisfied: setuptools>=18.5 in /usr/local/lib/python3.6/dist-packages (from ipython->lucid==0.3.9) (50.3.2)\n","Requirement already satisfied: traitlets>=4.2 in /usr/local/lib/python3.6/dist-packages (from ipython->lucid==0.3.9) (4.3.3)\n","Requirement already satisfied: simplegeneric>0.8 in /usr/local/lib/python3.6/dist-packages (from ipython->lucid==0.3.9) (0.8.1)\n","Requirement already satisfied: pickleshare in /usr/local/lib/python3.6/dist-packages (from ipython->lucid==0.3.9) (0.7.5)\n","Requirement already satisfied: pygments in /usr/local/lib/python3.6/dist-packages (from ipython->lucid==0.3.9) (2.6.1)\n","Requirement already satisfied: pexpect; sys_platform != \"win32\" in /usr/local/lib/python3.6/dist-packages (from ipython->lucid==0.3.9) (4.8.0)\n","Requirement already satisfied: prompt-toolkit<2.0.0,>=1.0.4 in /usr/local/lib/python3.6/dist-packages (from ipython->lucid==0.3.9) (1.0.18)\n","Requirement already satisfied: llvmlite<0.32.0,>=0.31.0dev0 in /usr/local/lib/python3.6/dist-packages (from 
numba!=0.47,>=0.46->umap-learn->lucid==0.3.9) (0.31.0)\n","Requirement already satisfied: ipython-genutils in /usr/local/lib/python3.6/dist-packages (from traitlets>=4.2->ipython->lucid==0.3.9) (0.2.0)\n","Requirement already satisfied: ptyprocess>=0.5 in /usr/local/lib/python3.6/dist-packages (from pexpect; sys_platform != \"win32\"->ipython->lucid==0.3.9) (0.6.0)\n","Requirement already satisfied: wcwidth in /usr/local/lib/python3.6/dist-packages (from prompt-toolkit<2.0.0,>=1.0.4->ipython->lucid==0.3.9) (0.2.5)\n","Building wheels for collected packages: lucid\n"," Building wheel for lucid (setup.py) ... \u001b[?25l\u001b[?25hdone\n"," Created wheel for lucid: filename=lucid-0.3.9-cp36-none-any.whl size=159169 sha256=fbb4f6cacac5513cefc246d628c46e244135b23683d4383e0618b55c18b1761f\n"," Stored in directory: /tmp/pip-ephem-wheel-cache-uire1cc4/wheels/3b/fc/32/337cd3f096d019fa6ae954f4268a37cba589f53b3f798f7e1e\n","Successfully built lucid\n"],"name":"stdout"}]},{"cell_type":"code","metadata":{"id":"IPNYCCROaNt_","executionInfo":{"status":"ok","timestamp":1605141747516,"user_tz":480,"elapsed":13437,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}}},"source":["def longest_common_prefix(l):\n"," l = set([s[: min(map(len, l))] for s in l])\n"," while len(l) > 1:\n"," l = set([s[:-1] for s in l])\n"," return list(l)[0]\n","\n","\n","def longest_common_suffix(l):\n"," l = set([s[-min(map(len, l)) :] for s in l])\n"," while len(l) > 1:\n"," l = set([s[1:] for s in l])\n"," return list(l)[0]\n","\n","\n","# small utility for abbreviating a list of names\n","def get_abbreviator(names):\n"," if len(names) <= 1:\n"," return slice(None, None)\n"," prefix = longest_common_prefix(names)\n"," prefix = prefix.rsplit(\"/\", 1)[0] + \"/\" if \"/\" in prefix else \"\"\n"," suffix = longest_common_suffix(names)\n"," suffix = \"/\" + suffix.split(\"/\", 1)[-1] if \"/\" in suffix else \"\"\n"," return slice(len(prefix), None if len(suffix) == 0 else -len(suffix))"],"execution_count":3,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"8v3WIvyWpxeX"},"source":["# Import utilities and load data"]},{"cell_type":"markdown","metadata":{"id":"5SECNXLL_OVd"},"source":["First let's import the utilities.\n"]},{"cell_type":"code","metadata":{"id":"m6VLpL0CZG3n","executionInfo":{"status":"ok","timestamp":1605141750051,"user_tz":480,"elapsed":15967,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"39ec1ded-d76d-469f-e418-0264dbf832fc","colab":{"base_uri":"https://localhost:8080/"}},"source":["import numpy as np\n","import tensorflow as tf\n","from collections import OrderedDict\n","from lucid.modelzoo.vision_base import Model\n","from lucid.scratch.rl_util import *\n","# get_abbreviator defined during setup\n","\n","# hide tensorflow deprecation warnings\n","tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)"],"execution_count":4,"outputs":[{"output_type":"stream","text":["/usr/local/lib/python3.6/dist-packages/sklearn/utils/deprecation.py:144: FutureWarning: The sklearn.decomposition.base module is deprecated in version 0.22 and will be removed in version 0.24. The corresponding classes / functions should instead be imported from sklearn.decomposition. 
Anything that cannot be imported from sklearn.decomposition is now part of the private API.\n"," warnings.warn(message, FutureWarning)\n"],"name":"stderr"}]},{"cell_type":"markdown","metadata":{"id":"E79AeCK0_F_B"},"source":["Here's a list of all the utilities we imported from `lucid.scratch.rl_util`."]},{"cell_type":"code","metadata":{"id":"7xiqVCRo2PeD","executionInfo":{"status":"ok","timestamp":1605141750052,"user_tz":480,"elapsed":15962,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"5d637ba0-2f00-449e-c670-a4358c4f19bd","colab":{"base_uri":"https://localhost:8080/"}},"source":["print(\", \".join(all_()))"],"execution_count":5,"outputs":[{"output_type":"stream","text":["np, tf, Model, ChannelReducer, param, objectives, render, transform, show, save, _image_url, _display_html, lucid_svelte, load_joblib, save_joblib, zoom_to, get_var, get_shape, concatenate_horizontally, hue_to_rgb, channels_to_rgb, conv2d, norm_filter, brightness_to_opacity, gradient_override_map, maxpool_override, get_acts, get_grad_or_attr, get_attr, get_grad, get_paths, get_multi_path_attr, argmax_nd, LayerNMF, rescale_opacity, all_, reload\n"],"name":"stdout"}]},{"cell_type":"markdown","metadata":{"id":"wQ58sOF-1-7Y"},"source":["Now let's load the data for the model we want to analyze. The available models are indexed [here](https://openaipublic.blob.core.windows.net/rl-clarity/attribution/models/index.html). In this example, we use the original CoinRun model, whose `/` is `coinrun`."]},{"cell_type":"code","metadata":{"id":"JbpTKYzqZalW","executionInfo":{"status":"ok","timestamp":1605141752384,"user_tz":480,"elapsed":18288,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}}},"source":["def load_data(relpath_name):\n"," basepath = \"https://openaipublic.blob.core.windows.net/rl-clarity/attribution/models/\"\n"," if \"/\" in relpath_name:\n"," relpath, name = relpath_name.rsplit(\"/\")\n"," relpath += \"/\"\n"," else:\n"," relpath = \"\"\n"," name = relpath_name\n"," dirpath = f\"{basepath}{relpath}rl-clarity/\"\n","\n"," result = {}\n"," result[\"model\"] = Model.load(f\"{dirpath}{name}.model.pb\")\n"," result.update(load_joblib(f\"{dirpath}{name}.metadata.jd\"))\n"," result.update(load_joblib(f\"{dirpath}{name}.observations.jd\"))\n"," result[\"trajectories\"] = load_joblib(f\"{dirpath}{name}.trajectories.jd\")\n","\n"," result[\"observations\"] = result[\"observations\"] / np.float(255)\n"," result[\"trajectories\"][\"observations\"] = result[\"trajectories\"][\"observations\"] / np.float(255)\n","\n"," layer_names = [\n"," node.name\n"," for node in result[\"model\"].graph_def.node\n"," if len(get_shape(result[\"model\"], node.name)) >= 4 and node.op.lower() == \"relu\"\n"," ]\n"," abbreviator = get_abbreviator(layer_names)\n"," result[\"layer_names\"] = OrderedDict(\n"," [(name[abbreviator], name) for name in layer_names]\n"," )\n","\n"," return result\n","\n","\n","data = load_data(\"coinrun\")"],"execution_count":6,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"l0eiu5S5_ewX"},"source":["Let's add all of the data we've loaded as local variables, printing their names."]},{"cell_type":"code","metadata":{"id":"p3laYhT36OB2","executionInfo":{"status":"ok","timestamp":1605141752387,"user_tz":480,"elapsed":18287,"user":{"displayName":"Jacob 
Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"a69a8b73-d718-4348-c7a4-0ceb2be1fab3","colab":{"base_uri":"https://localhost:8080/"}},"source":["locals().update(data)\n","print(\", \".join(data.keys()))"],"execution_count":7,"outputs":[{"output_type":"stream","text":["model, policy_logits_name, value_function_name, env_name, gae_gamma, gae_lambda, action_combos, observations, trajectories, layer_names\n"],"name":"stdout"}]},{"cell_type":"markdown","metadata":{"id":"YVUEqhE77W6H"},"source":["Let's explain each of these variables:\n","\n","- `model: lucid.modelzoo.vision_base.Model` – Lucid model\n","- `policy_logits_name: str` – name of the policy head tensor in the model\n","- `value_function_name: str` – name of the value head tensor in the model\n","- `env_name: str` – name of the environment\n","- `gae_gamma: float` – generalized advantage estimation hyperparameter $\\gamma$ used to train the model\n","- `gae_lambda: float` – generalized advantage estimation hyperparameter $\\lambda$ used to train the model\n","- `action_combos: List[Tuple[str]]` – which combination of keys each integer action corresponds to\n","- `observations: np.ndarray` – batch × height × width × channels float array of observations sampled infrequently from the agent playing the game, intended to be used for NMF\n","- `trajectories: Dict[str, np.ndarray]` – dictionary from contiguous trajectories of the agent playing the game with the following keys:\n"," - `observations` – trajectories × timesteps × height × width × channels float array of observations\n"," - `actions` – trajectories × timesteps array of integer actions\n"," - `rewards` – trajectories × timesteps array of float rewards\n"," - `firsts` – trajectories × timesteps array of booleans specifying whether the timestep was the first in the episode\n","- `layer_names: OrderedDict[str, str]` – mapping from the abbreviated names to the full names of the activation tensors of the convolutional layers in the model"]},{"cell_type":"markdown","metadata":{"id":"Kp3MjlPfOnRp"},"source":["Let's print the abbreviated layer names that were found."]},{"cell_type":"code","metadata":{"id":"EcSBLmhAOjgA","executionInfo":{"status":"ok","timestamp":1605141752388,"user_tz":480,"elapsed":18282,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"8973fd2d-e903-434b-cb36-4350bc30d38f","colab":{"base_uri":"https://localhost:8080/"}},"source":["print(\", \".join(layer_names.keys()))"],"execution_count":8,"outputs":[{"output_type":"stream","text":["1a, 2a, 2b, 3a, 4a\n"],"name":"stdout"}]},{"cell_type":"markdown","metadata":{"id":"3PmbPfycBC2G"},"source":["The `zoom_to` and `show` utilities are useful for displaying observations and visualizations."]},{"cell_type":"code","metadata":{"id":"1YItp5JEBBid","executionInfo":{"status":"ok","timestamp":1605141752388,"user_tz":480,"elapsed":18276,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"2717a99d-74b4-4edf-f49c-fd47f383c2b8","colab":{"base_uri":"https://localhost:8080/","height":241}},"source":["show(zoom_to(trajectories[\"observations\"][0, :8], 200))"],"execution_count":9,"outputs":[{"output_type":"display_data","data":{"text/html":["
\n"," 0
\n"," \n","
\n"," 1
\n"," \n","
\n"," 2
\n"," \n","
\n"," 3
\n"," \n","
\n"," 4
\n"," \n","
\n"," 5
\n"," \n","
\n"," 6
\n"," \n","
\n"," 7
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}}]},{"cell_type":"markdown","metadata":{"id":"YZgIsgsqIyK6"},"source":["# Dimensionality reduction and feature visualization"]},{"cell_type":"markdown","metadata":{"id":"w1s8GiR9JUkJ"},"source":["The `LayerNMF` utility can be used to apply NMF dimensionality reduction to obtain directions in activation space, and then to use feature visualization (either gradient-based or dataset example-based) to visualize those directions.\n","\n","Passing `attr_layer_name=value_function_name` causes NMF to be applied to value function attributions. If we did not pass this, NMF would instead be applied to activations.\n","\n","We use the infrequently-sampled `observations` rather than observations from `trajectories` in order to cover a broader distribution of observations."]},{"cell_type":"code","metadata":{"id":"rBDvl92SJTPu","executionInfo":{"status":"ok","timestamp":1605141861085,"user_tz":480,"elapsed":126967,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}}},"source":["# can take a couple of minutes\n","# for the paper, we use observations[:], but this requires more memory\n","nmf = LayerNMF(model, layer_names['2b'], observations[:1024], features=8, attr_layer_name=value_function_name)"],"execution_count":10,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"H5Z0pPQaKaHt"},"source":["The directions in activation space obtained are given by the `channel_dirs` property."]},{"cell_type":"code","metadata":{"id":"7ohXmx0SRJWl","executionInfo":{"status":"ok","timestamp":1605141861086,"user_tz":480,"elapsed":126964,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"5110563c-bbad-4ccc-ba7c-40c4d4427399","colab":{"base_uri":"https://localhost:8080/"}},"source":["nmf.channel_dirs.shape"],"execution_count":11,"outputs":[{"output_type":"execute_result","data":{"text/plain":["(8, 32)"]},"metadata":{"tags":[]},"execution_count":11}]},{"cell_type":"markdown","metadata":{"id":"dOUScXpSSOjP"},"source":["We can now visualize these directions in activation space in various ways.\n","\n","We can apply gradient-based feature visualization using the `vis_traditional` method."]},{"cell_type":"code","metadata":{"id":"KlmV_3IUKUwc","executionInfo":{"status":"ok","timestamp":1605141918169,"user_tz":480,"elapsed":184039,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"a502f31e-73f2-4b67-a935-c4c43c7a1f71","colab":{"base_uri":"https://localhost:8080/","height":325}},"source":["show(zoom_to(nmf.vis_traditional(), 200))"],"execution_count":12,"outputs":[{"output_type":"stream","text":["512 339.13757\n"],"name":"stdout"},{"output_type":"display_data","data":{"text/html":[""],"text/plain":[""]},"metadata":{"tags":[]}},{"output_type":"display_data","data":{"text/html":["
\n"," 0
\n"," \n","
\n"," 1
\n"," \n","
\n"," 2
\n"," \n","
\n"," 3
\n"," \n","
\n"," 4
\n"," \n","
\n"," 5
\n"," \n","
\n"," 6
\n"," \n","
\n"," 7
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}}]},{"cell_type":"markdown","metadata":{"id":"nOCMBvfFSsnF"},"source":["We can apply dataset example-based feature visualization using the `vis_dataset_thumbnail` method. `num_mult` gives the height and width of the grid of patches, and `expand_mult` is a multiplier on the size of each patch.\n","\n","The strength of the activation for the image from which the patch was taken is given in the alpha (opacity) channel of the visualization (scaled to be at most 1)."]},{"cell_type":"code","metadata":{"id":"JNy_Qi7RSlg8","executionInfo":{"status":"ok","timestamp":1605141918864,"user_tz":480,"elapsed":184728,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"1e56fe6c-6fa7-44e1-97b1-3b5ca7201bfd","colab":{"base_uri":"https://localhost:8080/","height":241}},"source":["show([zoom_to(nmf.vis_dataset_thumbnail(i, num_mult=4, expand_mult=4)[0], 200) for i in range(nmf.features)])"],"execution_count":13,"outputs":[{"output_type":"display_data","data":{"text/html":["
\n"," 0
\n"," \n","
\n"," 1
\n"," \n","
\n"," 2
\n"," \n","
\n"," 3
\n"," \n","
\n"," 4
\n"," \n","
\n"," 5
\n"," \n","
\n"," 6
\n"," \n","
\n"," 7
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}}]},{"cell_type":"markdown","metadata":{"id":"tUflo4qQT_Jv"},"source":["We can apply spatially-aware dataset example-based feature visualization using the `vis_dataset` method. `subdiv_mult` gives the height and width of the grid of patches per activation, and `expand_mult` is again a multiplier on the size of each patch.\n","\n","Activation strength is again given by opacity, so most of the top and left of the image is transparent since coins do not usually appear in those parts of observations."]},{"cell_type":"code","metadata":{"id":"GzBXlqXnSpMt","executionInfo":{"status":"ok","timestamp":1605141919157,"user_tz":480,"elapsed":185014,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"12e85c78-9aac-4c6a-e722-eeb3a6d0902a","colab":{"base_uri":"https://localhost:8080/","height":820}},"source":["show(zoom_to(nmf.vis_dataset(1, subdiv_mult=1, expand_mult=4)[0], 800))"],"execution_count":14,"outputs":[{"output_type":"display_data","data":{"text/html":[""],"text/plain":[""]},"metadata":{"tags":[]}}]},{"cell_type":"markdown","metadata":{"id":"lMMp7EOYYiCF"},"source":["# Attribution"]},{"cell_type":"markdown","metadata":{"id":"8dqOqp7AYrOL"},"source":["The `get_acts` utility can be used to get activations. We can use this to get the model's value function for some observations."]},{"cell_type":"code","metadata":{"id":"1-NxHuTtTjTi","executionInfo":{"status":"ok","timestamp":1605141919382,"user_tz":480,"elapsed":185232,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"bab1fe7c-46a2-4757-fe8f-298b8dc358d4","colab":{"base_uri":"https://localhost:8080/","height":241}},"source":["value_functions = get_acts(model, value_function_name, observations[:8])\n","show(zoom_to(observations[:8], 200), labels=[f\"{v:.3f}\" for v in value_functions])"],"execution_count":15,"outputs":[{"output_type":"display_data","data":{"text/html":["
\n"," 9.984
\n"," \n","
\n"," 9.622
\n"," \n","
\n"," 9.278
\n"," \n","
\n"," 9.500
\n"," \n","
\n"," 9.945
\n"," \n","
\n"," 9.668
\n"," \n","
\n"," 9.891
\n"," \n","
\n"," 9.975
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}}]},{"cell_type":"markdown","metadata":{"id":"6VvVyF5Ic8QZ"},"source":["The `get_attr` utility can be used to get attributions using the integrated gradients method. The number of steps used for the numerical integration is specified by `integrate_steps`. Here we apply this to some observations from a trajectory.\n","\n","We pass `value_function_name` in order to get value function attributions. If we passed the name of a tensor with more than one element (such as `policy_logits_name`), then we could use `score_fn` to specify how to reduce that tensor to a single element (note that `score_fn` should not reduce the batch dimension)."]},{"cell_type":"code","metadata":{"id":"UwGDu-4kcTG3","executionInfo":{"status":"ok","timestamp":1605141919557,"user_tz":480,"elapsed":185401,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"291e5d99-f5f7-43dd-914d-4446656a1d59","colab":{"base_uri":"https://localhost:8080/"}},"source":["traj = trajectories['observations'][0][76:84]\n","attr = get_attr(model, value_function_name, layer_names['2b'], traj, integrate_steps=10)\n","attr.shape"],"execution_count":16,"outputs":[{"output_type":"execute_result","data":{"text/plain":["(8, 32, 32, 32)"]},"metadata":{"tags":[]},"execution_count":16}]},{"cell_type":"markdown","metadata":{"id":"pOfsF2_EdwcF"},"source":["We can apply dimensionality reduction to these attributions using the `LayerNMF` object we generated earlier."]},{"cell_type":"code","metadata":{"id":"kjfVBYXxYpxf","executionInfo":{"status":"ok","timestamp":1605141919891,"user_tz":480,"elapsed":185729,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"23889619-c9ac-46b9-8e1f-c9bf5558cc2a","colab":{"base_uri":"https://localhost:8080/"}},"source":["attr_reduced = nmf.transform(np.maximum(attr, 0)) - nmf.transform(np.maximum(-attr, 0)) # transform the positive and negative parts separately\n","nmf_norms = nmf.channel_dirs.sum(-1)\n","attr_reduced *= nmf_norms[None, None, None] # multiply by the norms of the NMF directions, since the magnitudes of the NMF directions are not relevant\n","attr_reduced /= np.median(attr_reduced.max(axis=(-3, -2, -1))) # globally normalize by the median max value to make the visualization balanced (a bit of a hack)\n","attr_reduced.shape"],"execution_count":17,"outputs":[{"output_type":"execute_result","data":{"text/plain":["(8, 32, 32, 8)"]},"metadata":{"tags":[]},"execution_count":17}]},{"cell_type":"markdown","metadata":{"id":"0OO_eLvJiRhm"},"source":["Here are the observations along with the positive and negative parts of the attributions, which we visualize by assigning a different color to each of the post-NMF channels."]},{"cell_type":"code","metadata":{"id":"NmErsBUki-am","executionInfo":{"status":"ok","timestamp":1605141920257,"user_tz":480,"elapsed":186089,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"d7bea5dd-a74a-4aeb-f9ad-1549825ba8e2","colab":{"base_uri":"https://localhost:8080/","height":742}},"source":["attr_pos = np.maximum(attr_reduced, 0)\n","attr_neg = np.maximum(-attr_reduced, 0)\n","print(\"observation\")\n","show(zoom_to(traj, 200))\n","print(\"positive 
attribution\")\n","show(zoom_to(attr_pos, 200))\n","print(\"negative attribution\")\n","show(zoom_to(attr_neg, 200))"],"execution_count":18,"outputs":[{"output_type":"stream","text":["observation\n"],"name":"stdout"},{"output_type":"display_data","data":{"text/html":["
\n"," 0
\n"," \n","
\n"," 1
\n"," \n","
\n"," 2
\n"," \n","
\n"," 3
\n"," \n","
\n"," 4
\n"," \n","
\n"," 5
\n"," \n","
\n"," 6
\n"," \n","
\n"," 7
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}},{"output_type":"stream","text":["positive attribution\n"],"name":"stdout"},{"output_type":"display_data","data":{"text/html":["
\n"," 0
\n"," \n","
\n"," 1
\n"," \n","
\n"," 2
\n"," \n","
\n"," 3
\n"," \n","
\n"," 4
\n"," \n","
\n"," 5
\n"," \n","
\n"," 6
\n"," \n","
\n"," 7
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}},{"output_type":"stream","text":["negative attribution\n"],"name":"stdout"},{"output_type":"display_data","data":{"text/html":["
\n"," 0
\n"," \n","
\n"," 1
\n"," \n","
\n"," 2
\n"," \n","
\n"," 3
\n"," \n","
\n"," 4
\n"," \n","
\n"," 5
\n"," \n","
\n"," 6
\n"," \n","
\n"," 7
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}}]},{"cell_type":"markdown","metadata":{"id":"FbwL6c93jDMf"},"source":["We can use the `conv2d` and `norm_filter` utilities to smooth out attribution over nearby spatial positions, so that the amount of visual space taken up can be used to judge attribution strength."]},{"cell_type":"code","metadata":{"id":"mrlz1RlljCJ5","executionInfo":{"status":"ok","timestamp":1605141920840,"user_tz":480,"elapsed":186666,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"66f870cc-7777-4df1-b676-f300ca787771","colab":{"base_uri":"https://localhost:8080/","height":742}},"source":["print(\"observation\")\n","show(zoom_to(traj, 200))\n","print(\"positive attribution\")\n","show(zoom_to(conv2d(attr_pos, filter_=norm_filter(15)), 200))\n","print(\"negative attribution\")\n","show(zoom_to(conv2d(attr_neg, filter_=norm_filter(15)), 200))"],"execution_count":19,"outputs":[{"output_type":"stream","text":["observation\n"],"name":"stdout"},{"output_type":"display_data","data":{"text/html":["
\n"," 0
\n"," \n","
\n"," 1
\n"," \n","
\n"," 2
\n"," \n","
\n"," 3
\n"," \n","
\n"," 4
\n"," \n","
\n"," 5
\n"," \n","
\n"," 6
\n"," \n","
\n"," 7
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}},{"output_type":"stream","text":["positive attribution\n"],"name":"stdout"},{"output_type":"display_data","data":{"text/html":["
\n"," 0
\n"," \n","
\n"," 1
\n"," \n","
\n"," 2
\n"," \n","
\n"," 3
\n"," \n","
\n"," 4
\n"," \n","
\n"," 5
\n"," \n","
\n"," 6
\n"," \n","
\n"," 7
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}},{"output_type":"stream","text":["negative attribution\n"],"name":"stdout"},{"output_type":"display_data","data":{"text/html":["
\n"," 0
\n"," \n","
\n"," 1
\n"," \n","
\n"," 2
\n"," \n","
\n"," 3
\n"," \n","
\n"," 4
\n"," \n","
\n"," 5
\n"," \n","
\n"," 6
\n"," \n","
\n"," 7
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}}]},{"cell_type":"markdown","metadata":{"id":"RixyZCp1jwrr"},"source":["# Model editing"]},{"cell_type":"markdown","metadata":{"id":"0A8acH07sqpu"},"source":["To edit the model in-place, we can use Lucid's `ParameterEditor`."]},{"cell_type":"code","metadata":{"id":"SvL0GfJvsoGe","executionInfo":{"status":"ok","timestamp":1605141920841,"user_tz":480,"elapsed":186660,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}}},"source":["from copy import deepcopy\n","from lucid.scratch.parameter_editor import ParameterEditor\n","\n","edited_model = deepcopy(model)\n","editor = ParameterEditor(edited_model.graph_def)"],"execution_count":20,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"NF9s-yV_tqQC"},"source":["To make the model blind to saw obstacles, we can use the first NMF direction. We edit the convolutional kernel of the next layer to make it project out the NMF direction from activations before applying the original kernel."]},{"cell_type":"code","metadata":{"id":"OeIh47L7tB2g","executionInfo":{"status":"ok","timestamp":1605141920842,"user_tz":480,"elapsed":186658,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}}},"source":["kernel_name = layer_names[\"3a\"].replace(\"Relu\", \"conv2d/kernel\") # name of tensor of convolutional kernel of next layer\n","kernel = editor[kernel_name]\n","saw_dir = nmf.channel_dirs[0][None, None, :, None] # first NMF direction, corresponding to saw obstacle\n","saw_dir /= np.linalg.norm(saw_dir)\n","# the kernel is left-multiplied by the activations from the previous layer, so we left-multiply the kernel by the projection matrix\n","kernel = kernel - saw_dir * (saw_dir * kernel).sum(axis=-2, keepdims=True) # equivalently: kernel - saw_dir @ saw_dir.transpose((0, 1, 3, 2)) @ kernel\n","editor[kernel_name] = kernel\n","# note: this is not quite the same as the edit made for the paper, since we only used 1024 observations for the NMF calculation here"],"execution_count":21,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"0Yp8AMJBv9hz"},"source":["We can use the `get_var` utility to verify that the kernel has been updated."]},{"cell_type":"code","metadata":{"id":"O7jxxgmvgynF","executionInfo":{"status":"ok","timestamp":1605141920844,"user_tz":480,"elapsed":186656,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"165d44de-2231-4fca-a7b4-1940b2cb289d","colab":{"base_uri":"https://localhost:8080/"}},"source":["print(np.linalg.norm(get_var(model, kernel_name)))\n","print(np.linalg.norm(get_var(edited_model, kernel_name)))"],"execution_count":22,"outputs":[{"output_type":"stream","text":["31.603941\n","30.58285\n"],"name":"stdout"}]},{"cell_type":"markdown","metadata":{"id":"8Szlc5AtvNI_"},"source":["If we now repeat the attribution visualization from above with the edited model, we see that the red saw obstacle channel has disappeared, while the yellow coin channel remains present."]},{"cell_type":"code","metadata":{"id":"-j8AP7ZQouG_","executionInfo":{"status":"ok","timestamp":1605141922061,"user_tz":480,"elapsed":187866,"user":{"displayName":"Jacob 
Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"1b9d2701-0448-4176-a886-beb24cb0dbf8","colab":{"base_uri":"https://localhost:8080/","height":742}},"source":["traj = trajectories['observations'][0][76:84]\n","attr = get_attr(edited_model, value_function_name, layer_names['2b'], traj, integrate_steps=10)\n","attr_reduced = nmf.transform(np.maximum(attr, 0)) - nmf.transform(np.maximum(-attr, 0))\n","nmf_norms = nmf.channel_dirs.sum(-1)\n","attr_reduced *= nmf_norms[None, None, None]\n","attr_reduced /= np.median(attr_reduced.max(axis=(-3, -2, -1)))\n","attr_pos = np.maximum(attr_reduced, 0)\n","attr_neg = np.maximum(-attr_reduced, 0)\n","print(\"observation\")\n","show(zoom_to(traj, 200))\n","print(\"positive attribution\")\n","show(zoom_to(conv2d(attr_pos, filter_=norm_filter(15)), 200))\n","print(\"negative attribution\")\n","show(zoom_to(conv2d(attr_neg, filter_=norm_filter(15)), 200))"],"execution_count":23,"outputs":[{"output_type":"stream","text":["observation\n"],"name":"stdout"},{"output_type":"display_data","data":{"text/html":["
\n"," 0
\n"," \n","
\n"," 1
\n"," \n","
\n"," 2
\n"," \n","
\n"," 3
\n"," \n","
\n"," 4
\n"," \n","
\n"," 5
\n"," \n","
\n"," 6
\n"," \n","
\n"," 7
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}},{"output_type":"stream","text":["positive attribution\n"],"name":"stdout"},{"output_type":"display_data","data":{"text/html":["
\n"," 0
\n"," \n","
\n"," 1
\n"," \n","
\n"," 2
\n"," \n","
\n"," 3
\n"," \n","
\n"," 4
\n"," \n","
\n"," 5
\n"," \n","
\n"," 6
\n"," \n","
\n"," 7
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}},{"output_type":"stream","text":["negative attribution\n"],"name":"stdout"},{"output_type":"display_data","data":{"text/html":["
\n"," 0
\n"," \n","
\n"," 1
\n"," \n","
\n"," 2
\n"," \n","
\n"," 3
\n"," \n","
\n"," 4
\n"," \n","
\n"," 5
\n"," \n","
\n"," 6
\n"," \n","
\n"," 7
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}}]}]} \ No newline at end of file +{"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"name":"rl_clarity_util.ipynb","provenance":[],"collapsed_sections":["CsQnKgi0pqXR"],"authorship_tag":"ABX9TyMx9zfDbdpONWs+YnCm3c7G"},"kernelspec":{"name":"python3","display_name":"Python 3"}},"cells":[{"cell_type":"markdown","metadata":{"id":"VAn2uCs-CoEX"},"source":["This notebook explains how to use the utilities provided by `lucid.scratch.rl_util` with the models from the paper [Understanding RL vision](https://distill.pub/2020/understanding-rl-vision)."]},{"cell_type":"markdown","metadata":{"id":"CsQnKgi0pqXR"},"source":["# Setup"]},{"cell_type":"code","metadata":{"id":"3wKHkh1vWnwa","executionInfo":{"status":"ok","timestamp":1605317737435,"user_tz":480,"elapsed":530,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"3e7e6ca4-b9eb-429a-f10b-2da770b4bac5","colab":{"base_uri":"https://localhost:8080/"}},"source":["# use tensorflow 1.x\n","%tensorflow_version 1.x"],"execution_count":1,"outputs":[{"output_type":"stream","text":["TensorFlow 1.x selected.\n"],"name":"stdout"}]},{"cell_type":"code","metadata":{"id":"6QqbnIpxYMoT","executionInfo":{"status":"ok","timestamp":1605317751882,"user_tz":480,"elapsed":14970,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"0bb21585-13b0-44d3-8571-6112f9b4b9cd","colab":{"base_uri":"https://localhost:8080/"}},"source":["# install specific version of lucid\n","!pip install git+git://github.com/tensorflow/lucid.git@16a03dee8f99af4cdd89d6b7c1cc913817174c83"],"execution_count":2,"outputs":[{"output_type":"stream","text":["Collecting git+git://github.com/tensorflow/lucid.git@16a03dee8f99af4cdd89d6b7c1cc913817174c83\n"," Cloning git://github.com/tensorflow/lucid.git (to revision 16a03dee8f99af4cdd89d6b7c1cc913817174c83) to /tmp/pip-req-build-02w3ic5f\n"," Running command git clone -q git://github.com/tensorflow/lucid.git /tmp/pip-req-build-02w3ic5f\n","Requirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from lucid==0.3.9) (1.18.5)\n","Requirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from lucid==0.3.9) (1.4.1)\n","Requirement already satisfied: scikit-learn in /usr/local/lib/python3.6/dist-packages (from lucid==0.3.9) (0.22.2.post1)\n","Requirement already satisfied: umap-learn in /usr/local/lib/python3.6/dist-packages (from lucid==0.3.9) (0.4.6)\n","Requirement already satisfied: nltk in /usr/local/lib/python3.6/dist-packages (from lucid==0.3.9) (3.2.5)\n","Requirement already satisfied: ipython in /usr/local/lib/python3.6/dist-packages (from lucid==0.3.9) (5.5.0)\n","Requirement already satisfied: pillow in /usr/local/lib/python3.6/dist-packages (from lucid==0.3.9) (7.0.0)\n","Requirement already satisfied: future in /usr/local/lib/python3.6/dist-packages (from lucid==0.3.9) (0.16.0)\n","Requirement already satisfied: decorator in /usr/local/lib/python3.6/dist-packages (from lucid==0.3.9) (4.4.2)\n","Requirement already satisfied: pyopengl in /usr/local/lib/python3.6/dist-packages (from lucid==0.3.9) (3.1.5)\n","Requirement already satisfied: filelock in /usr/local/lib/python3.6/dist-packages (from lucid==0.3.9) (3.0.12)\n","Requirement already satisfied: cachetools in /usr/local/lib/python3.6/dist-packages 
(from lucid==0.3.9) (4.1.1)\n","Requirement already satisfied: more-itertools in /usr/local/lib/python3.6/dist-packages (from lucid==0.3.9) (8.6.0)\n","Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.6/dist-packages (from scikit-learn->lucid==0.3.9) (0.17.0)\n","Requirement already satisfied: numba!=0.47,>=0.46 in /usr/local/lib/python3.6/dist-packages (from umap-learn->lucid==0.3.9) (0.48.0)\n","Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from nltk->lucid==0.3.9) (1.15.0)\n","Requirement already satisfied: prompt-toolkit<2.0.0,>=1.0.4 in /usr/local/lib/python3.6/dist-packages (from ipython->lucid==0.3.9) (1.0.18)\n","Requirement already satisfied: pexpect; sys_platform != \"win32\" in /usr/local/lib/python3.6/dist-packages (from ipython->lucid==0.3.9) (4.8.0)\n","Requirement already satisfied: simplegeneric>0.8 in /usr/local/lib/python3.6/dist-packages (from ipython->lucid==0.3.9) (0.8.1)\n","Requirement already satisfied: pickleshare in /usr/local/lib/python3.6/dist-packages (from ipython->lucid==0.3.9) (0.7.5)\n","Requirement already satisfied: setuptools>=18.5 in /usr/local/lib/python3.6/dist-packages (from ipython->lucid==0.3.9) (50.3.2)\n","Requirement already satisfied: traitlets>=4.2 in /usr/local/lib/python3.6/dist-packages (from ipython->lucid==0.3.9) (4.3.3)\n","Requirement already satisfied: pygments in /usr/local/lib/python3.6/dist-packages (from ipython->lucid==0.3.9) (2.6.1)\n","Requirement already satisfied: llvmlite<0.32.0,>=0.31.0dev0 in /usr/local/lib/python3.6/dist-packages (from numba!=0.47,>=0.46->umap-learn->lucid==0.3.9) (0.31.0)\n","Requirement already satisfied: wcwidth in /usr/local/lib/python3.6/dist-packages (from prompt-toolkit<2.0.0,>=1.0.4->ipython->lucid==0.3.9) (0.2.5)\n","Requirement already satisfied: ptyprocess>=0.5 in /usr/local/lib/python3.6/dist-packages (from pexpect; sys_platform != \"win32\"->ipython->lucid==0.3.9) (0.6.0)\n","Requirement already satisfied: ipython-genutils in /usr/local/lib/python3.6/dist-packages (from traitlets>=4.2->ipython->lucid==0.3.9) (0.2.0)\n","Building wheels for collected packages: lucid\n"," Building wheel for lucid (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n"," Created wheel for lucid: filename=lucid-0.3.9-cp36-none-any.whl size=159191 sha256=ce3c73a40525abeb8db2a694beab6b743f5520a9ea0d02bf0cb49f008ea87249\n"," Stored in directory: /tmp/pip-ephem-wheel-cache-hn4kltt2/wheels/39/c8/8e/6b03eb4e2617bf64fb67125838d2dad660486ceb18bbe46141\n","Successfully built lucid\n","Installing collected packages: lucid\n"," Found existing installation: lucid 0.3.8\n"," Uninstalling lucid-0.3.8:\n"," Successfully uninstalled lucid-0.3.8\n","Successfully installed lucid-0.3.9\n"],"name":"stdout"}]},{"cell_type":"code","metadata":{"id":"IPNYCCROaNt_","executionInfo":{"status":"ok","timestamp":1605317751883,"user_tz":480,"elapsed":14969,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}}},"source":["def longest_common_prefix(l):\n"," l = set([s[: min(map(len, l))] for s in l])\n"," while len(l) > 1:\n"," l = set([s[:-1] for s in l])\n"," return list(l)[0]\n","\n","\n","def longest_common_suffix(l):\n"," l = set([s[-min(map(len, l)) :] for s in l])\n"," while len(l) > 1:\n"," l = set([s[1:] for s in l])\n"," return list(l)[0]\n","\n","\n","# small utility for abbreviating a list of names\n","def get_abbreviator(names):\n"," if len(names) <= 1:\n"," return slice(None, None)\n"," prefix = longest_common_prefix(names)\n"," prefix = prefix.rsplit(\"/\", 1)[0] + \"/\" if \"/\" in prefix else \"\"\n"," suffix = longest_common_suffix(names)\n"," suffix = \"/\" + suffix.split(\"/\", 1)[-1] if \"/\" in suffix else \"\"\n"," return slice(len(prefix), None if len(suffix) == 0 else -len(suffix))"],"execution_count":3,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"8v3WIvyWpxeX"},"source":["# Import utilities and load data"]},{"cell_type":"markdown","metadata":{"id":"5SECNXLL_OVd"},"source":["First let's import the utilities.\n"]},{"cell_type":"code","metadata":{"id":"m6VLpL0CZG3n","executionInfo":{"status":"ok","timestamp":1605317758829,"user_tz":480,"elapsed":21910,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"f52ce78f-a5b7-41a3-a4b8-44ad2f8ba2b7","colab":{"base_uri":"https://localhost:8080/"}},"source":["import numpy as np\n","import tensorflow as tf\n","from collections import OrderedDict\n","from lucid.modelzoo.vision_base import Model\n","from lucid.scratch.rl_util import *\n","# get_abbreviator defined during setup\n","\n","# hide tensorflow deprecation warnings\n","tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)"],"execution_count":4,"outputs":[{"output_type":"stream","text":["/usr/local/lib/python3.6/dist-packages/sklearn/utils/deprecation.py:144: FutureWarning: The sklearn.decomposition.base module is deprecated in version 0.22 and will be removed in version 0.24. The corresponding classes / functions should instead be imported from sklearn.decomposition. 
Anything that cannot be imported from sklearn.decomposition is now part of the private API.\n"," warnings.warn(message, FutureWarning)\n"],"name":"stderr"}]},{"cell_type":"markdown","metadata":{"id":"E79AeCK0_F_B"},"source":["Here's a list of all the utilities we imported from `lucid.scratch.rl_util`."]},{"cell_type":"code","metadata":{"id":"7xiqVCRo2PeD","executionInfo":{"status":"ok","timestamp":1605317758829,"user_tz":480,"elapsed":21903,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"ceb85c37-46d9-4333-eef7-9f6c7cee96df","colab":{"base_uri":"https://localhost:8080/"}},"source":["print(\", \".join(all_()))"],"execution_count":5,"outputs":[{"output_type":"stream","text":["np, tf, Model, ChannelReducer, param, objectives, render, transform, show, save, _image_url, _display_html, lucid_svelte, load_joblib, save_joblib, zoom_to, get_var, get_shape, concatenate_horizontally, hue_to_rgb, channels_to_rgb, conv2d, norm_filter, brightness_to_opacity, gradient_override_map, maxpool_override, get_acts, get_grad_or_attr, get_attr, get_grad, get_paths, get_multi_path_attr, argmax_nd, LayerNMF, rescale_opacity, all_, reload\n"],"name":"stdout"}]},{"cell_type":"markdown","metadata":{"id":"wQ58sOF-1-7Y"},"source":["Now let's load the data for the model we want to analyze. The available models are indexed [here](https://openaipublic.blob.core.windows.net/rl-clarity/attribution/models/index.html). In this example, we use the original CoinRun model, whose `/` is `coinrun`."]},{"cell_type":"code","metadata":{"id":"JbpTKYzqZalW","executionInfo":{"status":"ok","timestamp":1605317767057,"user_tz":480,"elapsed":30128,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}}},"source":["def load_data(relpath_name):\n"," basepath = \"https://openaipublic.blob.core.windows.net/rl-clarity/attribution/models/\"\n"," if \"/\" in relpath_name:\n"," relpath, name = relpath_name.rsplit(\"/\")\n"," relpath += \"/\"\n"," else:\n"," relpath = \"\"\n"," name = relpath_name\n"," dirpath = f\"{basepath}{relpath}rl-clarity/\"\n","\n"," result = {}\n"," result[\"model\"] = Model.load(f\"{dirpath}{name}.model.pb\")\n"," result.update(load_joblib(f\"{dirpath}{name}.metadata.jd\"))\n"," result.update(load_joblib(f\"{dirpath}{name}.observations.jd\"))\n"," result[\"trajectories\"] = load_joblib(f\"{dirpath}{name}.trajectories.jd\")\n","\n"," result[\"observations\"] = result[\"observations\"] / np.float(255)\n"," result[\"trajectories\"][\"observations\"] = result[\"trajectories\"][\"observations\"] / np.float(255)\n","\n"," layer_names = [\n"," node.name\n"," for node in result[\"model\"].graph_def.node\n"," if len(get_shape(result[\"model\"], node.name)) >= 4 and node.op.lower() == \"relu\"\n"," ]\n"," abbreviator = get_abbreviator(layer_names)\n"," result[\"layer_names\"] = OrderedDict(\n"," [(name[abbreviator], name) for name in layer_names]\n"," )\n","\n"," return result\n","\n","\n","data = load_data(\"coinrun\")"],"execution_count":6,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"l0eiu5S5_ewX"},"source":["Let's add all of the data we've loaded as local variables, printing their names."]},{"cell_type":"code","metadata":{"id":"p3laYhT36OB2","executionInfo":{"status":"ok","timestamp":1605317767062,"user_tz":480,"elapsed":30126,"user":{"displayName":"Jacob 
Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"68c44eeb-00d8-47ae-fd74-20ba8b03f56c","colab":{"base_uri":"https://localhost:8080/"}},"source":["locals().update(data)\n","print(\", \".join(data.keys()))"],"execution_count":7,"outputs":[{"output_type":"stream","text":["model, policy_logits_name, value_function_name, env_name, gae_gamma, gae_lambda, action_combos, observations, trajectories, layer_names\n"],"name":"stdout"}]},{"cell_type":"markdown","metadata":{"id":"YVUEqhE77W6H"},"source":["Let's explain each of these variables:\n","\n","- `model: lucid.modelzoo.vision_base.Model` – Lucid model\n","- `policy_logits_name: str` – name of the policy head tensor in the model\n","- `value_function_name: str` – name of the value head tensor in the model\n","- `env_name: str` – name of the environment\n","- `gae_gamma: float` – generalized advantage estimation hyperparameter $\\gamma$ used to train the model\n","- `gae_lambda: float` – generalized advantage estimation hyperparameter $\\lambda$ used to train the model\n","- `action_combos: List[Tuple[str]]` – which combination of keys each integer action corresponds to\n","- `observations: np.ndarray` – batch × height × width × channels float array of observations sampled infrequently from the agent playing the game, intended to be used for NMF\n","- `trajectories: Dict[str, np.ndarray]` – dictionary from contiguous trajectories of the agent playing the game with the following keys:\n"," - `observations` – trajectories × timesteps × height × width × channels float array of observations\n"," - `actions` – trajectories × timesteps array of integer actions\n"," - `rewards` – trajectories × timesteps array of float rewards\n"," - `firsts` – trajectories × timesteps array of booleans specifying whether the timestep was the first in the episode\n","- `layer_names: OrderedDict[str, str]` – mapping from the abbreviated names to the full names of the activation tensors of the convolutional layers in the model"]},{"cell_type":"markdown","metadata":{"id":"Kp3MjlPfOnRp"},"source":["Let's print the abbreviated layer names that were found."]},{"cell_type":"code","metadata":{"id":"EcSBLmhAOjgA","executionInfo":{"status":"ok","timestamp":1605317767064,"user_tz":480,"elapsed":30122,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"9251e2a0-ca9b-4e74-d922-5f4d2e7f0486","colab":{"base_uri":"https://localhost:8080/"}},"source":["print(\", \".join(layer_names.keys()))"],"execution_count":8,"outputs":[{"output_type":"stream","text":["1a, 2a, 2b, 3a, 4a\n"],"name":"stdout"}]},{"cell_type":"markdown","metadata":{"id":"3PmbPfycBC2G"},"source":["The `zoom_to` and `show` utilities are useful for displaying observations and visualizations."]},{"cell_type":"code","metadata":{"id":"1YItp5JEBBid","executionInfo":{"status":"ok","timestamp":1605317767064,"user_tz":480,"elapsed":30115,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"caeb1e9a-a1c7-4ebe-a2c2-cd1bfbd77181","colab":{"base_uri":"https://localhost:8080/","height":241}},"source":["show(zoom_to(trajectories[\"observations\"][0, :8], 200))"],"execution_count":9,"outputs":[{"output_type":"display_data","data":{"text/html":["
\n"," 0
\n"," \n","
\n"," 1
\n"," \n","
\n"," 2
\n"," \n","
\n"," 3
\n"," \n","
\n"," 4
\n"," \n","
\n"," 5
\n"," \n","
\n"," 6
\n"," \n","
\n"," 7
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}}]},{"cell_type":"markdown","metadata":{"id":"YZgIsgsqIyK6"},"source":["# Dimensionality reduction and feature visualization"]},{"cell_type":"markdown","metadata":{"id":"w1s8GiR9JUkJ"},"source":["The `LayerNMF` utility can be used to apply NMF dimensionality reduction to obtain directions in activation space, and then to use feature visualization (either gradient-based or dataset example-based) to visualize those directions.\n","\n","Passing `attr_layer_name=value_function_name` causes NMF to be applied to value function attributions. If we did not pass this, NMF would instead be applied to activations.\n","\n","We use the infrequently-sampled `observations` rather than observations from `trajectories` in order to cover a broader distribution of observations."]},{"cell_type":"code","metadata":{"id":"rBDvl92SJTPu","executionInfo":{"status":"ok","timestamp":1605317877034,"user_tz":480,"elapsed":140082,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}}},"source":["# can take a couple of minutes\n","# for the paper, we use observations[:], but this requires more memory\n","nmf = LayerNMF(model, layer_names['2b'], observations[:1024], features=8, attr_layer_name=value_function_name)"],"execution_count":10,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"H5Z0pPQaKaHt"},"source":["The directions in activation space obtained are given by the `channel_dirs` property."]},{"cell_type":"code","metadata":{"id":"7ohXmx0SRJWl","executionInfo":{"status":"ok","timestamp":1605317877035,"user_tz":480,"elapsed":140077,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"e9b59f52-b570-4034-a4ed-f40de3bff406","colab":{"base_uri":"https://localhost:8080/"}},"source":["nmf.channel_dirs.shape"],"execution_count":11,"outputs":[{"output_type":"execute_result","data":{"text/plain":["(8, 32)"]},"metadata":{"tags":[]},"execution_count":11}]},{"cell_type":"markdown","metadata":{"id":"dOUScXpSSOjP"},"source":["We can now visualize these directions in activation space in various ways.\n","\n","We can apply gradient-based feature visualization using the `vis_traditional` method."]},{"cell_type":"code","metadata":{"id":"KlmV_3IUKUwc","executionInfo":{"status":"ok","timestamp":1605317974693,"user_tz":480,"elapsed":237729,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"6db9cc1b-944c-4f52-8bec-7c7a24f5c73f","colab":{"base_uri":"https://localhost:8080/","height":325}},"source":["show(zoom_to(nmf.vis_traditional(), 200))"],"execution_count":12,"outputs":[{"output_type":"stream","text":["512 273.1749\n"],"name":"stdout"},{"output_type":"display_data","data":{"text/html":[""],"text/plain":[""]},"metadata":{"tags":[]}},{"output_type":"display_data","data":{"text/html":["
\n"," 0
\n"," \n","
\n"," 1
\n"," \n","
\n"," 2
\n"," \n","
\n"," 3
\n"," \n","
\n"," 4
\n"," \n","
\n"," 5
\n"," \n","
\n"," 6
\n"," \n","
\n"," 7
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}}]},{"cell_type":"markdown","metadata":{"id":"nOCMBvfFSsnF"},"source":["We can apply dataset example-based feature visualization using the `vis_dataset_thumbnail` method. `num_mult` gives the height and width of the grid of patches, and `expand_mult` is a multiplier on the size of each patch.\n","\n","The strength of the activation for the image from which the patch was taken is given in the alpha (opacity) channel of the visualization (scaled to be at most 1)."]},{"cell_type":"code","metadata":{"id":"JNy_Qi7RSlg8","executionInfo":{"status":"ok","timestamp":1605317975427,"user_tz":480,"elapsed":238456,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"6ee515e5-a4a3-42f7-e1c3-6e8a8a51ad65","colab":{"base_uri":"https://localhost:8080/","height":241}},"source":["show([zoom_to(nmf.vis_dataset_thumbnail(i, num_mult=4, expand_mult=4)[0], 200) for i in range(nmf.features)])"],"execution_count":13,"outputs":[{"output_type":"display_data","data":{"text/html":["
\n"," 0
\n"," \n","
\n"," 1
\n"," \n","
\n"," 2
\n"," \n","
\n"," 3
\n"," \n","
\n"," 4
\n"," \n","
\n"," 5
\n"," \n","
\n"," 6
\n"," \n","
\n"," 7
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}}]},{"cell_type":"markdown","metadata":{"id":"tUflo4qQT_Jv"},"source":["We can apply spatially-aware dataset example-based feature visualization using the `vis_dataset` method. `subdiv_mult` gives the height and width of the grid of patches per activation, and `expand_mult` is again a multiplier on the size of each patch.\n","\n","Activation strength is again given by opacity, so most of the top and left of the image is transparent since coins do not usually appear in those parts of observations."]},{"cell_type":"code","metadata":{"id":"GzBXlqXnSpMt","executionInfo":{"status":"ok","timestamp":1605317976036,"user_tz":480,"elapsed":239059,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"d8503ec4-839a-4cdf-abe2-2035724c06cb","colab":{"base_uri":"https://localhost:8080/","height":820}},"source":["show(zoom_to(nmf.vis_dataset(1, subdiv_mult=1, expand_mult=4)[0], 800))"],"execution_count":14,"outputs":[{"output_type":"display_data","data":{"text/html":[""],"text/plain":[""]},"metadata":{"tags":[]}}]},{"cell_type":"markdown","metadata":{"id":"lMMp7EOYYiCF"},"source":["# Attribution"]},{"cell_type":"markdown","metadata":{"id":"8dqOqp7AYrOL"},"source":["The `get_acts` utility can be used to get activations. We can use this to get the model's value function for some observations."]},{"cell_type":"code","metadata":{"id":"1-NxHuTtTjTi","executionInfo":{"status":"ok","timestamp":1605317976037,"user_tz":480,"elapsed":239053,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"080893d3-ba31-4157-e639-133e9b5f2e88","colab":{"base_uri":"https://localhost:8080/","height":241}},"source":["value_functions = get_acts(model, value_function_name, observations[:8])\n","show(zoom_to(observations[:8], 200), labels=[f\"{v:.3f}\" for v in value_functions])"],"execution_count":15,"outputs":[{"output_type":"display_data","data":{"text/html":["
\n"," 9.984
\n"," \n","
\n"," 9.622
\n"," \n","
\n"," 9.278
\n"," \n","
\n"," 9.500
\n"," \n","
\n"," 9.945
\n"," \n","
\n"," 9.668
\n"," \n","
\n"," 9.891
\n"," \n","
\n"," 9.975
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}}]},{"cell_type":"markdown","metadata":{"id":"6VvVyF5Ic8QZ"},"source":["The `get_attr` utility can be used to get attributions using the integrated gradients method. The number of steps used for the numerical integration is specified by `integrate_steps`. Here we apply this to some observations from a trajectory.\n","\n","We pass `value_function_name` in order to get value function attributions. If we passed the name of a tensor with more than one element (such as `policy_logits_name`), then we could use `score_fn` to specify how to reduce that tensor to a single element (note that `score_fn` should not reduce the batch dimension)."]},{"cell_type":"code","metadata":{"id":"UwGDu-4kcTG3","executionInfo":{"status":"ok","timestamp":1605317976215,"user_tz":480,"elapsed":239225,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"23be125d-60f7-4abc-838e-46634d08a1e5","colab":{"base_uri":"https://localhost:8080/"}},"source":["traj = trajectories['observations'][0][76:84]\n","attr = get_attr(model, value_function_name, layer_names['2b'], traj, integrate_steps=10)\n","attr.shape"],"execution_count":16,"outputs":[{"output_type":"execute_result","data":{"text/plain":["(8, 32, 32, 32)"]},"metadata":{"tags":[]},"execution_count":16}]},{"cell_type":"markdown","metadata":{"id":"pOfsF2_EdwcF"},"source":["We can apply dimensionality reduction to these attributions using the `LayerNMF` object we generated earlier."]},{"cell_type":"code","metadata":{"id":"kjfVBYXxYpxf","executionInfo":{"status":"ok","timestamp":1605317976444,"user_tz":480,"elapsed":239448,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"cb53694e-a41e-4c82-b5a6-92a8052c1c72","colab":{"base_uri":"https://localhost:8080/"}},"source":["attr_reduced = nmf.transform(np.maximum(attr, 0)) - nmf.transform(np.maximum(-attr, 0)) # transform the positive and negative parts separately\n","nmf_norms = nmf.channel_dirs.sum(-1)\n","attr_reduced *= nmf_norms[None, None, None] # multiply by the norms of the NMF directions, since the magnitudes of the NMF directions are not relevant\n","attr_reduced /= np.median(attr_reduced.max(axis=(-3, -2, -1))) # globally normalize by the median max value to make the visualization balanced (a bit of a hack)\n","attr_reduced.shape"],"execution_count":17,"outputs":[{"output_type":"execute_result","data":{"text/plain":["(8, 32, 32, 8)"]},"metadata":{"tags":[]},"execution_count":17}]},{"cell_type":"markdown","metadata":{"id":"0OO_eLvJiRhm"},"source":["Here are the observations along with the positive and negative parts of the attributions, which we visualize by assigning a different color to each of the post-NMF channels."]},{"cell_type":"code","metadata":{"id":"NmErsBUki-am","executionInfo":{"status":"ok","timestamp":1605317976787,"user_tz":480,"elapsed":239785,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"a239cd03-4fa1-4c47-a73c-6371cb453793","colab":{"base_uri":"https://localhost:8080/","height":742}},"source":["attr_pos = np.maximum(attr_reduced, 0)\n","attr_neg = np.maximum(-attr_reduced, 0)\n","print(\"observation\")\n","show(zoom_to(traj, 200))\n","print(\"positive 
attribution\")\n","show(zoom_to(attr_pos, 200))\n","print(\"negative attribution\")\n","show(zoom_to(attr_neg, 200))"],"execution_count":18,"outputs":[{"output_type":"stream","text":["observation\n"],"name":"stdout"},{"output_type":"display_data","data":{"text/html":["
\n"," 0
\n"," \n","
\n"," 1
\n"," \n","
\n"," 2
\n"," \n","
\n"," 3
\n"," \n","
\n"," 4
\n"," \n","
\n"," 5
\n"," \n","
\n"," 6
\n"," \n","
\n"," 7
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}},{"output_type":"stream","text":["positive attribution\n"],"name":"stdout"},{"output_type":"display_data","data":{"text/html":["
\n"," 0
\n"," \n","
\n"," 1
\n"," \n","
\n"," 2
\n"," \n","
\n"," 3
\n"," \n","
\n"," 4
\n"," \n","
\n"," 5
\n"," \n","
\n"," 6
\n"," \n","
\n"," 7
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}},{"output_type":"stream","text":["negative attribution\n"],"name":"stdout"},{"output_type":"display_data","data":{"text/html":["
\n"," 0
\n"," \n","
\n"," 1
\n"," \n","
\n"," 2
\n"," \n","
\n"," 3
\n"," \n","
\n"," 4
\n"," \n","
\n"," 5
\n"," \n","
\n"," 6
\n"," \n","
\n"," 7
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}}]},{"cell_type":"markdown","metadata":{"id":"FbwL6c93jDMf"},"source":["We can use the `conv2d` and `norm_filter` utilities to smooth out attribution over nearby spatial positions, so that the amount of visual space taken up can be used to judge attribution strength."]},{"cell_type":"code","metadata":{"id":"mrlz1RlljCJ5","executionInfo":{"status":"ok","timestamp":1605317977481,"user_tz":480,"elapsed":240473,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"852aaf62-ac93-4b27-a805-ed018b618481","colab":{"base_uri":"https://localhost:8080/","height":742}},"source":["print(\"observation\")\n","show(zoom_to(traj, 200))\n","print(\"positive attribution\")\n","show(zoom_to(conv2d(attr_pos, filter_=norm_filter(15)), 200))\n","print(\"negative attribution\")\n","show(zoom_to(conv2d(attr_neg, filter_=norm_filter(15)), 200))"],"execution_count":19,"outputs":[{"output_type":"stream","text":["observation\n"],"name":"stdout"},{"output_type":"display_data","data":{"text/html":["
\n"," 0
\n"," \n","
\n"," 1
\n"," \n","
\n"," 2
\n"," \n","
\n"," 3
\n"," \n","
\n"," 4
\n"," \n","
\n"," 5
\n"," \n","
\n"," 6
\n"," \n","
\n"," 7
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}},{"output_type":"stream","text":["positive attribution\n"],"name":"stdout"},{"output_type":"display_data","data":{"text/html":["
\n"," 0
\n"," \n","
\n"," 1
\n"," \n","
\n"," 2
\n"," \n","
\n"," 3
\n"," \n","
\n"," 4
\n"," \n","
\n"," 5
\n"," \n","
\n"," 6
\n"," \n","
\n"," 7
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}},{"output_type":"stream","text":["negative attribution\n"],"name":"stdout"},{"output_type":"display_data","data":{"text/html":["
\n"," 0
\n"," \n","
\n"," 1
\n"," \n","
\n"," 2
\n"," \n","
\n"," 3
\n"," \n","
\n"," 4
\n"," \n","
\n"," 5
\n"," \n","
\n"," 6
\n"," \n","
\n"," 7
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}}]},{"cell_type":"markdown","metadata":{"id":"RixyZCp1jwrr"},"source":["# Model editing"]},{"cell_type":"markdown","metadata":{"id":"0A8acH07sqpu"},"source":["To edit the model in-place, we can use Lucid's `ParameterEditor`."]},{"cell_type":"code","metadata":{"id":"SvL0GfJvsoGe","executionInfo":{"status":"ok","timestamp":1605317977482,"user_tz":480,"elapsed":240469,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}}},"source":["from copy import deepcopy\n","from lucid.scratch.parameter_editor import ParameterEditor\n","\n","edited_model = deepcopy(model)\n","editor = ParameterEditor(edited_model.graph_def)"],"execution_count":20,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"NF9s-yV_tqQC"},"source":["To make the model blind to saw obstacles, we can use the first NMF direction. We edit the convolutional kernel of the next layer to make it project out the NMF direction from activations before applying the original kernel."]},{"cell_type":"code","metadata":{"id":"OeIh47L7tB2g","executionInfo":{"status":"ok","timestamp":1605317977483,"user_tz":480,"elapsed":240468,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}}},"source":["kernel_name = layer_names[\"3a\"].replace(\"Relu\", \"conv2d/kernel\") # name of tensor of convolutional kernel of next layer\n","kernel = editor[kernel_name]\n","saw_dir = nmf.channel_dirs[0][None, None, :, None] # first NMF direction, corresponding to saw obstacle\n","saw_dir /= np.linalg.norm(saw_dir)\n","# the kernel is left-multiplied by the activations from the previous layer, so we left-multiply the kernel by the projection matrix\n","kernel = kernel - saw_dir * (saw_dir * kernel).sum(axis=-2, keepdims=True) # equivalently: kernel - saw_dir @ saw_dir.transpose((0, 1, 3, 2)) @ kernel\n","editor[kernel_name] = kernel\n","# note: this is not quite the same as the edit made for the paper, since we only used 1024 observations for the NMF calculation here"],"execution_count":21,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"0Yp8AMJBv9hz"},"source":["We can use the `get_var` utility to verify that the kernel has been updated."]},{"cell_type":"code","metadata":{"id":"O7jxxgmvgynF","executionInfo":{"status":"ok","timestamp":1605317977655,"user_tz":480,"elapsed":240634,"user":{"displayName":"Jacob Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"61ab6d55-9f27-4c8a-8ebe-aabc70e84fa3","colab":{"base_uri":"https://localhost:8080/"}},"source":["print(np.linalg.norm(get_var(model, kernel_name)))\n","print(np.linalg.norm(get_var(edited_model, kernel_name)))"],"execution_count":22,"outputs":[{"output_type":"stream","text":["31.603941\n","30.58285\n"],"name":"stdout"}]},{"cell_type":"markdown","metadata":{"id":"8Szlc5AtvNI_"},"source":["If we now repeat the attribution visualization from above with the edited model, we see that the red saw obstacle channel has disappeared, while the yellow coin channel remains present."]},{"cell_type":"code","metadata":{"id":"-j8AP7ZQouG_","executionInfo":{"status":"ok","timestamp":1605317978681,"user_tz":480,"elapsed":241655,"user":{"displayName":"Jacob 
Hilton","photoUrl":"https://lh3.googleusercontent.com/a-/AOh14GgyvNqcv2CjrqyyVTkhuN6dyNkAxGKKFbxzDoXV=s64","userId":"01152313290781312479"}},"outputId":"14c59892-3625-4323-ae32-50c77522dfd5","colab":{"base_uri":"https://localhost:8080/","height":742}},"source":["traj = trajectories['observations'][0][76:84]\n","attr = get_attr(edited_model, value_function_name, layer_names['2b'], traj, integrate_steps=10)\n","attr_reduced = nmf.transform(np.maximum(attr, 0)) - nmf.transform(np.maximum(-attr, 0))\n","nmf_norms = nmf.channel_dirs.sum(-1)\n","attr_reduced *= nmf_norms[None, None, None]\n","attr_reduced /= np.median(attr_reduced.max(axis=(-3, -2, -1)))\n","attr_pos = np.maximum(attr_reduced, 0)\n","attr_neg = np.maximum(-attr_reduced, 0)\n","print(\"observation\")\n","show(zoom_to(traj, 200))\n","print(\"positive attribution\")\n","show(zoom_to(conv2d(attr_pos, filter_=norm_filter(15)), 200))\n","print(\"negative attribution\")\n","show(zoom_to(conv2d(attr_neg, filter_=norm_filter(15)), 200))"],"execution_count":23,"outputs":[{"output_type":"stream","text":["observation\n"],"name":"stdout"},{"output_type":"display_data","data":{"text/html":["
\n"," 0
\n"," \n","
\n"," 1
\n"," \n","
\n"," 2
\n"," \n","
\n"," 3
\n"," \n","
\n"," 4
\n"," \n","
\n"," 5
\n"," \n","
\n"," 6
\n"," \n","
\n"," 7
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}},{"output_type":"stream","text":["positive attribution\n"],"name":"stdout"},{"output_type":"display_data","data":{"text/html":["
\n"," 0
\n"," \n","
\n"," 1
\n"," \n","
\n"," 2
\n"," \n","
\n"," 3
\n"," \n","
\n"," 4
\n"," \n","
\n"," 5
\n"," \n","
\n"," 6
\n"," \n","
\n"," 7
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}},{"output_type":"stream","text":["negative attribution\n"],"name":"stdout"},{"output_type":"display_data","data":{"text/html":["
\n"," 0
\n"," \n","
\n"," 1
\n"," \n","
\n"," 2
\n"," \n","
\n"," 3
\n"," \n","
\n"," 4
\n"," \n","
\n"," 5
\n"," \n","
\n"," 6
\n"," \n","
\n"," 7
\n"," \n","
"],"text/plain":[""]},"metadata":{"tags":[]}}]}]} \ No newline at end of file From 6beda59ac22677c193075504cb89159fe1b4466d Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Tue, 24 Nov 2020 13:44:59 -0700 Subject: [PATCH 34/57] Fix total_variation citation --- lucid/optvis/objectives.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lucid/optvis/objectives.py b/lucid/optvis/objectives.py index eebbe702..eb9554a3 100644 --- a/lucid/optvis/objectives.py +++ b/lucid/optvis/objectives.py @@ -243,7 +243,7 @@ def total_variation(layer="input"): """Total variation of image (or activations at some layer). This operation is most often used as a penalty to reduce noise. - See Simonyan, et al., 2014. + See Mahendran, V. 2014. Understanding Deep Image Representations by Inverting Them. """ return lambda T: tf.image.total_variation(T(layer)) From 76d0ce193911f29b58abf18d3b40ac75391daae2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Jan 2021 22:12:46 +0000 Subject: [PATCH 35/57] Bump ini from 1.3.5 to 1.3.8 in /lucid/scratch/js Bumps [ini](https://github.com/isaacs/ini) from 1.3.5 to 1.3.8. - [Release notes](https://github.com/isaacs/ini/releases) - [Commits](https://github.com/isaacs/ini/compare/v1.3.5...v1.3.8) Signed-off-by: dependabot[bot] --- lucid/scratch/js/package-lock.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lucid/scratch/js/package-lock.json b/lucid/scratch/js/package-lock.json index 92acb0f7..4590f944 100644 --- a/lucid/scratch/js/package-lock.json +++ b/lucid/scratch/js/package-lock.json @@ -594,9 +594,9 @@ "dev": true }, "ini": { - "version": "1.3.5", - "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.5.tgz", - "integrity": "sha512-RZY5huIKCMRWDUqZlEi72f/lmXKMvuszcMBduliQ3nnWbx9X/ZBQO7DijMEYS9EhHBb2qacRUMtC7svLwe0lcw==", + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", + "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", "dev": true }, "iota-array": { From 4cd0df0ba577c698a07b9edf91a3d2a044b49e1e Mon Sep 17 00:00:00 2001 From: Chelsea Voss Date: Tue, 26 Jan 2021 21:03:58 +0000 Subject: [PATCH 36/57] Update show() to fix Firefox rendering issue --- lucid/misc/io/showing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lucid/misc/io/showing.py b/lucid/misc/io/showing.py index d87d816d..390b3429 100644 --- a/lucid/misc/io/showing.py +++ b/lucid/misc/io/showing.py @@ -61,7 +61,7 @@ def _image_url(array, fmt='png', mode="data", quality=90, domain=None): def _image_html(array, w=None, domain=None, fmt='png'): url = _image_url(array, domain=domain, fmt=fmt) - style = "image-rendering: pixelated;" + style = "image-rendering: pixelated; image-rendering: crisp-edges;" if w is not None: style += "width: {w}px;".format(w=w) return """""".format(**locals()) From cf4870a617a25d6fe823cc43477148916345d16d Mon Sep 17 00:00:00 2001 From: Mihai Maruseac Date: Thu, 4 Mar 2021 09:40:58 -0800 Subject: [PATCH 37/57] Delete package-lock.json This should remove all current dependency vulnerability issues. When rebuilding this test folder, it can be regenerated, so it is safe to remove at the moment. 
--- lucid/scratch/js/package-lock.json | 1417 ---------------------------- 1 file changed, 1417 deletions(-) delete mode 100644 lucid/scratch/js/package-lock.json diff --git a/lucid/scratch/js/package-lock.json b/lucid/scratch/js/package-lock.json deleted file mode 100644 index 4590f944..00000000 --- a/lucid/scratch/js/package-lock.json +++ /dev/null @@ -1,1417 +0,0 @@ -{ - "name": "lucid-components", - "version": "0.0.5", - "lockfileVersion": 1, - "requires": true, - "dependencies": { - "@zeit/schemas": { - "version": "2.6.0", - "resolved": "https://registry.npmjs.org/@zeit/schemas/-/schemas-2.6.0.tgz", - "integrity": "sha512-uUrgZ8AxS+Lio0fZKAipJjAh415JyrOZowliZAzmnJSsf7piVL5w+G0+gFJ0KSu3QRhvui/7zuvpLz03YjXAhg==", - "dev": true - }, - "accepts": { - "version": "1.3.7", - "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.7.tgz", - "integrity": "sha512-Il80Qs2WjYlJIBNzNkK6KYqlVMTbZLXgHx2oT0pU/fjRHyEp+PEfEPY0R3WCwAGVOtauxh1hOxNgIf5bv7dQpA==", - "dev": true, - "requires": { - "mime-types": "~2.1.24", - "negotiator": "0.6.2" - } - }, - "acorn": { - "version": "5.7.4", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-5.7.4.tgz", - "integrity": "sha512-1D++VG7BhrtvQpNbBzovKNc1FLGGEE/oGe7b9xJm/RFHMBeUaUGpluV9RLjZa47YFdPcDAenEYuq9pQPcMdLJg==", - "dev": true - }, - "ajv": { - "version": "6.5.3", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.5.3.tgz", - "integrity": "sha512-LqZ9wY+fx3UMiiPd741yB2pj3hhil+hQc8taf4o2QGRFpWgZ2V5C8HA165DY9sS3fJwsk7uT7ZlFEyC3Ig3lLg==", - "dev": true, - "requires": { - "fast-deep-equal": "^2.0.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - } - }, - "ansi-align": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-2.0.0.tgz", - "integrity": "sha1-w2rsy6VjuJzrVW82kPCx2eNUf38=", - "dev": true, - "requires": { - "string-width": "^2.0.0" - } - }, - "ansi-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz", - "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=", - "dev": true - }, - "ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dev": true, - "requires": { - "color-convert": "^1.9.0" - } - }, - "arch": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/arch/-/arch-2.1.2.tgz", - "integrity": "sha512-NTBIIbAfkJeIletyABbVtdPgeKfDafR+1mZV/AyyfC1UkVkp9iUjV+wwmqtUgphHYajbI86jejBJp5e+jkGTiQ==", - "dev": true - }, - "arg": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/arg/-/arg-2.0.0.tgz", - "integrity": "sha512-XxNTUzKnz1ctK3ZIcI2XUPlD96wbHP2nGqkPKpvk/HNRlPveYrXIVSTk9m3LcqOgDPg3B1nMvdV/K8wZd7PG4w==", - "dev": true - }, - "arr-diff": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/arr-diff/-/arr-diff-2.0.0.tgz", - "integrity": "sha1-jzuCf5Vai9ZpaX5KQlasPOrjVs8=", - "dev": true, - "requires": { - "arr-flatten": "^1.0.1" - } - }, - "arr-flatten": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/arr-flatten/-/arr-flatten-1.1.0.tgz", - "integrity": "sha512-L3hKV5R/p5o81R7O02IGnwpDmkp6E982XhtbuwSe3O4qOtMMMtodicASA1Cny2U+aCXcNpml+m4dPsvsJ3jatg==", - "dev": true - }, - "array-unique": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/array-unique/-/array-unique-0.2.1.tgz", - "integrity": "sha1-odl8yvy8JiXMcPrc6zalDFiwGlM=", - "dev": true - }, - 
"assertion-error": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-1.1.0.tgz", - "integrity": "sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==", - "dev": true - }, - "balanced-match": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz", - "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=", - "dev": true - }, - "boxen": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/boxen/-/boxen-1.3.0.tgz", - "integrity": "sha512-TNPjfTr432qx7yOjQyaXm3dSR0MH9vXp7eT1BFSl/C51g+EFnOR9hTg1IreahGBmDNCehscshe45f+C1TBZbLw==", - "dev": true, - "requires": { - "ansi-align": "^2.0.0", - "camelcase": "^4.0.0", - "chalk": "^2.0.1", - "cli-boxes": "^1.0.0", - "string-width": "^2.0.0", - "term-size": "^1.2.0", - "widest-line": "^2.0.0" - } - }, - "brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dev": true, - "requires": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "braces": { - "version": "1.8.5", - "resolved": "https://registry.npmjs.org/braces/-/braces-1.8.5.tgz", - "integrity": "sha1-uneWLhLf+WnWt2cR6RS3N4V79qc=", - "dev": true, - "requires": { - "expand-range": "^1.8.1", - "preserve": "^0.2.0", - "repeat-element": "^1.1.2" - } - }, - "browser-stdout": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/browser-stdout/-/browser-stdout-1.3.1.tgz", - "integrity": "sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw==", - "dev": true - }, - "builtin-modules": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/builtin-modules/-/builtin-modules-2.0.0.tgz", - "integrity": "sha512-3U5kUA5VPsRUA3nofm/BXX7GVHKfxz0hOBAPxXrIvHzlDRkQVqEn6yi8QJegxl4LzOHLdvb7XF5dVawa/VVYBg==", - "dev": true - }, - "bytes": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz", - "integrity": "sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg=", - "dev": true - }, - "camelcase": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-4.1.0.tgz", - "integrity": "sha1-1UVjW+HjPFQmScaRc+Xeas+uNN0=", - "dev": true - }, - "chai": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chai/-/chai-4.1.2.tgz", - "integrity": "sha1-D2RYS6ZC8PKs4oBiefTwbKI61zw=", - "dev": true, - "requires": { - "assertion-error": "^1.0.1", - "check-error": "^1.0.1", - "deep-eql": "^3.0.0", - "get-func-name": "^2.0.0", - "pathval": "^1.0.0", - "type-detect": "^4.0.0" - } - }, - "chai-almost": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/chai-almost/-/chai-almost-1.0.1.tgz", - "integrity": "sha1-Q9AmzzvnmhzVE88Vr4QKgSQ6S2A=", - "dev": true, - "requires": { - "deep-eql": "^2.0.2", - "type-detect": "^4.0.3" - }, - "dependencies": { - "deep-eql": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-2.0.2.tgz", - "integrity": "sha1-sbrAblbwp2d3aG1Qyf63XC7XZ5o=", - "dev": true, - "requires": { - "type-detect": "^3.0.0" - }, - "dependencies": { - "type-detect": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-3.0.0.tgz", - "integrity": "sha1-RtDMhVOrt7E6NSsNbeov1Y8tm1U=", - "dev": true - } - } - } - } - }, - "chalk": { - "version": "2.4.1", - "resolved": 
"https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz", - "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==", - "dev": true, - "requires": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - } - }, - "check-error": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/check-error/-/check-error-1.0.2.tgz", - "integrity": "sha1-V00xLt2Iu13YkS6Sht1sCu1KrII=", - "dev": true - }, - "cli-boxes": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-1.0.0.tgz", - "integrity": "sha1-T6kXw+WclKAEzWH47lCdplFocUM=", - "dev": true - }, - "clipboardy": { - "version": "1.2.3", - "resolved": "https://registry.npmjs.org/clipboardy/-/clipboardy-1.2.3.tgz", - "integrity": "sha512-2WNImOvCRe6r63Gk9pShfkwXsVtKCroMAevIbiae021mS850UkWPbevxsBz3tnvjZIEGvlwaqCPsw+4ulzNgJA==", - "dev": true, - "requires": { - "arch": "^2.1.0", - "execa": "^0.8.0" - }, - "dependencies": { - "execa": { - "version": "0.8.0", - "resolved": "https://registry.npmjs.org/execa/-/execa-0.8.0.tgz", - "integrity": "sha1-2NdrvBtVIX7RkP1t1J08d07PyNo=", - "dev": true, - "requires": { - "cross-spawn": "^5.0.1", - "get-stream": "^3.0.0", - "is-stream": "^1.1.0", - "npm-run-path": "^2.0.0", - "p-finally": "^1.0.0", - "signal-exit": "^3.0.0", - "strip-eof": "^1.0.0" - } - } - } - }, - "color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "dev": true, - "requires": { - "color-name": "1.1.3" - } - }, - "color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=", - "dev": true - }, - "commander": { - "version": "2.15.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-2.15.0.tgz", - "integrity": "sha512-7B1ilBwtYSbetCgTY1NJFg+gVpestg0fdA1MhC1Vs4ssyfSXnCAjFr+QcQM9/RedXC0EaUx1sG8Smgw2VfgKEg==", - "dev": true - }, - "compressible": { - "version": "2.0.18", - "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.18.tgz", - "integrity": "sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==", - "dev": true, - "requires": { - "mime-db": ">= 1.43.0 < 2" - } - }, - "compression": { - "version": "1.7.3", - "resolved": "https://registry.npmjs.org/compression/-/compression-1.7.3.tgz", - "integrity": "sha512-HSjyBG5N1Nnz7tF2+O7A9XUhyjru71/fwgNb7oIsEVHR0WShfs2tIS/EySLgiTe98aOK18YDlMXpzjCXY/n9mg==", - "dev": true, - "requires": { - "accepts": "~1.3.5", - "bytes": "3.0.0", - "compressible": "~2.0.14", - "debug": "2.6.9", - "on-headers": "~1.0.1", - "safe-buffer": "5.1.2", - "vary": "~1.1.2" - } - }, - "concat-map": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=", - "dev": true - }, - "content-disposition": { - "version": "0.5.2", - "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.2.tgz", - "integrity": "sha1-DPaLud318r55YcOoUXjLhdunjLQ=", - "dev": true - }, - "cross-spawn": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-5.1.0.tgz", - "integrity": "sha1-6L0O/uWPz/b4+UUQoKVUu/ojVEk=", - "dev": true, - "requires": { - "lru-cache": "^4.0.1", - "shebang-command": "^1.2.0", - 
"which": "^1.2.9" - } - }, - "d3-dsv": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/d3-dsv/-/d3-dsv-1.0.8.tgz", - "integrity": "sha512-IVCJpQ+YGe3qu6odkPQI0KPqfxkhbP/oM1XhhE/DFiYmcXKfCRub4KXyiuehV1d4drjWVXHUWx4gHqhdZb6n/A==", - "dev": true, - "requires": { - "commander": "2", - "iconv-lite": "0.4", - "rw": "1" - } - }, - "debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dev": true, - "requires": { - "ms": "2.0.0" - } - }, - "deep-eql": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-3.0.1.tgz", - "integrity": "sha512-+QeIQyN5ZuO+3Uk5DYh6/1eKO0m0YmJFGNmFHGACpf1ClL1nmlV/p4gNgbl2pJGxgXb4faqo6UE+M5ACEMyVcw==", - "dev": true, - "requires": { - "type-detect": "^4.0.0" - } - }, - "deep-extend": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", - "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", - "dev": true - }, - "diff": { - "version": "3.5.0", - "resolved": "https://registry.npmjs.org/diff/-/diff-3.5.0.tgz", - "integrity": "sha512-A46qtFgd+g7pDZinpnwiRJtxbC1hpgf0uzP3iG89scHk0AUC7A1TGxf5OiiOUv/JMZR8GOt8hL900hV0bOy5xA==", - "dev": true - }, - "escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", - "dev": true - }, - "estree-walker": { - "version": "0.5.1", - "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-0.5.1.tgz", - "integrity": "sha512-7HgCgz1axW7w5aOvgOQkoR1RMBkllygJrssU3BvymKQ95lxXYv6Pon17fBRDm9qhkvXZGijOULoSF9ShOk/ZLg==", - "dev": true - }, - "execa": { - "version": "0.7.0", - "resolved": "https://registry.npmjs.org/execa/-/execa-0.7.0.tgz", - "integrity": "sha1-lEvs00zEHuMqY6n68nrVpl/Fl3c=", - "dev": true, - "requires": { - "cross-spawn": "^5.0.1", - "get-stream": "^3.0.0", - "is-stream": "^1.1.0", - "npm-run-path": "^2.0.0", - "p-finally": "^1.0.0", - "signal-exit": "^3.0.0", - "strip-eof": "^1.0.0" - } - }, - "expand-brackets": { - "version": "0.1.5", - "resolved": "https://registry.npmjs.org/expand-brackets/-/expand-brackets-0.1.5.tgz", - "integrity": "sha1-3wcoTjQqgHzXM6xa9yQR5YHRF3s=", - "dev": true, - "requires": { - "is-posix-bracket": "^0.1.0" - } - }, - "expand-range": { - "version": "1.8.2", - "resolved": "https://registry.npmjs.org/expand-range/-/expand-range-1.8.2.tgz", - "integrity": "sha1-opnv/TNf4nIeuujiV+x5ZE/IUzc=", - "dev": true, - "requires": { - "fill-range": "^2.1.0" - } - }, - "extglob": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/extglob/-/extglob-0.3.2.tgz", - "integrity": "sha1-Lhj/PS9JqydlzskCPwEdqo2DSaE=", - "dev": true, - "requires": { - "is-extglob": "^1.0.0" - } - }, - "fast-deep-equal": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-2.0.1.tgz", - "integrity": "sha1-ewUhjd+WZ79/Nwv3/bLLFf3Qqkk=", - "dev": true - }, - "fast-json-stable-stringify": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", - "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", - "dev": true - }, - "fast-url-parser": { - "version": "1.1.3", - "resolved": 
"https://registry.npmjs.org/fast-url-parser/-/fast-url-parser-1.1.3.tgz", - "integrity": "sha1-9K8+qfNNiicc9YrSs3WfQx8LMY0=", - "dev": true, - "requires": { - "punycode": "^1.3.2" - }, - "dependencies": { - "punycode": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz", - "integrity": "sha1-wNWmOycYgArY4esPpSachN1BhF4=", - "dev": true - } - } - }, - "fetch-mock": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/fetch-mock/-/fetch-mock-6.1.0.tgz", - "integrity": "sha512-SRSmFJW4GTr0EDSopylv+/0vQY/H2I26FqcGGiKN/bH4MPIsHmJi2D+Hc3XoUiMHgf+JrqnynloFEanTS/yiAw==", - "dev": true, - "requires": { - "glob-to-regexp": "^0.3.0", - "path-to-regexp": "^2.1.0" - } - }, - "filename-regex": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/filename-regex/-/filename-regex-2.0.1.tgz", - "integrity": "sha1-wcS5vuPglyXdsQa3XB4wH+LxiyY=", - "dev": true - }, - "fill-range": { - "version": "2.2.4", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-2.2.4.tgz", - "integrity": "sha512-cnrcCbj01+j2gTG921VZPnHbjmdAf8oQV/iGeV2kZxGSyfYjjTyY79ErsK1WJWMpw6DaApEX72binqJE+/d+5Q==", - "dev": true, - "requires": { - "is-number": "^2.1.0", - "isobject": "^2.0.0", - "randomatic": "^3.0.0", - "repeat-element": "^1.1.2", - "repeat-string": "^1.5.2" - } - }, - "for-in": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/for-in/-/for-in-1.0.2.tgz", - "integrity": "sha1-gQaNKVqBQuwKxybG4iAMMPttXoA=", - "dev": true - }, - "for-own": { - "version": "0.1.5", - "resolved": "https://registry.npmjs.org/for-own/-/for-own-0.1.5.tgz", - "integrity": "sha1-UmXGgaTylNq78XyVCbZ2OqhFEM4=", - "dev": true, - "requires": { - "for-in": "^1.0.1" - } - }, - "fs.realpath": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=", - "dev": true - }, - "get-func-name": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/get-func-name/-/get-func-name-2.0.0.tgz", - "integrity": "sha1-6td0q+5y4gQJQzoGY2YCPdaIekE=", - "dev": true - }, - "get-stream": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-3.0.0.tgz", - "integrity": "sha1-jpQ9E1jcN1VQVOy+LtsFqhdO3hQ=", - "dev": true - }, - "glob": { - "version": "7.1.2", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.2.tgz", - "integrity": "sha512-MJTUg1kjuLeQCJ+ccE4Vpa6kKVXkPYJ2mOCQyUuKLcLQsdrMCpBPUi8qVE6+YuaJkozeA9NusTAw3hLr8Xe5EQ==", - "dev": true, - "requires": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.0.4", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - } - }, - "glob-base": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/glob-base/-/glob-base-0.3.0.tgz", - "integrity": "sha1-27Fk9iIbHAscz4Kuoyi0l98Oo8Q=", - "dev": true, - "requires": { - "glob-parent": "^2.0.0", - "is-glob": "^2.0.0" - } - }, - "glob-parent": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-2.0.0.tgz", - "integrity": "sha1-gTg9ctsFT8zPUzbaqQLxgvbtuyg=", - "dev": true, - "requires": { - "is-glob": "^2.0.0" - } - }, - "glob-to-regexp": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.3.0.tgz", - "integrity": "sha1-jFoUlNIGbFcMw7/kSWF1rMTVAqs=", - "dev": true - }, - "growl": { - "version": "1.10.3", - "resolved": "https://registry.npmjs.org/growl/-/growl-1.10.3.tgz", - "integrity": 
"sha512-hKlsbA5Vu3xsh1Cg3J7jSmX/WaW6A5oBeqzM88oNbCRQFz+zUaXm6yxS4RVytp1scBoJzSYl4YAEOQIt6O8V1Q==", - "dev": true - }, - "has-flag": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-2.0.0.tgz", - "integrity": "sha1-6CB68cx7MNRGzHC3NLXovhj4jVE=", - "dev": true - }, - "he": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/he/-/he-1.1.1.tgz", - "integrity": "sha1-k0EP0hsAlzUVH4howvJx80J+I/0=", - "dev": true - }, - "iconv-lite": { - "version": "0.4.19", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.19.tgz", - "integrity": "sha512-oTZqweIP51xaGPI4uPa56/Pri/480R+mo7SeU+YETByQNhDG55ycFyNLIgta9vXhILrxXDmF7ZGhqZIcuN0gJQ==", - "dev": true - }, - "inflight": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", - "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", - "dev": true, - "requires": { - "once": "^1.3.0", - "wrappy": "1" - } - }, - "inherits": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", - "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=", - "dev": true - }, - "ini": { - "version": "1.3.8", - "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", - "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", - "dev": true - }, - "iota-array": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/iota-array/-/iota-array-1.0.0.tgz", - "integrity": "sha1-ge9X/l0FgUzVjCSDYyqZwwoOgIc=", - "dev": true - }, - "is-buffer": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", - "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==", - "dev": true - }, - "is-dotfile": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/is-dotfile/-/is-dotfile-1.0.3.tgz", - "integrity": "sha1-pqLzL/0t+wT1yiXs0Pa4PPeYoeE=", - "dev": true - }, - "is-equal-shallow": { - "version": "0.1.3", - "resolved": "https://registry.npmjs.org/is-equal-shallow/-/is-equal-shallow-0.1.3.tgz", - "integrity": "sha1-IjgJj8Ih3gvPpdnqxMRdY4qhxTQ=", - "dev": true, - "requires": { - "is-primitive": "^2.0.0" - } - }, - "is-extendable": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz", - "integrity": "sha1-YrEQ4omkcUGOPsNqYX1HLjAd/Ik=", - "dev": true - }, - "is-extglob": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-1.0.0.tgz", - "integrity": "sha1-rEaBd8SUNAWgkvyPKXYMb/xiBsA=", - "dev": true - }, - "is-fullwidth-code-point": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", - "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", - "dev": true - }, - "is-glob": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-2.0.1.tgz", - "integrity": "sha1-0Jb5JqPe1WAPP9/ZEZjLCIjC2GM=", - "dev": true, - "requires": { - "is-extglob": "^1.0.0" - } - }, - "is-module": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-module/-/is-module-1.0.0.tgz", - "integrity": "sha1-Mlj7afeMFNW4FdZkM2tM/7ZEFZE=", - "dev": true - }, - "is-number": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-2.1.0.tgz", - "integrity": "sha1-Afy7s5NGOlSPL0ZszhbezknbkI8=", - "dev": true, - "requires": { - "kind-of": "^3.0.2" - } - }, - "is-posix-bracket": { - "version": "0.1.1", 
- "resolved": "https://registry.npmjs.org/is-posix-bracket/-/is-posix-bracket-0.1.1.tgz", - "integrity": "sha1-MzTceXdDaOkvAW5vvAqI9c1ua8Q=", - "dev": true - }, - "is-primitive": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-primitive/-/is-primitive-2.0.0.tgz", - "integrity": "sha1-IHurkWOEmcB7Kt8kCkGochADRXU=", - "dev": true - }, - "is-stream": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", - "integrity": "sha1-EtSj3U5o4Lec6428hBc66A2RykQ=", - "dev": true - }, - "isarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=", - "dev": true - }, - "isexe": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=", - "dev": true - }, - "isobject": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/isobject/-/isobject-2.1.0.tgz", - "integrity": "sha1-8GVWEJaj8dou9GJy+BXIQNh+DIk=", - "dev": true, - "requires": { - "isarray": "1.0.0" - } - }, - "json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", - "dev": true - }, - "kind-of": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", - "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", - "dev": true, - "requires": { - "is-buffer": "^1.1.5" - } - }, - "lru-cache": { - "version": "4.1.5", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-4.1.5.tgz", - "integrity": "sha512-sWZlbEP2OsHNkXrMl5GYk/jKk70MBng6UU4YI/qGDYbgf6YbP4EvmqISbXCoJiRKs+1bSpFHVgQxvJ17F2li5g==", - "dev": true, - "requires": { - "pseudomap": "^1.0.2", - "yallist": "^2.1.2" - } - }, - "magic-string": { - "version": "0.22.4", - "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.22.4.tgz", - "integrity": "sha512-kxBL06p6iO2qPBHsqGK2b3cRwiRGpnmSuVWNhwHcMX7qJOUr1HvricYP1LZOCdkQBUp0jiWg2d6WJwR3vYgByw==", - "dev": true, - "requires": { - "vlq": "^0.2.1" - } - }, - "math-random": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/math-random/-/math-random-1.0.1.tgz", - "integrity": "sha1-izqsWIuKZuSXXjzepn97sylgH6w=", - "dev": true - }, - "micromatch": { - "version": "2.3.11", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-2.3.11.tgz", - "integrity": "sha1-hmd8l9FyCzY0MdBNDRUpO9OMFWU=", - "dev": true, - "requires": { - "arr-diff": "^2.0.0", - "array-unique": "^0.2.1", - "braces": "^1.8.2", - "expand-brackets": "^0.1.4", - "extglob": "^0.3.1", - "filename-regex": "^2.0.0", - "is-extglob": "^1.0.0", - "is-glob": "^2.0.1", - "kind-of": "^3.0.2", - "normalize-path": "^2.0.1", - "object.omit": "^2.0.0", - "parse-glob": "^3.0.4", - "regex-cache": "^0.4.2" - } - }, - "mime-db": { - "version": "1.44.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.44.0.tgz", - "integrity": "sha512-/NOTfLrsPBVeH7YtFPgsVWveuL+4SjjYxaQ1xtM1KMFj7HdxlBlxeyNLzhyJVx7r4rZGJAZ/6lkKCitSc/Nmpg==", - "dev": true - }, - "mime-types": { - "version": "2.1.27", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.27.tgz", - "integrity": "sha512-JIhqnCasI9yD+SsmkquHBxTSEuZdQX5BuQnS2Vc7puQQQ+8yiP5AY5uWhpdv4YL4VM5c6iliiYWPgJ/nJQLp7w==", - "dev": true, - "requires": { - "mime-db": "1.44.0" - } - }, - "minimatch": { - "version": 
"3.0.4", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", - "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", - "dev": true, - "requires": { - "brace-expansion": "^1.1.7" - } - }, - "minimist": { - "version": "1.2.5", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", - "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==", - "dev": true - }, - "mkdirp": { - "version": "0.5.1", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.1.tgz", - "integrity": "sha1-MAV0OOrGz3+MR2fzhkjWaX11yQM=", - "dev": true, - "requires": { - "minimist": "0.0.8" - }, - "dependencies": { - "minimist": { - "version": "0.0.8", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-0.0.8.tgz", - "integrity": "sha1-hX/Kv8M5fSYluCKCYuhqp6ARsF0=", - "dev": true - } - } - }, - "mocha": { - "version": "5.0.4", - "resolved": "https://registry.npmjs.org/mocha/-/mocha-5.0.4.tgz", - "integrity": "sha512-nMOpAPFosU1B4Ix1jdhx5e3q7XO55ic5a8cgYvW27CequcEY+BabS0kUVL1Cw1V5PuVHZWeNRWFLmEPexo79VA==", - "dev": true, - "requires": { - "browser-stdout": "1.3.1", - "commander": "2.11.0", - "debug": "3.1.0", - "diff": "3.5.0", - "escape-string-regexp": "1.0.5", - "glob": "7.1.2", - "growl": "1.10.3", - "he": "1.1.1", - "mkdirp": "0.5.1", - "supports-color": "4.4.0" - }, - "dependencies": { - "commander": { - "version": "2.11.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-2.11.0.tgz", - "integrity": "sha512-b0553uYA5YAEGgyYIGYROzKQ7X5RAqedkfjiZxwi0kL1g3bOaBNNZfYkzt/CL0umgD5wc9Jec2FbB98CjkMRvQ==", - "dev": true - }, - "debug": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.1.0.tgz", - "integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==", - "dev": true, - "requires": { - "ms": "2.0.0" - } - }, - "ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", - "dev": true - }, - "supports-color": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-4.4.0.tgz", - "integrity": "sha512-rKC3+DyXWgK0ZLKwmRsrkyHVZAjNkfzeehuFWdGGcqGDTZFH73+RH6S/RDAAxl9GusSjZSUWYLmT9N5pzXFOXQ==", - "dev": true, - "requires": { - "has-flag": "^2.0.0" - } - } - } - }, - "ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", - "dev": true - }, - "ndarray": { - "version": "1.0.18", - "resolved": "https://registry.npmjs.org/ndarray/-/ndarray-1.0.18.tgz", - "integrity": "sha1-tg06cyJOxVXQ+qeXEeUCRI/T95M=", - "dev": true, - "requires": { - "iota-array": "^1.0.0", - "is-buffer": "^1.0.2" - } - }, - "negotiator": { - "version": "0.6.2", - "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.2.tgz", - "integrity": "sha512-hZXc7K2e+PgeI1eDBe/10Ard4ekbfrrqG8Ep+8Jmf4JID2bNg7NvCPOZN+kfF574pFQI7mum2AUqDidoKqcTOw==", - "dev": true - }, - "node-fetch": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.1.tgz", - "integrity": "sha512-V4aYg89jEoVRxRb2fJdAg8FHvI7cEyYdVAh94HH0UIK8oJxUfkjlDQN9RbMx+bEjP7+ggMiFRprSti032Oipxw==", - "dev": true - }, - "normalize-path": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-2.1.1.tgz", - "integrity": "sha1-GrKLVW4Zg2Oowab35vogE3/mrtk=", - 
"dev": true, - "requires": { - "remove-trailing-separator": "^1.0.1" - } - }, - "npm-run-path": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-2.0.2.tgz", - "integrity": "sha1-NakjLfo11wZ7TLLd8jV7GHFTbF8=", - "dev": true, - "requires": { - "path-key": "^2.0.0" - } - }, - "numpy-parser": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/numpy-parser/-/numpy-parser-1.0.2.tgz", - "integrity": "sha512-Afucgc+hNuHdkT7PA7608kapG2jUpYCMURXluJBwHZ62YbuFgoOmltuDzAi/JDVBl+WYoNNGUPQVQWbnDtZZ+A==", - "dev": true - }, - "object.omit": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/object.omit/-/object.omit-2.0.1.tgz", - "integrity": "sha1-Gpx0SCnznbuFjHbKNXmuKlTr0fo=", - "dev": true, - "requires": { - "for-own": "^0.1.4", - "is-extendable": "^0.1.1" - } - }, - "on-headers": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.2.tgz", - "integrity": "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==", - "dev": true - }, - "once": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", - "dev": true, - "requires": { - "wrappy": "1" - } - }, - "p-finally": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz", - "integrity": "sha1-P7z7FbiZpEEjs0ttzBi3JDNqLK4=", - "dev": true - }, - "parse-glob": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/parse-glob/-/parse-glob-3.0.4.tgz", - "integrity": "sha1-ssN2z7EfNVE7rdFz7wu246OIORw=", - "dev": true, - "requires": { - "glob-base": "^0.3.0", - "is-dotfile": "^1.0.0", - "is-extglob": "^1.0.0", - "is-glob": "^2.0.0" - } - }, - "path-is-absolute": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", - "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", - "dev": true - }, - "path-is-inside": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/path-is-inside/-/path-is-inside-1.0.2.tgz", - "integrity": "sha1-NlQX3t5EQw0cEa9hAn+s8HS9/FM=", - "dev": true - }, - "path-key": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz", - "integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=", - "dev": true - }, - "path-parse": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.5.tgz", - "integrity": "sha1-PBrfhx6pzWyUMbbqK9dKD/BVxME=", - "dev": true - }, - "path-to-regexp": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-2.2.0.tgz", - "integrity": "sha512-zJcOPeBsraLjWXwUzFMPzH3QO2CmO1yRggtADPJjOTyCF5csQxfUGJL+CbyyRvIS09wOipi4F/fgRhdmVGSwxQ==", - "dev": true - }, - "pathval": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/pathval/-/pathval-1.1.0.tgz", - "integrity": "sha1-uULm1L3mUwBe9rcTYd74cn0GReA=", - "dev": true - }, - "preserve": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/preserve/-/preserve-0.2.0.tgz", - "integrity": "sha1-gV7R9uvGWSb4ZbMQwHE7yzMVzks=", - "dev": true - }, - "pseudomap": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/pseudomap/-/pseudomap-1.0.2.tgz", - "integrity": "sha1-8FKijacOYYkX7wqKw0wa5aaChrM=", - "dev": true - }, - "punycode": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz", - "integrity": 
"sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==", - "dev": true - }, - "randomatic": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/randomatic/-/randomatic-3.0.0.tgz", - "integrity": "sha512-VdxFOIEY3mNO5PtSRkkle/hPJDHvQhK21oa73K4yAc9qmp6N429gAyF1gZMOTMeS0/AYzaV/2Trcef+NaIonSA==", - "dev": true, - "requires": { - "is-number": "^4.0.0", - "kind-of": "^6.0.0", - "math-random": "^1.0.1" - }, - "dependencies": { - "is-number": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-4.0.0.tgz", - "integrity": "sha512-rSklcAIlf1OmFdyAqbnWTLVelsQ58uvZ66S/ZyawjWqIviTWCjg2PzVGw8WUA+nNuPTqb4wgA+NszrJ+08LlgQ==", - "dev": true - }, - "kind-of": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz", - "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==", - "dev": true - } - } - }, - "range-parser": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.0.tgz", - "integrity": "sha1-9JvmtIeJTdxA3MlKMi9hEJLgDV4=", - "dev": true - }, - "rc": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", - "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", - "dev": true, - "requires": { - "deep-extend": "^0.6.0", - "ini": "~1.3.0", - "minimist": "^1.2.0", - "strip-json-comments": "~2.0.1" - } - }, - "regex-cache": { - "version": "0.4.4", - "resolved": "https://registry.npmjs.org/regex-cache/-/regex-cache-0.4.4.tgz", - "integrity": "sha512-nVIZwtCjkC9YgvWkpM55B5rBhBYRZhAaJbgcFYXXsHnbZ9UZI9nnVWYZpBlCqv9ho2eZryPnWrZGsOdPwVWXWQ==", - "dev": true, - "requires": { - "is-equal-shallow": "^0.1.3" - } - }, - "registry-auth-token": { - "version": "3.3.2", - "resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-3.3.2.tgz", - "integrity": "sha512-JL39c60XlzCVgNrO+qq68FoNb56w/m7JYvGR2jT5iR1xBrUA3Mfx5Twk5rqTThPmQKMWydGmq8oFtDlxfrmxnQ==", - "dev": true, - "requires": { - "rc": "^1.1.6", - "safe-buffer": "^5.0.1" - } - }, - "registry-url": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/registry-url/-/registry-url-3.1.0.tgz", - "integrity": "sha1-PU74cPc93h138M+aOBQyRE4XSUI=", - "dev": true, - "requires": { - "rc": "^1.0.1" - } - }, - "remove-trailing-separator": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/remove-trailing-separator/-/remove-trailing-separator-1.1.0.tgz", - "integrity": "sha1-wkvOKig62tW8P1jg1IJJuSN52O8=", - "dev": true - }, - "repeat-element": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/repeat-element/-/repeat-element-1.1.2.tgz", - "integrity": "sha1-7wiaF40Ug7quTZPrmLT55OEdmQo=", - "dev": true - }, - "repeat-string": { - "version": "1.6.1", - "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz", - "integrity": "sha1-jcrkcOHIirwtYA//Sndihtp15jc=", - "dev": true - }, - "require-relative": { - "version": "0.8.7", - "resolved": "https://registry.npmjs.org/require-relative/-/require-relative-0.8.7.tgz", - "integrity": "sha1-eZlTn8ngR6N5KPoZb44VY9q9Nt4=", - "dev": true - }, - "resolve": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.5.0.tgz", - "integrity": "sha512-hgoSGrc3pjzAPHNBg+KnFcK2HwlHTs/YrAGUr6qgTVUZmXv1UEXXl0bZNBKMA9fud6lRYFdPGz0xXxycPzmmiw==", - "dev": true, - "requires": { - "path-parse": "^1.0.5" - } - }, - "rollup": { - 
"version": "0.56.5", - "resolved": "https://registry.npmjs.org/rollup/-/rollup-0.56.5.tgz", - "integrity": "sha512-IGPk5vdWrsc4vkiW9XMeXr5QMtxmvATTttTi59w2jBQWe9G/MMQtn8teIBAj+DdK51TrpVT6P0aQUaQUlUYCJA==", - "dev": true - }, - "rollup-plugin-commonjs": { - "version": "8.4.1", - "resolved": "https://registry.npmjs.org/rollup-plugin-commonjs/-/rollup-plugin-commonjs-8.4.1.tgz", - "integrity": "sha512-mg+WuD+jlwoo8bJtW3Mvx7Tz6TsIdMsdhuvCnDMoyjh0oxsVgsjB/N0X984RJCWwc5IIiqNVJhXeeITcc73++A==", - "dev": true, - "requires": { - "acorn": "^5.2.1", - "estree-walker": "^0.5.0", - "magic-string": "^0.22.4", - "resolve": "^1.4.0", - "rollup-pluginutils": "^2.0.1" - } - }, - "rollup-plugin-node-resolve": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/rollup-plugin-node-resolve/-/rollup-plugin-node-resolve-3.2.0.tgz", - "integrity": "sha512-stvVrKaQiNu65ObGJLCHyHH/NXjiPMt/ZHwvl444KgJPrii1zCgyg+NTK2Uy6WExL+OuUWdHd7T8EoPQDtYEkw==", - "dev": true, - "requires": { - "builtin-modules": "^2.0.0", - "is-module": "^1.0.0", - "resolve": "^1.1.6" - } - }, - "rollup-plugin-svelte": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/rollup-plugin-svelte/-/rollup-plugin-svelte-4.0.0.tgz", - "integrity": "sha512-yroOS7RUeLz6uDQAPKkdbPJ4cpppEif31yQzFn866EsL45WeN8hsRYI0eJHQ57JB98XqOKaTYLkEtSHGjQWR+A==", - "dev": true, - "requires": { - "require-relative": "^0.8.7", - "rollup-pluginutils": "^2.0.1", - "sourcemap-codec": "^1.3.1" - } - }, - "rollup-pluginutils": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/rollup-pluginutils/-/rollup-pluginutils-2.0.1.tgz", - "integrity": "sha1-fslbNXP2VDpGpkYb2afFRFJdD8A=", - "dev": true, - "requires": { - "estree-walker": "^0.3.0", - "micromatch": "^2.3.11" - }, - "dependencies": { - "estree-walker": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-0.3.1.tgz", - "integrity": "sha1-5rGlHPcpJSTnI3wxLl/mZgwc4ao=", - "dev": true - } - } - }, - "rw": { - "version": "1.3.3", - "resolved": "https://registry.npmjs.org/rw/-/rw-1.3.3.tgz", - "integrity": "sha1-P4Yt+pGrdmsUiF700BEkv9oHT7Q=", - "dev": true - }, - "safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", - "dev": true - }, - "serve": { - "version": "10.1.2", - "resolved": "https://registry.npmjs.org/serve/-/serve-10.1.2.tgz", - "integrity": "sha512-TVH35uwndRlCqSeX3grR3Ntrjx2aBTeu6sx+zTD2CzN2N/rHuEDTvxiBwWbrellJNyWiQFz2xZmoW+UxV+Zahg==", - "dev": true, - "requires": { - "@zeit/schemas": "2.6.0", - "ajv": "6.5.3", - "arg": "2.0.0", - "boxen": "1.3.0", - "chalk": "2.4.1", - "clipboardy": "1.2.3", - "compression": "1.7.3", - "serve-handler": "5.0.8", - "update-check": "1.5.2" - } - }, - "serve-handler": { - "version": "5.0.8", - "resolved": "https://registry.npmjs.org/serve-handler/-/serve-handler-5.0.8.tgz", - "integrity": "sha512-pqk0SChbBLLHfMIxQ55czjdiW7tj2cFy53svvP8e5VqEN/uB/QpfiTJ8k1uIYeFTDVoi+FGi5aqXScuu88bymg==", - "dev": true, - "requires": { - "bytes": "3.0.0", - "content-disposition": "0.5.2", - "fast-url-parser": "1.1.3", - "mime-types": "2.1.18", - "minimatch": "3.0.4", - "path-is-inside": "1.0.2", - "path-to-regexp": "2.2.1", - "range-parser": "1.2.0" - }, - "dependencies": { - "mime-db": { - "version": "1.33.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.33.0.tgz", - "integrity": 
"sha512-BHJ/EKruNIqJf/QahvxwQZXKygOQ256myeN/Ew+THcAa5q+PjyTTMMeNQC4DZw5AwfvelsUrA6B67NKMqXDbzQ==", - "dev": true - }, - "mime-types": { - "version": "2.1.18", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.18.tgz", - "integrity": "sha512-lc/aahn+t4/SWV/qcmumYjymLsWfN3ELhpmVuUFjgsORruuZPVSwAQryq+HHGvO/SI2KVX26bx+En+zhM8g8hQ==", - "dev": true, - "requires": { - "mime-db": "~1.33.0" - } - }, - "path-to-regexp": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-2.2.1.tgz", - "integrity": "sha512-gu9bD6Ta5bwGrrU8muHzVOBFFREpp2iRkVfhBJahwJ6p6Xw20SjT0MxLnwkjOibQmGSYhiUnf2FLe7k+jcFmGQ==", - "dev": true - } - } - }, - "shebang-command": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz", - "integrity": "sha1-RKrGW2lbAzmJaMOfNj/uXer98eo=", - "dev": true, - "requires": { - "shebang-regex": "^1.0.0" - } - }, - "shebang-regex": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz", - "integrity": "sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM=", - "dev": true - }, - "signal-exit": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.3.tgz", - "integrity": "sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA==", - "dev": true - }, - "sourcemap-codec": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/sourcemap-codec/-/sourcemap-codec-1.4.0.tgz", - "integrity": "sha512-66s3CwUASiYGiwQxkr34IctPs/LDkaJ8qNqVK6bBektymq3Sx1rX9qDZ8MNXFwiXqKuM3JYwzG3NAhI1ivqkjA==", - "dev": true, - "requires": { - "vlq": "^1.0.0" - }, - "dependencies": { - "vlq": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/vlq/-/vlq-1.0.0.tgz", - "integrity": "sha512-o3WmXySo+oI5thgqr7Qy8uBkT/v9Zr+sRyrh1lr8aWPUkgDWdWt4Nae2WKBrLsocgE8BuWWD0jLc+VW8LeU+2g==", - "dev": true - } - } - }, - "string-width": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz", - "integrity": "sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==", - "dev": true, - "requires": { - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^4.0.0" - } - }, - "strip-ansi": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz", - "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=", - "dev": true, - "requires": { - "ansi-regex": "^3.0.0" - } - }, - "strip-eof": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/strip-eof/-/strip-eof-1.0.0.tgz", - "integrity": "sha1-u0P/VZim6wXYm1n80SnJgzE2Br8=", - "dev": true - }, - "strip-json-comments": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", - "integrity": "sha1-PFMZQukIwml8DsNEhYwobHygpgo=", - "dev": true - }, - "supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "dev": true, - "requires": { - "has-flag": "^3.0.0" - }, - "dependencies": { - "has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", - "dev": true - } - } - }, - "svelte": { - "version": "1.57.1", - "resolved": "https://registry.npmjs.org/svelte/-/svelte-1.57.1.tgz", - 
"integrity": "sha512-FyOB78duJXwnJP9PJ0hENV5mGJs+g8riEM1O8swF6r4aYmXKkbvCLwnylzZNJVteCYRYtUC2Ybgh0sFt6TnBQQ==", - "dev": true - }, - "term-size": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/term-size/-/term-size-1.2.0.tgz", - "integrity": "sha1-RYuDiH8oj8Vtb/+/rSYuJmOO+mk=", - "dev": true, - "requires": { - "execa": "^0.7.0" - } - }, - "type-detect": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz", - "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==", - "dev": true - }, - "update-check": { - "version": "1.5.2", - "resolved": "https://registry.npmjs.org/update-check/-/update-check-1.5.2.tgz", - "integrity": "sha512-1TrmYLuLj/5ZovwUS7fFd1jMH3NnFDN1y1A8dboedIDt7zs/zJMo6TwwlhYKkSeEwzleeiSBV5/3c9ufAQWDaQ==", - "dev": true, - "requires": { - "registry-auth-token": "3.3.2", - "registry-url": "3.1.0" - } - }, - "uri-js": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.0.tgz", - "integrity": "sha512-B0yRTzYdUCCn9n+F4+Gh4yIDtMQcaJsmYBDsTSG8g/OejKBodLQ2IHfN3bM7jUsRXndopT7OIXWdYqc1fjmV6g==", - "dev": true, - "requires": { - "punycode": "^2.1.0" - } - }, - "vary": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", - "integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw=", - "dev": true - }, - "vlq": { - "version": "0.2.3", - "resolved": "https://registry.npmjs.org/vlq/-/vlq-0.2.3.tgz", - "integrity": "sha512-DRibZL6DsNhIgYQ+wNdWDL2SL3bKPlVrRiBqV5yuMm++op8W4kGFtaQfCs4KEJn0wBZcHVHJ3eoywX8983k1ow==", - "dev": true - }, - "which": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", - "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", - "dev": true, - "requires": { - "isexe": "^2.0.0" - } - }, - "widest-line": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-2.0.1.tgz", - "integrity": "sha512-Ba5m9/Fa4Xt9eb2ELXt77JxVDV8w7qQrH0zS/TWSJdLyAwQjWoOzpzj5lwVftDz6n/EOu3tNACS84v509qwnJA==", - "dev": true, - "requires": { - "string-width": "^2.1.1" - } - }, - "wrappy": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=", - "dev": true - }, - "yallist": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-2.1.2.tgz", - "integrity": "sha1-HBH5IY8HYImkfdUS+TxmmaaoHVI=", - "dev": true - } - } -} From 068f57fddb55b5b45bc776ee16ea1bb90535f7d2 Mon Sep 17 00:00:00 2001 From: Gabriel Goh Date: Thu, 4 Mar 2021 11:34:15 -0800 Subject: [PATCH 38/57] added clip model --- lucid/modelzoo/other_models/CLIPx4.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 lucid/modelzoo/other_models/CLIPx4.py diff --git a/lucid/modelzoo/other_models/CLIPx4.py b/lucid/modelzoo/other_models/CLIPx4.py new file mode 100644 index 00000000..237f8eeb --- /dev/null +++ b/lucid/modelzoo/other_models/CLIPx4.py @@ -0,0 +1,23 @@ +# Copyright 2018 The Lucid Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +from lucid.modelzoo.vision_base import Model + +class CLIPImage(Model): + image_value_range = (0, 255) + input_name = 'input_image' + model_name = "RN50_4x" + image_shape = [288, 288, 3] + model_path = "https://openaipublic.blob.core.windows.net/clip/tf/RN50_4x/084ee9c176da32014b0ebe42cd7ca66e/image32.pb" From e68dbb6c0cf93d1b937bf944897e474d354b8009 Mon Sep 17 00:00:00 2001 From: Ludwig Schubert Date: Fri, 12 Mar 2021 10:09:57 +0000 Subject: [PATCH 39/57] Update CLIP model URL to modelzoo bucket --- lucid/modelzoo/other_models/CLIPx4.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lucid/modelzoo/other_models/CLIPx4.py b/lucid/modelzoo/other_models/CLIPx4.py index 237f8eeb..7b38d64d 100644 --- a/lucid/modelzoo/other_models/CLIPx4.py +++ b/lucid/modelzoo/other_models/CLIPx4.py @@ -20,4 +20,4 @@ class CLIPImage(Model): input_name = 'input_image' model_name = "RN50_4x" image_shape = [288, 288, 3] - model_path = "https://openaipublic.blob.core.windows.net/clip/tf/RN50_4x/084ee9c176da32014b0ebe42cd7ca66e/image32.pb" + model_path = "gs://modelzoo/vision/other_models/Clip_ResNet50.pb" From 60668b2a1a33df7ddafb7ee198eda05820b6947b Mon Sep 17 00:00:00 2001 From: Ludwig Schubert Date: Fri, 12 Mar 2021 10:51:54 +0000 Subject: [PATCH 40/57] Correct import paths for Clip model --- lucid/modelzoo/other_models/{CLIPx4.py => Clip.py} | 6 +++--- lucid/modelzoo/other_models/__init__.py | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) rename lucid/modelzoo/other_models/{CLIPx4.py => Clip.py} (92%) diff --git a/lucid/modelzoo/other_models/CLIPx4.py b/lucid/modelzoo/other_models/Clip.py similarity index 92% rename from lucid/modelzoo/other_models/CLIPx4.py rename to lucid/modelzoo/other_models/Clip.py index 7b38d64d..3e61ea5f 100644 --- a/lucid/modelzoo/other_models/CLIPx4.py +++ b/lucid/modelzoo/other_models/Clip.py @@ -15,9 +15,9 @@ from lucid.modelzoo.vision_base import Model -class CLIPImage(Model): +class Clip_ResNet50_4x(Model): image_value_range = (0, 255) input_name = 'input_image' - model_name = "RN50_4x" + model_name = "Clip_ResNet50_4x" image_shape = [288, 288, 3] - model_path = "gs://modelzoo/vision/other_models/Clip_ResNet50.pb" + model_path = "gs://modelzoo/vision/other_models/Clip_ResNet50_4x.pb" diff --git a/lucid/modelzoo/other_models/__init__.py b/lucid/modelzoo/other_models/__init__.py index 74536dbb..00ef8668 100644 --- a/lucid/modelzoo/other_models/__init__.py +++ b/lucid/modelzoo/other_models/__init__.py @@ -10,6 +10,7 @@ from lucid.modelzoo.vision_base import Model as _Model from lucid.modelzoo.other_models.AlexNet import AlexNet +from lucid.modelzoo.other_models.Clip import Clip from lucid.modelzoo.other_models.InceptionV1 import InceptionV1, InceptionV1_adv_finetuned From f175a883dd956035cddadfc2a45c177b1536be82 Mon Sep 17 00:00:00 2001 From: Ludwig Schubert Date: Fri, 12 Mar 2021 10:52:52 +0000 Subject: [PATCH 41/57] Require Python 3.7 as numpy dependency does so --- setup.py | 2 +- tox.ini | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.py 
b/setup.py index c5f6acfd..596b94c7 100644 --- a/setup.py +++ b/setup.py @@ -77,7 +77,7 @@ "Natural Language :: English", "Programming Language :: Python", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", "Topic :: Scientific/Engineering", "Topic :: Scientific/Engineering :: Artificial Intelligence", "Topic :: Scientific/Engineering :: Mathematics", diff --git a/tox.ini b/tox.ini index b7f4dc22..8a5a4df1 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py{36} +envlist = py{37} [testenv] deps = From 96ff5ae98957000e8e752e31d581a28263b7e4dd Mon Sep 17 00:00:00 2001 From: Ludwig Schubert Date: Fri, 12 Mar 2021 10:54:01 +0000 Subject: [PATCH 42/57] Require Python 3.7 on CI, too --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index a2124023..6550df5f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,6 +1,6 @@ language: python python: - - "3.6" + - "3.7" install: - pip install -U pip wheel - pip install python-coveralls From 33a63233b22231c6427923c80eea818340b92658 Mon Sep 17 00:00:00 2001 From: Ludwig Schubert Date: Fri, 12 Mar 2021 10:59:41 +0000 Subject: [PATCH 43/57] Pin TF version on CI to last 1.x release --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 8a5a4df1..c22ab40b 100644 --- a/tox.ini +++ b/tox.ini @@ -3,7 +3,7 @@ envlist = py{37} [testenv] deps = - tensorflow + tensorflow == 1.15.5 .[test] commands = coverage run --source lucid --omit lucid/scratch/*,lucid/recipes/*,lucid/misc/gl/* -m py.test --run-slow {posargs} From 8b4ba198575ad61a7e8304cbe40b6383c481d7cf Mon Sep 17 00:00:00 2001 From: Ludwig Schubert Date: Fri, 12 Mar 2021 11:03:33 +0000 Subject: [PATCH 44/57] WIP, TBS --- lucid/modelzoo/other_models/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lucid/modelzoo/other_models/__init__.py b/lucid/modelzoo/other_models/__init__.py index 00ef8668..de864532 100644 --- a/lucid/modelzoo/other_models/__init__.py +++ b/lucid/modelzoo/other_models/__init__.py @@ -10,7 +10,7 @@ from lucid.modelzoo.vision_base import Model as _Model from lucid.modelzoo.other_models.AlexNet import AlexNet -from lucid.modelzoo.other_models.Clip import Clip +from lucid.modelzoo.other_models.Clip import Clip_ResNet50_4x from lucid.modelzoo.other_models.InceptionV1 import InceptionV1, InceptionV1_adv_finetuned From f26307be680da094c6355bf7dc6b14677f191af6 Mon Sep 17 00:00:00 2001 From: Ludwig Schubert Date: Fri, 12 Mar 2021 11:12:38 +0000 Subject: [PATCH 45/57] Attempt to catch ModuleNotFoundError in channel_reducer to get tests to run on CI --- lucid/misc/channel_reducer.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lucid/misc/channel_reducer.py b/lucid/misc/channel_reducer.py index e175dcc5..d190207f 100644 --- a/lucid/misc/channel_reducer.py +++ b/lucid/misc/channel_reducer.py @@ -23,9 +23,9 @@ import sklearn.decomposition try: - from sklearn.decomposition.base import BaseEstimator -except AttributeError: - from sklearn.base import BaseEstimator + from sklearn.decomposition.base import BaseEstimator +except ModuleNotFoundError: + from sklearn.base import BaseEstimator class ChannelReducer(object): From 0ade2907c5708ac23f47ebe96b42723b28784961 Mon Sep 17 00:00:00 2001 From: Ludwig Schubert Date: Fri, 12 Mar 2021 11:31:19 +0000 Subject: [PATCH 46/57] WIP testing newer gfile module import --- lucid/misc/io/reading.py | 21 ++++++++++++++------- 1 
file changed, 14 insertions(+), 7 deletions(-) diff --git a/lucid/misc/io/reading.py b/lucid/misc/io/reading.py index 52fed129..9e67bee6 100644 --- a/lucid/misc/io/reading.py +++ b/lucid/misc/io/reading.py @@ -25,7 +25,7 @@ import logging from urllib.parse import urlparse from urllib import request -from tensorflow import gfile +from tensorflow.io import gfile import tensorflow as tf from tempfile import gettempdir import gc @@ -136,7 +136,7 @@ def _is_remote(scheme): RESERVED_PATH_CHARS = re.compile("[^a-zA-Z0-9]") -LUCID_CACHE_DIR_NAME = 'lucid_cache' +LUCID_CACHE_DIR_NAME = "lucid_cache" MAX_FILENAME_LENGTH = 200 _LUCID_CACHE_DIR = None # filled on first use @@ -146,16 +146,22 @@ def local_cache_path(remote_url): """Returns the path that remote_url would be cached at locally.""" local_name = RESERVED_PATH_CHARS.sub("_", remote_url) if len(local_name) > MAX_FILENAME_LENGTH: - filename_hash = hashlib.sha256(local_name.encode('utf-8')).hexdigest() - truncated_name = local_name[:(MAX_FILENAME_LENGTH-(len(filename_hash)) - 1)] + '-' + filename_hash - log.debug(f'truncated long cache filename to {truncated_name} (original {len(local_name)} char name: {local_name}') + filename_hash = hashlib.sha256(local_name.encode("utf-8")).hexdigest() + truncated_name = ( + local_name[: (MAX_FILENAME_LENGTH - (len(filename_hash)) - 1)] + + "-" + + filename_hash + ) + log.debug( + f"truncated long cache filename to {truncated_name} (original {len(local_name)} char name: {local_name}" + ) local_name = truncated_name if _LUCID_CACHE_DIR is None: _LUCID_CACHE_DIR = os.path.join(gettempdir(), LUCID_CACHE_DIR_NAME) if not os.path.exists(_LUCID_CACHE_DIR): # folder might exist if another thread/process creates it concurrently, this would be ok os.makedirs(_LUCID_CACHE_DIR, exist_ok=True) - log.info(f'created lucid cache dir at {_LUCID_CACHE_DIR}') + log.info(f"created lucid cache dir at {_LUCID_CACHE_DIR}") return os.path.join(_LUCID_CACHE_DIR, local_name) @@ -199,7 +205,8 @@ def _read_and_cache(url, mode="rb"): from functools import partial -_READ_BUFFER_SIZE = 1048576 # setting a larger value here to help read bigger chunks of files over the network (eg from GCS) + +_READ_BUFFER_SIZE = 1048576 # setting a larger value here to help read bigger chunks of files over the network (eg from GCS) def _file_chunk_iterator(file_handle): From d07b2762aad021402f9a87d809d1206ae4c27669 Mon Sep 17 00:00:00 2001 From: Ludwig Schubert Date: Fri, 12 Mar 2021 11:39:46 +0000 Subject: [PATCH 47/57] Correct way to call modern GFile --- lucid/misc/io/reading.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lucid/misc/io/reading.py b/lucid/misc/io/reading.py index 9e67bee6..81e60666 100644 --- a/lucid/misc/io/reading.py +++ b/lucid/misc/io/reading.py @@ -25,7 +25,7 @@ import logging from urllib.parse import urlparse from urllib import request -from tensorflow.io import gfile +from tensorflow.io.gfile import GFile import tensorflow as tf from tempfile import gettempdir import gc @@ -121,7 +121,7 @@ def read_handle(url, cache=None, mode="rb"): def _handle_gfile(url, mode="rb"): - return gfile.Open(url, mode) + return GFile(url, mode) def _handle_web_url(url, mode="r"): From 0fcc7ab3b9d4aebe0c4526067991e35d16acc9de Mon Sep 17 00:00:00 2001 From: Ludwig Schubert Date: Fri, 12 Mar 2021 13:25:54 +0000 Subject: [PATCH 48/57] Fix a longstanding bug in url_scope that added an extra . 
after local file paths --- lucid/misc/io/scoping.py | 12 +++++++++--- tests/misc/io/test_scoping.py | 8 ++++++++ 2 files changed, 17 insertions(+), 3 deletions(-) create mode 100644 tests/misc/io/test_scoping.py diff --git a/lucid/misc/io/scoping.py b/lucid/misc/io/scoping.py index cc04fce4..a5c7a96c 100644 --- a/lucid/misc/io/scoping.py +++ b/lucid/misc/io/scoping.py @@ -37,8 +37,13 @@ def _normalize_url(url: str) -> str: # os.path.normpath mangles url schemes: gs://etc -> gs:/etc # urlparse.urljoin doesn't normalize paths url_scheme, sep, url_path = url.partition("://") - normalized_path = os.path.normpath(url_path) - return url_scheme + sep + normalized_path + # 2021-03-12 @ludwig this method is often called with paths that are not URLs. + # thus, url_path may be empty + # in this case we can't call `os.path.normpath(url_path)` + # as it "normalizes" an empty input to "." (current directory) + normalized_path = os.path.normpath(url_path) if url_path else "" + joined = url_scheme + sep + normalized_path + return joined def scope_url(url, io_scopes=None): @@ -47,4 +52,5 @@ def scope_url(url, io_scopes=None): return url paths = io_scopes + [url] joined = os.path.join(*paths) - return _normalize_url(joined) + normalized = _normalize_url(joined) + return normalized diff --git a/tests/misc/io/test_scoping.py b/tests/misc/io/test_scoping.py new file mode 100644 index 00000000..be694347 --- /dev/null +++ b/tests/misc/io/test_scoping.py @@ -0,0 +1,8 @@ +import pytest +from lucid.misc.io.scoping import io_scope, scope_url + + +def test_empty_io_scope(): + path = "./some/file.ext" + scoped = scope_url(path) + assert scoped == path From 5f4b26075ab7937c69c3e2b8cb9b09ee01a89b3f Mon Sep 17 00:00:00 2001 From: Ludwig Schubert Date: Fri, 12 Mar 2021 14:06:11 +0000 Subject: [PATCH 49/57] Add "slow" marker to pytest config options to avoid warning, disbale slow tests on CI --- tox.ini | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index c22ab40b..ddd0c751 100644 --- a/tox.ini +++ b/tox.ini @@ -5,9 +5,11 @@ envlist = py{37} deps = tensorflow == 1.15.5 .[test] -commands = coverage run --source lucid --omit lucid/scratch/*,lucid/recipes/*,lucid/misc/gl/* -m py.test --run-slow {posargs} +commands = coverage run --source lucid --omit lucid/scratch/*,lucid/recipes/*,lucid/misc/gl/* -m py.test {posargs} [pytest] +markers = + slow: marks tests as slow (deselect with '-m "not slow"') addopts = --verbose testpaths = ./tests/ From 4d972bad8e6b4b98c152dd8d759352f4e89d69c4 Mon Sep 17 00:00:00 2001 From: Ludwig Schubert Date: Fri, 12 Mar 2021 14:30:57 +0000 Subject: [PATCH 50/57] tf.spectral.irfft2d -> tf.signal.irfft2d --- lucid/optvis/param/random.py | 2 +- lucid/optvis/param/spatial.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lucid/optvis/param/random.py b/lucid/optvis/param/random.py index cbdc49c2..7d14a2b3 100644 --- a/lucid/optvis/param/random.py +++ b/lucid/optvis/param/random.py @@ -42,7 +42,7 @@ def rand_fft_image(shape, sd=None, decay_power=1): # learning rates to pixel-wise optimisation. 
spertum_scale *= np.sqrt(w * h) scaled_spectrum = spectrum * spertum_scale - img = tf.spectral.irfft2d(scaled_spectrum) + img = tf.signal.irfft2d(scaled_spectrum) # in case of odd input dimension we cut off the additional pixel # we get from irfft2d length computation img = img[:ch, :h, :w] diff --git a/lucid/optvis/param/spatial.py b/lucid/optvis/param/spatial.py index 70bcbfa0..a0154676 100644 --- a/lucid/optvis/param/spatial.py +++ b/lucid/optvis/param/spatial.py @@ -79,7 +79,7 @@ def fft_image(shape, sd=None, decay_power=1): # convert complex scaled spectrum to shape (h, w, ch) image tensor # needs to transpose because irfft2d returns channels first - image_t = tf.transpose(tf.spectral.irfft2d(scaled_spectrum_t), (0, 2, 3, 1)) + image_t = tf.transpose(tf.signal.irfft2d(scaled_spectrum_t), (0, 2, 3, 1)) # in case of odd spatial input dimensions we need to crop image_t = image_t[:batch, :h, :w, :ch] From 1803e851b1ca135add4927b9f351d511ccf86d7b Mon Sep 17 00:00:00 2001 From: Ludwig Schubert Date: Fri, 12 Mar 2021 14:50:35 +0000 Subject: [PATCH 51/57] Pin numpy to version 1.19 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 596b94c7..84f992b2 100644 --- a/setup.py +++ b/setup.py @@ -52,7 +52,7 @@ "optimization", ], install_requires=[ - "numpy", + "numpy<=1.19", "scipy", "scikit-learn", "umap-learn", From 9e0ebcbdbb68ebff6dbb1d49f8a8f175561a5876 Mon Sep 17 00:00:00 2001 From: Samuel Marks <807580+SamuelMarks@users.noreply.github.com> Date: Sat, 5 Dec 2020 21:45:36 +1100 Subject: [PATCH 52/57] [*.py] Rename "Arguments:" to "Args:" --- lucid/misc/iter_nd_utils.py | 2 +- lucid/misc/stimuli.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lucid/misc/iter_nd_utils.py b/lucid/misc/iter_nd_utils.py index e8c9105a..12c995e3 100644 --- a/lucid/misc/iter_nd_utils.py +++ b/lucid/misc/iter_nd_utils.py @@ -24,7 +24,7 @@ def recursive_enumerate_nd(it, stop_iter=None, prefix=()): """Recursively enumerate nested iterables with tuples n-dimenional indices. - Arguments: + Args: it: object to be enumerated stop_iter: User defined funciton which can conditionally block further iteration. Defaults to allowing iteration. diff --git a/lucid/misc/stimuli.py b/lucid/misc/stimuli.py index 480d4d15..1b0debbe 100644 --- a/lucid/misc/stimuli.py +++ b/lucid/misc/stimuli.py @@ -50,7 +50,7 @@ def img_f(x,y): return (negative if interior, positive if exterior) img = sampler(img_f) - Arguments: + Args: size: Size of image to be rendered in pixels. alias_factor: Number of samples to use in aliasing. color_a: Color of exterior. A 3-tuple of floats between 0 and 1. Defaults @@ -148,7 +148,7 @@ def rounded_corner(orientation, r, angular_width=90, size=224, **kwds): This function is a flexible generator of "rounded corner" stimuli. It returns an image, represented as a numpy array of shape [size, size, 3]. - Arguments: + Args: orientation: The orientation of the curve, in degrees. 
r: radius of the curve angular_width: when r=0 and we have sharp corner, this controls the angle From 17d52b55229c52567e6b7c140d18ac049778aa63 Mon Sep 17 00:00:00 2001 From: Tyler <41713505+Tylersuard@users.noreply.github.com> Date: Mon, 14 Dec 2020 04:51:23 -0800 Subject: [PATCH 53/57] Update xy2rgb.ipynb Added compatibility with Colab: switching to Tensorflow v.1 --- .../differentiable-parameterizations/xy2rgb.ipynb | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/notebooks/differentiable-parameterizations/xy2rgb.ipynb b/notebooks/differentiable-parameterizations/xy2rgb.ipynb index 2f2fdc9b..57210e58 100644 --- a/notebooks/differentiable-parameterizations/xy2rgb.ipynb +++ b/notebooks/differentiable-parameterizations/xy2rgb.ipynb @@ -103,6 +103,15 @@ "## Install, Import, and load a model" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%tensorflow_version 1.x" + ] + }, { "metadata": { "id": "b-IocmWWvb_I", @@ -1116,4 +1125,4 @@ ] } ] -} +} \ No newline at end of file From 3ab6a965b7e37d698c6854d52ebd196dcafa62a9 Mon Sep 17 00:00:00 2001 From: ProGamerGov Date: Wed, 16 Sep 2020 16:46:31 -0600 Subject: [PATCH 54/57] Fixed spelling of factorization - Changed "facotrization" to "factorization". --- lucid/misc/channel_reducer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lucid/misc/channel_reducer.py b/lucid/misc/channel_reducer.py index d190207f..d6f10edd 100644 --- a/lucid/misc/channel_reducer.py +++ b/lucid/misc/channel_reducer.py @@ -44,7 +44,7 @@ def __init__(self, n_components=3, reduction_alg="NMF", **kwargs): Inputs: n_components: Numer of dimensions to reduce inner most dimension to. reduction_alg: A string or sklearn.decomposition class. Defaults to - "NMF" (non-negative matrix facotrization). Other options include: + "NMF" (non-negative matrix factorization). Other options include: "PCA", "FastICA", and "MiniBatchDictionaryLearning". The name of any of the sklearn.decomposition classes will work, though. kwargs: Additional kwargs to be passed on to the reducer. 
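Note: the channel_reducer.py patches above only touch its docstring wording and the sklearn import fallback. For context, here is a minimal usage sketch of the class they modify, based on the `__init__(n_components, reduction_alg, **kwargs)` signature shown in the hunk; the sklearn-style `fit_transform` pass-through is assumed, and the activation array below is a made-up placeholder rather than anything from these patches.

```python
import numpy as np

from lucid.misc.channel_reducer import ChannelReducer

# Illustrative stand-in for real activations, e.g. a (height, width, channels)
# slice taken from a conv layer; NMF expects non-negative values.
acts = np.random.rand(14, 14, 528).astype(np.float32)

# Reduce the channel dimension to 6 non-negative factors with the default "NMF".
reducer = ChannelReducer(n_components=6, reduction_alg="NMF")
acts_reduced = reducer.fit_transform(acts)

print(acts_reduced.shape)  # expected: (14, 14, 6)
```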
From 3263e11b5e9e7c57857974dc16459f82d4b6104f Mon Sep 17 00:00:00 2001 From: Adam Pearce Date: Mon, 28 Sep 2020 12:27:43 -0400 Subject: [PATCH 55/57] ignore lock files --- .gitignore | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.gitignore b/.gitignore index 1ec8ae6a..832a6327 100644 --- a/.gitignore +++ b/.gitignore @@ -25,3 +25,7 @@ lib share tests/fixtures/generated_outputs/ + + +lucid/scratch/js/package-lock.json +lucid/scratch/js/yarn.lock \ No newline at end of file From 58b201e7bd5557e941829cd789cc91a36935d291 Mon Sep 17 00:00:00 2001 From: Chris Olah Date: Wed, 4 Dec 2019 11:47:47 -0800 Subject: [PATCH 56/57] @colah's graph_analysis: nicer json parsed graph structure --- lucid/misc/graph_analysis/parse_overlay.py | 53 ++++++++++++++-------- 1 file changed, 35 insertions(+), 18 deletions(-) diff --git a/lucid/misc/graph_analysis/parse_overlay.py b/lucid/misc/graph_analysis/parse_overlay.py index 07fd854a..6495f11f 100644 --- a/lucid/misc/graph_analysis/parse_overlay.py +++ b/lucid/misc/graph_analysis/parse_overlay.py @@ -49,19 +49,35 @@ def parse_structure(node): structure = node.sub_structure if structure is None: - return node.name + return { + "type" : "Node", + "name": node.name + } elif structure.structure_type == "Sequence": - return {"Sequence" : [parse_structure(n) for n in structure.structure["sequence"]]} + return { + "type" : "Sequence", + "children": [parse_structure(n) for n in structure.structure["sequence"]] + } elif structure.structure_type == "HeadBranch": - return {"Sequence" : [ - {"Branch" : [parse_structure(n) for n in structure.structure["branches"]] }, - parse_structure(structure.structure["head"]) - ]} + return { + "type" : "Sequence", + "children": [{ + "type": "Branch", + "children": [parse_structure(n) for n in structure.structure["branches"]] + }, + parse_structure(structure.structure["head"])] + } elif structure.structure_type == "TailBranch": - return {"Sequence" : [ + return { + "type" : "Sequence", + "children": [ parse_structure(structure.structure["tail"]), - {"Branch" : [parse_structure(n) for n in structure.structure["branches"]] }, - ]} + { + "type": "Branch", + "subtype": "AuxilliaryHeadBranch", + "children": [parse_structure(n) for n in structure.structure["branches"]] + }] + } else: data = {} for k in structure.structure: @@ -70,26 +86,27 @@ def parse_structure(node): else: data[k] = parse_structure(structure.structure[k]) - return {structure.structure_type : data} + data["type"] = structure.structure_type + return data def flatten_sequences(structure): """Flatten nested sequences into a single sequence.""" - if isinstance(structure, str) or structure is None: + if isinstance(structure, str) or (isinstance(structure, dict) and structure["type"] == "Node") or structure is None: return structure else: structure = structure.copy() - for k in structure: - structure[k] = [flatten_sequences(sub) for sub in structure[k]] + if "children" in structure: + structure["children"] = [flatten_sequences(sub) for sub in structure["children"]] - if "Sequence" in structure: + if structure["type"] == "Sequence": new_seq = [] - for sub in structure["Sequence"]: - if isinstance(sub, dict) and "Sequence" in sub: - new_seq += sub["Sequence"] + for sub in structure["children"]: + if isinstance(sub, dict) and sub["type"] == "Sequence": + new_seq += sub["children"] else: new_seq.append(sub) - structure["Sequence"] = new_seq + structure["children"] = new_seq return structure From a4c29a96f49907ae34c5f38dd88063192f5138c9 Mon Sep 17 00:00:00 2001 From: Constum 
Thomas Date: Sun, 14 Mar 2021 17:55:17 +0100 Subject: [PATCH 57/57] Adding a message for channel/spatial attribution indicating how to generate spritemaps --- .../jupyter_versions/AttrChannelJupyter.ipynb | 204 +++++++++--------- .../jupyter_versions/AttrSpatialJupyter.ipynb | 54 +++-- 2 files changed, 130 insertions(+), 128 deletions(-) diff --git a/notebooks/building-blocks/jupyter_versions/AttrChannelJupyter.ipynb b/notebooks/building-blocks/jupyter_versions/AttrChannelJupyter.ipynb index d35d6f18..a5f4bc6c 100644 --- a/notebooks/building-blocks/jupyter_versions/AttrChannelJupyter.ipynb +++ b/notebooks/building-blocks/jupyter_versions/AttrChannelJupyter.ipynb @@ -66,7 +66,14 @@ "\n", "Thanks for trying Lucid!\n", "\n", - "#### **This notebook is a Jupyter version of the original Google Colab Notebook. This version adds widgets to facilitate the use of Lucid on your own images.**" + "#### **This notebook is a Jupyter version of the original Google Colab Notebook. This version adds widgets to facilitate the use of Lucid on your own images.**\n", + "\n", + "\n", + "\n", + "**In order to use this notebook you need to generate or download a spritemap of feature activations corresponding to your model and the layer you want to study.**\n", + "\n", + "\n", + "For example you can get the spritemap of Googlenet at layer Mixed4A at [this url](https://storage.googleapis.com/lucid-static/building-blocks/googlenet_spritemaps/sprite_mixed4a_channel_alpha.jpeg) or you can generate the spritemap of your choice using the notebook SpritemapGenerator placed at ```notebooks/building-blocks/jupyter-versions/```" ] }, { @@ -104,7 +111,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "\u001b[33mWARNING: You are using pip version 19.3.1; however, version 20.1.1 is available.\r\n", + "\u001b[33mWARNING: You are using pip version 19.3.1; however, version 21.0.1 is available.\r\n", "You should consider upgrading via the 'pip install --upgrade pip' command.\u001b[0m\r\n" ] } @@ -223,8 +230,8 @@ "output_type": "stream", "text": [ "Trying to build svelte component from html...\n", - "svelte compile --format iife /tmp/svelte_kcad2_h2/ChannelAttrWidget_dc20ba62_58d4_4050_b0d2_0a567afed2fd.html > /tmp/svelte_kcad2_h2/ChannelAttrWidget_dc20ba62_58d4_4050_b0d2_0a567afed2fd.js\n", - "b'svelte version 1.64.1\\ncompiling ../../../../../../../tmp/svelte_kcad2_h2/ChannelAttrWidget_dc20ba62_58d4_4050_b0d2_0a567afed2fd.html...\\n'\n" + "svelte compile --format iife /tmp/svelte_xaebppqv/ChannelAttrWidget_a11d0b66_6448_4125_9f52_2827a93f872e.html > /tmp/svelte_xaebppqv/ChannelAttrWidget_a11d0b66_6448_4125_9f52_2827a93f872e.js\n", + "b'svelte version 1.64.1\\ncompiling ../../../../tmp/svelte_xaebppqv/ChannelAttrWidget_a11d0b66_6448_4125_9f52_2827a93f872e.html...\\n'\n" ] } ], @@ -338,8 +345,8 @@ "output_type": "stream", "text": [ "Trying to build svelte component from html...\n", - "svelte compile --format iife /tmp/svelte_kcad2_h2/BarsWidget_9f40af41_fd2e_490a_bdc9_8f63f2c397b6.html > /tmp/svelte_kcad2_h2/BarsWidget_9f40af41_fd2e_490a_bdc9_8f63f2c397b6.js\n", - "b'svelte version 1.64.1\\ncompiling ../../../../../../../tmp/svelte_kcad2_h2/BarsWidget_9f40af41_fd2e_490a_bdc9_8f63f2c397b6.html...\\n'\n" + "svelte compile --format iife /tmp/svelte_xaebppqv/BarsWidget_b26bdc5c_e39c_4043_8c12_002e99aed31e.html > /tmp/svelte_xaebppqv/BarsWidget_b26bdc5c_e39c_4043_8c12_002e99aed31e.js\n", + "b'svelte version 1.64.1\\ncompiling ../../../../tmp/svelte_xaebppqv/BarsWidget_b26bdc5c_e39c_4043_8c12_002e99aed31e.html...\\n'\n" ] } ], 
@@ -538,7 +545,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "ee940f09767d4f15910d050a7d0953d6", + "model_id": "9734f7a12883419daece5c1d66c4f5bb", "version_major": 2, "version_minor": 0 }, @@ -561,12 +568,12 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "98c40181a1f94ece8a9fbb108cd06089", + "model_id": "77cb19e80f9a413d8f3552f120492f23", "version_major": 2, "version_minor": 0 }, "text/plain": [ - "FileChooser(path='.', filename='', show_hidden='False')" + "FileChooser(path='.', filename='', title='HTML(value='', layout=Layout(display='none'))', show_hidden='False',…" ] }, "metadata": {}, @@ -583,7 +590,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "b38f5d1dd0a74ff486bd8b0135ccd01f", + "model_id": "e68385ce38d24447be41394edc245a5e", "version_major": 2, "version_minor": 0 }, @@ -605,7 +612,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "9e19d42a0d2b4c82a23ad67c905ce457", + "model_id": "40f8a951b4a7416297834080d79338a7", "version_major": 2, "version_minor": 0 }, @@ -627,7 +634,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "ab3344b68cad4bc8b921f3b02e6c9389", + "model_id": "730ef25e8e104fe6bfef8e144a054965", "version_major": 2, "version_minor": 0 }, @@ -641,7 +648,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "dee1f1ad0cbd479182e461b622751d87", + "model_id": "3fe5f4be9c574509beab40cc23741820", "version_major": 2, "version_minor": 0 }, @@ -708,7 +715,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 16, "metadata": {}, "outputs": [], "source": [ @@ -741,7 +748,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 19, "metadata": { "colab": { "autoexec": { @@ -797,10 +804,10 @@ "data": { "text/html": [ "\n", - "
\n", + "
\n", " \n", " \n", " " @@ -1159,10 +1166,10 @@ "data": { "text/html": [ "\n", - "
\n", + "
\n", " \n", " \n", " " @@ -1729,7 +1736,7 @@ } ], "source": [ - "img = load(\"https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png\")\n", + "img = load(picture_path)\n", "\n", "legend = \"

Legend :

\"\n", "legend += \"
%s
\" % class_name_1\n", @@ -1853,7 +1860,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 18, "metadata": { "colab": { "autoexec": { @@ -1932,10 +1939,10 @@ "data": { "text/html": [ "\n", - "
\n", + "
\n", " \n", " \n", " " @@ -2294,10 +2301,10 @@ "data": { "text/html": [ "\n", - "
\n", + "
\n", " \n", " \n", " " @@ -2886,10 +2893,10 @@ "data": { "text/html": [ "\n", - "
\n", + "
\n", " \n", " \n", " " @@ -3248,10 +3255,10 @@ "data": { "text/html": [ "\n", - "
\n", + "
\n", " \n", " \n", " " @@ -3840,10 +3847,10 @@ "data": { "text/html": [ "\n", - "
\n", + "
\n", " \n", " \n", " " @@ -4202,10 +4209,10 @@ "data": { "text/html": [ "\n", - "
\n", + "
\n", " \n", " \n", " " @@ -4772,8 +4779,7 @@ } ], "source": [ - "img = load(\"https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png\")\n", - "\n", + "img = load(picture_path)\n", "legend = \"

Legend :

\"\n", "legend += \"
%s
\" % class_name_1\n", "legend += \"
%s
\" % class_name_2\n", diff --git a/notebooks/building-blocks/jupyter_versions/AttrSpatialJupyter.ipynb b/notebooks/building-blocks/jupyter_versions/AttrSpatialJupyter.ipynb index e9d605ec..1577bc3a 100644 --- a/notebooks/building-blocks/jupyter_versions/AttrSpatialJupyter.ipynb +++ b/notebooks/building-blocks/jupyter_versions/AttrSpatialJupyter.ipynb @@ -126,13 +126,16 @@ "name": "stdout", "output_type": "stream", "text": [ - "\u001b[33mWARNING: You are using pip version 19.3.1; however, version 20.1.1 is available.\r\n", - "You should consider upgrading via the 'pip install --upgrade pip' command.\u001b[0m\r\n" + "\u001b[K\u001b[?25h/usr/bin/svelte -> /usr/lib/node_modules/svelte-cli/bin.jsll\u001b[0m \u001b[35mdoReverseSerial\u001b[0m unbuild 10\u001b[0m\u001b[K[0m\u001b[K\n", + "+ svelte-cli@2.2.0\n", + "updated 1 package in 0.398s\n", + "\u001b[33mWARNING: You are using pip version 19.3.1; however, version 21.0.1 is available.\n", + "You should consider upgrading via the 'pip install --upgrade pip' command.\u001b[0m\n" ] } ], "source": [ - "# !npm install -g svelte-cli@2.2.0\n", + " !npm install -g svelte-cli@2.2.0\n", "!pip install ipyfilechooser ipywidgets --quiet\n", "\n", "import os\n", @@ -383,8 +386,8 @@ "output_type": "stream", "text": [ "Trying to build svelte component from html...\n", - "svelte compile --format iife /tmp/svelte_en7a7hmz/SpatialWidget_2decdfab_b9d6_4fc0_9d6a_ceebf14056e1.html > /tmp/svelte_en7a7hmz/SpatialWidget_2decdfab_b9d6_4fc0_9d6a_ceebf14056e1.js\n", - "b'svelte version 1.64.1\\ncompiling ../../../../../../../tmp/svelte_en7a7hmz/SpatialWidget_2decdfab_b9d6_4fc0_9d6a_ceebf14056e1.html...\\n(4:4) \\xe2\\x80\\x93 A11y: element should have an alt attribute\\n(5:4) \\xe2\\x80\\x93 A11y: element should have an alt attribute\\n(21:4) \\xe2\\x80\\x93 A11y: element should have an alt attribute\\n(22:4) \\xe2\\x80\\x93 A11y: element should have an alt attribute\\n'\n" + "svelte compile --format iife /tmp/svelte_h1dscgwl/SpatialWidget_b1c05587_c332_4e69_b360_398f4e2a767f.html > /tmp/svelte_h1dscgwl/SpatialWidget_b1c05587_c332_4e69_b360_398f4e2a767f.js\n", + "b'svelte version 1.64.1\\ncompiling ../../../../tmp/svelte_h1dscgwl/SpatialWidget_b1c05587_c332_4e69_b360_398f4e2a767f.html...\\n(4:4) \\xe2\\x80\\x93 A11y: element should have an alt attribute\\n(5:4) \\xe2\\x80\\x93 A11y: element should have an alt attribute\\n(21:4) \\xe2\\x80\\x93 A11y: element should have an alt attribute\\n(22:4) \\xe2\\x80\\x93 A11y: element should have an alt attribute\\n'\n" ] } ], @@ -700,7 +703,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "2cd5278703bf4478b4c785183de4df9c", + "model_id": "6f9759aa702c4244b092ca92823be602", "version_major": 2, "version_minor": 0 }, @@ -723,12 +726,12 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "f5067cdac7ab47bcad1678b764b2f812", + "model_id": "75ec7fb5713a43889ca65820faea33f0", "version_major": 2, "version_minor": 0 }, "text/plain": [ - "FileChooser(path='.', filename='', show_hidden='False')" + "FileChooser(path='.', filename='', title='HTML(value='', layout=Layout(display='none'))', show_hidden='False',…" ] }, "metadata": {}, @@ -745,7 +748,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "90e2803133894d5f91280c4ed353f0cc", + "model_id": "0510ae9b34894436a89a2b490e37df6d", "version_major": 2, "version_minor": 0 }, @@ -767,7 +770,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "101bb74d27134564b4a8edeb7fb536e0", + "model_id": 
"1f55ea2200c04654a1509d3980d61f4a", "version_major": 2, "version_minor": 0 }, @@ -789,7 +792,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "02e21b60f82f4eeeaa3af9efa548bf33", + "model_id": "dfdb8053869a41389f72dcf9d1658aa1", "version_major": 2, "version_minor": 0 }, @@ -811,7 +814,7 @@ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "1e0c2de160424900a7c48b4fe6bfa1d7", + "model_id": "20d03a5fc84943d696aef0315f6ef7a4", "version_major": 2, "version_minor": 0 }, @@ -925,10 +928,10 @@ "data": { "text/html": [ "\n", - "
\n", + "
\n", " \n", " \n", " " @@ -1763,13 +1766,6 @@ "\n", "spatial_spatial_attr(model, img, layer_name_1, layer_name_2, class_name_1, class_name_2)" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": {