Skip to content

Commit

Permalink
Convert to TF2 (not working yet)
Browse files Browse the repository at this point in the history
  • Loading branch information
Akatuoro committed Jul 17, 2020
1 parent 964fee6 commit d662c3e
Show file tree
Hide file tree
Showing 7 changed files with 44 additions and 46 deletions.
14 changes: 7 additions & 7 deletions gan.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,13 +4,13 @@

from __future__ import print_function, division

from keras.datasets import mnist
from keras.layers import Input, Dense, Reshape, Flatten, Dropout
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout
from tensorflow.keras.layers import BatchNormalization, Activation, ZeroPadding2D
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.layers import UpSampling2D, Conv2D
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.optimizers import Adam
from libs.architectures import build_generator, build_discriminator

from PIL import Image
Expand Down
10 changes: 5 additions & 5 deletions libs/architectures/conv1.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
from keras.layers import Input, Dense, Reshape, Flatten, Dropout
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout
from tensorflow.keras.layers import BatchNormalization, Activation, ZeroPadding2D
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.layers import UpSampling2D, Conv2D
from tensorflow.keras.models import Sequential, Model

import numpy as np

Expand Down
12 changes: 6 additions & 6 deletions libs/architectures/conv2.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
from keras.layers import Input, Dense, Reshape, Flatten, Dropout
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout
from tensorflow.keras.layers import BatchNormalization, Activation, ZeroPadding2D
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.layers import UpSampling2D, Conv2D
from tensorflow.keras.models import Sequential, Model

import numpy as np

Expand All @@ -13,7 +13,7 @@ def generator(latent_dim, img_shape):
model.add(Reshape((32, 32, 128)))
model.add(BatchNormalization(momentum=0.8))
#model.add(UpSampling2D())
model.add(Conv2D(128, kernel_size=3, padding="same"))
model.add(Conv2D(128, kernel_size=3, padding="same", data_format="channels_last"))
model.add(Activation("relu"))
model.add(BatchNormalization(momentum=0.8))
#model.add(UpSampling2D())
Expand Down
10 changes: 5 additions & 5 deletions libs/architectures/dense.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
from keras.layers import Input, Dense, Reshape, Flatten, Dropout
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout
from tensorflow.keras.layers import BatchNormalization, Activation, ZeroPadding2D
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.layers import UpSampling2D, Conv2D
from tensorflow.keras.models import Sequential, Model

import numpy as np

Expand Down
10 changes: 5 additions & 5 deletions libs/architectures/resnet.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Add, Lambda
from keras.layers import BatchNormalization, Activation, ZeroPadding2D, GlobalAveragePooling2D, AveragePooling2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, Add, Lambda
from tensorflow.keras.layers import BatchNormalization, Activation, ZeroPadding2D, GlobalAveragePooling2D, AveragePooling2D
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.layers import UpSampling2D, Conv2D
from tensorflow.keras.models import Sequential, Model

import numpy as np

Expand Down
6 changes: 3 additions & 3 deletions libs/blocks.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
# Blocks intended to work with the Keras functional API

from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Add, Lambda
from keras.layers import BatchNormalization, Activation, ZeroPadding2D, GlobalAveragePooling2D, AveragePooling2D
from keras.layers.convolutional import UpSampling2D, Conv2D
from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, Add, Lambda
from tensorflow.keras.layers import BatchNormalization, Activation, ZeroPadding2D, GlobalAveragePooling2D, AveragePooling2D
from tensorflow.keras.layers import UpSampling2D, Conv2D

def ResidualBlock(output_dim, kernel_size, resample=None):

Expand Down
28 changes: 13 additions & 15 deletions wgangp.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,21 +7,20 @@

from __future__ import print_function, division

from keras.datasets import mnist
from keras.layers.merge import _Merge
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Add, Lambda
from keras.layers import BatchNormalization, Activation, ZeroPadding2D, GlobalAveragePooling2D, AveragePooling2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import RMSprop, Adam
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, Add, Lambda
from tensorflow.keras.layers import BatchNormalization, Activation, ZeroPadding2D, GlobalAveragePooling2D, AveragePooling2D
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.layers import UpSampling2D, Conv2D
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.optimizers import RMSprop, Adam
from functools import partial

from libs.blocks import ResidualBlock, OptimizedResBlockDisc1
from libs.architectures import build_generator, build_discriminator
from gan import GAN

import keras.backend as K
import tensorflow.keras.backend as K

import matplotlib.pyplot as plt

Expand All @@ -30,11 +29,10 @@
import numpy as np


class RandomWeightedAverage(_Merge):
def randomWeightedAverage(inputs):
"""Provides a (random) weighted average between real and generated image samples"""
def _merge_function(self, inputs):
alpha = K.random_uniform((32, 1, 1, 1))
return (alpha * inputs[0]) + ((1 - alpha) * inputs[1])
alpha = K.random_uniform((32, 1, 1, 1))
return (alpha * inputs[0]) + ((1 - alpha) * inputs[1])

class WGANGP(GAN):
def __init__(self, *args, **kwargs):
Expand Down Expand Up @@ -70,7 +68,7 @@ def compile(self):
valid = self.critic(real_img)

# Construct weighted average between real and fake images
interpolated_img = RandomWeightedAverage()([real_img, fake_img])
interpolated_img = Lambda(randomWeightedAverage, output_shape=lambda x: x[0])([real_img, fake_img])
# Determine validity of weighted sample
validity_interpolated = self.critic(interpolated_img)

Expand Down Expand Up @@ -111,7 +109,7 @@ def gradient_penalty_loss(self, y_true, y_pred, averaged_samples):
"""
Computes gradient penalty based on prediction and weighted real / fake samples
"""
gradients = K.gradients(y_pred, averaged_samples)[0]
gradients = K.gradients(y_pred, [averaged_samples])[0]
# compute the euclidean norm by squaring ...
gradients_sqr = K.square(gradients)
# ... summing over the rows ...
Expand Down

0 comments on commit d662c3e

Please sign in to comment.