separated network architectures from gan types. updated loading and plotting
Akatuoro committed Oct 15, 2018
1 parent 90bde39 commit e5685cb
Showing 8 changed files with 309 additions and 123 deletions.
58 changes: 13 additions & 45 deletions gan.py
@@ -1,3 +1,7 @@
# Large amount of credit goes to:
# https://github.com/eriklindernoren/Keras-GAN/blob/master/gan/gan.py
# which I've used as a reference for this implementation

from __future__ import print_function, division

from keras.datasets import mnist
@@ -7,6 +11,7 @@
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
from libs.architectures import build_generator, build_discriminator

from PIL import Image
import matplotlib.pyplot as plt
@@ -16,7 +21,7 @@
import numpy as np

class GAN():
def __init__(self, shape, save_path='images/'):
def __init__(self, shape, architecture='dense', save_path='images/'):
self.img_rows = shape[0]
self.img_cols = shape[1]
self.channels = shape[2]
@@ -25,16 +30,20 @@ def __init__(self, shape, save_path='images/'):

self.save_path = save_path

self.architecture = architecture
self.compile()

def compile(self):
optimizer = Adam(0.0002, 0.5)

# Build and compile the discriminator
self.discriminator = self.build_discriminator()
self.discriminator = build_discriminator(self.architecture, self.img_shape)
self.discriminator.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])

# Build the generator
self.generator = self.build_generator()
self.generator = build_generator(self.architecture, self.latent_dim, self.img_shape)

# The generator takes noise as input and generates imgs
z = Input(shape=(self.latent_dim,))
@@ -51,47 +60,6 @@ def __init__(self, shape, save_path='images/'):
self.combined = Model(z, validity)
self.combined.compile(loss='binary_crossentropy', optimizer=optimizer)


def build_generator(self):

model = Sequential()

model.add(Dense(256, input_dim=self.latent_dim))
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.8))
model.add(Dense(512))
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.8))
model.add(Dense(1024))
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.8))
model.add(Dense(np.prod(self.img_shape), activation='tanh'))
model.add(Reshape(self.img_shape))

model.summary()

noise = Input(shape=(self.latent_dim,))
img = model(noise)

return Model(noise, img)

def build_discriminator(self):

model = Sequential()

model.add(Flatten(input_shape=self.img_shape))
model.add(Dense(512))
model.add(LeakyReLU(alpha=0.2))
model.add(Dense(256))
model.add(LeakyReLU(alpha=0.2))
model.add(Dense(1, activation='sigmoid'))
model.summary()

img = Input(shape=self.img_shape)
validity = model(img)

return Model(img, validity)

def train(self, X_train, epochs, batch_size=128, sample_interval=50):

# Rescale -1 to 1
@@ -170,7 +138,7 @@ def sample_images(self, epoch):


if __name__ == '__main__':
gan = GAN()
(X_train,_), (_,_) = mnist.load_data()
gan = GAN(shape=X_train[0].shape)
gan.train(X_train=X_train, epochs=30000, batch_size=32, sample_interval=200)

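Based on the new constructor signature and the updated __main__ block above, usage of the refactored class presumably looks something like the following sketch. The architecture names come from libs/architectures/__init__.py, 'dense' matches the constructor default, and the added channel axis is an assumption here, since GAN.__init__ reads shape[2] as the channel count:

import numpy as np
from keras.datasets import mnist

from gan import GAN

# Give the raw (28, 28) MNIST images an explicit channel axis,
# since the GAN class expects a (rows, cols, channels) shape.
(X_train, _), (_, _) = mnist.load_data()
X_train = np.expand_dims(X_train, axis=-1)  # (28, 28) -> (28, 28, 1)

gan = GAN(shape=X_train[0].shape, architecture='dense')
gan.train(X_train=X_train, epochs=30000, batch_size=32, sample_interval=200)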
21 changes: 21 additions & 0 deletions libs/architectures/__init__.py
@@ -0,0 +1,21 @@
from libs.architectures import dense, conv1, conv2, resnet

def build_generator(architecture, latent_dim, img_shape):
if (architecture == 'dense'):
return dense.generator(latent_dim, img_shape)
if (architecture == 'conv1'):
return conv1.generator(latent_dim, img_shape)
if (architecture == 'conv2'):
return conv2.generator(latent_dim, img_shape)
if (architecture == 'resnet'):
return resnet.generator(latent_dim, img_shape)

def build_discriminator(architecture, img_shape):
if (architecture == 'dense'):
return dense.discriminator(img_shape)
if (architecture == 'conv1'):
return conv1.discriminator(img_shape)
if (architecture == 'conv2'):
return conv2.discriminator(img_shape)
if (architecture == 'resnet'):
return resnet.discriminator(img_shape)
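As written, the dispatch above silently returns None for an unrecognized architecture name and only fails later inside Keras. A dict-based variant that fails fast (a sketch only, not part of this commit) could look like:

from libs.architectures import dense, conv1, conv2, resnet

# Map architecture names to the modules that provide generator()/discriminator().
_ARCHITECTURES = {'dense': dense, 'conv1': conv1, 'conv2': conv2, 'resnet': resnet}

def build_generator(architecture, latent_dim, img_shape):
    try:
        return _ARCHITECTURES[architecture].generator(latent_dim, img_shape)
    except KeyError:
        raise ValueError("unknown architecture: %r" % architecture)

def build_discriminator(architecture, img_shape):
    try:
        return _ARCHITECTURES[architecture].discriminator(img_shape)
    except KeyError:
        raise ValueError("unknown architecture: %r" % architecture)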
60 changes: 60 additions & 0 deletions libs/architectures/conv1.py
@@ -0,0 +1,60 @@
from keras.layers import Input, Dense, Reshape, Flatten, Dropout
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model

import numpy as np

def generator(latent_dim, img_shape):
model = Sequential()

model.add(Dense(128 * 8 * 8, activation="relu", input_dim=latent_dim))
model.add(Reshape((8, 8, 128)))
model.add(BatchNormalization(momentum=0.8))
model.add(UpSampling2D())
model.add(Conv2D(128, kernel_size=3, padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(momentum=0.8))
model.add(UpSampling2D())
model.add(Conv2D(64, kernel_size=3, padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(momentum=0.8))
model.add(Conv2D(img_shape[2], kernel_size=3, padding='same'))
model.add(Activation("tanh"))

model.summary()

noise = Input(shape=(latent_dim,))
img = model(noise)

return Model(noise, img)


def discriminator(img_shape):

model = Sequential()

model.add(Conv2D(16, kernel_size=3, strides=2, input_shape=img_shape, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Conv2D(32, kernel_size=3, strides=2, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(BatchNormalization(momentum=0.8))
model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(BatchNormalization(momentum=0.8))
model.add(Conv2D(128, kernel_size=3, strides=1, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.summary()

img = Input(shape=img_shape)
validity = model(img)

return Model(img, validity)
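Note that this generator's spatial size is fixed by the architecture rather than by img_shape: the 8x8 feature map is upsampled twice to 32x32, and only the channel count comes from img_shape[2]. A quick shape check (a sketch; latent_dim=100 and the 3-channel 32x32 shape are illustrative values, not taken from this commit):

from libs.architectures import conv1

g = conv1.generator(latent_dim=100, img_shape=(32, 32, 3))
print(g.output_shape)  # expected: (None, 32, 32, 3)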

75 changes: 75 additions & 0 deletions libs/architectures/conv2.py
@@ -0,0 +1,75 @@
from keras.layers import Input, Dense, Reshape, Flatten, Dropout
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model

import numpy as np

def generator(latent_dim, img_shape):
model = Sequential()

model.add(Dense(128 * 32 * 32, activation="relu", input_dim=latent_dim))
model.add(Reshape((32, 32, 128)))
model.add(BatchNormalization(momentum=0.8))
#model.add(UpSampling2D())
model.add(Conv2D(128, kernel_size=3, padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(momentum=0.8))
#model.add(UpSampling2D())
model.add(Conv2D(64, kernel_size=3, padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(momentum=0.8))
model.add(UpSampling2D())
model.add(Conv2D(32, kernel_size=3, padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(momentum=0.8))
model.add(UpSampling2D())
model.add(Conv2D(16, kernel_size=3, padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(momentum=0.8))
model.add(Conv2D(16, kernel_size=4, strides=2, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(Conv2D(16, kernel_size=4, strides=2, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(Conv2D(img_shape[2], kernel_size=3, padding='same'))
model.add(Activation("tanh"))

model.summary()

noise = Input(shape=(latent_dim,))
img = model(noise)

return Model(noise, img)


def discriminator(img_shape):

model = Sequential()

model.add(Conv2D(16, kernel_size=3, strides=1, input_shape=img_shape, padding="same"))
model.add(BatchNormalization())
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Conv2D(32, kernel_size=3, strides=2, padding="same"))
#model.add(ZeroPadding2D(padding=((0,1),(0,1))))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(BatchNormalization(momentum=0.8))
model.add(Conv2D(128, kernel_size=3, strides=1, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(16, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.summary()

img = Input(shape=img_shape)
validity = model(img)

return Model(img, validity)
48 changes: 48 additions & 0 deletions libs/architectures/dense.py
@@ -0,0 +1,48 @@
from keras.layers import Input, Dense, Reshape, Flatten, Dropout
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model

import numpy as np

def generator(latent_dim, img_shape):

model = Sequential()

model.add(Dense(256, input_dim=latent_dim))
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.8))
model.add(Dense(512))
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.8))
model.add(Dense(1024))
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.8))
model.add(Dense(np.prod(img_shape), activation='tanh'))
model.add(Reshape(img_shape))

model.summary()

noise = Input(shape=(latent_dim,))
img = model(noise)

return Model(noise, img)


def discriminator(img_shape):

model = Sequential()

model.add(Flatten(input_shape=img_shape))
model.add(Dense(512))
model.add(LeakyReLU(alpha=0.2))
model.add(Dense(256))
model.add(LeakyReLU(alpha=0.2))
model.add(Dense(1, activation='sigmoid'))
model.summary()

img = Input(shape=img_shape)
validity = model(img)

return Model(img, validity)
48 changes: 48 additions & 0 deletions libs/architectures/resnet.py
@@ -0,0 +1,48 @@
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Add, Lambda
from keras.layers import BatchNormalization, Activation, ZeroPadding2D, GlobalAveragePooling2D, AveragePooling2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model

import numpy as np

from libs.blocks import ResidualBlock, OptimizedResBlockDisc1

def generator(latent_dim, img_shape):

noise = Input(shape=(latent_dim,))

x = Dense(128 * 4 * 4)(noise)
x = Reshape((4, 4, 128))(x)
x = ResidualBlock(128, 3, 'up')(x)
x = ResidualBlock(128, 3, 'up')(x)
x = ResidualBlock(128, 3, 'up')(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = Conv2D(img_shape[2], kernel_size=3, padding="same")(x)
img = Activation("tanh")(x)


model = Model(noise, img)
model.summary()

return model


def discriminator(img_shape):
img = Input(shape=img_shape)

x = Reshape(img_shape)(img)
x = OptimizedResBlockDisc1(128)(x)
x = ResidualBlock(128, 3, resample='down')(x)
x = ResidualBlock(128, 3, resample=None)(x)
x = ResidualBlock(128, 3, resample=None)(x)
x = Activation("relu")(x)
x = GlobalAveragePooling2D()(x)
validity = Dense(1)(x)

model = Model(img, validity)
model.summary()

return model

