Yes, it is possible to invoke a functional API generator model within a cGAN Model subclass in TensorFlow or Keras. The generator in a cGAN model can be defined as a functional API model, and then called within the cGAN Model subclass. Here is an example:
from tensorflow.keras.layers import (
    BatchNormalization,
    Concatenate,
    Conv2D,
    Conv2DTranspose,
    Dense,
    Flatten,
    Input,
    LeakyReLU,
    Reshape,
)
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import plot_model
class cGAN(Model):
    """Conditional GAN assembled from functional-API generator/discriminator sub-models.

    The generator maps a 100-dim noise vector to an image of ``img_shape``;
    the discriminator scores a (condition image, generated image) pair.
    """

    def __init__(self, img_shape=(64, 64, 3)):
        super(cGAN, self).__init__()
        self.img_shape = img_shape

        # Sub-models, each built with the functional API.
        self.discriminator = self.build_discriminator()
        self.generator = self.build_generator()

        # Combined model: freeze the discriminator so only the generator's
        # weights update when `combined` is trained.
        self.discriminator.trainable = False
        # The generator consumes noise, not an image — feeding `img_input`
        # into it (as the original code did) is a shape mismatch.
        noise_input = Input(shape=(100,))
        img_input = Input(shape=self.img_shape)  # conditioning image
        gen_output = self.generator(noise_input)
        disc_output = self.discriminator([img_input, gen_output])
        self.combined = Model(inputs=[noise_input, img_input],
                              outputs=[gen_output, disc_output])

        # Compile all three models. `learning_rate` replaces the
        # deprecated `lr` keyword of tf.keras optimizers.
        self.generator.compile(
            loss='binary_crossentropy',
            optimizer=Adam(learning_rate=0.001, beta_1=0.5))
        self.discriminator.compile(
            loss='binary_crossentropy',
            optimizer=Adam(learning_rate=0.001, beta_1=0.5))
        self.combined.compile(
            loss=['binary_crossentropy', 'binary_crossentropy'],
            optimizer=Adam(learning_rate=0.001, beta_1=0.5))

    def build_discriminator(self):
        """Return a discriminator scoring a (condition, target) image pair.

        Both inputs have shape ``self.img_shape``; they are concatenated on
        the channel axis before the convolutional stack.
        """
        img_input = Input(shape=self.img_shape)
        target_input = Input(shape=self.img_shape)
        combined_input = Concatenate()([img_input, target_input])
        # Single stride-2 conv block; a real model would stack more of these.
        x = Conv2D(filters=32, kernel_size=(3, 3), strides=(2, 2),
                   padding='same')(combined_input)
        x = LeakyReLU(alpha=0.2)(x)
        x = BatchNormalization()(x)
        x = Flatten()(x)
        output = Dense(1, activation='sigmoid')(x)  # real/fake probability
        return Model(inputs=[img_input, target_input], outputs=output)

    def build_generator(self):
        """Return a generator mapping 100-dim noise to a 64x64x3 image."""
        noise_input = Input(shape=(100,))
        x = Dense(8 * 8 * 256, activation='relu')(noise_input)
        x = Reshape((8, 8, 256))(x)
        # Three stride-2 transposed convs upsample 8 -> 16 -> 32 -> 64, so the
        # output matches the default img_shape (the original stopped at 32x32).
        x = Conv2DTranspose(filters=128, kernel_size=(3, 3), strides=(2, 2),
                            padding='same')(x)
        x = LeakyReLU(alpha=0.2)(x)
        x = BatchNormalization()(x)
        x = Conv2DTranspose(filters=64, kernel_size=(3, 3), strides=(2, 2),
                            padding='same')(x)
        x = LeakyReLU(alpha=0.2)(x)
        x = BatchNormalization()(x)
        output = Conv2DTranspose(filters=3, kernel_size=(3, 3), strides=(2, 2),
                                 activation='tanh', padding='same')(x)
        return Model(inputs=noise_input, outputs=output)
In this example, the generator is defined as a functional API model using the Keras `Model` class. That generator model is then invoked inside the `__init__` method of the `cGAN` subclass (not inside `build_discriminator`) using ordinary functional-API call syntax:
gen_output = self.generator(img_input)
disc_output = self.discriminator([img_input, gen_output])
Here, `img_input` is one input to the discriminator, and `gen_output` is the image produced by the generator. Inside the discriminator model the two images are merged with a `Concatenate` layer before the convolutional layers score the pair.
Note that in this example, the discriminator takes two inputs - the real image and the generated image - so the inputs to the combined model are a single image (i.e., the real image), and the generator produces the corresponding fake image.
Asked: 2022-02-01 11:00:00 +0000
Seen: 12 times
Last updated: Mar 05 '23