def VAE_loss(true_images, logits, mean, std):
    """Compute the VAE objective: KL regularizer plus reconstruction loss.

    Args:
        true_images: batch of input images; reshaped to (batch, img_h*img_w*img_d).
        logits: linear (pre-sigmoid) output of the decoder network
            (the reconstructed images).
        mean: mean of the latent code.
        std: standard deviation of the latent code (same shape as `mean`).

    Returns:
        Scalar tensor: batch mean of (KL divergence + reconstruction loss).
    """
    # NOTE(review): img_h, img_w, img_d are module-level globals — confirm
    # they match the actual shape of `true_images`.
    imgs_flat = tf.reshape(true_images, [-1, img_h*img_w*img_d])
    # KL( N(mean, std^2) || N(0, I) ) for a diagonal Gaussian,
    # summed over the latent dimensions (axis 1).
    encoder_loss = 0.5 * tf.reduce_sum(
        tf.square(mean) + tf.square(std) - tf.log(tf.square(std)) - 1, 1)
    # Bernoulli reconstruction loss per pixel, summed over pixels.
    # Fixed bug: original referenced undefined `img_flat` (NameError);
    # the tensor defined above is `imgs_flat`.
    decoder_loss = tf.reduce_sum(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=imgs_flat), 1)
    return tf.reduce_mean(encoder_loss + decoder_loss)
def GAN_loss_without_labels(true_logit, fake_logit):
    """Standard (non-saturating) GAN losses from discriminator logits.

    Args:
        true_logit: discriminator output (a column vector) for data drawn
            from the true distribution.
        fake_logit: discriminator output (a column vector) for data produced
            by the generator.

    Returns:
        (d_loss, g_loss): scalar discriminator and generator losses.
    """
    # Numerical-stability fix: the original computed tf.log(tf.nn.sigmoid(x)),
    # which yields -Inf/NaN once the sigmoid saturates. Using
    # sigmoid_cross_entropy_with_logits is mathematically identical
    # (-log sigmoid(x) == softplus(-x)) but stable for all logits.
    d_loss_true = tf.nn.sigmoid_cross_entropy_with_logits(
        logits=true_logit, labels=tf.ones_like(true_logit))
    d_loss_fake = tf.nn.sigmoid_cross_entropy_with_logits(
        logits=fake_logit, labels=tf.zeros_like(fake_logit))
    d_loss = tf.reduce_mean(d_loss_true + d_loss_fake)
    # Non-saturating generator loss: -log D(G(z)).
    g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=fake_logit, labels=tf.ones_like(fake_logit)))
    return d_loss, g_loss
def GAN_loss_with_labels(true_logit, fake_logit, labels):
    """Label-conditioned GAN losses from discriminator logit matrices.

    Fixes two defects in the original: it referenced undefined
    `self.labels` / `self.true_logit` / `self.fake_logit` inside a free
    function (NameError), and it fused the `d_loss = ...` assignment and
    the `return` onto one line (SyntaxError). The required target
    distribution is now an explicit `labels` parameter — the original
    signature was non-functional, so no working caller is affected.

    Args:
        true_logit: discriminator output (a matrix now) for data drawn
            from the true distribution.
        fake_logit: discriminator output (a matrix now) for data produced
            by the generator.
        labels: per-example target class distribution, same shape as the
            logit matrices.

    Returns:
        (d_loss, g_loss): scalar discriminator and generator losses.
    """
    # Discriminator: push real samples toward `labels` ...
    d_true_loss = tf.nn.softmax_cross_entropy_with_logits(
        labels=labels, logits=true_logit, dim=1)
    # ... and fake samples toward the complementary distribution.
    # NOTE(review): 1-labels is only a valid distribution for two-column
    # one-hot labels — confirm this matches the discriminator's output width.
    d_fake_loss = tf.nn.softmax_cross_entropy_with_logits(
        labels=1 - labels, logits=fake_logit, dim=1)
    # Generator: make fakes look like correctly-labeled real data.
    g_loss = tf.nn.softmax_cross_entropy_with_logits(
        labels=labels, logits=fake_logit, dim=1)
    d_loss = d_true_loss + d_fake_loss
    return tf.reduce_mean(d_loss), tf.reduce_mean(g_loss)