Prelude to Generative Adversarial Networks (4): An Introduction to the Variational Autoencoder (VAE)

Date: 2024-09-29 15:28:50
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm

from keras.layers import Input, Dense, Lambda
from keras.models import Model
from keras import backend as K
from keras import metrics
from keras.datasets import mnist

original_dim = 28 * 28
intermediate_dim = 64
latent_dim = 2
batch_size = 32

x = Input(shape=(original_dim,))
h = Dense(intermediate_dim, activation='relu')(x)
z_mean = Dense(latent_dim)(h)
z_log_var = Dense(latent_dim)(h)

# Sampling function: draws new, similar points from the latent distribution.
# epsilon ~ N(0, 0.1^2) here; the standard reparameterization trick uses stddev=1.0.
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim),
                              mean=0., stddev=0.1)
    return z_mean + K.exp(z_log_var / 2) * epsilon

# Note that "output_shape" isn't necessary with the TensorFlow backend
z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])

# Map the sampled point back to reconstruct the original input
decoder_h = Dense(intermediate_dim, activation='relu')
decoder_mean = Dense(original_dim, activation='sigmoid')
h_decoded = decoder_h(z)
x_decoded_mean = decoder_mean(h_decoded)

'''
Three models need to be instantiated:
- an end-to-end autoencoder that reconstructs the input signal
- an encoder, from inputs to latent space
- a generator, from latent space to reconstructed inputs
'''

# Instantiate the VAE model (the end-to-end autoencoder)
vae = Model(x, x_decoded_mean)

# Compute the VAE loss
# reconstruction loss: binary cross-entropy summed over pixels (not a mean squared distance)
xent_loss = original_dim * metrics.binary_crossentropy(x, x_decoded_mean)
# KL divergence between the approximate posterior and the unit Gaussian prior
kl_loss = -0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
vae_loss = K.mean(xent_loss + kl_loss)
vae.add_loss(vae_loss)
vae.compile(optimizer='rmsprop')
vae.summary()

# Train the VAE on the MNIST dataset
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
vae.fit(x_train, shuffle=True, epochs=100,
        batch_size=batch_size, validation_data=(x_test, None))

# encoder, from inputs to latent space
encoder = Model(x, z_mean)
encoder.summary()

# Display a 2D plot of the digit classes in the latent space
x_test_encoded = encoder.predict(x_test, batch_size=batch_size)
plt.figure(figsize=(6, 6))
plt.scatter(x_test_encoded[:, 0], x_test_encoded[:, 1], c=y_test)
plt.colorbar()
plt.show()

# Build a digit generator that can sample from the learned distribution
# (a generator that maps latent-space samples to reconstructed samples)
decoder_input = Input(shape=(latent_dim,))
_h_decoded = decoder_h(decoder_input)
_x_decoded_mean = decoder_mean(_h_decoded)
generator = Model(decoder_input, _x_decoded_mean)
generator.summary()

# Generate MNIST digits
n = 15  # figure with 15x15 digits
digit_size = 28
figure = np.zeros((digit_size * n, digit_size * n))
# Linearly spaced coordinates on the unit square could be transformed through
# the inverse CDF (ppf) of the Gaussian to produce values of the latent
# variable z, since the prior of the latent space is Gaussian:
# grid_x = norm.ppf(np.linspace(0.05, 0.95, n))
# grid_y = norm.ppf(np.linspace(0.05, 0.95, n))
grid_x = np.linspace(-15, 15, n)
grid_y = np.linspace(-15, 15, n)

for i, yi in enumerate(grid_x):
    for j, xi in enumerate(grid_y):
        z_sample = np.array([[xi, yi]])
        x_decoded = generator.predict(z_sample)
        digit = x_decoded[0].reshape(digit_size, digit_size)
        figure[i * digit_size: (i + 1) * digit_size,
               j * digit_size: (j + 1) * digit_size] = digit

plt.figure(figsize=(10, 10))
plt.imshow(figure, cmap='Greys_r')
plt.show()
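For reference, the vae_loss assembled above is the negative evidence lower bound (ELBO) of the VAE. In LaTeX notation:

\mathcal{L}(x) = \underbrace{\mathbb{E}_{q(z \mid x)}\left[-\log p(x \mid z)\right]}_{\texttt{xent\_loss}} + \underbrace{D_{\mathrm{KL}}\left(q(z \mid x) \,\Vert\, p(z)\right)}_{\texttt{kl\_loss}}

With a diagonal Gaussian encoder q(z \mid x) = \mathcal{N}(\mu, \sigma^2 I) and a standard normal prior p(z) = \mathcal{N}(0, I), the KL term has the closed form

D_{\mathrm{KL}} = -\frac{1}{2} \sum_{j=1}^{d} \left(1 + \log \sigma_j^2 - \mu_j^2 - \sigma_j^2\right),

which is exactly what kl_loss computes, with z_mean = \mu and z_log_var = \log \sigma^2. The xent_loss term sums the per-pixel binary cross-entropy over all original_dim pixels, i.e. the negative log-likelihood of a Bernoulli decoder.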
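The comment block in the script lists three models, but after training the code only demonstrates the encoder and the generator. Below is a minimal sketch of exercising the third one, the end-to-end autoencoder, on a few test digits; it assumes the trained vae, x_test, batch_size, and digit_size from the script above, and the choice of 5 digits is arbitrary:

x_test_decoded = vae.predict(x_test, batch_size=batch_size)  # reconstructions of the test set

plt.figure(figsize=(10, 4))
for i in range(5):
    # original digit on the top row
    ax = plt.subplot(2, 5, i + 1)
    plt.imshow(x_test[i].reshape(digit_size, digit_size), cmap='Greys_r')
    ax.axis('off')
    # its reconstruction on the bottom row
    ax = plt.subplot(2, 5, i + 6)
    plt.imshow(x_test_decoded[i].reshape(digit_size, digit_size), cmap='Greys_r')
    ax.axis('off')
plt.show()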