GAN Generative Adversarial Networks: DCGAN Principles and a Basic Implementation (Deep Convolutional GAN 03)

Published: 2024/9/15

What is DCGAN

DCGAN (Deep Convolutional GAN) is a GAN built almost entirely from convolutional layers: the generator turns a random noise vector into an image through a stack of strided transposed convolutions, while the discriminator reduces an image to a single real-vs-fake score through strided convolutions. Batch normalization and LeakyReLU activations are used to stabilize training, pooling and most fully connected layers are replaced by strided (de)convolutions, and the generator ends in a tanh output, which is why the training images below are rescaled to [-1, 1].
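To make the upsampling step concrete, here is a minimal sketch (my own illustration, not from the original post) showing that a stride-2 transposed convolution doubles the spatial resolution; this is exactly how the generator in the code grows a 7x7 feature map toward a 28x28 image:

import tensorflow as tf

# A random 7x7 feature map with 256 channels (batch of 1)
x = tf.random.normal([1, 7, 7, 256])

# A stride-2 transposed convolution doubles height and width: 7x7 -> 14x14
up = tf.keras.layers.Conv2DTranspose(128, (5, 5), strides=(2, 2), padding="same")
print(up(x).shape)  # (1, 14, 14, 128)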





Implementation Code

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import glob
import os

# Let GPU memory grow on demand instead of pre-allocating all of it
gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)

gpu_ok = tf.test.is_gpu_available()
print("tf version:", tf.__version__)
print("use GPU", gpu_ok)  # check whether training will run on a GPU

# MNIST handwritten-digit dataset
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
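As a quick sanity check (an illustrative addition, not part of the original post), you can preview one raw digit in the same notebook session before any preprocessing:

# train_images is still uint8 with shape (60000, 28, 28) at this point
plt.imshow(train_images[0], cmap="gray")
plt.title("label: {}".format(train_labels[0]))
plt.axis("off")
plt.show()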


# Add a channel dimension -> (60000, 28, 28, 1) and convert to float32
train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype("float32")

# Normalize pixel values from [0, 255] to [-1, 1] to match the generator's tanh output
train_images = (train_images - 127.5) / 127.5

BATCH_SIZE = 256
BUFFER_SIZE = 60000

# Build the input pipeline
datasets = tf.data.Dataset.from_tensor_slices(train_images)
# Shuffle and batch
datasets = datasets.shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
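A small check (again an addition for illustration, not in the original code) confirms that each batch has the expected shape and that pixel values now sit in [-1, 1]:

for batch in datasets.take(1):
    print(batch.shape)  # (256, 28, 28, 1)
    print(float(tf.reduce_min(batch)), float(tf.reduce_max(batch)))  # -1.0 and 1.0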


Building the Models

# Generator model
def generator_model():
    model = keras.Sequential()  # sequential model
    # Project the length-100 noise vector to 7*7*256 units
    model.add(layers.Dense(7*7*256, input_shape=(100,), use_bias=False))
    model.add(layers.BatchNormalization())  # batch normalization
    model.add(layers.LeakyReLU())           # LeakyReLU activation
    model.add(layers.Reshape((7, 7, 256)))  # 7*7*256

    # Transposed convolution -> 7*7*128
    model.add(layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding="same", use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    # Transposed convolution -> 14*14*64
    model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding="same", use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    # Transposed convolution -> 28*28*1; tanh keeps outputs in [-1, 1]
    model.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding="same", use_bias=False, activation="tanh"))
    return model

# Discriminator model
def discriminator_model():
    model = keras.Sequential()
    model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding="same", input_shape=(28, 28, 1)))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))
    model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding="same"))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))
    model.add(layers.Conv2D(256, (5, 5), strides=(2, 2), padding="same"))
    model.add(layers.LeakyReLU())
    model.add(layers.Flatten())
    model.add(layers.Dense(1))  # a single logit: real vs. fake
    return model

# Losses: binary_crossentropy (log loss) is the sigmoid-based loss for binary classification.
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)

# Discriminator loss
def discriminator_loss(real_out, fake_out):
    real_loss = cross_entropy(tf.ones_like(real_out), real_out)    # real images should be classified as 1
    fake_loss = cross_entropy(tf.zeros_like(fake_out), fake_out)   # generated images should be classified as 0
    return real_loss + fake_loss

# Generator loss
def generator_loss(fake_out):
    # The generator wants the discriminator to output 1 for its images
    return cross_entropy(tf.ones_like(fake_out), fake_out)

# Optimizers, learning rate 1e-4 (0.0001)
generator_opt = tf.keras.optimizers.Adam(1e-4)
discriminator_opt = tf.keras.optimizers.Adam(1e-4)

EPOCHS = 100             # number of training epochs
noise_dim = 100
num_exp_to_generate = 16

# 16 fixed noise vectors of length 100, used to visualize progress
seed = tf.random.normal([num_exp_to_generate, noise_dim])

generator = generator_model()
discriminator = discriminator_model()

# One training step
def train_step(images):
    noise = tf.random.normal([BATCH_SIZE, noise_dim])
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:  # record operations for gradients
        real_out = discriminator(images, training=True)
        gen_image = generator(noise, training=True)
        fake_out = discriminator(gen_image, training=True)
        gen_loss = generator_loss(fake_out)
        disc_loss = discriminator_loss(real_out, fake_out)
    gradient_gen = gen_tape.gradient(gen_loss, generator.trainable_variables)
    gradient_disc = disc_tape.gradient(disc_loss, discriminator.trainable_variables)
    generator_opt.apply_gradients(zip(gradient_gen, generator.trainable_variables))
    discriminator_opt.apply_gradients(zip(gradient_disc, discriminator.trainable_variables))

# Plot a 4x4 grid of generated digits
def generate_plot_image(gen_model, test_noise):
    pre_images = gen_model(test_noise, training=False)
    fig = plt.figure(figsize=(4, 4))
    for i in range(pre_images.shape[0]):
        plt.subplot(4, 4, i + 1)
        plt.imshow((pre_images[i, :, :, 0] + 1) / 2, cmap="gray")  # map [-1, 1] back to [0, 1]
        plt.axis("off")
    plt.show()

def train(dataset, epochs):
    for epoch in range(epochs):
        for image_batch in dataset:
            train_step(image_batch)
            print(".", end="")
        generate_plot_image(generator, seed)  # show samples from the fixed seed after each epoch

# Train the model
train(datasets, EPOCHS)
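Before kicking off the (fairly long) training run, it can help to verify the wiring with one untrained forward pass. This check is my own addition, not from the original post, and reuses the generator and discriminator just built:

# One noise vector in, one 28x28 grayscale image out; the discriminator returns a single logit per image.
sample_noise = tf.random.normal([1, noise_dim])
sample_image = generator(sample_noise, training=False)
print(sample_image.shape)                                 # (1, 28, 28, 1)
print(discriminator(sample_image, training=False).shape)  # (1, 1)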



Summary

This post walked through the idea behind DCGAN and a basic TensorFlow/Keras implementation: a generator that upsamples 100-dimensional noise to 28x28 digit images with transposed convolutions, a convolutional discriminator that outputs a single logit, binary cross-entropy losses for both networks, and a simple training loop over MNIST that plots samples from a fixed noise seed after every epoch.
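One possible follow-up, not covered in the post, is to persist the trained generator so that new digits can be sampled later without retraining; the file name below is just an example:

# Save the trained generator in HDF5 format (hypothetical path)
generator.save("dcgan_generator.h5")

# Later: reload it and sample fresh digits
restored = tf.keras.models.load_model("dcgan_generator.h5")
fake = restored(tf.random.normal([16, 100]), training=False)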
