日韩性视频-久久久蜜桃-www中文字幕-在线中文字幕av-亚洲欧美一区二区三区四区-撸久久-香蕉视频一区-久久无码精品丰满人妻-国产高潮av-激情福利社-日韩av网址大全-国产精品久久999-日本五十路在线-性欧美在线-久久99精品波多结衣一区-男女午夜免费视频-黑人极品ⅴideos精品欧美棵-人人妻人人澡人人爽精品欧美一区-日韩一区在线看-欧美a级在线免费观看

歡迎訪問 生活随笔!

生活随笔

當前位置: 首頁 > 编程资源 > 编程问答 >内容正文

编程问答

《Tensorflow实战》之6.3VGGnet学习

發布時間:2025/5/22 编程问答 18 豆豆
生活随笔 收集整理的這篇文章主要介紹了 《Tensorflow实战》之6.3VGGnet学习 小編覺得挺不錯的,現在分享給大家,幫大家做個參考。

這是我改寫的代碼,可以運行,但是過擬合現(xiàn)象嚴重,不知道怎么修改比較好

# -*- coding: utf-8 -*-
"""
VGG-style convolutional network (adapted from "TensorFlow in Action", sec. 6.3)
trained on the YaleB_32x32 face dataset, which is loaded through a MATLAB
engine helper (data_imread_MSE).

Fixes over the original posting:
* fc8 is now a LINEAR layer.  The original built it with fc_op, whose
  tf.nn.relu_layer applies ReLU to the logits; that zeroes every negative
  class score and badly distorts the softmax.
* The loss uses tf.nn.softmax_cross_entropy_with_logits instead of the
  hand-rolled -sum(y * log(softmax(x))), which yields NaN as soon as a
  predicted probability underflows to exactly 0.
"""
import tensorflow as tf
import numpy as np
import matlab.engine

# ---- load the dataset via the MATLAB helper -------------------------------
data_name = 'YaleB_32x32.mat'
sele_num = 10  # samples per class selected for training (helper convention)
eng = matlab.engine.start_matlab()
t = eng.data_imread_MSE(data_name, sele_num)
eng.quit()

Train_Ma = np.array(t[0]).astype(np.float32)   # training images, one row each
Train_Lab = np.array(t[1]).astype(np.int8)     # one-hot training labels
Test_Ma = np.array(t[2]).astype(np.float32)    # test images
Test_Lab = np.array(t[3]).astype(np.int8)      # one-hot test labels
Num_fea = Train_Ma.shape[1]     # flattened pixel count (32 * 32)
Num_Class = Train_Lab.shape[1]  # number of identity classes
image_row = 32
image_column = 32


def conv_op(input_op, name, kh, kw, n_out, dh, dw, p):
    """Conv + bias + ReLU layer; appends its parameters to list `p`.

    kh/kw: kernel height/width, dh/dw: strides, n_out: output channels.
    """
    n_in = input_op.get_shape()[-1].value
    with tf.name_scope(name) as scope:
        kernel = tf.get_variable(
            scope + "w", shape=[kh, kw, n_in, n_out], dtype=tf.float32,
            initializer=tf.contrib.layers.xavier_initializer_conv2d())
        conv = tf.nn.conv2d(input_op, kernel, (1, dh, dw, 1), padding='SAME')
        bias_init_val = tf.constant(0.0, shape=[n_out], dtype=tf.float32)
        biases = tf.Variable(bias_init_val, trainable=True, name='b')
        z = tf.nn.bias_add(conv, biases)
        activation = tf.nn.relu(z, name=scope)
        p += [kernel, biases]
        return activation


def fc_op(input_op, name, n_out, p, activate=True):
    """Fully connected layer; appends its parameters to list `p`.

    With activate=True (default, backward compatible) the output goes
    through ReLU; pass activate=False for the final logits layer, which
    must stay linear so that softmax cross-entropy behaves correctly.
    """
    n_in = input_op.get_shape()[-1].value
    with tf.name_scope(name) as scope:
        kernel = tf.get_variable(
            scope + "w", shape=[n_in, n_out], dtype=tf.float32,
            initializer=tf.contrib.layers.xavier_initializer())
        biases = tf.Variable(
            tf.constant(0.1, shape=[n_out], dtype=tf.float32), name='b')
        if activate:
            activation = tf.nn.relu_layer(input_op, kernel, biases, name=scope)
        else:
            # Linear output: matmul + bias only (raw logits).
            activation = tf.nn.bias_add(
                tf.matmul(input_op, kernel), biases, name=scope)
        p += [kernel, biases]
        return activation


def mpool_op(input_op, name, kh, kw, dh, dw):
    """Max-pooling layer with SAME padding."""
    return tf.nn.max_pool(input_op,
                          ksize=[1, kh, kw, 1],
                          strides=[1, dh, dw, 1],
                          padding='SAME',
                          name=name)


# ---- inputs ---------------------------------------------------------------
sess = tf.InteractiveSession()
x = tf.placeholder(tf.float32, [None, Num_fea])
y_ = tf.placeholder(tf.float32, [None, Num_Class])
x_image = tf.reshape(x, [-1, image_row, image_column, 1])
keep_prob = tf.placeholder(tf.float32)  # dropout keep probability

# ---- VGG body: input is 32x32x1, spatial size halves after every pool -----
p = []
conv1_1 = conv_op(x_image, name="conv1_1", kh=3, kw=3, n_out=64, dh=1, dw=1, p=p)
conv1_2 = conv_op(conv1_1, name="conv1_2", kh=3, kw=3, n_out=64, dh=1, dw=1, p=p)
pool1 = mpool_op(conv1_2, name="pool1", kh=2, kw=2, dw=2, dh=2)

conv2_1 = conv_op(pool1, name="conv2_1", kh=3, kw=3, n_out=128, dh=1, dw=1, p=p)
conv2_2 = conv_op(conv2_1, name="conv2_2", kh=3, kw=3, n_out=128, dh=1, dw=1, p=p)
pool2 = mpool_op(conv2_2, name="pool2", kh=2, kw=2, dh=2, dw=2)

conv3_1 = conv_op(pool2, name="conv3_1", kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)
conv3_2 = conv_op(conv3_1, name="conv3_2", kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)
conv3_3 = conv_op(conv3_2, name="conv3_3", kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)
pool3 = mpool_op(conv3_3, name="pool3", kh=2, kw=2, dh=2, dw=2)

conv4_1 = conv_op(pool3, name="conv4_1", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
conv4_2 = conv_op(conv4_1, name="conv4_2", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
conv4_3 = conv_op(conv4_2, name="conv4_3", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
pool4 = mpool_op(conv4_3, name="pool4", kh=2, kw=2, dh=2, dw=2)

conv5_1 = conv_op(pool4, name="conv5_1", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
conv5_2 = conv_op(conv5_1, name="conv5_2", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
conv5_3 = conv_op(conv5_2, name="conv5_3", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
pool5 = mpool_op(conv5_3, name="pool5", kh=2, kw=2, dw=2, dh=2)

# flatten the final feature map for the fully connected head
shp = pool5.get_shape()
flattened_shape = shp[1].value * shp[2].value * shp[3].value
resh1 = tf.reshape(pool5, [-1, flattened_shape], name="resh1")

# fully connected head with dropout (the main anti-overfitting knob here)
fc6 = fc_op(resh1, name="fc6", n_out=4096, p=p)
fc6_drop = tf.nn.dropout(fc6, keep_prob, name="fc6_drop")
fc7 = fc_op(fc6_drop, name="fc7", n_out=4096, p=p)
fc7_drop = tf.nn.dropout(fc7, keep_prob, name="fc7_drop")
# Final layer stays linear (activate=False): raw logits for softmax.
fc8 = fc_op(fc7_drop, name="fc8", n_out=Num_Class, p=p, activate=False)
predictions = tf.nn.softmax(fc8)

# Numerically stable cross-entropy on the raw logits.
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=fc8))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(predictions, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.global_variables_initializer().run()

# Full-batch training loop; dropout disabled (keep_prob=1.0) for evaluation.
for i in range(1000):
    train_accuracy = accuracy.eval(
        feed_dict={x: Train_Ma, y_: Train_Lab, keep_prob: 1.0})
    print("step %d, training accuracy %g" % (i, train_accuracy))
    train_step.run(feed_dict={x: Train_Ma, y_: Train_Lab, keep_prob: 0.8})

print("test accuracy %g" % accuracy.eval(
    feed_dict={x: Test_Ma, y_: Test_Lab, keep_prob: 1.0}))

?

另外一種更簡便的改寫

# -*- coding: utf-8 -*-
"""
VGG-style network for YaleB_32x32, second variant: the whole graph is built
by a single inference_op(input_op, keep_prob) function that returns
(predictions, logits, parameter_list).

Fixes over the original posting:
* fc8 is now a LINEAR layer.  The original built it with fc_op, whose
  tf.nn.relu_layer applies ReLU to the logits; that zeroes every negative
  class score and badly distorts the softmax.
* The loss uses tf.nn.softmax_cross_entropy_with_logits instead of the
  hand-rolled -sum(y * log(softmax(x))), which yields NaN as soon as a
  predicted probability underflows to exactly 0.
"""
import tensorflow as tf
import numpy as np
import matlab.engine

# ---- load the dataset via the MATLAB helper -------------------------------
data_name = 'YaleB_32x32.mat'
sele_num = 10  # samples per class selected for training (helper convention)
eng = matlab.engine.start_matlab()
t = eng.data_imread_MSE(data_name, sele_num)
eng.quit()

Train_Ma = np.array(t[0]).astype(np.float32)   # training images, one row each
Train_Lab = np.array(t[1]).astype(np.int8)     # one-hot training labels
Test_Ma = np.array(t[2]).astype(np.float32)    # test images
Test_Lab = np.array(t[3]).astype(np.int8)      # one-hot test labels
Num_fea = Train_Ma.shape[1]     # flattened pixel count (32 * 32)
Num_Class = Train_Lab.shape[1]  # number of identity classes
image_row = 32
image_column = 32


def conv_op(input_op, name, kh, kw, n_out, dh, dw, p):
    """Conv + bias + ReLU layer; appends its parameters to list `p`."""
    n_in = input_op.get_shape()[-1].value
    with tf.name_scope(name) as scope:
        kernel = tf.get_variable(
            scope + "w", shape=[kh, kw, n_in, n_out], dtype=tf.float32,
            initializer=tf.contrib.layers.xavier_initializer_conv2d())
        conv = tf.nn.conv2d(input_op, kernel, (1, dh, dw, 1), padding='SAME')
        bias_init_val = tf.constant(0.0, shape=[n_out], dtype=tf.float32)
        biases = tf.Variable(bias_init_val, trainable=True, name='b')
        z = tf.nn.bias_add(conv, biases)
        activation = tf.nn.relu(z, name=scope)
        p += [kernel, biases]
        return activation


def fc_op(input_op, name, n_out, p, activate=True):
    """Fully connected layer; appends its parameters to list `p`.

    With activate=True (default, backward compatible) the output goes
    through ReLU; pass activate=False for the final logits layer.
    """
    n_in = input_op.get_shape()[-1].value
    with tf.name_scope(name) as scope:
        kernel = tf.get_variable(
            scope + "w", shape=[n_in, n_out], dtype=tf.float32,
            initializer=tf.contrib.layers.xavier_initializer())
        biases = tf.Variable(
            tf.constant(0.1, shape=[n_out], dtype=tf.float32), name='b')
        if activate:
            activation = tf.nn.relu_layer(input_op, kernel, biases, name=scope)
        else:
            # Linear output: matmul + bias only (raw logits).
            activation = tf.nn.bias_add(
                tf.matmul(input_op, kernel), biases, name=scope)
        p += [kernel, biases]
        return activation


def mpool_op(input_op, name, kh, kw, dh, dw):
    """Max-pooling layer with SAME padding."""
    return tf.nn.max_pool(input_op,
                          ksize=[1, kh, kw, 1],
                          strides=[1, dh, dw, 1],
                          padding='SAME',
                          name=name)


def inference_op(input_op, keep_prob):
    """Build the full VGG graph.

    Returns (softmax predictions, raw fc8 logits, list of all parameters).
    Input is 32x32x1 here; spatial size halves after every pooling stage.
    """
    p = []
    # block 1
    conv1_1 = conv_op(input_op, name="conv1_1", kh=3, kw=3, n_out=64, dh=1, dw=1, p=p)
    conv1_2 = conv_op(conv1_1, name="conv1_2", kh=3, kw=3, n_out=64, dh=1, dw=1, p=p)
    pool1 = mpool_op(conv1_2, name="pool1", kh=2, kw=2, dw=2, dh=2)
    # block 2
    conv2_1 = conv_op(pool1, name="conv2_1", kh=3, kw=3, n_out=128, dh=1, dw=1, p=p)
    conv2_2 = conv_op(conv2_1, name="conv2_2", kh=3, kw=3, n_out=128, dh=1, dw=1, p=p)
    pool2 = mpool_op(conv2_2, name="pool2", kh=2, kw=2, dh=2, dw=2)
    # block 3
    conv3_1 = conv_op(pool2, name="conv3_1", kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)
    conv3_2 = conv_op(conv3_1, name="conv3_2", kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)
    conv3_3 = conv_op(conv3_2, name="conv3_3", kh=3, kw=3, n_out=256, dh=1, dw=1, p=p)
    pool3 = mpool_op(conv3_3, name="pool3", kh=2, kw=2, dh=2, dw=2)
    # block 4
    conv4_1 = conv_op(pool3, name="conv4_1", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    conv4_2 = conv_op(conv4_1, name="conv4_2", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    conv4_3 = conv_op(conv4_2, name="conv4_3", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    pool4 = mpool_op(conv4_3, name="pool4", kh=2, kw=2, dh=2, dw=2)
    # block 5
    conv5_1 = conv_op(pool4, name="conv5_1", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    conv5_2 = conv_op(conv5_1, name="conv5_2", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    conv5_3 = conv_op(conv5_2, name="conv5_3", kh=3, kw=3, n_out=512, dh=1, dw=1, p=p)
    pool5 = mpool_op(conv5_3, name="pool5", kh=2, kw=2, dw=2, dh=2)

    # flatten
    shp = pool5.get_shape()
    flattened_shape = shp[1].value * shp[2].value * shp[3].value
    resh1 = tf.reshape(pool5, [-1, flattened_shape], name="resh1")

    # fully connected head with dropout
    fc6 = fc_op(resh1, name="fc6", n_out=4096, p=p)
    fc6_drop = tf.nn.dropout(fc6, keep_prob, name="fc6_drop")
    fc7 = fc_op(fc6_drop, name="fc7", n_out=4096, p=p)
    fc7_drop = tf.nn.dropout(fc7, keep_prob, name="fc7_drop")
    # Final layer stays linear (activate=False): raw logits for softmax.
    fc8 = fc_op(fc7_drop, name="fc8", n_out=Num_Class, p=p, activate=False)
    predictions = tf.nn.softmax(fc8)
    return predictions, fc8, p


# ---- inputs and training graph --------------------------------------------
sess = tf.InteractiveSession()
x = tf.placeholder(tf.float32, [None, Num_fea])
y_ = tf.placeholder(tf.float32, [None, Num_Class])
x_image = tf.reshape(x, [-1, image_row, image_column, 1])
keep_prob = tf.placeholder(tf.float32)  # dropout keep probability
predictions, fc8, p = inference_op(x_image, keep_prob)

# Numerically stable cross-entropy on the raw logits.
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=fc8))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(predictions, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.global_variables_initializer().run()

# Full-batch training loop; dropout disabled (keep_prob=1.0) for evaluation.
for i in range(100):
    train_accuracy = accuracy.eval(
        feed_dict={x: Train_Ma, y_: Train_Lab, keep_prob: 1.0})
    print("step %d, training accuracy %g" % (i, train_accuracy))
    train_step.run(feed_dict={x: Train_Ma, y_: Train_Lab, keep_prob: 0.8})

print("test accuracy %g" % accuracy.eval(
    feed_dict={x: Test_Ma, y_: Test_Lab, keep_prob: 1.0}))

  

  

轉(zhuǎn)載于:https://www.cnblogs.com/Jerry-PR/p/8074076.html

總結(jié)

以上是生活随笔為你收集整理的《Tensorflow实战》之6.3VGGnet学习的全部內(nèi)容,希望文章能夠幫你解決所遇到的問題。

如果覺得生活随笔網站內容還不錯,歡迎將生活随笔推薦給好友。

主站蜘蛛池模板: 天堂va蜜桃 | 秋霞在线视频观看 | 深夜在线网站 | 日日骚av | 久久久久久中文 | 4444亚洲人成无码网在线观看 | 96久久| 欧美在线观看一区 | 久久一级免费视频 | 亚洲日本欧美在线 | 午夜福利理论片在线观看 | 蜜芽在线视频 | 免费成人高清 | 久久综合88 | 亚洲美免无码中文字幕在线 | 免费视频色 | 三上悠亚在线观看一区二区 | 黄色在线资源 | 亚洲手机在线 | 欧美午夜精品久久久久免费视 | 国产又粗又长又大 | 中文字幕最新 | 懂色一区二区三区免费观看 | 成人音影 | 成人av网址在线观看 | 久久性精品 | 欧美日韩国产电影 | 春色校园激情 | 欧美综合在线一区 | 免费av网站观看 | 日本一区高清 | 日本少妇作爱视频 | 夜夜导航 | 亚洲毛片儿 | 黄色女女| 天堂久久爱 | 玖玖精品在线视频 | 欧美国产高清 | 亚洲av无码国产精品久久久久 | 国产精品成人免费精品自在线观看 | 久久亚洲精华国产精华液 | 亚洲一区二区三区在线播放 | 欧美资源在线观看 | 日韩毛片在线看 | 国产精品13p | 久久综合婷婷 | 51 吃瓜网 | 国产一区二区三区四区视频 | 国产农村妇女精品久久久 | 午夜国产免费 | jizz美女 | 一级黄色大片 | 嫩草研究院在线观看 | 亚洲欧美日韩专区 | 91日韩在线视频 | 熟女少妇内射日韩亚洲 | 日本不卡高清视频 | 黄页网站免费观看 | 久久久国产一区二区 | 香蕉大人久久国产成人av | 欧美成视频 | 爱啪啪av | 成人激情片 | 美女在线一区 | 久久婷婷av | 亚洲综合精品视频 | 在线观看国产日韩 | 亚洲成人高清在线 | 91精品国产综合久久福利软件 | 色综合图片 | 四虎精品一区 | 成人免费在线视频网站 | 亚洲在线观看av | 中文字幕丝袜诱惑 | 成人亚洲免费 | 黄网站免费看 | 九色视频国产 | 少妇免费看 | 亚洲校园激情 | 亚洲视频第一页 | 女人高潮娇喘1分47秒 | 香蕉久草| 亚洲精品aaaa| 91精品久久久久久久久 | 琪琪久久 | 国产亚洲午夜 | 爱爱视频在线播放 | 国产精品无码一区二区三区 | 国产精欧美一区二区三区蓝颜男同 | 午夜tv| 色超碰 | 日韩激情四射 | 99激情网| 亚欧av在线| 91快射 | av资源网在线 | 欧美体内she精高潮 日韩一区免费 | 国产黄色影院 | 天堂bt在线 |