日韩性视频-久久久蜜桃-www中文字幕-在线中文字幕av-亚洲欧美一区二区三区四区-撸久久-香蕉视频一区-久久无码精品丰满人妻-国产高潮av-激情福利社-日韩av网址大全-国产精品久久999-日本五十路在线-性欧美在线-久久99精品波多结衣一区-男女午夜免费视频-黑人极品ⅴideos精品欧美棵-人人妻人人澡人人爽精品欧美一区-日韩一区在线看-欧美a级在线免费观看

歡迎訪問 生活随笔!

生活随笔

當前位置: 首頁 > 人文社科 > 生活经验 >内容正文

生活经验

python构建cnn图片匹配_tensorflow搭建cnn人脸识别训练+识别代码(python)

發布時間:2023/11/27 生活经验 31 豆豆
生活随笔 收集整理的這篇文章主要介紹了 python构建cnn图片匹配_tensorflow搭建cnn人脸识别训练+识别代码(python) 小編覺得挺不錯的,現在分享給大家,幫大家做個參考.

# -*- coding: utf-8 -*-
"""TensorFlow (1.x) CNN face-recognition training script.

Loads PNG face images from class-labelled sub-directories, builds a
4-conv-layer CNN, trains it, and checkpoints the best validation model.
"""

from skimage import io, transform
import glob
import os
import tensorflow as tf
import numpy as np
import time

# Root directory: one sub-directory per person (class), each holding PNGs.
path = 'D:/code/python/Anaconda3/envs/faces'

# Every image is resized to w x h with c channels before training.
# NOTE(review): the original comment said 100*100, but the code uses 128x128.
w = 128
h = 128
c = 3

def read_img(path):
    """Load all PNG images grouped by class sub-directory.

    Each immediate sub-directory of *path* is one class; its index in
    the (os.listdir-ordered) directory list becomes the integer label.

    Returns:
        (images, labels): images as a float32 ndarray of shape
        (N, w, h, c) — transform.resize also rescales pixel values to
        [0, 1] — and labels as an int32 ndarray of length N.
    """
    cate = [path + '/' + x for x in os.listdir(path)
            if os.path.isdir(path + '/' + x)]
    imgs = []
    labels = []
    for idx, folder in enumerate(cate):
        for im in glob.glob(folder + '/*.png'):
            print('reading the images:%s' % (im))
            img = io.imread(im)
            # Force a uniform (w, h, c) shape so images stack into one array.
            img = transform.resize(img, (w, h, c))
            imgs.append(img)
            labels.append(idx)
    return np.asarray(imgs, np.float32), np.asarray(labels, np.int32)

data, label = read_img(path)

# Shuffle samples (in unison) so the train/validation split is random.
num_example = data.shape[0]
arr = np.arange(num_example)
np.random.shuffle(arr)
data = data[arr]
label = label[arr]

# Split into 80% training / 20% validation.
ratio = 0.8
s = int(num_example * ratio)  # builtin int: np.int was removed in NumPy >= 1.24
x_train = data[:s]
y_train = label[:s]
x_val = data[s:]
y_val = label[s:]

# ----------------- Build the network -----------------
# Placeholders: input image batch and integer class labels.
x = tf.placeholder(tf.float32, shape=[None, w, h, c], name='x')
y_ = tf.placeholder(tf.int32, shape=[None, ], name='y_')


def CNNlayer():
    """Build the CNN graph on placeholder ``x`` and return the logits.

    Four conv+maxpool stages halve the spatial size each time
    (128 -> 64 -> 32 -> 16 -> 8), followed by two ReLU dense layers and
    a linear 60-unit output (one logit per class).
    """
    # Conv stage 1 (128 -> 64)
    conv1 = tf.layers.conv2d(
        inputs=x,
        filters=32,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)

    # Conv stage 2 (64 -> 32)
    conv2 = tf.layers.conv2d(
        inputs=pool1,
        filters=64,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)

    # Conv stage 3 (32 -> 16)
    conv3 = tf.layers.conv2d(
        inputs=pool2,
        filters=128,
        kernel_size=[3, 3],
        padding="same",
        activation=tf.nn.relu,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
    pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[2, 2], strides=2)

    # Conv stage 4 (16 -> 8)
    conv4 = tf.layers.conv2d(
        inputs=pool3,
        filters=128,
        kernel_size=[3, 3],
        padding="same",
        activation=tf.nn.relu,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
    pool4 = tf.layers.max_pooling2d(inputs=conv4, pool_size=[2, 2], strides=2)

    # Flatten the final 8x8x128 feature map for the dense layers.
    re1 = tf.reshape(pool4, [-1, 8 * 8 * 128])

    # Fully connected layers with L2 weight regularization.
    dense1 = tf.layers.dense(
        inputs=re1,
        units=1024,
        activation=tf.nn.relu,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
        kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))
    dense2 = tf.layers.dense(
        inputs=dense1,
        units=512,
        activation=tf.nn.relu,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
        kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))
    # Linear logits; softmax is applied by the loss function, not here.
    logits = tf.layers.dense(
        inputs=dense2,
        units=60,
        activation=None,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
        kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))
    return logits
# --------------------------- End of network ---------------------------

logits = CNNlayer()

# Sparse cross-entropy: labels are integer class ids, not one-hot vectors.
loss = tf.losses.sparse_softmax_cross_entropy(labels=y_, logits=logits)
train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)

# Accuracy: fraction of predictions whose argmax matches the label.
correct_prediction = tf.equal(tf.cast(tf.argmax(logits, 1), tf.int32), y_)
acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

def minibatches(inputs=None, targets=None, batch_size=None, shuffle=False):
    """Yield ``(inputs, targets)`` mini-batch pairs of size *batch_size*.

    When *shuffle* is true, samples are drawn in a random order via an
    index permutation (fancy indexing); otherwise contiguous slices are
    used. A trailing partial batch (fewer than *batch_size* samples) is
    silently dropped.
    """
    assert len(inputs) == len(targets)
    if shuffle:
        indices = np.arange(len(inputs))
        np.random.shuffle(indices)
    # Step so that only full batches are produced; the remainder is dropped.
    for start_idx in range(0, len(inputs) - batch_size + 1, batch_size):
        if shuffle:
            excerpt = indices[start_idx:start_idx + batch_size]
        else:
            excerpt = slice(start_idx, start_idx + batch_size)
        yield inputs[excerpt], targets[excerpt]

# Keep at most the 3 most recent checkpoints on disk.
saver = tf.train.Saver(max_to_keep=3)
max_acc = 0
f = open('ckpt1/acc.txt', 'w')

# Training/validation loop; n_epoch can be raised for longer training.
n_epoch = 10
batch_size = 64
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
for epoch in range(n_epoch):
    start_time = time.time()

    # --- training pass ---
    train_loss, train_acc, n_batch = 0, 0, 0
    for x_train_a, y_train_a in minibatches(x_train, y_train, batch_size,
                                            shuffle=True):
        _, err, ac = sess.run([train_op, loss, acc],
                              feed_dict={x: x_train_a, y_: y_train_a})
        train_loss += err
        train_acc += ac
        n_batch += 1
    print("train loss: %f" % (train_loss / n_batch))
    print("train acc: %f" % (train_acc / n_batch))

    # --- validation pass (no train_op, so weights are untouched) ---
    val_loss, val_acc, n_batch = 0, 0, 0
    for x_val_a, y_val_a in minibatches(x_val, y_val, batch_size,
                                        shuffle=False):
        err, ac = sess.run([loss, acc], feed_dict={x: x_val_a, y_: y_val_a})
        val_loss += err
        val_acc += ac
        n_batch += 1
    print("validation loss: %f" % (val_loss / n_batch))
    print("validation acc: %f" % (val_acc / n_batch))

    # NOTE(review): val_acc here is the summed per-batch accuracy, not the
    # mean; the best-model comparison is still monotonic with the mean as
    # long as the batch count is constant, so behavior is preserved.
    f.write(str(epoch + 1) + ', val_acc:' + str(val_acc) + '\n')
    if val_acc > max_acc:
        max_acc = val_acc
        saver.save(sess, 'ckpt1/faces.ckpt', global_step=epoch + 1)

f.close()
sess.close()

總結

以上是生活随笔為你收集整理的python构建cnn图片匹配_tensorflow搭建cnn人脸识别训练+识别代码(python)的全部內容,希望文章能夠幫你解決所遇到的問題。

如果覺得生活随笔網站內容還不錯,歡迎將生活随笔推薦給好友。