Deep Learning for Rookies: My First Deep Neural Network


I have looked at pile after pile of frameworks, but as a complete beginner I still prefer to start from a low-level implementation. I watched Andrew Ng's videos on Coursera and worked through some of the programming exercises on the site; the quality of both the course and the exercises is excellent. This post is my summary of the first course.

Calling it a deep neural network is generous: it is just a fully connected network with a few extra layers, written to practice and to review the course. The samples are my own (uploaded as an attachment to the original post): a (30000, 400) training set with 200 positive samples (cars) and 200 negative samples, and a (30000, 100) test set. The sample counts and so on are not chosen sensibly, so please bear with me. The code follows.
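Each column of these matrices is one flattened sample (features × examples), which is the layout the forward pass below expects. A minimal sketch of how such .npy files might be assembled; the placeholder data here stands in for the real flattened images from the attachment:

import numpy as np

# Illustrative only: random vectors stand in for the real flattened car / non-car images.
samples = [np.random.rand(30000) for _ in range(400)]   # 400 flattened feature vectors
labels = [1] * 200 + [0] * 200                          # 1 = car, 0 = not car

train_set = np.stack(samples, axis=1)                   # shape (30000, 400): one column per sample
train_label = np.array(labels).reshape(1, -1)           # shape (1, 400)
np.save('train_set.npy', train_set)
np.save('train_label.npy', train_label)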

++++++++++++++++++++++++++This part: the required activation functions, relu and sigmoid
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import numpy as np

def sigmoid(Z):
    """
    Implements the sigmoid activation in numpy.

    Arguments:
    Z -- numpy array of any shape

    Returns:
    A -- output of sigmoid(Z), same shape as Z
    cache -- returns Z as well, useful during backpropagation
    """
    A = 1 / (1 + np.exp(-Z))
    cache = Z

    return A, cache


def relu(Z):
    """
    Implement the RELU function.

    Arguments:
    Z -- Output of the linear layer, of any shape

    Returns:
    A -- Post-activation parameter, of the same shape as Z
    cache -- returns Z as well, stored for computing the backward pass efficiently
    """
    A = np.maximum(0, Z)

    assert (A.shape == Z.shape)

    cache = Z
    return A, cache


def sigmoid_backprob(dA, cache):
    """
    Implement the backward propagation for a single SIGMOID unit.

    Arguments:
    dA -- post-activation gradient, of any shape
    cache -- 'Z' where we store for computing backward propagation efficiently

    Returns:
    dZ -- Gradient of the cost with respect to Z
    """
    Z = cache

    s = 1 / (1 + np.exp(-Z))
    dZ = dA * s * (1 - s)

    assert (dZ.shape == Z.shape)

    return dZ

def relu_backprob(dA, cache):
    """
    Implement the backward propagation for a single RELU unit.

    Arguments:
    dA -- post-activation gradient, of any shape
    cache -- 'Z' where we store for computing backward propagation efficiently

    Returns:
    dZ -- Gradient of the cost with respect to Z
    """
    Z = cache
    # The derivative of relu is 1 where Z > 0 and 0 elsewhere, so the incoming
    # gradient simply passes through the positive entries.
    dZ = dA * (Z > 0)

    assert (dZ.shape == Z.shape)

    return dZ

++++++++++++++++++++++++++This part reads the data set++++++++++++++++++++++++++++++++++++

#!/usr/bin/env python
# -*- coding:utf-8 -*-
import numpy as np

def load_data_set():
    train_set = np.load('train_set.npy')
    train_label = np.load('train_label.npy')
    test_set = np.load('test_set.npy')
    test_label = np.load('test_label.npy')

    return train_set, train_label, test_set, test_label
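A quick sanity check before training (a minimal sketch; the expected feature shapes come from the description above, and scaling raw 0-255 pixel values to [0, 1] is an assumption about the data, not something the original code does):

#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Illustrative sanity check; assumes the four .npy files loaded above exist.
import numpy as np
from read_data import load_data_set

train_set, train_label, test_set, test_label = load_data_set()
print(train_set.shape)                       # expected (30000, 400)
print(test_set.shape)                        # expected (30000, 100)
print(train_label.shape, test_label.shape)   # label shapes are not stated in the post

# If the features are raw 0-255 pixel values (an assumption), scaling them
# to [0, 1] usually makes gradient descent much better behaved.
train_set = train_set / 255.0
test_set = test_set / 255.0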


+++++++++++++++++++++++The network+++++++++++++++++++++++++++++

#!/usr/bin/env python
# -*- coding:utf-8 -*-
import numpy as np
from activation_function import sigmoid, relu, sigmoid_backprob, relu_backprob
from read_data import load_data_set
train_set,train_label,test_set, test_label = load_data_set()

def init_parameters_deep(layer_items):
    parameters = {}
    L = len(layer_items)
    for l in range(1, L):
        W = np.random.randn(layer_items[l], layer_items[l-1]) * 0.01
        b = np.zeros((layer_items[l], 1))
        parameters['W' + str(l)] = W
        parameters['b' + str(l)] = b

    return parameters
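# For example, with layer_items = [30000, 10, 1] (a small illustrative setting,
# not the layer sizes used at the bottom of this script), the returned shapes are:
#   parameters['W1'].shape == (10, 30000), parameters['b1'].shape == (10, 1)
#   parameters['W2'].shape == (1, 10),     parameters['b2'].shape == (1, 1)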

def linear_forward(A, W, b):
    Z = np.dot(W, A) + b
    cache = (A, W, b)
    return Z, cache

def linear_activation_forward(A_prev, W, b, activation):
    Z, linear_cache = linear_forward(A_prev, W, b)
    if activation == 'sigmoid':
        A, activation_cache = sigmoid(Z)
    elif activation == 'relu':
        A, activation_cache = relu(Z)

    cache = (linear_cache, activation_cache)
    return A, cache

def L_forward_model(X, parameters):
    caches = []
    L = len(parameters) // 2
    A = X
    # Hidden layers 1..L-1 use relu; the output layer uses sigmoid.
    for l in range(1, L):
        A_prev = A
        A, cache = linear_activation_forward(A_prev, parameters['W' + str(l)], parameters['b' + str(l)], activation='relu')
        caches.append(cache)
    AL, cache = linear_activation_forward(A, parameters['W' + str(L)], parameters['b' + str(L)], activation='sigmoid')
    caches.append(cache)

    return AL, caches

def compute_cost(AL, Y):
    m = AL.shape[1]
    # Cross-entropy cost averaged over the m examples.
    cost = -1/m * (Y * np.log(AL) + (1 - Y) * np.log(1 - AL)).sum()
    cost = np.squeeze(cost)
    assert (cost.shape == ())
    return cost
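# Note: if AL reaches exactly 0 or 1, np.log returns -inf and the cost becomes NaN.
# A common guard is to clip the activations first; the variant below is only a
# sketch and is not used by the training loop in this script.
def compute_cost_stable(AL, Y):
    m = AL.shape[1]
    AL = np.clip(AL, 1e-12, 1 - 1e-12)  # keep the logs finite
    cost = -1/m * (Y * np.log(AL) + (1 - Y) * np.log(1 - AL)).sum()
    return np.squeeze(cost)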

def linear_back(dZ, cache):
    A_prev, W, b = cache
    m = A_prev.shape[1]
    dW = 1/m * np.dot(dZ, A_prev.T)
    db = 1/m * np.sum(dZ, axis=1, keepdims=True)
    dA_prev = np.dot(W.T, dZ)

    return dA_prev, dW, db

def linear_activation_back(dA, cache, activation):
    linear_cache, activation_cache = cache
    if activation == 'sigmoid':
        dZ = sigmoid_backprob(dA, activation_cache)
        dA_prev, dW, db = linear_back(dZ, linear_cache)
    elif activation == 'relu':
        dZ = relu_backprob(dA, activation_cache)
        dA_prev, dW, db = linear_back(dZ, linear_cache)

    return dA_prev, dW, db

def L_backprob_model(AL, Y, caches):
    grads = {}
    # Derivative of the cross-entropy cost with respect to AL.
    dAL = -(np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))
    L = len(caches)
    # Output (sigmoid) layer.
    cache = caches[L - 1]
    grads['dA' + str(L - 1)], grads['dW' + str(L)], grads['db' + str(L)] = linear_activation_back(dAL, cache, activation='sigmoid')

    # Hidden (relu) layers, walking backwards. Each layer must receive the
    # gradient produced by the layer above it, not dAL every time.
    for l in reversed(range(L - 1)):
        cache = caches[l]
        grads['dA' + str(l)], grads['dW' + str(l + 1)], grads['db' + str(l + 1)] = linear_activation_back(grads['dA' + str(l + 1)], cache, activation='relu')

    return grads

def update_parameters(parameters, grads, learning_rate=0.01):
    L = len(parameters) // 2
    for l in range(1, L + 1):
        parameters['W' + str(l)] = parameters['W' + str(l)] - learning_rate * grads['dW' + str(l)]
        parameters['b' + str(l)] = parameters['b' + str(l)] - learning_rate * grads['db' + str(l)]
    return parameters

def L_nn_deep(layer_items, X, Y, num_iter, learning_rate, print_cost=False):
    costs = []
    parameters = init_parameters_deep(layer_items)

    for i in range(num_iter):
        AL, caches = L_forward_model(X, parameters)
        cost = compute_cost(AL, Y)
        grads = L_backprob_model(AL, Y, caches)
        parameters = update_parameters(parameters, grads, learning_rate)
        if i % 100 == 0:
            costs.append(cost)
            if print_cost:
                print('iteration %d, cost: %f' % (i, cost))

    return parameters

layers = [30000, 10000, 5000, 1000, 100, 1]

p = L_nn_deep(layers, train_set, train_label, 2000, 0.01, True)
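
# The script above only trains and never evaluates on the test set loaded at the top.
# A minimal evaluation sketch (the predict helper below is illustrative and not part
# of the original code): reuse L_forward_model and threshold the sigmoid output at 0.5.
def predict(X, Y, parameters):
    AL, _ = L_forward_model(X, parameters)   # forward pass with the trained parameters
    predictions = (AL > 0.5).astype(int)     # 1 = car, 0 = not car
    accuracy = np.mean(predictions == Y)
    return predictions, accuracy

# Example usage:
# _, test_acc = predict(test_set, test_label, p)
# print('test accuracy: %f' % test_acc)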

Both the hyperparameters and the data set here are chosen terribly; running this blew out my machine's memory right away, and I was too lazy to fix it.



Reprinted from: https://www.cnblogs.com/zxxian/p/7857467.html
