TensorFlow Study Notes (21): TensorFlow Machine Learning Models
Below is the general-purpose programming model, followed by three examples showing how it is used.
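To make that template concrete before the real examples, here is a minimal, runnable sketch of the same skeleton in the TF 1.x graph/session style. The toy data (fitting y ≈ 2x with a single weight) is made up purely for illustration; each example below supplies its own inference, loss, and inputs.

# A minimal, hypothetical instance of the template the three examples below follow.
import tensorflow as tf

w = tf.Variable(0., name="weight")

def inference(X):
    # the model: here just a single scalar weight
    return X * w

def loss(X, Y):
    # squared-error loss between predictions and targets
    return tf.reduce_sum(tf.squared_difference(Y, inference(X)))

def inputs():
    # toy data: Y is roughly 2 * X (made up for illustration)
    return tf.constant([1., 2., 3., 4.]), tf.constant([2., 4., 6., 8.])

def train(total_loss):
    # one gradient-descent step on the loss
    return tf.train.GradientDescentOptimizer(0.01).minimize(total_loss)

def evaluate(sess, X, Y):
    print("learned w:", sess.run(w))  # should approach 2.0

with tf.Session() as sess:
    tf.global_variables_initializer().run()
    X, Y = inputs()
    total_loss = loss(X, Y)
    train_op = train(total_loss)
    for step in range(100):
        sess.run([train_op])
    evaluate(sess, X, Y)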
1. Linear regression: predicting the relationship between weight/age and blood fat content

# Linear Regression
import tensorflow as tf

W = tf.Variable(tf.zeros([2, 1]), name="weights")
b = tf.Variable(0., name="bias")

# define the training loop operations
def inference(X):
    # compute the inference model over data X and return the result
    return tf.matmul(X, W) + b  # matrix multiplication

def loss(X, Y):
    # compute loss over training data X and expected values Y
    Y_predicted = inference(X)
    return tf.reduce_sum(tf.squared_difference(Y, Y_predicted))

def inputs():
    # read/generate input training data X and expected outputs Y
    weight_age = [[84, 46], [73, 20], [65, 52], [70, 30], [76, 57], [69, 25], [63, 28], [72, 36],
                  [79, 51], [75, 50], [82, 34], [59, 46], [67, 23], [85, 37], [55, 40], [63, 30]]
    blood_fat_content = [354, 190, 405, 263, 451, 302, 288, 385, 402, 365, 209, 290, 346, 254, 395,
                         434, 220, 374, 308, 220, 311, 181, 274, 303, 244]
    return tf.to_float(weight_age), tf.to_float(blood_fat_content)

# train with GradientDescentOptimizer
def train(total_loss):
    learning_rate = 0.0000001
    return tf.train.GradientDescentOptimizer(learning_rate).minimize(total_loss)

def evaluate(sess, X, Y):
    # evaluate the resulting trained model
    print(sess.run(inference([[80., 25.]])))  # ~ 303
    print(sess.run(inference([[65., 25.]])))

with tf.Session() as sess:
    tf.global_variables_initializer().run()

    X, Y = inputs()
    print(X)
    print(Y)

    total_loss = loss(X, Y)
    train_op = train(total_loss)

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    # tf.train.start_queue_runners starts the input-pipeline threads that fill the queue with
    # examples so that dequeue ops can pull samples from it. It is best paired with a
    # tf.train.Coordinator so the threads are shut down cleanly if an error occurs.

    # actual training loop
    training_steps = 1000
    for step in range(training_steps):
        sess.run([train_op])
        # for debugging and learning purposes, see how the loss decreases through training steps
        if step % 10 == 0:
            print("loss: ", sess.run([total_loss]))

    evaluate(sess, X, Y)

    coord.request_stop()
    coord.join(threads)
    sess.close()
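As a quick sanity check of the loss used above: tf.squared_difference followed by tf.reduce_sum is just the sum of squared errors. The numbers below are made up for illustration; in the model above Y_predicted comes from inference(X).

# Tiny illustration (hypothetical numbers) of the sum-of-squared-errors loss:
# loss = sum((Y - Y_predicted)^2)
import tensorflow as tf

Y = tf.constant([300., 200., 400.])            # hypothetical targets
Y_predicted = tf.constant([290., 210., 380.])  # hypothetical predictions

total = tf.reduce_sum(tf.squared_difference(Y, Y_predicted))

with tf.Session() as sess:
    print(sess.run(total))  # 10^2 + 10^2 + 20^2 = 600.0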
2. Logistic regression (LR): read the data file locally and predict Titanic survivors

import tensorflow as tf

# same parameter and variable initialization as before
W = tf.Variable(tf.zeros([5, 1]), name="weights")
b = tf.Variable(0., name="bias")

# the former inference is now used for combining inputs
def combine_inputs(X):
    return tf.matmul(X, W) + b

# the new inferred value is the sigmoid applied to the former
def inference(X):
    return tf.sigmoid(combine_inputs(X))  # logistic regression (LR)

# calculate the cross-entropy loss
def loss(X, Y):
    # note: labels are the targets Y, logits are the raw linear outputs
    return tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=Y, logits=combine_inputs(X)))

def read_csv(batch_size, file_name, record_defaults):
    # filename_queue = tf.train.string_input_producer([os.path.dirname(__file__) + "/" + file_name])
    filename_queue = tf.train.string_input_producer(["E:\\testData\\taitannike\\" + file_name])
    reader = tf.TextLineReader(skip_header_lines=1)
    key, value = reader.read(filename_queue)

    # decode_csv converts a Tensor of type string (the text line) into
    # a tuple of tensor columns with the specified defaults, which also
    # set the data type for each column
    decoded = tf.decode_csv(value, record_defaults=record_defaults)

    # batch actually reads the file and loads "batch_size" rows in a single tensor
    return tf.train.shuffle_batch(decoded,
                                  batch_size=batch_size,
                                  capacity=batch_size * 50,
                                  min_after_dequeue=batch_size)

def inputs():
    passenger_id, survived, pclass, name, sex, age, sibsp, parch, ticket, fare, cabin, embarked = \
        read_csv(100, "train.csv",
                 [[0.0], [0.0], [0], [""], [""], [0.0], [0.0], [0.0], [""], [0.0], [""], [""]])

    # convert categorical data
    is_first_class = tf.to_float(tf.equal(pclass, [1]))
    is_second_class = tf.to_float(tf.equal(pclass, [2]))
    is_third_class = tf.to_float(tf.equal(pclass, [3]))
    gender = tf.to_float(tf.equal(sex, ["female"]))

    # Finally we pack all the features in a single matrix;
    # we then transpose to have a matrix with one example per row and one feature per column.
    features = tf.transpose(tf.stack([is_first_class, is_second_class, is_third_class, gender, age]))
    survived = tf.reshape(survived, [100, 1])

    return features, survived

def train(total_loss):
    learning_rate = 0.01
    return tf.train.GradientDescentOptimizer(learning_rate).minimize(total_loss)

def evaluate(sess, X, Y):
    predicted = tf.cast(inference(X) > 0.5, tf.float32)
    print(sess.run(tf.reduce_mean(tf.cast(tf.equal(predicted, Y), tf.float32))))

with tf.Session() as sess:
    tf.global_variables_initializer().run()
    # tf.initialize_all_variables().run()

    X, Y = inputs()
    total_loss = loss(X, Y)
    train_op = train(total_loss)

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    # actual training loop
    training_steps = 1000
    for step in range(training_steps):
        sess.run([train_op])
        # for debugging and learning purposes, see how the loss decreases through training steps
        if step % 10 == 0:
            print("loss: ", sess.run([total_loss]))

    evaluate(sess, X, Y)

    coord.request_stop()
    coord.join(threads)
    sess.close()
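The categorical conversion inside inputs() is easier to see on a couple of hand-made rows. The values below are hypothetical, not read from train.csv; they only show how tf.equal plus tf.to_float turn categorical columns into 0/1 indicator features.

# Hypothetical pclass/sex values illustrating the indicator-feature conversion above.
import tensorflow as tf

pclass = tf.constant([1, 3, 2, 3])
sex = tf.constant(["female", "male", "female", "male"])

is_first_class = tf.to_float(tf.equal(pclass, [1]))   # [1., 0., 0., 0.]
is_second_class = tf.to_float(tf.equal(pclass, [2]))  # [0., 0., 1., 0.]
is_third_class = tf.to_float(tf.equal(pclass, [3]))   # [0., 1., 0., 1.]
gender = tf.to_float(tf.equal(sex, ["female"]))       # [1., 0., 1., 0.]

with tf.Session() as sess:
    print(sess.run([is_first_class, is_second_class, is_third_class, gender]))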
3. Softmax classification on the Iris dataset

# softmax multi-class classification, Iris data
import tensorflow as tf

# this time the weights form a matrix, not a column vector: one "weight vector" per class.
W = tf.Variable(tf.zeros([4, 3]), name="weights")
# so do the biases, one per class.
b = tf.Variable(tf.zeros([3]), name="bias")

# the former inference is now used for combining inputs
def combine_inputs(X):
    return tf.matmul(X, W) + b

def inference(X):
    return tf.nn.softmax(combine_inputs(X))

def loss(X, Y):
    return tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=Y, logits=combine_inputs(X)))

def read_csv(batch_size, file_name, record_defaults):
    filename_queue = tf.train.string_input_producer(["E:\\testData\\taitannike\\" + file_name])
    # filename_queue = tf.train.string_input_producer([os.path.dirname(__file__) + "/" + file_name])
    reader = tf.TextLineReader(skip_header_lines=1)
    key, value = reader.read(filename_queue)

    # decode_csv converts a Tensor of type string (the text line) into
    # a tuple of tensor columns with the specified defaults, which also
    # set the data type for each column
    decoded = tf.decode_csv(value, record_defaults=record_defaults)

    # batch actually reads the file and loads "batch_size" rows in a single tensor
    return tf.train.shuffle_batch(decoded,
                                  batch_size=batch_size,
                                  capacity=batch_size * 50,
                                  min_after_dequeue=batch_size)

def inputs():
    sepal_length, sepal_width, petal_length, petal_width, label = \
        read_csv(100, "iris.data", [[0.0], [0.0], [0.0], [0.0], [""]])

    # convert class names to a 0-based class index.
    label_number = tf.to_int32(tf.argmax(tf.to_int32(tf.stack([
        tf.equal(label, ["Iris-setosa"]),
        tf.equal(label, ["Iris-versicolor"]),
        tf.equal(label, ["Iris-virginica"])])), 0))

    # Pack all the features that we care about in a single matrix;
    # we then transpose to have a matrix with one example per row and one feature per column.
    features = tf.transpose(tf.stack([sepal_length, sepal_width, petal_length, petal_width]))

    return features, label_number

def train(total_loss):
    learning_rate = 0.01
    return tf.train.GradientDescentOptimizer(learning_rate).minimize(total_loss)

def evaluate(sess, X, Y):
    predicted = tf.cast(tf.argmax(inference(X), 1), tf.int32)
    print(sess.run(tf.reduce_mean(tf.cast(tf.equal(predicted, Y), tf.float32))))

# Launch the graph in a session, setup boilerplate
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    # tf.initialize_all_variables().run()

    X, Y = inputs()
    total_loss = loss(X, Y)
    train_op = train(total_loss)

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    # actual training loop
    training_steps = 10000
    for step in range(training_steps):
        sess.run([train_op])
        # for debugging and learning purposes, see how the loss decreases through training steps
        if step % 10 == 0:
            print("loss: ", sess.run([total_loss]))

    evaluate(sess, X, Y)

    coord.request_stop()
    coord.join(threads)
    sess.close()
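The class-name-to-index trick in inputs() above is worth unpacking. The small run below applies the same stack/argmax pattern to a few hypothetical labels: each tf.equal test produces one row of a boolean matrix, and argmax over axis 0 returns, for each example, the index of the matching class.

# Hypothetical labels showing how the stack/argmax pattern maps class names
# to 0-based class indices (setosa=0, versicolor=1, virginica=2).
import tensorflow as tf

label = tf.constant(["Iris-virginica", "Iris-setosa", "Iris-versicolor"])

label_number = tf.to_int32(tf.argmax(tf.to_int32(tf.stack([
    tf.equal(label, ["Iris-setosa"]),
    tf.equal(label, ["Iris-versicolor"]),
    tf.equal(label, ["Iris-virginica"])])), 0))

with tf.Session() as sess:
    print(sess.run(label_number))  # [2 0 1]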
Summary

All three examples reuse the same skeleton: define inference, loss, inputs, train, and evaluate, then run the training loop inside a tf.Session. Only the model (linear, sigmoid, softmax), the loss, and the input pipeline change from one problem to the next.