Weather Image Classification Practice Competition: Stage 3 (Mid-Competition Notes)
Stage 3 is the hands-on stage: unlike the fill-in-the-blank exercises of the first two stages, it is a real prediction task.
The competition provides 8000 photos, of which 6000 form the training set and the remaining 2000 the test set. The task is to train on the 6000 labelled images, predict labels for the 2000 test images, write the results to a csv file, and submit it for scoring.
We had been studying TensorFlow for a while and had a rough idea of how to build a neural network; we also easily found a suitable template online, so we planned to adapt it to this problem.
Working together with my classmates, we finished an initial version of the code, then revised and polished it into the form below.
import os
import json
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from tensorflow import keras
import cv2
import keras
from keras import datasets, layers, models

os.environ['KERAS_BACKEND'] = 'tensorflow'

js_path = '/home/kesci/input/weather_image1552/train.json'
test_path = '/home/kesci/input/weather_image1552/測試集/'
train_path = '/home/kesci/input/weather_image1552/訓(xùn)練集/'

testdata = 400  # number of labelled images used for training

# load the filename -> label mapping from train.json
path = '/home/kesci/input/weather_image1552/train.json'
with open(path, 'r') as f:
    label = json.load(f)

def read_image(paths):
    # collect the paths of all .jpg files under the given directory
    filelist = []
    for root, dirs, files in os.walk(paths):
        for file in files:
            if os.path.splitext(file)[1] == ".jpg":
                filelist.append(os.path.join(root, file))
    return filelist

def im_resize(paths):
    # resize every image to 128x128 in place on disk
    for filename in paths:
        with Image.open(filename) as im:
            newim = im.resize((128, 128))
            newim.save(filename)

def im_array(paths):
    # convert each image to a flattened, normalised greyscale pixel list
    M = []
    for filename in paths:
        im = Image.open(filename)
        im_L = im.convert("L")
        im_L = im_L.resize((128, 128))
        Core = im_L.getdata()
        arr1 = np.array(Core, dtype='float32') / 255.0
        list_img = arr1.tolist()
        M.extend(list_img)
    return M

# mp={'cloudy':0,'sunny':1}
dict_label = {0: '1', 1: '0'}
mp = {'sunny': 0, 'cloudy': 1}
# label=[0]*len(filelist_1)+[1]*len(filelist_2)

# take the first `testdata` labelled images as the training subset
js_pic = []
js_lab = []
cnt = 0
for key in label:
    if cnt < testdata:
        js_pic.append(key)
        js_lab.append(mp[label[key]])
    cnt += 1
train_lables = np.array(js_lab)

# take the first 2000 filenames from train.json as the test-image names
tot = []
cnt = 0
for key in label:
    if cnt < 2000:
        tot.append(key)
    cnt += 1

features = []
filelist = []
for i in range(len(js_pic)):
    img = cv2.imread(train_path + js_pic[i], 0)
    # print(train_path + js_pic[i])
    filelist.append(train_path + js_pic[i])

trainfilelist = filelist
M = []
M = im_array(trainfilelist)
train_images = np.array(M).reshape(len(trainfilelist), 128, 128)
train_images = train_images[..., np.newaxis]
# print(train_images)
# X = np.array(list(zip(x1,x2))).reshape(len(x1), 2)
# train_images=np.array(M)
# train_images = train_images[ ..., np.newaxis ]

# neural network
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(128, 128, 1)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(2, activation='softmax'))
model.summary()
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(train_images, train_lables, epochs=10)
# , batch_size=400
# print(model.evaluate(train_images, train_lables))

# predict the 2000 test images and collect the predicted class indices
a = []
# test = r'C:\Users\carvi\Desktop\人工智能\天氣識別\test'
filelist = read_image(test_path)
im_resize(filelist)
for i in range(2000):
    im = Image.open(test_path + tot[i])
    # print(test_path + tot[i])
    im_L = im.convert("L")
    Core = im_L.getdata()
    arr1 = np.array(Core, dtype='float32') / 255.0
    list_img = arr1.tolist()
    images = np.array(list_img).reshape(-1, 128, 128, 1)
    predictions_single = model.predict(images)
    # print("prediction:", dict_label[np.argmax(predictions_single)])
    # print("prediction:", np.argmax(predictions_single))
    a.append(np.argmax(predictions_single))
    # print(predictions_single)
np.savetxt('/home/kesci/input/new.csv', a, delimiter=',')
print(a)
"""
for filename in filelist:
    im = Image.open(filename)
    # print(filename)
    im_L = im.convert("L")
    Core = im_L.getdata()
    arr1 = np.array(Core, dtype='float32') / 255.0
    list_img = arr1.tolist()
    images = np.array(list_img).reshape(-1, 128, 128, 1)
    predictions_single = model.predict(images)
    print("prediction:", np.argmax(predictions_single))
    print(predictions_single)
"""
There was still one big headache for us: how to write the results to a csv file. This genuinely stumped me. I looked through a lot of material, but the results never satisfied me. In the end I settled on a workaround: store the predicted answers in an array, dump them into a txt file, and then write a second program that reads the txt file and writes it out as a csv file, reaching the goal by a detour.
The output program is as follows:
import os
import numpy as np
import pandas as pd
#!/usr/bin/python
# coding = utf-8

# read the txt file that holds the prediction array
data = pd.read_table('C:\\Users\DELL\Desktop\活動\人工智能\圖像\ceshi.txt', sep='\n')
# header=None: the columns have no names; you can set them yourself
# encoding='gb2312': with other encodings Chinese characters are garbled
# sep=',': fields separated by ','
data1 = pd.DataFrame(data)
# write the data back out as a csv file
data1.to_csv('C:\\Users\DELL\Desktop\活動\人工智能\圖像\data1.csv', sep='\n', index=False)
# data1 = pd.DataFrame(arr1, header=False, index=False)  # header: original first-row index, index: original first-column index
# data1.to_csv('C:\\Users\DELL\Desktop\活動\人工智能\圖像\data1.csv\data1.csv', sep='\n')
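Looking back, the txt detour is probably avoidable: the prediction list could be written straight to a csv with pandas. Below is a minimal sketch of that idea, assuming the list a of predicted indices and the filename list tot from the training script above; the column names and output path here are my own guesses, not necessarily the format the competition expects.

import pandas as pd

# a: predicted class indices, tot: test-image filenames (both from the script above)
submission = pd.DataFrame({'filename': tot[:len(a)], 'label': a})
submission.to_csv('new.csv', index=False)  # output path is illustrative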
But after submitting the final result I found the score was only 0.5, which left me completely baffled. After all that hard work the accuracy was only one half, no better than a blind guess. (At the very beginning I had submitted an answer that predicted 1 for everything, purely to see what guessing would score, and that also got 0.5.)
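One detail in the training script that may be worth double-checking (an observation, not a confirmed explanation of the 0.5 score): dict_label = {0: '1', 1: '0'} looks like it was meant to remap the network's argmax output to the submission labels, but it is never applied when building a, so what gets saved are the raw indices under mp = {'sunny': 0, 'cloudy': 1}. If the grader expects the other convention, the remapping would have to be applied before saving, roughly like this:

# hypothetical fix: remap raw argmax indices to submission labels before saving
a_mapped = [dict_label[int(p)] for p in a]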
Another puzzle: in theory, a larger training set should give higher accuracy, but in practice training on 6000 images only reached a training accuracy of a little over 50%, while training on 400 images reached over 80%, sometimes even above 90%. I cannot figure out why.
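One way to investigate this (not something the script above does) is to hold out part of the labelled data as a validation set, so that training accuracy can be compared with accuracy on unseen images; Keras supports this directly through the validation_split argument of fit. A minimal sketch under that assumption:

# reserve 20% of the labelled images for validation and compare the two curves;
# a large gap between training and validation accuracy points to overfitting,
# while both hovering near 50% points to a data or label problem
history = model.fit(train_images, train_lables, epochs=10, validation_split=0.2)
print(history.history)  # per-epoch training and validation loss/accuracy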
The csv file that was finally submitted is shown in the figure.
Well, keep at it. Sigh, there is still a long way to go~