This article explains how to implement handwritten digit recognition with a CNN and how changing its training parameters affects the results. The explanation is kept simple and clear; follow the steps below to work through it.
1. Network architecture overview
Input layer: the raw training images (28 × 28 grayscale)
Conv1: 6 convolution kernels of size 5 × 5, stride 1
Pooling1: pooling window of size 2 × 2, stride 2
Conv2: 12 convolution kernels of size 5 × 5, stride 1
Pooling2: pooling window of size 2 × 2, stride 2
Fully connected layers: Dense(120) and Dense(84), both with ReLU, followed by Dropout(0.5)
Output layer: a 10-dimensional vector, one score per digit class
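With 'valid' (no-padding) convolutions, the spatial size shrinks layer by layer: 28 → 24 after Conv1, 12 after Pooling1, 8 after Conv2, and 4 after Pooling2, so the flattened feature vector has 4 × 4 × 12 = 192 elements. A quick sanity-check sketch (the helper names are illustrative only, not part of the original code):

def conv_out(size, kernel, stride=1):
    # 'valid' convolution: no padding
    return (size - kernel) // stride + 1

def pool_out(size, window, stride):
    return (size - window) // stride + 1

s = conv_out(28, 5)      # Conv1    -> 24
s = pool_out(s, 2, 2)    # Pooling1 -> 12
s = conv_out(s, 5)       # Conv2    -> 8
s = pool_out(s, 2, 2)    # Pooling2 -> 4
print(s * s * 12)        # flattened length: 192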
2. Basic experimental workflow
(1) Obtain the training and test data
The handwritten-digit dataset (MNIST) that ships with Keras is used directly:
from keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
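As a quick check (a minimal sketch, assuming the default Keras MNIST split), the arrays come back as 28 × 28 uint8 images with integer labels, which is why the full script later normalizes them to [0, 1], adds a channel axis, and one-hot encodes the labels:

from keras.datasets import mnist

(x_train, y_train), (x_test, y_test) = mnist.load_data()
print(x_train.shape, x_train.dtype)   # (60000, 28, 28) uint8
print(x_test.shape)                   # (10000, 28, 28)
print(y_train[:5])                    # integer class labels in 0..9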
(2) Define the network architecture
Code:
def get_model():
    model = Sequential()
    model.add(Conv2D(filters=6, kernel_size=(5, 5), strides=1, activation='relu', input_shape=(28, 28, 1)))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=2))
    model.add(Conv2D(filters=12, kernel_size=(5, 5), strides=1, activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=2))
    model.add(Flatten())
    #model.add(Conv2D(filters=120, kernel_size=(5, 5), activation='relu'))
    model.add(Dense(120, activation='relu'))
    model.add(Dense(84, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(10, activation='softmax'))
    # Compile the model with the multi-class cross-entropy loss and the Adadelta optimizer
    model.compile(loss='categorical_crossentropy',
                  optimizer='Adadelta',
                  metrics=['accuracy'])
    return model
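To confirm that the layer shapes match the overview in section 1, the model can be built and inspected right away (a short usage sketch; the shapes in the comment are what Keras should report for this architecture):

model = get_model()
model.summary()
# Expected feature-map shapes: 24x24x6 -> 12x12x6 -> 8x8x12 -> 4x4x12 -> Flatten(192) -> 120 -> 84 -> 10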
(3) Cross-validation
The code is given directly below. Note that model refers to the network built by get_model() in the main program, so the same weights are reused across folds:
def kcross(data, target, bsize, epoch, sp):
    print("------ running cross-validation ------")
    ans = 0  # running sum of the per-fold accuracies
    kf = KFold(n_splits=sp, shuffle=True)
    for train, test in kf.split(data):
        # model is the globally defined network built by get_model()
        model.fit(data[train], target[train],
                  batch_size=bsize,
                  epochs=epoch,
                  verbose=0,
                  validation_data=(data[test], target[test]))
        score = model.evaluate(data[test], target[test], verbose=0)
        ans += score[1]  # score[1] is the accuracy on the held-out fold
    return ans / sp  # mean accuracy over the sp folds
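One caveat: because the same global model is reused, its weights carry over from fold to fold and from one parameter setting to the next, so later settings start from already-trained weights rather than from scratch. If you want each fold to be an independent estimate, one option (a sketch, not part of the original script) is to rebuild the model inside the loop:

def kcross_independent(data, target, bsize, epoch, sp):
    # Same cross-validation loop, but a fresh model is built for every fold
    ans = 0
    kf = KFold(n_splits=sp, shuffle=True)
    for train, test in kf.split(data):
        fold_model = get_model()   # re-initialize the weights for this fold
        fold_model.fit(data[train], target[train],
                       batch_size=bsize, epochs=epoch, verbose=0)
        ans += fold_model.evaluate(data[test], target[test], verbose=0)[1]
    return ans / sp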
3. Complete code
I use 3 folds here; more folds would make the run take too long.
The full code:
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 10 15:42:27 2019
@author: pff
"""
from __future__ import print_function
import numpy as np
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from sklearn.model_selection import KFold
import matplotlib.pyplot as plt
def getdata():
    # Load the training and test sets
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
    x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
    # One-hot encode the labels
    y_train = keras.utils.to_categorical(y_train, 10)
    y_test = keras.utils.to_categorical(y_test, 10)
    # Merge the training and test sets so they can be re-split during cross-validation
    data = np.row_stack((x_train, x_test))
    target = np.row_stack((y_train, y_test))
    return data, target
# Build the model
def get_model():
    model = Sequential()
    model.add(Conv2D(filters=6, kernel_size=(5, 5), strides=1, activation='relu', input_shape=(28, 28, 1)))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=2))
    model.add(Conv2D(filters=12, kernel_size=(5, 5), strides=1, activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=2))
    model.add(Flatten())
    #model.add(Conv2D(filters=120, kernel_size=(5, 5), activation='relu'))
    model.add(Dense(120, activation='relu'))
    model.add(Dense(84, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(10, activation='softmax'))
    # Compile the model with the multi-class cross-entropy loss and the Adadelta optimizer
    model.compile(loss='categorical_crossentropy',
                  optimizer='Adadelta',
                  metrics=['accuracy'])
    return model
def kcross(data, target, bsize, epoch, sp):
    print("------ running cross-validation ------")
    ans = 0  # running sum of the per-fold accuracies
    kf = KFold(n_splits=sp, shuffle=True)
    for train, test in kf.split(data):
        # print("fold {} starting".format(i + 1))
        model.fit(data[train], target[train],
                  batch_size=bsize,
                  epochs=epoch,
                  verbose=0,
                  validation_data=(data[test], target[test]))
        score = model.evaluate(data[test], target[test], verbose=0)
        ans += score[1]  # score[1] is the accuracy on the held-out fold
    return ans / sp  # mean accuracy over the sp folds
# Plot the results
def draw(batch_size, y, epoch):
    plt.figure()
    plt.rcParams['font.sans-serif'] = 'SimHei'  # only needed if the labels use Chinese characters
    plt.ylabel('accuracy')
    plt.xlabel('batch_size')
    plt.title('CNN digit recognition accuracy under different parameters')
    for i in range(len(y)):
        plt.scatter(batch_size, y[i], s=30, c='r', marker='x', linewidths=1)
        plt.plot(batch_size, y[i], label="epoch:" + str(epoch[i]))
    plt.legend()
    plt.show()
if __name__ == "__main__":
    data, target = getdata()
    model = get_model()
    '''
    Sweep over the epoch and batch_size parameters;
    y stores the mean cross-validated accuracy for each combination
    '''
    epoch = [1, 3, 5, 7]
    size = [50, 100, 150, 200, 250]
    y = np.zeros([4, 5])
    for i in range(len(epoch)):
        for j in range(len(size)):
            print("now:", i, j)
            y[i, j] = kcross(data, target, size[j], epoch[i], 3)
    draw(size, y, epoch)
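Once the sweep finishes, the best-performing combination can also be read straight off the result grid (a small follow-up sketch, not part of the original script):

# Index of the highest mean accuracy in the 4 x 5 result grid
best = np.unravel_index(np.argmax(y), y.shape)
print("best epochs:", epoch[best[0]],
      "best batch_size:", size[best[1]],
      "accuracy:", y[best])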
Running the script then produces the result plot.
Thank you for reading. That concludes this walkthrough of implementing digit recognition with a CNN and varying its parameters. Having worked through it, you should have a clearer picture of how the network and the parameter sweep fit together; how the accuracy actually behaves under each setting is best confirmed by running the code yourself.