
Convolutional Neural Network (CNN): Implementation on the MNIST Handwritten Digit Dataset

Posted: 2018-11-23 19:44:11


The MNIST dataset is a classic handwritten digit recognition dataset, and many of my articles use it as an example. For a detailed introduction to MNIST and how to use it, see:

MNIST Handwritten Digit Recognition, Part 1: /weixin_41896770/article/details/119576575

MNIST Handwritten Digit Recognition, Part 2: /weixin_41896770/article/details/119710429

Building on the convolution and pooling layers introduced earlier, this chapter constructs a simple convolutional neural network and checks how well it learns. As usual, we start with the network structure, which is straightforward: the input image passes through Conv → ReLU → Pooling → Affine → ReLU → Affine → SoftmaxWithLoss. I am sharing all of the code below, as a fitting way to close out the year.

CNNs are used in virtually all image-processing work, so mastering convolutional neural networks is particularly important. The code is the explanation; if anything is unclear, feel free to leave a comment.

simple_convnet.py

import numpy as np
from collections import OrderedDict
from common.layers import *
from common.gradient import numerical_gradient
import pickle


class SimpleConvNet:
    '''
    input_dim: shape of the input data (MNIST handwritten digits)
    conv_param: dict of hyperparameters for the convolution layer
                (number of filters, filter size, padding, stride)
    weight_init_std: standard deviation of the weights;
                     'relu' or 'he' selects the "He initialization",
                     'sigmoid' or 'xavier' selects the "Xavier initialization"
    '''
    def __init__(self, input_dim=(1, 28, 28),
                 conv_param={'filter_num': 30, 'filter_size': 5, 'pad': 0, 'stride': 1},
                 hidden_size=100, output_size=10, weight_init_std=0.01):
        filter_num = conv_param['filter_num']
        filter_size = conv_param['filter_size']
        filter_pad = conv_param['pad']
        filter_stride = conv_param['stride']
        input_size = input_dim[1]
        conv_output_size = (input_size - filter_size + 2 * filter_pad) / filter_stride + 1
        pool_output_size = int(filter_num * (conv_output_size / 2) * (conv_output_size / 2))

        # initialize weights and biases
        self.params = {}
        self.params['W1'] = weight_init_std * np.random.randn(filter_num, input_dim[0], filter_size, filter_size)
        self.params['b1'] = np.zeros(filter_num)
        self.params['W2'] = weight_init_std * np.random.randn(pool_output_size, hidden_size)
        self.params['b2'] = np.zeros(hidden_size)
        self.params['W3'] = weight_init_std * np.random.randn(hidden_size, output_size)
        self.params['b3'] = np.zeros(output_size)

        # build the layers of the CNN
        self.layers = OrderedDict()
        self.layers['Conv1'] = Convolution(self.params['W1'], self.params['b1'],
                                           conv_param['stride'], conv_param['pad'])
        self.layers['Relu1'] = Relu()
        self.layers['Pool1'] = Pooling(pool_h=2, pool_w=2, stride=2)
        self.layers['Affine1'] = Affine(self.params['W2'], self.params['b2'])
        self.layers['Relu2'] = Relu()
        self.layers['Affine2'] = Affine(self.params['W3'], self.params['b3'])
        self.last_layer = SoftmaxWithLoss()

    def predict(self, x):
        for layer in self.layers.values():
            x = layer.forward(x)
        return x

    def loss(self, x, t):
        y = self.predict(x)
        return self.last_layer.forward(y, t)

    def accuracy(self, x, t, batch_size=100):
        if t.ndim != 1:
            t = np.argmax(t, axis=1)
        acc = 0.0
        for i in range(int(x.shape[0] / batch_size)):
            tx = x[i * batch_size:(i + 1) * batch_size]
            tt = t[i * batch_size:(i + 1) * batch_size]
            y = self.predict(tx)
            y = np.argmax(y, axis=1)
            acc += np.sum(y == tt)
        return acc / x.shape[0]

    def numerical_gradient(self, x, t):
        '''Gradients by numerical differentiation.'''
        loss_w = lambda w: self.loss(x, t)
        grads = {}
        for idx in (1, 2, 3):
            grads['W' + str(idx)] = numerical_gradient(loss_w, self.params['W' + str(idx)])
            grads['b' + str(idx)] = numerical_gradient(loss_w, self.params['b' + str(idx)])
        return grads

    def gradient(self, x, t):
        '''Gradients by backpropagation.'''
        # forward
        self.loss(x, t)
        # backward
        dout = 1
        dout = self.last_layer.backward(dout)
        layers = list(self.layers.values())
        layers.reverse()
        for layer in layers:
            dout = layer.backward(dout)
        grads = {}
        grads['W1'], grads['b1'] = self.layers['Conv1'].dW, self.layers['Conv1'].db
        grads['W2'], grads['b2'] = self.layers['Affine1'].dW, self.layers['Affine1'].db
        grads['W3'], grads['b3'] = self.layers['Affine2'].dW, self.layers['Affine2'].db
        return grads

    def save_params(self, file_name="params.pkl"):
        params = {}
        for key, val in self.params.items():
            params[key] = val
        with open(file_name, 'wb') as f:
            pickle.dump(params, f)

    def load_params(self, file_name="params.pkl"):
        with open(file_name, 'rb') as f:
            params = pickle.load(f)
        for key, val in params.items():
            self.params[key] = val
        for i, key in enumerate(['Conv1', 'Affine1', 'Affine2']):
            self.layers[key].W = self.params['W' + str(i + 1)]
            self.layers[key].b = self.params['b' + str(i + 1)]
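
As a quick check of the sizes used above (my own addition, not part of the original files): with the default configuration of filter_num=30, filter_size=5, pad=0, stride=1 on a 28x28 input, each filter produces a 24x24 feature map, 2x2 pooling halves that to 12x12, and the flattened input to Affine1 therefore has 30 * 12 * 12 = 4320 elements, which is the row count of W2.

input_size = 28
conv_output_size = (input_size - 5 + 2 * 0) // 1 + 1     # 24x24 feature maps
pool_output_size = 30 * (conv_output_size // 2) ** 2     # 2x2 pooling -> 12x12, flattened: 4320
print(conv_output_size, pool_output_size)                # 24 4320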

layers.py

import numpy as np
from common.functions import *
from common.util import im2col, col2im


class Relu:
    def __init__(self):
        self.mask = None

    def forward(self, x):
        self.mask = (x <= 0)
        out = x.copy()
        out[self.mask] = 0
        return out

    def backward(self, dout):
        dout[self.mask] = 0
        dx = dout
        return dx


class Sigmoid:
    def __init__(self):
        self.out = None

    def forward(self, x):
        out = sigmoid(x)
        self.out = out
        return out

    def backward(self, dout):
        dx = dout * (1.0 - self.out) * self.out
        return dx


class Affine:
    def __init__(self, W, b):
        self.W = W
        self.b = b
        self.x = None
        self.original_x_shape = None
        # gradients of the weight and bias parameters
        self.dW = None
        self.db = None

    def forward(self, x):
        # handle tensor input
        self.original_x_shape = x.shape
        x = x.reshape(x.shape[0], -1)
        self.x = x
        out = np.dot(self.x, self.W) + self.b
        return out

    def backward(self, dout):
        dx = np.dot(dout, self.W.T)
        self.dW = np.dot(self.x.T, dout)
        self.db = np.sum(dout, axis=0)
        dx = dx.reshape(*self.original_x_shape)  # restore the shape of the input data (tensor case)
        return dx


class SoftmaxWithLoss:
    def __init__(self):
        self.loss = None
        self.y = None  # output of softmax
        self.t = None  # teacher labels

    def forward(self, x, t):
        self.t = t
        self.y = softmax(x)
        self.loss = cross_entropy_error(self.y, self.t)
        return self.loss

    def backward(self, dout=1):
        batch_size = self.t.shape[0]
        if self.t.size == self.y.size:  # teacher labels are one-hot vectors
            dx = (self.y - self.t) / batch_size
        else:
            dx = self.y.copy()
            dx[np.arange(batch_size), self.t] -= 1
            dx = dx / batch_size
        return dx


class Dropout:
    '''
    Randomly drops neurons.
    self.mask: array of True/False; positions that are False are set to 0 (dropped).
    '''
    def __init__(self, dropout_ratio=0.5):
        self.dropout_ratio = dropout_ratio
        self.mask = None

    def forward(self, x, train_flg=True):
        if train_flg:
            self.mask = np.random.rand(*x.shape) > self.dropout_ratio
            return x * self.mask
        else:
            return x * (1.0 - self.dropout_ratio)

    def backward(self, dout):
        return dout * self.mask


class BatchNormalization:
    '''Batch normalization.'''
    def __init__(self, gamma, beta, momentum=0.9, running_mean=None, running_var=None):
        self.gamma = gamma
        self.beta = beta
        self.momentum = momentum
        self.input_shape = None  # 4D for conv layers, 2D for fully connected layers
        # mean and variance used at test time
        self.running_mean = running_mean
        self.running_var = running_var
        # intermediate data used in backward
        self.batch_size = None
        self.xc = None
        self.std = None
        self.dgamma = None
        self.dbeta = None

    def forward(self, x, train_flg=True):
        self.input_shape = x.shape
        if x.ndim != 2:
            N, C, H, W = x.shape
            x = x.reshape(N, -1)
        out = self.__forward(x, train_flg)
        return out.reshape(*self.input_shape)

    def __forward(self, x, train_flg):
        if self.running_mean is None:
            N, D = x.shape
            self.running_mean = np.zeros(D)
            self.running_var = np.zeros(D)
        if train_flg:
            mu = x.mean(axis=0)
            xc = x - mu
            var = np.mean(xc ** 2, axis=0)
            std = np.sqrt(var + 10e-7)
            xn = xc / std
            self.batch_size = x.shape[0]
            self.xc = xc
            self.xn = xn
            self.std = std
            self.running_mean = self.momentum * self.running_mean + (1 - self.momentum) * mu
            self.running_var = self.momentum * self.running_var + (1 - self.momentum) * var
        else:
            xc = x - self.running_mean
            xn = xc / np.sqrt(self.running_var + 10e-7)
        out = self.gamma * xn + self.beta
        return out

    def backward(self, dout):
        if dout.ndim != 2:
            N, C, H, W = dout.shape
            dout = dout.reshape(N, -1)
        dx = self.__backward(dout)
        dx = dx.reshape(*self.input_shape)
        return dx

    def __backward(self, dout):
        dbeta = dout.sum(axis=0)
        dgamma = np.sum(self.xn * dout, axis=0)
        dxn = self.gamma * dout
        dxc = dxn / self.std
        dstd = -np.sum((dxn * self.xc) / (self.std * self.std), axis=0)
        dvar = 0.5 * dstd / self.std
        dxc += (2.0 / self.batch_size) * self.xc * dvar
        dmu = np.sum(dxc, axis=0)
        dx = dxc - dmu / self.batch_size
        self.dgamma = dgamma
        self.dbeta = dbeta
        return dx


class Convolution:
    def __init__(self, W, b, stride=1, pad=0):
        self.W = W
        self.b = b
        self.stride = stride
        self.pad = pad
        # intermediate data (used in backward)
        self.x = None
        self.col = None
        self.col_W = None
        # gradients of the weight and bias parameters
        self.dW = None
        self.db = None

    def forward(self, x):
        FN, C, FH, FW = self.W.shape
        N, C, H, W = x.shape
        out_h = 1 + int((H + 2 * self.pad - FH) / self.stride)
        out_w = 1 + int((W + 2 * self.pad - FW) / self.stride)
        col = im2col(x, FH, FW, self.stride, self.pad)
        col_W = self.W.reshape(FN, -1).T
        out = np.dot(col, col_W) + self.b
        out = out.reshape(N, out_h, out_w, -1).transpose(0, 3, 1, 2)
        self.x = x
        self.col = col
        self.col_W = col_W
        return out

    def backward(self, dout):
        FN, C, FH, FW = self.W.shape
        dout = dout.transpose(0, 2, 3, 1).reshape(-1, FN)
        self.db = np.sum(dout, axis=0)
        self.dW = np.dot(self.col.T, dout)
        self.dW = self.dW.transpose(1, 0).reshape(FN, C, FH, FW)
        dcol = np.dot(dout, self.col_W.T)
        dx = col2im(dcol, self.x.shape, FH, FW, self.stride, self.pad)
        return dx


class Pooling:
    def __init__(self, pool_h, pool_w, stride=1, pad=0):
        self.pool_h = pool_h
        self.pool_w = pool_w
        self.stride = stride
        self.pad = pad
        self.x = None
        self.arg_max = None

    def forward(self, x):
        N, C, H, W = x.shape
        out_h = int(1 + (H - self.pool_h) / self.stride)
        out_w = int(1 + (W - self.pool_w) / self.stride)
        col = im2col(x, self.pool_h, self.pool_w, self.stride, self.pad)
        col = col.reshape(-1, self.pool_h * self.pool_w)
        arg_max = np.argmax(col, axis=1)
        out = np.max(col, axis=1)
        out = out.reshape(N, out_h, out_w, C).transpose(0, 3, 1, 2)
        self.x = x
        self.arg_max = arg_max
        return out

    def backward(self, dout):
        dout = dout.transpose(0, 2, 3, 1)
        pool_size = self.pool_h * self.pool_w
        dmax = np.zeros((dout.size, pool_size))
        dmax[np.arange(self.arg_max.size), self.arg_max.flatten()] = dout.flatten()
        dmax = dmax.reshape(dout.shape + (pool_size,))
        dcol = dmax.reshape(dmax.shape[0] * dmax.shape[1] * dmax.shape[2], -1)
        dx = col2im(dcol, self.x.shape, self.pool_h, self.pool_w, self.stride, self.pad)
        return dx
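
A small illustration (my own sketch, not from the original files): the mask trick used by Relu can be checked on a tiny array without any of the common/ modules. Negative inputs are zeroed in forward, and the same mask blocks the gradient in backward.

import numpy as np

x = np.array([[1.0, -0.5],
              [-2.0, 3.0]])
mask = (x <= 0)
out = x.copy()
out[mask] = 0           # forward: negative entries become 0
dout = np.ones_like(x)
dout[mask] = 0          # backward: gradient is blocked where x <= 0
print(out)              # [[1. 0.] [0. 3.]]
print(dout)             # [[1. 0.] [0. 1.]]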

trainer.py

import numpy as np
from common.optimizer import *


class Trainer:
    '''Wraps the training loop from earlier chapters into a class.'''
    def __init__(self, network, x_train, t_train, x_test, t_test,
                 epochs=20, mini_batch_size=100,
                 optimizer='SGD', optimizer_param={'lr': 0.01},
                 evaluate_sample_num_per_epoch=None, verbose=True):
        self.network = network
        self.verbose = verbose  # whether to print progress (for debugging/inspection)
        self.x_train = x_train
        self.t_train = t_train
        self.x_test = x_test
        self.t_test = t_test
        self.epochs = epochs
        self.batch_size = mini_batch_size
        self.evaluate_sample_num_per_epoch = evaluate_sample_num_per_epoch
        optimizer_dict = {'sgd': SGD, 'momentum': Momentum, 'nesterov': Nesterov,
                          'adagrad': AdaGrad, 'rmsprop': RMSprop, 'adam': Adam}
        self.optimizer = optimizer_dict[optimizer.lower()](**optimizer_param)
        self.train_size = x_train.shape[0]
        self.iter_per_epoch = max(self.train_size / mini_batch_size, 1)
        self.max_iter = int(epochs * self.iter_per_epoch)
        self.current_iter = 0
        self.current_epoch = 0
        self.train_loss_list = []
        self.train_acc_list = []
        self.test_acc_list = []

    def train_step(self):
        batch_mask = np.random.choice(self.train_size, self.batch_size)
        x_batch = self.x_train[batch_mask]
        t_batch = self.t_train[batch_mask]
        grads = self.network.gradient(x_batch, t_batch)
        self.optimizer.update(self.network.params, grads)
        loss = self.network.loss(x_batch, t_batch)
        self.train_loss_list.append(loss)
        if self.verbose:
            print('train loss: ' + str(loss))
        if self.current_iter % self.iter_per_epoch == 0:
            self.current_epoch += 1
            x_train_sample, t_train_sample = self.x_train, self.t_train
            x_test_sample, t_test_sample = self.x_test, self.t_test
            if not self.evaluate_sample_num_per_epoch is None:
                t = self.evaluate_sample_num_per_epoch
                x_train_sample, t_train_sample = self.x_train[:t], self.t_train[:t]
                x_test_sample, t_test_sample = self.x_test[:t], self.t_test[:t]
            train_acc = self.network.accuracy(x_train_sample, t_train_sample)
            test_acc = self.network.accuracy(x_test_sample, t_test_sample)
            self.train_acc_list.append(train_acc)
            self.test_acc_list.append(test_acc)
            if self.verbose:
                print('epoch:' + str(self.current_epoch) + ', train acc:' + str(train_acc) + ' | test acc:' + str(test_acc))
        self.current_iter += 1

    def train(self):
        for i in range(self.max_iter):
            self.train_step()
        test_acc = self.network.accuracy(self.x_test, self.t_test)
        if self.verbose:
            print('final test accuracy: ' + str(format(test_acc, '.2%')))
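
For intuition, a worked example (my own addition): with the full MNIST training set of 60,000 images, mini_batch_size=100 and epochs=20, the Trainer evaluates accuracy once every 600 iterations and runs 12,000 iterations in total.

train_size = 60000
mini_batch_size = 100
epochs = 20
iter_per_epoch = max(train_size / mini_batch_size, 1)   # 600.0 iterations per epoch
max_iter = int(epochs * iter_per_epoch)                 # 12000 iterations in total
print(iter_per_epoch, max_iter)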

util.py

import numpy as np


def smooth_curve(x):
    '''Smooths a curve for plotting.'''
    window_len = 11
    s = np.r_[x[window_len - 1:0:-1], x, x[-1:-window_len:-1]]
    w = np.kaiser(window_len, 2)
    y = np.convolve(w / w.sum(), s, mode='valid')
    return y[5:len(y) - 5]


def shuffle_dataset(x, t):
    '''Shuffles the dataset.'''
    permutation = np.random.permutation(x.shape[0])
    x = x[permutation, :] if x.ndim == 2 else x[permutation, :, :, :]
    t = t[permutation]
    return x, t


def conv_output_size(input_size, filter_size, stride=1, pad=0):
    return (input_size + 2 * pad - filter_size) / stride + 1


def im2col(input_data, filter_h, filter_w, stride=1, pad=0):
    '''
    Unfolds a 4D input into a 2D matrix.
    input_data : input data as a 4D array of (batch, channel, height, width)
    filter_h : filter height
    filter_w : filter width
    stride : stride
    pad : padding
    '''
    N, C, H, W = input_data.shape
    out_h = (H + 2 * pad - filter_h) // stride + 1
    out_w = (W + 2 * pad - filter_w) // stride + 1
    img = np.pad(input_data, [(0, 0), (0, 0), (pad, pad), (pad, pad)], 'constant')
    col = np.zeros((N, C, filter_h, filter_w, out_h, out_w))
    for y in range(filter_h):
        y_max = y + stride * out_h
        for x in range(filter_w):
            x_max = x + stride * out_w
            col[:, :, y, x, :, :] = img[:, :, y:y_max:stride, x:x_max:stride]
    col = col.transpose(0, 4, 5, 1, 2, 3).reshape(N * out_h * out_w, -1)
    return col


def col2im(col, input_shape, filter_h, filter_w, stride=1, pad=0):
    '''
    Inverse of im2col.
    input_shape : shape of the original input (e.g. (10, 1, 28, 28))
    '''
    N, C, H, W = input_shape
    out_h = (H + 2 * pad - filter_h) // stride + 1
    out_w = (W + 2 * pad - filter_w) // stride + 1
    col = col.reshape(N, out_h, out_w, C, filter_h, filter_w).transpose(0, 3, 4, 5, 1, 2)
    img = np.zeros((N, C, H + 2 * pad + stride - 1, W + 2 * pad + stride - 1))
    for y in range(filter_h):
        y_max = y + stride * out_h
        for x in range(filter_w):
            x_max = x + stride * out_w
            img[:, :, y:y_max:stride, x:x_max:stride] += col[:, :, y, x, :, :]
    return img[:, :, pad:H + pad, pad:W + pad]
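
A quick sketch of what im2col produces (my own addition, assuming the file above is saved as common/util.py): each row of the output matrix is one unfolded patch, so a batch of ten 28x28 single-channel images with a 5x5 filter, stride 1 and no padding yields 10 * 24 * 24 = 5760 rows of 1 * 5 * 5 = 25 elements each.

import numpy as np
from common.util import im2col   # assumes util.py above is saved as common/util.py

x = np.random.rand(10, 1, 28, 28)          # ten MNIST-shaped images
col = im2col(x, 5, 5, stride=1, pad=0)     # unfold all 5x5 patches into rows
print(col.shape)                           # (5760, 25)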

optimizer.py

import numpy as np


class SGD:
    '''Stochastic gradient descent; lr is the learning rate.'''
    def __init__(self, lr=0.01):
        self.lr = lr

    def update(self, params, grads):
        for k in params.keys():
            params[k] -= self.lr * grads[k]


class Momentum:
    '''SGD with momentum; simulates a ball rolling on the ground.'''
    def __init__(self, lr=0.01, momentum=0.9):
        self.lr = lr
        self.momentum = momentum
        self.v = None

    def update(self, params, grads):
        if self.v is None:
            self.v = {}
            for k, v in params.items():
                self.v[k] = np.zeros_like(v)
        for k in params.keys():
            self.v[k] = self.momentum * self.v[k] - self.lr * grads[k]
            params[k] += self.v[k]


class AdaGrad:
    '''SGD with a per-parameter adaptive learning rate.'''
    def __init__(self, lr=0.01):
        self.lr = lr
        self.h = None

    def update(self, params, grads):
        if self.h is None:
            self.h = {}
            for k, v in params.items():
                self.h[k] = np.zeros_like(v)
        for k in params.keys():
            self.h[k] = self.h[k] + grads[k] * grads[k]
            params[k] -= self.lr * grads[k] / (np.sqrt(self.h[k]) + 1e-7)  # small constant avoids division by zero


class Adam:
    '''Combines the ideas of Momentum and AdaGrad.'''
    def __init__(self, lr=0.01, beta1=0.9, beta2=0.999):
        self.lr = lr
        self.beta1 = beta1
        self.beta2 = beta2
        self.iter = 0
        self.m = None
        self.v = None

    def update(self, params, grads):
        if self.m is None:
            self.m, self.v = {}, {}
            for k, v in params.items():
                self.m[k] = np.zeros_like(v)
                self.v[k] = np.zeros_like(v)
        self.iter += 1
        lr_t = self.lr * np.sqrt(1.0 - self.beta2 ** self.iter) / (1.0 - self.beta1 ** self.iter)
        for k in params.keys():
            self.m[k] = self.beta1 * self.m[k] + (1 - self.beta1) * grads[k]
            self.v[k] = self.beta2 * self.v[k] + (1 - self.beta2) * (grads[k] ** 2)
            params[k] -= lr_t * self.m[k] / (np.sqrt(self.v[k]) + 1e-7)


class Nesterov:
    def __init__(self, lr=0.01, momentum=0.9):
        self.lr = lr
        self.momentum = momentum
        self.v = None

    def update(self, params, grads):
        if self.v is None:
            self.v = {}
            for k, v in params.items():
                self.v[k] = np.zeros_like(v)
        for k in params.keys():
            self.v[k] = self.v[k] * self.momentum
            self.v[k] -= self.lr * grads[k]
            params[k] += self.momentum * self.momentum * self.v[k]
            params[k] -= (1 + self.momentum) * self.lr * grads[k]


class RMSprop:
    def __init__(self, lr=0.01, decay_rate=0.99):
        self.lr = lr
        self.decay_rate = decay_rate
        self.h = None

    def update(self, params, grads):
        if self.h is None:
            self.h = {}
            for k, v in params.items():
                self.h[k] = np.zeros_like(v)
        for k in params.keys():
            self.h[k] = self.h[k] * self.decay_rate
            self.h[k] += (1 - self.decay_rate) * grads[k] * grads[k]
            params[k] -= self.lr * grads[k] / (np.sqrt(self.h[k]) + 1e-7)
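
All of these optimizers share the same update(params, grads) interface and modify params in place, which is why the Trainer can swap them by name. A minimal usage sketch (my own addition, assuming the file above is saved as common/optimizer.py):

import numpy as np
from common.optimizer import SGD, Adam   # assumes optimizer.py above is saved as common/optimizer.py

params = {'W': np.array([1.0, 2.0]), 'b': np.array([0.5])}
grads = {'W': np.array([0.1, -0.2]), 'b': np.array([0.3])}

SGD(lr=0.1).update(params, grads)        # in place: W -> W - 0.1 * grad
print(params['W'])                       # [0.99 2.02]

Adam(lr=0.001).update(params, grads)     # first Adam step, with bias correction
print(params['W'])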

Finally, let's test how well this CNN actually performs:

import numpy as np
import matplotlib.pyplot as plt
from dataset.mnist import load_mnist
from simple_convnet import SimpleConvNet
from common.trainer import Trainer

# Load the MNIST dataset, keeping the input shape (no flattening to 1D)
(x_train, t_train), (x_test, t_test) = load_mnist(flatten=False)

# Optionally train and test on a reduced dataset to save time
# x_train, t_train = x_train[:5000], t_train[:5000]
# x_test, t_test = x_test[:1000], t_test[:1000]

max_epochs = 20

network = SimpleConvNet(input_dim=(1, 28, 28),
                        conv_param={'filter_num': 30, 'filter_size': 5, 'pad': 0, 'stride': 1},
                        hidden_size=100, output_size=10, weight_init_std=0.01)
trainer = Trainer(network, x_train, t_train, x_test, t_test,
                  epochs=max_epochs, mini_batch_size=100,
                  optimizer='Adam', optimizer_param={'lr': 0.001},
                  evaluate_sample_num_per_epoch=1000)
trainer.train()

# Save the trained parameters
network.save_params("params.pkl")
print("Parameters saved successfully")

# Plot training and test accuracy per epoch
markers = {'train': 'o', 'test': 's'}
x = np.arange(max_epochs)
plt.plot(x, trainer.train_acc_list, marker='o', label='train', markevery=2)
plt.plot(x, trainer.test_acc_list, marker='s', label='test', markevery=2)
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.ylim(0, 1.0)
plt.legend(loc='lower right')
plt.show()
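
Because the script saves the trained weights to params.pkl, a later session can reload them and classify an image without retraining. A minimal sketch (my own addition, using only the classes defined above):

import numpy as np
from dataset.mnist import load_mnist
from simple_convnet import SimpleConvNet

(_, _), (x_test, t_test) = load_mnist(flatten=False)
network = SimpleConvNet()
network.load_params("params.pkl")        # restore the weights saved by the training script

scores = network.predict(x_test[:1])     # shape (1, 10): one score per digit class
print("predicted digit:", np.argmax(scores))
print("true label:", t_test[0])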

The test accuracy reaches about 99%, which is a very good recognition rate for such a small convolutional neural network!
