I recently helped one of our faculty's teachers improve a BP neural network for a paper. Since the work is to be published, the network had to be written by hand; it took several evenings to get the basic BP neural network working, and the next step is to implement the teacher's improved algorithm. **(That code, of course, cannot be made public.)** Here I use the speech-feature-signal dataset from the book *MATLAB Neural Network: 43 Case Studies* (《MATLAB神经网络43个案例分析》). It is ordinary structured data covering four music classes: folk song, guzheng, rock, and pop. (PS: I haven't had time to organize my neural network study notes. The Lanqiao Cup national final is coming up, course projects are waiting back at school, I have only been able to attend the machine learning lectures this semester, and I still have to cram for the embedded-systems courses I don't enjoy, so the write-ups will probably have to wait until the summer break!)
For the theory behind BP neural networks, please see my other two posts: Machine Learning (Zhou Zhihua) — Neural Network Models (I) and Machine Learning (Zhou Zhihua) — Neural Network Models (II).
The Python code for the BP neural network class is given below. All activation functions are sigmoid, and the number of hidden nodes follows the empirical rule of taking the square root of the product of the input- and output-layer sizes. The code implements three gradient descent variants: batch (BGD), stochastic (SGD), and mini-batch (MBGD).
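For reference, these are the per-sample update rules the code below implements, written roughly in the notation of the Zhou Zhihua chapters linked above ($x$ is the input row vector, $h$ the hidden activations, $\hat{y}$ the network output, $y$ the target, $\eta$ the learning rate). One caveat: the code *adds* its thresholds to the weighted sums (i.e., treats them as biases), so the threshold increments carry the same sign as $g$ and $e$, unlike the book's formulation, where thresholds are subtracted:

$$
\begin{aligned}
g &= \hat{y}\odot(1-\hat{y})\odot(y-\hat{y}) &&\text{(output-layer error signal)}\\
e &= h\odot(1-h)\odot\big(g\,W_{ho}^{\top}\big) &&\text{(hidden-layer error signal)}\\
\Delta W_{ho} &= \eta\,h^{\top}g, \quad \Delta b_{o} = \eta\,g &&\text{(hidden-to-output updates)}\\
\Delta W_{ih} &= \eta\,x^{\top}e, \quad \Delta b_{h} = \eta\,e &&\text{(input-to-hidden updates)}
\end{aligned}
$$

The hidden-layer size is set by the empirical rule $n_h = \lfloor\sqrt{n_{in}\, n_{out}}\rfloor$.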
```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time     : 2017/5/14 17:13
# @Author   : DaiPuWei
# @Site     : 计通303实验室
# @File     : BPNN.py
# @Software : PyCharm Community Edition

"""
A basic BP neural network model. The layer-to-layer weights are randomly
initialized from the standard normal distribution.
"""

import random
from copy import deepcopy

import numpy as np


def Sigmoid(x):
    """
    The sigmoid activation function.
    :param x: data to evaluate at
    :return: value of the sigmoid function
    """
    return 1.0 / (1.0 + np.exp(-x))


def Sigmoid_Derivative(x):
    """
    Derivative of the sigmoid activation function.
    :param x: data to evaluate at (pre-activation value)
    :return: value of the derivative of the sigmoid function
    """
    f = Sigmoid(x)
    return f * (1.0 - f)


class BPNN:
    def __init__(self, input_n, hidden_n, output_n, input_weights=None,
                 hidden_weights=None, hidden_threshold=None, output_threshold=None):
        """
        Constructor of the BP neural network class.
        :param input_n: number of input-layer neurons
        :param hidden_n: number of hidden-layer neurons
        :param output_n: number of output-layer neurons
        """
        self.Train_Data = []                # training data
        self.Train_Label = []               # training labels
        self.input_n = input_n              # number of input-layer neurons
        self.hidden_n = hidden_n            # number of hidden-layer neurons
        self.output_n = output_n            # number of output-layer neurons
        self.input_cells = np.zeros(self.input_n).reshape((1, self.input_n))            # input-layer neurons
        self.hidden_cells = np.zeros(self.hidden_n).reshape((1, self.hidden_n))         # hidden-layer neurons
        self.hidden_cells_input = np.zeros(self.hidden_n).reshape((1, self.hidden_n))   # hidden-layer input (before threshold and sigmoid)
        self.output_cells = np.zeros(self.output_n).reshape((1, self.output_n))         # output-layer neurons
        self.output_cells_input = np.zeros(self.output_n).reshape((1, self.output_n))   # output-layer input (before threshold and sigmoid)
        if input_weights is not None:
            self.input_weights = input_weights
        else:
            self.input_weights = np.random.randn(self.input_n, self.hidden_n)    # input-to-hidden weights
        if hidden_weights is not None:
            self.hidden_weights = hidden_weights
        else:
            self.hidden_weights = np.random.randn(self.hidden_n, self.output_n)  # hidden-to-output weights
        if hidden_threshold is not None:
            self.hidden_threshold = hidden_threshold
        else:
            self.hidden_threshold = np.random.randn(1, self.hidden_n)            # hidden-layer thresholds
        if output_threshold is not None:
            self.output_threshold = output_threshold
        else:
            self.output_threshold = np.random.randn(1, self.output_n)            # output-layer thresholds
        self.input_weights_copy = deepcopy(self.input_weights)                   # backup of input-to-hidden weights
        self.hidden_weights_copy = deepcopy(self.hidden_weights)                 # backup of hidden-to-output weights
        self.hidden_threshold_copy = deepcopy(self.hidden_threshold)             # backup of hidden-layer thresholds
        self.output_threshold_copy = deepcopy(self.output_threshold)             # backup of output-layer thresholds

    def Init(self, Train_Data, Train_Label):
        """
        Initialize the training data and labels.
        :param Train_Data: training data
        :param Train_Label: training labels
        """
        self.Train_Data = Train_Data
        self.Train_Label = Train_Label

    def predict(self, input_data):
        """
        Forward pass of the BP neural network.
        :param input_data: data fed to the input neurons
        :return: output-layer activations
        """
        # copy the input data into the input layer
        self.input_cells = deepcopy(input_data)
        # compute the hidden-layer output (the thresholds are *added* here,
        # i.e. they act as biases)
        self.hidden_cells_input = self.input_cells.dot(self.input_weights)
        self.hidden_cells = Sigmoid(self.hidden_cells_input + self.hidden_threshold)
        # compute the output-layer output
        self.output_cells_input = self.hidden_cells.dot(self.hidden_weights)
        self.output_cells = Sigmoid(self.output_cells_input + self.output_threshold)
        return self.output_cells

    def Gradient_Vector(self, ideal_output):
        """
        Compute the gradient increments for one input sample.
        :param ideal_output: ideal (target, ground-truth) output
        """
        # gradients for the hidden-to-output weights and output thresholds;
        # since output_cells is already the sigmoid activation y, its
        # derivative is y * (1 - y) (feeding it through Sigmoid_Derivative
        # again would apply the sigmoid twice)
        error = ideal_output - self.output_cells
        derivative = self.output_cells * (1.0 - self.output_cells)
        g = derivative * error
        # increment of the hidden-to-output weights
        hidden_weights_increasement = self.hidden_cells.T.dot(g)
        # increment of the output-layer thresholds (same sign as g because
        # the thresholds are added, not subtracted, in predict)
        output_threshold_increasement = g
        # gradients for the input-to-hidden weights and hidden thresholds
        e = self.hidden_cells * (1.0 - self.hidden_cells) * g.dot(self.hidden_weights.T)
        # increment of the input-to-hidden weights
        input_weights_increasement = self.input_cells.T.dot(e)
        # increment of the hidden-layer thresholds
        hidden_threshold_increasement = e
        return hidden_weights_increasement, output_threshold_increasement, \
               input_weights_increasement, hidden_threshold_increasement

    def back_propagate(self, hidden_weights_increasement, output_threshold_increasement,
                       input_weights_increasement, hidden_threshold_increasement, learn_rate):
        """
        Backward pass: apply the gradient increments to the parameters.
        :param hidden_weights_increasement: increment of the hidden-to-output weights
        :param output_threshold_increasement: increment of the output thresholds
        :param input_weights_increasement: increment of the input-to-hidden weights
        :param hidden_threshold_increasement: increment of the hidden thresholds
        :param learn_rate: learning rate
        """
        # update the hidden-to-output weights
        self.hidden_weights = self.hidden_weights + hidden_weights_increasement * learn_rate
        # update the output-layer thresholds
        self.output_threshold = self.output_threshold + output_threshold_increasement * learn_rate
        # update the input-to-hidden weights
        self.input_weights = self.input_weights + input_weights_increasement * learn_rate
        # update the hidden-layer thresholds
        self.hidden_threshold = self.hidden_threshold + hidden_threshold_increasement * learn_rate

    def reset(self):
        """
        Reset the BPNN: restore the initial parameter values so that another
        training run can start from the same initialization.
        """
        self.input_weights = deepcopy(self.input_weights_copy)        # restore input-to-hidden weights
        self.hidden_weights = deepcopy(self.hidden_weights_copy)      # restore hidden-to-output weights
        self.hidden_threshold = deepcopy(self.hidden_threshold_copy)  # restore hidden-layer thresholds
        self.output_threshold = deepcopy(self.output_threshold_copy)  # restore output-layer thresholds

    def train_batch(self, input_data, output):
        """
        Train on one sample: forward pass plus gradient computation.
        :param input_data: one input sample
        :param output: label of the input sample
        """
        # forward pass to obtain the predicted output
        output = output.reshape(1, len(output))
        input_data = input_data.reshape(1, len(input_data))
        self.output_cells = self.predict(input_data)
        # compute the gradient increments
        hidden_weights_increasement, output_threshold_increasement, \
            input_weights_increasement, hidden_threshold_increasement = self.Gradient_Vector(output)
        # compute the squared error of this sample
        error = np.sum((output - self.output_cells) ** 2)
        return hidden_weights_increasement, output_threshold_increasement, \
               input_weights_increasement, hidden_threshold_increasement, error

    def train_BGD(self, inputs, outputs, limitation, learn_rate):
        """
        Train the BP neural network, updating the parameters with BGD.
        :param inputs: training data
        :param outputs: training labels
        :param limitation: number of iterations
        :param learn_rate: learning rate
        """
        # initialize the training data and labels
        self.Init(inputs, outputs)
        for j in range(limitation):
            self.train_dataset_BGD(learn_rate)

    def train_dataset_BGD(self, learn_rate):
        """
        One BGD iteration over the whole training set.
        :param learn_rate: learning rate
        """
        Hidden_Weights_Increasement = []    # increments of the hidden-to-output weights
        Output_Threshold_Increasement = []  # increments of the output thresholds
        Input_Weights_Increasement = []     # increments of the input-to-hidden weights
        Hidden_Threshold_Increasement = []  # increments of the hidden thresholds
        for (train_data, train_label) in zip(self.Train_Data, self.Train_Label):
            # traverse the training set, collecting the gradient increments
            # and the error of each sample
            hidden_weights_increasement, output_threshold_increasement, \
                input_weights_increasement, hidden_threshold_increasement, err \
                = self.train_batch(train_data, train_label)
            Hidden_Weights_Increasement.append(hidden_weights_increasement)
            Output_Threshold_Increasement.append(output_threshold_increasement)
            Input_Weights_Increasement.append(input_weights_increasement)
            Hidden_Threshold_Increasement.append(hidden_threshold_increasement)
        # sum the gradients over the whole training set
        hidden_weights_increasement_sum = np.sum(np.array(Hidden_Weights_Increasement), 0)
        output_threshold_increasement_sum = np.sum(np.array(Output_Threshold_Increasement), 0)
        input_weights_increasement_sum = np.sum(np.array(Input_Weights_Increasement), 0)
        hidden_threshold_increasement_sum = np.sum(np.array(Hidden_Threshold_Increasement), 0)
        # back-propagate the summed gradients to adjust the parameters
        self.back_propagate(hidden_weights_increasement_sum, output_threshold_increasement_sum,
                            input_weights_increasement_sum, hidden_threshold_increasement_sum,
                            learn_rate)

    def train_SGD(self, inputs, outputs, limitation, learn_rate):
        """
        Train the BP neural network, updating the parameters with SGD.
        :param inputs: training data
        :param outputs: training labels
        :param limitation: number of iterations
        :param learn_rate: learning rate
        """
        # initialize the training data and labels
        self.Init(inputs, outputs)
        for j in range(limitation):
            self.train_dataset_SGD(learn_rate)

    def train_dataset_SGD(self, learn_rate):
        """
        One SGD iteration over the whole training set.
        :param learn_rate: learning rate
        """
        # shuffle the training data and labels
        self.Shuffle_SGD()
        for (train_data, train_label) in zip(self.Train_Data, self.Train_Label):
            # compute the gradient increments of each sample and update the
            # parameters immediately
            hidden_weights_increasement, output_threshold_increasement, input_weights_increasement, \
                hidden_threshold_increasement, err = self.train_batch(train_data, train_label)
            self.back_propagate(hidden_weights_increasement, output_threshold_increasement,
                                input_weights_increasement, hidden_threshold_increasement, learn_rate)

    def Shuffle_SGD(self):
        """
        Shuffle the training data and labels before an SGD pass.
        """
        # generate a random permutation of the sample indices
        length = len(self.Train_Label)
        random_sequence = list(np.arange(length))
        random.shuffle(random_sequence)
        data = [self.Train_Data[index] for index in random_sequence]
        label = [self.Train_Label[index] for index in random_sequence]
        self.Train_Data = np.array(data)
        self.Train_Label = np.array(label)

    def train_MBGD(self, inputs, outputs, limitation, learn_rate, mini_batch_size):
        """
        Train the BP neural network, updating the parameters with MBGD.
        :param inputs: training data
        :param outputs: training labels
        :param limitation: number of iterations
        :param learn_rate: learning rate
        :param mini_batch_size: mini-batch size
        """
        # initialize the training data and labels
        self.Init(inputs, outputs)
        for j in range(limitation):
            self.train_dataset_MBGD(learn_rate, mini_batch_size)

    def train_dataset_MBGD(self, learn_rate, mini_batch_size):
        """
        One MBGD iteration over the whole training set.
        :param learn_rate: learning rate
        :param mini_batch_size: mini-batch size
        """
        # split the data and labels into mini-batches of size mini_batch_size
        self.Split_MBGD(mini_batch_size)
        for (mini_batch_data, mini_batch_label) in zip(self.Train_Data, self.Train_Label):
            Hidden_Weights_Increasement = []    # increments of the hidden-to-output weights
            Output_Threshold_Increasement = []  # increments of the output thresholds
            Input_Weights_Increasement = []     # increments of the input-to-hidden weights
            Hidden_Threshold_Increasement = []  # increments of the hidden thresholds
            # traverse the mini-batch, collecting the gradient increments and
            # the error of each sample
            for (train_data, train_label) in zip(mini_batch_data, mini_batch_label):
                hidden_weights_increasement, output_threshold_increasement, input_weights_increasement, \
                    hidden_threshold_increasement, err = self.train_batch(train_data, train_label)
                Hidden_Weights_Increasement.append(hidden_weights_increasement)
                Output_Threshold_Increasement.append(output_threshold_increasement)
                Input_Weights_Increasement.append(input_weights_increasement)
                Hidden_Threshold_Increasement.append(hidden_threshold_increasement)
            # average the gradients over the mini-batch
            hidden_weights_increasement_avg = np.average(np.array(Hidden_Weights_Increasement), 0)
            output_threshold_increasement_avg = np.average(np.array(Output_Threshold_Increasement), 0)
            input_weights_increasement_avg = np.average(np.array(Input_Weights_Increasement), 0)
            hidden_threshold_increasement_avg = np.average(np.array(Hidden_Threshold_Increasement), 0)
            # back-propagate the averaged gradients to adjust the parameters
            self.back_propagate(hidden_weights_increasement_avg, output_threshold_increasement_avg,
                                input_weights_increasement_avg, hidden_threshold_increasement_avg,
                                learn_rate)
        # restore the flat dataset for the next iteration
        self.Return()

    def Split_MBGD(self, mini_batch_size):
        """
        Split the data and labels into mini-batches before an MBGD pass.
        :param mini_batch_size: mini-batch size
        """
        # The dataset length may not be divisible by mini_batch_size. First
        # compute the remainder `rest`: the full-size mini-batches cover the
        # first length - rest shuffled samples, and if rest is non-zero the
        # `if` below appends the remaining samples as one last smaller batch.
        length = len(self.Train_Label)
        rest = length % mini_batch_size
        random_sequence = list(np.arange(length))
        random.shuffle(random_sequence)
        mini_batches = [random_sequence[k:k + mini_batch_size]
                        for k in range(0, length - rest, mini_batch_size)]
        mini_batch_inputs = []
        mini_batch_outputs = []
        for batch in mini_batches:
            mini_batch_data = [self.Train_Data[index] for index in batch]
            mini_batch_label = [self.Train_Label[index] for index in batch]
            mini_batch_inputs.append(list(mini_batch_data))
            mini_batch_outputs.append(list(mini_batch_label))
        # the dataset length is not divisible by mini_batch_size: the leftover
        # samples form the last mini-batch
        if rest > 0:
            batch = random_sequence[length - rest:length]
            mini_batch_data = [self.Train_Data[index] for index in batch]
            mini_batch_label = [self.Train_Label[index] for index in batch]
            mini_batch_inputs.append(list(mini_batch_data))
            mini_batch_outputs.append(list(mini_batch_label))
        # keep the (possibly ragged) mini-batches as plain Python lists;
        # np.array would fail on unequal batch sizes in recent NumPy versions
        self.Train_Data = mini_batch_inputs
        self.Train_Label = mini_batch_outputs

    def Return(self):
        """
        Restore the flat training data and labels after an MBGD pass.
        """
        data = []
        label = []
        for (train_data, train_label) in zip(self.Train_Data, self.Train_Label):
            for (_data, _label) in zip(train_data, train_label):
                data.append(_data)
                label.append(_label)
        self.Train_Data = np.array(data)
        self.Train_Label = np.array(label)

    def test(self, Test_Data):
        """
        Test the BP neural network.
        :param Test_Data: test data
        :return: predicted one-hot labels
        """
        predict_labels = []
        # template output vector, all zeros
        tmp = [0] * self.output_n
        for test_data in Test_Data:
            predict_output = self.predict(test_data)
            # the predicted class is the output neuron with the largest activation
            index = np.argmax(predict_output)
            tmp1 = deepcopy(tmp)
            tmp1[index] = 1
            predict_labels.append(tmp1)
        predict_labels = np.array(predict_labels)
        return predict_labels
```
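Before moving to the real dataset, here is a minimal smoke test of the class above, assuming it is saved as BPNN.py (the filename from the header). The XOR data, the 4 hidden neurons (deliberately more than the square-root rule gives, since XOR is not linearly separable), and the learning rate are ad-hoc choices of mine, not from the experiments below; because the weights are drawn randomly, the occasional run may need a restart:

```python
import numpy as np
from BPNN import BPNN

# XOR inputs and one-hot labels for the two classes (0 and 1)
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)
Y = np.array([[1, 0], [0, 1], [0, 1], [1, 0]], dtype=float)

# 2 inputs, 4 hidden neurons, 2 outputs
bpnn = BPNN(input_n=2, hidden_n=4, output_n=2)
bpnn.train_SGD(X, Y, limitation=5000, learn_rate=0.5)

print(bpnn.test(X))   # expected on most runs: [[1 0] [0 1] [0 1] [1 0]]
```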
Next, I ran experiments on the speech-feature-signal dataset from *MATLAB Neural Network: 43 Case Studies*, aggregating the results with 5-fold cross-validation. The code is as follows:
```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time     : 2018/8/10 11:02
# @Author   : DaiPuWei
# E-Mail    : 771830171@qq.com
# @Site     : 计通303实验室
# @File     : Test.py
# @Software : PyCharm

import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import MinMaxScaler

from BPNN import BPNN


def Load_Voice_Data(path):
    """
    Load the speech-feature dataset.
    :param path: path of the data file
    :return: data and one-hot labels
    """
    data = []
    label = []
    with open(path) as f:
        for line in f.readlines():
            fields = line.strip().split("\t")
            tmp = []
            for i in range(1, len(fields)):
                tmp.append(float(fields[i]))
            data.append(tmp)
            # the first column is the class id (1-4); encode it as one-hot
            if 1 == int(fields[0]):
                label.append([1, 0, 0, 0])
            elif 2 == int(fields[0]):
                label.append([0, 1, 0, 0])
            elif 3 == int(fields[0]):
                label.append([0, 0, 1, 0])
            else:
                label.append([0, 0, 0, 1])
    data = np.array(data)
    label = np.array(label)
    return data, label


def Merge(data, columns):
    """
    Merge result arrays into a DataFrame for export and plotting.
    :param data: data
    :param columns: column names of the DataFrame
    :return: merged DataFrame of the test results
    """
    Data = np.array(data)
    Data = Data.T
    ans = pd.DataFrame(data=Data, columns=columns)
    return ans


def run_main():
    """
    Main function.
    """
    path = "./data.txt"
    Data, Label = Load_Voice_Data(path)

    # normalize the data to [0, 1]
    Data = MinMaxScaler().fit_transform(Data)

    # use the SimHei font so CJK characters render correctly in plots
    mpl.rcParams['font.sans-serif'] = [u'simHei']
    mpl.rcParams['axes.unicode_minus'] = False

    # learning rate, number of iterations and mini-batch size
    learn_rate = 0.01
    Iteration = 10000
    mini_batch = 64

    # network dimensions: the hidden size follows the empirical square-root rule
    col1 = np.shape(Data)[1]
    col2 = np.shape(Label)[1]
    input_n = col1
    hidden_n = int(np.sqrt(col1 * col2))
    output_n = col2

    # metrics of the three gradient algorithms
    BGD_Accuracy = []   # average accuracy of BGD
    SGD_Accuracy = []   # average accuracy of SGD
    MBGD_Accuracy = []  # average accuracy of MBGD
    BGD_MSE = []        # MSE of BGD
    SGD_MSE = []        # MSE of SGD
    MBGD_MSE = []       # MSE of MBGD

    # generate the train/test index splits for 5-fold cross-validation
    kf = KFold(n_splits=5, shuffle=True, random_state=np.random.randint(0, len(Data)))
    train_test_index = []
    for (train_index, test_index) in kf.split(Data):
        train_test_index.append((train_index, test_index))

    for (train_index, test_index) in train_test_index:
        # split into training set and test set
        Train_Data = Data[train_index]
        Train_Label = Label[train_index]
        Test_Data = Data[test_index]
        Test_Label = Label[test_index]
        # per-fold result arrays
        bgd_accuracy = []   # BGD accuracy for this fold
        sgd_accuracy = []   # SGD accuracy for this fold
        mbgd_accuracy = []  # MBGD accuracy for this fold
        bgd_mse = []        # BGD MSE for this fold
        sgd_mse = []        # SGD MSE for this fold
        mbgd_mse = []       # MBGD MSE for this fold
        # one BPNN per gradient algorithm
        BGD = BPNN(input_n, hidden_n, output_n)
        SGD = BPNN(input_n, hidden_n, output_n)
        MBGD = BPNN(input_n, hidden_n, output_n)
        BGD.Init(Train_Data, Train_Label)
        SGD.Init(Train_Data, Train_Label)
        MBGD.Init(Train_Data, Train_Label)
        for iteration in np.arange(Iteration):
            BGD.train_dataset_BGD(learn_rate)
            SGD.train_dataset_SGD(learn_rate)
            MBGD.train_dataset_MBGD(learn_rate, mini_batch)
            # results of BGD
            predict_outputs = BGD.test(Test_Data)
            bgd_accuracy.append(accuracy_score(Test_Label, predict_outputs))
            bgd_mse.append(np.sum((Test_Label - predict_outputs) ** 2) / len(Test_Label))
            # results of SGD
            predict_outputs = SGD.test(Test_Data)
            sgd_accuracy.append(accuracy_score(Test_Label, predict_outputs))
            sgd_mse.append(np.sum((Test_Label - predict_outputs) ** 2) / len(Test_Label))
            # results of MBGD (this must use the MBGD model, not BGD)
            predict_outputs = MBGD.test(Test_Data)
            mbgd_accuracy.append(accuracy_score(Test_Label, predict_outputs))
            mbgd_mse.append(np.sum((Test_Label - predict_outputs) ** 2) / len(Test_Label))
            if (iteration + 1) % 100 == 0:
                print("Iteration %d, learning rate learn_rate=%f" % (iteration + 1, learn_rate))
                print("BGD test accuracy: %f, test MSE: %f" % (bgd_accuracy[iteration], bgd_mse[iteration]))
                print("SGD test accuracy: %f, test MSE: %f" % (sgd_accuracy[iteration], sgd_mse[iteration]))
                print("MBGD test accuracy: %f, test MSE: %f" % (mbgd_accuracy[iteration], mbgd_mse[iteration]))
        BGD_Accuracy.append(list(bgd_accuracy))
        SGD_Accuracy.append(list(sgd_accuracy))
        MBGD_Accuracy.append(list(mbgd_accuracy))
        BGD_MSE.append(list(bgd_mse))
        SGD_MSE.append(list(sgd_mse))
        MBGD_MSE.append(list(mbgd_mse))

    # average the test MSE of the three gradient algorithms over the folds and visualize
    col = ["BGD", "SGD", "MBGD"]
    BGD_MSE = np.average(np.array(BGD_MSE), 0)
    SGD_MSE = np.average(np.array(SGD_MSE), 0)
    MBGD_MSE = np.average(np.array(MBGD_MSE), 0)
    xticks = np.arange(1, len(BGD_MSE) + 1)
    error = [BGD_MSE, SGD_MSE, MBGD_MSE]
    error_result = Merge(error, col)
    error_result.to_excel("./Test_MSE_of_gradient_algorithms_iteration=%d_learn_rate=%f.xlsx" % (Iteration, learn_rate))
    error_result.describe().to_excel("./Test_MSE_statistics_iteration=%d_learn_rate=%f.xlsx" % (Iteration, learn_rate))
    plt.plot(xticks, BGD_MSE, 'c-')
    plt.plot(xticks, SGD_MSE, 'b-.')
    plt.plot(xticks, MBGD_MSE, 'r--')
    plt.legend(labels=col, loc="best")
    plt.xlabel("Iterations")
    plt.ylabel("Test MSE")
    plt.xlim(1, len(BGD_MSE))
    plt.grid(True)
    plt.savefig("./Test_MSE_of_gradient_algorithms_iteration=%d_learn_rate=%f.jpg" % (Iteration, learn_rate))
    plt.close()

    # average the test accuracy of the three gradient algorithms over the folds and visualize
    col = ["BGD", "SGD", "MBGD"]
    BGD_Accuracy = np.average(np.array(BGD_Accuracy), 0)
    SGD_Accuracy = np.average(np.array(SGD_Accuracy), 0)
    MBGD_Accuracy = np.average(np.array(MBGD_Accuracy), 0)
    acc = [BGD_Accuracy, SGD_Accuracy, MBGD_Accuracy]
    accuracy = Merge(acc, col)
    accuracy.to_excel("./Test_accuracy_of_gradient_algorithms_iteration=%d_learn_rate=%f.xlsx" % (Iteration, learn_rate))
    accuracy.describe().to_excel("./Test_accuracy_statistics_iteration=%d_learn_rate=%f.xlsx" % (Iteration, learn_rate))
    xticks = np.arange(1, len(BGD_Accuracy) + 1)
    plt.plot(xticks, BGD_Accuracy, 'c-')
    plt.plot(xticks, SGD_Accuracy, 'b--')
    plt.plot(xticks, MBGD_Accuracy, 'r-.')
    plt.legend(labels=col, loc="best")
    plt.xlabel("Iterations")
    plt.ylabel("Test Accuracy")
    plt.grid(True)
    plt.savefig("./Test_accuracy_of_gradient_algorithms_iteration=%d_learn_rate=%f.jpg" % (Iteration, learn_rate))
    plt.close()


if __name__ == '__main__':
    run_main()
```
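The data file itself is not reproduced here (it ships with the book). Load_Voice_Data expects tab-separated lines whose first field is the class id (1 to 4) followed by the feature values; the book's speech-feature data are, to my recollection, 24-dimensional, but the loader infers the width from the file, so any count works. If you only want to smoke-test the script without the book's data, a throwaway generator such as the following (entirely synthetic, with a crude class-dependent shift so the classes are learnable) will do:

```python
# Synthetic stand-in for data.txt, compatible with Load_Voice_Data above.
# Each line: "<class id 1-4>\t<feature 1>\t...\t<feature 24>".
import numpy as np

rng = np.random.RandomState(42)
with open("./data.txt", "w") as f:
    for _ in range(500):
        c = rng.randint(1, 5)            # class id in {1, 2, 3, 4}
        feats = rng.randn(24) * 0.5 + c  # class-dependent mean, so the task is learnable
        f.write("%d\t%s\n" % (c, "\t".join("%f" % v for v in feats)))
```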
Below are the experimental results for the mean squared error:
Below are the experimental results for the classification accuracy:
The results show that the BGD and MBGD curves overlap almost completely, which may be due to the small size of the dataset.
Finally, below is the QR code of my newly registered WeChat official account, AI那点小事, where I will share hands-on material on AI, machine learning, deep learning, and computer vision. Interested readers can scan the code to follow. Thanks to all of you for your support.