99
MATLAB中的神经网络训练框架
内置训练函数 | 自定义训练循环
trainnet(dlnetwork 对象) | dlnetwork 对象 + 模型函数
% Model loss function: forward pass, cross-entropy loss, and gradients.
% Must be evaluated via dlfeval so dlgradient can trace the computation.
% Inputs : net - dlnetwork;  X - input dlarray;  T - target dlarray.
% Outputs: loss      - scalar cross-entropy loss
%          gradients - table of gradients w.r.t. net.Learnables
%          state     - updated network state (e.g. normalization statistics)
function [loss,gradients,state] = modelLoss(net,X,T)
% Forward pass; also returns the updated layer state.
[Y,state] = forward(net,X);
% Cross-entropy between predictions Y and targets T (classification loss).
loss = crossentropy(Y,T);
% Gradients of the loss with respect to the learnable parameters.
gradients = dlgradient(loss,net.Learnables);
end
% Build the model as a layer array, then wrap it in a dlnetwork for use
% in a custom training loop.
% NOTE(review): numChannels and numClasses are assumed to be defined
% earlier (dataset-dependent) — confirm against the full script.
layers = [
sequenceInputLayer(numChannels)
...
fullyConnectedLayer(numClasses)
softmaxLayer];
dlnet = dlnetwork(layers);
% Use minibatchqueue to manage mini-batched training data.
% cdsTrain is assumed to be a datastore created earlier — TODO confirm.
miniBatchSize = 128;
mbq = minibatchqueue(cdsTrain, ...
MiniBatchSize=miniBatchSize, ...
MiniBatchFcn=@preprocessMiniBatch, ...   % custom per-batch preprocessing (defined elsewhere)
MiniBatchFormat=["TCB" ""]);             % first output labeled Time x Channel x Batch; second unformatted
% Training settings.
numEpochs = 10;
initialLearnRate = 0.01;
learnRate = initialLearnRate;   % fixed rate (the loop below uses learnRate; it was previously undefined)

% The network built above is dlnet; the loop updates it under the name net.
net = dlnet;

% Progress monitor: monitor.Stop becomes true when the user clicks Stop
% in the training-progress window (monitor was previously never created).
monitor = trainingProgressMonitor(Metrics="Loss",XLabel="Iteration");

% Custom training loop.
epoch = 0;
iteration = 0;
while epoch < numEpochs && ~monitor.Stop
    epoch = epoch + 1;
    shuffle(mbq);   % reshuffle the mini-batches at every epoch
    while hasdata(mbq) && ~monitor.Stop
        iteration = iteration + 1;
        [X,T] = next(mbq);
        % Evaluate loss/gradients/state inside dlfeval so dlgradient works.
        [loss,gradients,state] = dlfeval(@modelLoss,net,X,T);
        % Apply the updated state so running statistics (e.g. normalization)
        % are not discarded (the original dropped the state output).
        net.State = state;
        % `...` continuation is required here — the original broke the
        % anonymous function across two lines without it (syntax error).
        % sgdStep is assumed to be a custom SGD helper defined elsewhere.
        updateFcn = @(parameters,gradients) ...
            sgdStep(parameters,gradients,learnRate);
        net = dlupdate(updateFcn,net,gradients);
        % Report progress to the monitor.
        recordMetrics(monitor,iteration,Loss=loss);
        monitor.Progress = 100*epoch/numEpochs;
    end
end
训练过程
•在每次迭代 (each minibatch), 进行预处理
•将输入数据传给网络进行前馈计算
•和真值进行比较
•自动微分
•更新模型权重和偏置 (parameters).
% Build the model: a 1-D convolutional classifier for sequence input.
% numChannels / numClasses are assumed defined earlier — TODO confirm.
layers = [
sequenceInputLayer(numChannels)
convolution1dLayer(5,20,Padding="same")   % 20 filters of width 5, length-preserving padding
layerNormalizationLayer
reluLayer
convolution1dLayer(5,20,Padding="same")
layerNormalizationLayer
reluLayer
convolution1dLayer(5,20,Padding="same")
layerNormalizationLayer
reluLayer
globalAveragePooling1dLayer               % collapse the time dimension to one value per channel
fullyConnectedLayer(numClasses)
softmaxLayer];
% No trailing semicolon: displays the dlnetwork summary in the command window.
net = dlnetwork(layers)
% Training options for the built-in trainnet workflow.
options = trainingOptions("adam", ...
MaxEpochs=maxEpochs, ...
MiniBatchSize=miniBatchSize, ...
InitialLearnRate=0.01, ...
GradientThreshold=1, ...          % gradient clipping
Shuffle="every-epoch", ...        % was "never"; shuffling each epoch aids generalization
Metrics="accuracy", ...           % was "rmse" (a regression metric) on a softmax classifier
Plots="training-progress", ...
Verbose=0);
% Train the model. Pass the dlnetwork built above (not the raw layer array)
% and the built-in "crossentropy" loss, which matches the softmax output
% (the original referenced an undefined @lossFun).
% NOTE(review): normalized Xtrain -> XTrain to match TTrain's capitalization;
% confirm the actual variable name in the full script.
net = trainnet(XTrain,TTrain,net,"crossentropy",options);
链接 链接