Step 1: Import the required packages
```python
import os
import scipy.io as sio
import numpy as np
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from torch.autograd import Variable
```
```python
batchSize = 128  # mini-batch size
niter = 10       # maximum number of epochs
```
Step 2: Build the neural network
設(shè)神經(jīng)網(wǎng)絡(luò)為如上圖所示,輸入層4個(gè)神經(jīng)元,兩層隱含層各4個(gè)神經(jīng)元,輸出層一個(gè)神經(jīng)。每一層網(wǎng)絡(luò)所做的都是線性變換,即y=W×X+b;代碼實(shí)現(xiàn)如下:
```python
class Neuralnetwork(nn.Module):
    def __init__(self, in_dim, n_hidden_1, n_hidden_2, out_dim):
        super(Neuralnetwork, self).__init__()
        self.layer1 = nn.Linear(in_dim, n_hidden_1)
        self.layer2 = nn.Linear(n_hidden_1, n_hidden_2)
        self.layer3 = nn.Linear(n_hidden_2, out_dim)

    def forward(self, x):
        x = x.view(x.size(0), -1)  # flatten each sample to a vector
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        return x


model = Neuralnetwork(1 * 3, 4, 4, 1)
print(model)  # net architecture
```
```
Neuralnetwork(
  (layer1): Linear(in_features=3, out_features=4, bias=True)
  (layer2): Linear(in_features=4, out_features=4, bias=True)
  (layer3): Linear(in_features=4, out_features=1, bias=True)
)
```
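As a quick aside (my addition, not from the original post), one can check that nn.Linear really does apply the affine map y = W·x + b described above, with W stored as `weight` (shape out_features × in_features) and b as `bias`:

```python
# Minimal sketch: compare nn.Linear's output against the explicit affine map.
layer = nn.Linear(3, 4)
x = Variable(torch.randn(2, 3))                  # 2 samples, 3 features each
y_manual = x.matmul(layer.weight.t()) + layer.bias
print((layer(x) - y_manual).abs().max())         # ~0: the two computations agree
```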
Step 3: Load the data
The custom dataset demo_SBPFea.mat is in MATLAB's save format and contains two arrays: fea (1000×3) and sbp (1000×1). fea holds the feature vectors, with one sample per row and one feature per column; sbp holds the labels.
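If you don't have demo_SBPFea.mat at hand, a synthetic stand-in with the same shapes can be generated with scipy so the demo still runs (the coefficients below are made up purely for illustration; they are not the original data):

```python
# Hypothetical stand-in data: 1000 samples, 3 features, and a linearly related label.
fea = np.random.randn(1000, 3)
sbp = fea.dot(np.array([[5.0], [3.0], [-2.0]])) + 120.0 + np.random.randn(1000, 1)
sio.savemat('demo_SBPFea.mat', {'fea': fea, 'sbp': sbp})
```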
```python
class SBPEstimateDataset(Dataset):

    def __init__(self, ext='demo'):
        data = sio.loadmat(ext + '_SBPFea.mat')
        self.fea = data['fea']
        self.sbp = data['sbp']

    def __len__(self):
        return len(self.sbp)

    def __getitem__(self, idx):
        fea = self.fea[idx]
        sbp = self.sbp[idx]
        # Convert ndarrays to Tensors
        return {'fea': torch.from_numpy(fea).float(),
                'sbp': torch.from_numpy(sbp).float()}


train_dataset = SBPEstimateDataset(ext='demo')
train_loader = DataLoader(train_dataset, batch_size=batchSize,  # train in mini-batches
                          shuffle=True, num_workers=int(8))
```
整個(gè)數(shù)據(jù)樣本為1000,以batchSize = 128劃分,分為8份,前7份為104個(gè)樣本,第8份則為104個(gè)樣本。在網(wǎng)絡(luò)訓(xùn)練過(guò)程中,是一份數(shù)據(jù)一份數(shù)據(jù)進(jìn)行訓(xùn)練的
Step 4: Train the model
```python
# Optimizer: Adam
optimizer = optim.Adam(list(model.parameters()), lr=0.0001,
                       betas=(0.9, 0.999), weight_decay=0.004)
# Exponential LR decay (note: scheduler.step() is never called below,
# so the learning rate actually stays at 0.0001 throughout)
scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.997)
criterion = nn.MSELoss()  # loss function

if torch.cuda.is_available():  # use the GPU if one is available
    model.cuda()
    criterion.cuda()

for epoch in range(niter):
    losses = []
    ERROR_Train = []
    model.train()
    for i, data in enumerate(train_loader, 0):
        model.zero_grad()  # zero the gradients first
        real_cpu, label_cpu = data['fea'], data['sbp']
        if torch.cuda.is_available():  # move the tensors to the GPU when CUDA is available
            real_cpu = real_cpu.cuda()
            label_cpu = label_cpu.cuda()

        input = real_cpu
        label = label_cpu

        inputv = Variable(input)
        labelv = Variable(label)

        output = model(inputv)
        err = criterion(output, labelv)
        err.backward()
        optimizer.step()

        losses.append(err.data[0])

        error = output.data - label + 1e-12
        ERROR_Train.extend(error)

    MAE = np.average(np.abs(np.array(ERROR_Train)))
    ME = np.average(np.array(ERROR_Train))
    STD = np.std(np.array(ERROR_Train))

    print('[%d/%d] Loss: %.4f MAE: %.4f Mean Error: %.4f STD: %.4f' % (
          epoch, niter, np.average(losses), MAE, ME, STD))
```
```
[0/10] Loss: 18384.6699 MAE: 135.3871 Mean Error: -135.3871 STD: 7.5580
[1/10] Loss: 17063.0215 MAE: 130.4145 Mean Error: -130.4145 STD: 7.8918
[2/10] Loss: 13689.1934 MAE: 116.6625 Mean Error: -116.6625 STD: 9.7946
[3/10] Loss: 8192.9053 MAE: 89.6611 Mean Error: -89.6611 STD: 12.9911
[4/10] Loss: 2979.1340 MAE: 52.5410 Mean Error: -52.5279 STD: 15.0930
[5/10] Loss: 599.7094 MAE: 22.2735 Mean Error: -19.9979 STD: 14.2069
[6/10] Loss: 207.2831 MAE: 11.2394 Mean Error: -4.8821 STD: 13.5528
[7/10] Loss: 189.8173 MAE: 9.8020 Mean Error: -1.2357 STD: 13.7095
[8/10] Loss: 188.3376 MAE: 9.6512 Mean Error: -0.6498 STD: 13.7075
[9/10] Loss: 186.8393 MAE: 9.6946 Mean Error: -1.0850 STD: 13.6332
```
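Once training finishes, the model can be used for prediction. A minimal inference sketch (my addition, written in the same old-style Variable API the post uses):

```python
model.eval()  # switch to evaluation mode
sample = train_dataset[0]['fea'].unsqueeze(0)  # one sample, shape (1, 3)
if torch.cuda.is_available():
    sample = sample.cuda()
pred = model(Variable(sample))
print(pred.data[0])  # predicted SBP for the first sample
```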
That is everything in this tutorial on implementing a PyTorch neural network on a custom dataset. I hope it gives you a useful reference, and please keep supporting 服務器之家.
Original article: https://blog.csdn.net/qq_21905401/article/details/82627402