1. Building the network
nn.Module is the base class PyTorch provides for defining a network: the layers you need are created in __init__, and the data flow through them is defined in forward.
The layers in detail:
conv1: input channels = 1, output channels = 10, 5×5 kernel
max pooling: 2×2
conv2: input channels = 10, output channels = 20, 5×5 kernel
dropout
max pooling: 2×2
fc1: 320 input units (see the shape arithmetic below), 50 output units
fc2: 50 input units, 10 output units (one per digit class)
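Where does the 320 come from? Trace the shape of a 1×28×28 MNIST image through the layers; the 5×5 convolutions use no padding, so each one shrinks height and width by 4:
conv1: 28 − 5 + 1 = 24 → 10×24×24
max pool 2×2: 10×12×12
conv2: 12 − 5 + 1 = 8 → 20×8×8
max pool 2×2: 20×4×4
flatten: 20 × 4 × 4 = 320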
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):                                             # x: 1x28x28
        x = F.relu(F.max_pool2d(self.conv1(x), 2))                    # -> 10x12x12
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))   # -> 20x4x4
        x = x.view(-1, 320)                                           # flatten: 1x320
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
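To sanity-check that arithmetic, push a random tensor through the network. A minimal sketch, assuming the imports from the complete script in section 5 and PyTorch ≥ 0.4 (where plain tensors need no Variable wrapper; on 0.3, wrap the input in Variable first):

net = Net()
dummy = torch.randn(1, 1, 28, 28)  # one fake image: batch=1, channel=1, 28x28
out = net(dummy)
print(out.size())                  # torch.Size([1, 10]): one log-probability per digit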
2. Writing the training code
model = Net()   # instantiate the network defined above
if args.cuda:   # move the model to the GPU if one is available
    model.cuda()
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)  # plain SGD with momentum

def train(epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)  # Variable wrapper; a no-op on PyTorch >= 0.4
        optimizer.zero_grad()              # reset gradients to zero
        output = model(data)
        loss = F.nll_loss(output, target)  # negative log-likelihood loss
        loss.backward()                    # backpropagate to compute gradients
        optimizer.step()                   # update the weights
        if batch_idx % args.log_interval == 0:  # print progress
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.data[0]))  # use loss.item() on PyTorch >= 0.5
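Why F.nll_loss here? Because forward already applies F.log_softmax, nll_loss receives log-probabilities, and the two together compute exactly what F.cross_entropy computes from raw logits. A small sketch of the equivalence (assuming PyTorch ≥ 0.4 for torch.tensor):

import torch
import torch.nn.functional as F

logits = torch.randn(4, 10)          # raw scores for a batch of 4 images
target = torch.tensor([3, 0, 7, 1])  # ground-truth digit labels

loss_a = F.nll_loss(F.log_softmax(logits, dim=1), target)
loss_b = F.cross_entropy(logits, target)
print(torch.allclose(loss_a, loss_b))  # True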
3. Writing the test code
def test():
    model.eval()
    test_loss = 0
    correct = 0
    for data, target in test_loader:
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data, volatile=True), Variable(target)  # volatile: no gradient tracking (PyTorch 0.3 API)
        output = model(data)
        test_loss += F.nll_loss(output, target, size_average=False).data[0]  # sum up batch loss
        pred = output.data.max(1, keepdim=True)[1]  # get the index of the max log-probability
        correct += pred.eq(target.data.view_as(pred)).long().cpu().sum()
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
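Variable(..., volatile=True) is PyTorch 0.3 API for disabling gradient tracking during inference. In PyTorch ≥ 0.4 it was removed in favor of the torch.no_grad() context; likewise size_average=False became reduction='sum' and .data[0] became .item(). A sketch of the same loop in the newer API (assuming recent PyTorch, ≥ 1.0):

def test():
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():  # no autograd bookkeeping during evaluation
        for data, target in test_loader:
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
            pred = output.max(1, keepdim=True)[1]  # index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)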
4. Running train and test
for epoch in range(1, args.epochs + 1):
    train(epoch)  # train for one epoch
    test()        # evaluate on the test set
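The script as written trains and evaluates but never persists the weights. If you want to keep the trained model, a minimal sketch (the file name mnist_cnn.pt is an arbitrary choice):

torch.save(model.state_dict(), 'mnist_cnn.pt')  # save the learned parameters only

# later, to reuse it:
restored = Net()
restored.load_state_dict(torch.load('mnist_cnn.pt'))
restored.eval()  # switch dropout to inference mode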
5. Complete code
The full script below adds the pieces not shown above: the argparse command-line settings, random seeding, and the MNIST data loaders (inputs normalized with the dataset mean 0.1307 and standard deviation 0.3081). forward also gains print(x.size()) calls so you can watch the shapes as a batch passes through.
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable

# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                    help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                    help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
                    help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                    help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                    help='SGD momentum (default: 0.5)')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
                    help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                    help='how many batches to wait before logging training status')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))  # MNIST mean and std
                   ])),
    batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=False, transform=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])),
    batch_size=args.test_batch_size, shuffle=True, **kwargs)

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        print(x.size())  # debug: input shape
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        print(x.size())  # debug: shape after conv1 + pool
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        print(x.size())  # debug: shape after conv2 + pool
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)

model = Net()   # instantiate the network
if args.cuda:   # move the model to the GPU if one is available
    model.cuda()
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)

def train(epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)
        optimizer.zero_grad()              # reset gradients to zero
        output = model(data)
        loss = F.nll_loss(output, target)  # negative log-likelihood loss
        loss.backward()                    # backpropagate to compute gradients
        optimizer.step()                   # update the weights
        if batch_idx % args.log_interval == 0:  # print progress
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.data[0]))

def test():
    model.eval()
    test_loss = 0
    correct = 0
    for data, target in test_loader:
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data, volatile=True), Variable(target)
        output = model(data)
        test_loss += F.nll_loss(output, target, size_average=False).data[0]  # sum up batch loss
        pred = output.data.max(1, keepdim=True)[1]  # get the index of the max log-probability
        correct += pred.eq(target.data.view_as(pred)).long().cpu().sum()
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))

for epoch in range(1, args.epochs + 1):
    train(epoch)
    test()
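Assuming the script above is saved as mnist.py (any file name works), training runs from the command line; every flag defined by argparse is optional:

python mnist.py                       # defaults: 10 epochs, lr 0.01, batch size 64
python mnist.py --epochs 5 --no-cuda  # shorter run, forced onto the CPU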
That concludes this walkthrough of MNIST handwritten-digit recognition in PyTorch.
Original article: https://blog.csdn.net/xz1308579340/article/details/79520969