The torchvision package contains popular datasets, common model architectures, and frequently used image transformation tools for computer vision.
torchvision.datasets includes the following datasets, all of which share the same loading interface (see the sketch after this list):
MNIST
COCO (for image captioning and object detection)
LSUN Classification
ImageFolder
Imagenet-12
CIFAR10 and CIFAR100
STL10
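As a minimal sketch of that shared interface (the root directory and transform choice here are illustrative assumptions, not part of the original article):

from torchvision import datasets, transforms

# Download CIFAR10 into ./data if it is not already there,
# and convert each image to a tensor on access.
cifar_train = datasets.CIFAR10(root='./data', train=True, download=True,
                               transform=transforms.ToTensor())
print(len(cifar_train))      # 50000 training images
img, label = cifar_train[0]  # each item is an (image tensor, class index) pair

Swapping CIFAR10 for MNIST, STL10, etc. changes only the constructor name; the train/download/transform arguments work the same way.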
torchvision.models
The torchvision.models module contains definitions for the following model architectures:
AlexNet
VGG
ResNet
SqueezeNet
DenseNet
You can construct a model with random weights by calling its constructor:
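A minimal sketch (the exact set of constructors varies slightly between torchvision releases):

import torchvision.models as models

# Each constructor returns a model with randomly initialized weights.
alexnet = models.alexnet()
vgg16 = models.vgg16()
resnet18 = models.resnet18()
squeezenet = models.squeezenet1_0()
densenet = models.densenet161()

Many of these constructors also accept a pretrained=True argument (replaced by a weights=... argument in newer torchvision releases) to download weights trained on ImageNet instead.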
torchvision.transforms
Transforms on PIL.Image
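As a minimal sketch of chaining such transforms with transforms.Compose (the input file name is a hypothetical placeholder; the normalization statistics shown are the usual ImageNet values):

from PIL import Image
from torchvision import transforms

preprocess = transforms.Compose([
    transforms.Resize(256),       # scale the shorter side to 256 pixels
    transforms.CenterCrop(224),   # crop the central 224x224 region
    transforms.ToTensor(),        # PIL.Image -> FloatTensor in [0, 1], shape (C, H, W)
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

img = Image.open('example.jpg')   # hypothetical input image
tensor = preprocess(img)

The full MNIST training script below uses exactly this pattern, with MNIST's own mean and standard deviation.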
from __future__ import print_function
import argparse  # Python command-line argument parsing
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)  # flatten to (batch, 320) for the fully connected layers
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)


def train(args, model, device, train_loader, optimizer, epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))


def test(args, model, device, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
            pred = output.max(1, keepdim=True)[1]  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))


def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=10, metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    device = torch.device("cuda" if use_cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data', train=True, download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))  # MNIST mean and std
                       ])),
        batch_size=args.batch_size, shuffle=True, **kwargs)
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data', train=False, transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))
        ])),
        batch_size=args.test_batch_size, shuffle=True, **kwargs)
    model = Net().to(device)
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        test(args, model, device, test_loader)


if __name__ == '__main__':
    main()
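With the argparse flags defined in main(), the script is driven from the command line. Assuming it is saved as mnist.py (the filename is an assumption), a run might look like:

python mnist.py --epochs 5 --batch-size 128 --lr 0.01

On the first run the MNIST data is downloaded into ../data; subsequent runs reuse it.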
That concludes this walkthrough of implementing MNIST classification with PyTorch. We hope it serves as a useful reference, and we hope you will continue to support 服务器之家.
Original article: https://blog.csdn.net/KyrieHe/article/details/80516737