Build a DNN with PyTorch; the main goal is to get familiar with how PyTorch works.
"""
test Function
""" import torch
from torch import nn, optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import datasets, transforms class simpleNet(nn.Module):
''' define the 3 layers Network'''
def __init__(self, in_dim, n_hidden_1, n_hidden_2, out_dim):
super(simpleNet, self).__init__()
self.layer1 = nn.Linear(in_dim, n_hidden_1)
self.layer2 = nn.Linear(n_hidden_1, n_hidden_2)
        self.layer3 = nn.Linear(n_hidden_2, out_dim)

    def forward(self, x):
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
        return x
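
# Note: without nonlinearities, stacked Linear layers collapse into a single
# linear map, so simpleNet is no more expressive than one nn.Linear. The next
# variant adds a ReLU after each hidden layer to fix that.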

class Activation_Net(nn.Module):
    '''The same three layers, with a ReLU after each hidden layer.'''
    def __init__(self, in_dim, n_hidden_1, n_hidden_2, out_dim):
super(Activation_Net, self).__init__()
self.layer1 = nn.Sequential(
nn.Linear(in_dim, n_hidden_1), nn.ReLU(True)
)
self.layer2 = nn.Sequential(
nn.Linear(n_hidden_1, n_hidden_2), nn.ReLU(True)
)
self.layer3 = nn.Sequential(
nn.Linear(n_hidden_2, out_dim)
        )

    def forward(self, x):
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
        return x
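
# Batch_Net below adds BatchNorm1d between each Linear layer and its ReLU;
# normalizing the pre-activations usually stabilizes and speeds up training.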

class Batch_Net(nn.Module):
    '''Same as Activation_Net, plus BatchNorm1d before each ReLU.'''
    def __init__(self, in_dim, n_hidden_1, n_hidden_2, out_dim):
super(Batch_Net, self).__init__()
self.layer1 = nn.Sequential(
            nn.Linear(in_dim, n_hidden_1), nn.BatchNorm1d(n_hidden_1), nn.ReLU(True)
)
self.layer2 = nn.Sequential(
            nn.Linear(n_hidden_1, n_hidden_2), nn.BatchNorm1d(n_hidden_2), nn.ReLU(True)
)
self.layer3 = nn.Sequential(
nn.Linear(n_hidden_2, out_dim)
        )

    def forward(self, x):
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
        return x
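
# A quick shape check (an assumption, not in the original script): each of the
# three variants should map a (batch, 784) input to (batch, 10) logits.
dummy = torch.randn(4, 28 * 28)
for net in (simpleNet(28 * 28, 300, 100, 10),
            Activation_Net(28 * 28, 300, 100, 10),
            Batch_Net(28 * 28, 300, 100, 10)):
    assert net(dummy).shape == (4, 10)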

# Hyperparameters
batch_size = 64
learning_rate = 1e-2
num_epochs = 20

# Convert images to tensors and normalize the single MNIST channel to [-1, 1].
data_tf = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize([0.5], [0.5])]
)

train_dataset = datasets.MNIST(root='./data', train=True, transform=data_tf, download=True)
test_dataset = datasets.MNIST(root='./data', train=False, transform=data_tf)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
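
# Optional data sanity check (an assumption, not in the original script): the
# loader yields (batch, 1, 28, 28) images that flatten to (batch, 784) vectors.
sample_img, sample_label = next(iter(train_loader))
print(sample_img.shape)                                # torch.Size([64, 1, 28, 28])
print(sample_img.view(sample_img.size(0), -1).shape)   # torch.Size([64, 784])
print(sample_label.shape)                              # torch.Size([64])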

# Train the BatchNorm variant; input is a flattened 28x28 image, output is 10 class logits.
model = Batch_Net(28 * 28, 300, 100, 10)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=learning_rate)
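
# nn.CrossEntropyLoss combines LogSoftmax and NLLLoss, which is why none of the
# networks apply an activation after the final Linear layer: the model is
# expected to output raw logits.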

# Training: run num_epochs passes over the data, printing the loss every 50 steps.
# (The original counted batches in a variable named `epoch` and never used
# num_epochs; torch.autograd.Variable is also no longer needed since plain
# tensors track gradients.)
model.train()
step = 0
for epoch in range(num_epochs):
    for img, label in train_loader:
        img = img.view(img.size(0), -1)  # flatten (batch, 1, 28, 28) -> (batch, 784)
        out = model(img)
        loss = criterion(out, label)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        step += 1
        if step % 50 == 0:
            print('epoch:{}, step:{}, loss:{:.4f}'.format(epoch, step, loss.item()))
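
# Optional (an assumption, not in the original script): save the trained weights
# so evaluation or inference can be rerun without retraining. The filename
# 'dnn_mnist.pth' is made up for illustration.
torch.save(model.state_dict(), 'dnn_mnist.pth')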
# Evaluation
model.eval()  # eval mode changes the behavior of layers such as BatchNorm and Dropout
eval_loss = 0
eval_acc = 0
with torch.no_grad():  # no gradients are needed for the forward pass; this saves memory
    for img, label in test_loader:
        img = img.view(img.size(0), -1)
        out = model(img)
        loss = criterion(out, label)
        # criterion averages over the batch, so scale by the batch size
        eval_loss += loss.item() * label.size(0)
        _, pred = torch.max(out, 1)  # index of the largest logit in each row
        eval_acc += (pred == label).sum().item()
print('Test Loss:{:.6f}, Acc:{:.6f}'.format(eval_loss / len(test_dataset), eval_acc / len(test_dataset)))
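
# A minimal inference sketch (an assumption, not part of the original): classify
# a single image from the test set with the trained model.
with torch.no_grad():
    img, label = test_dataset[0]        # (1, 28, 28) tensor and an int label
    logits = model(img.view(1, -1))     # flatten to (1, 784)
    print('predicted:', logits.argmax(dim=1).item(), 'actual:', label)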