import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torchvision import datasets, transforms
Download the classic MNIST dataset.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Training set DataLoader
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST(root='.', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=64, shuffle=True, num_workers=0)

# Test set DataLoader
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST(root='.', train=False,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=64, shuffle=True, num_workers=0)
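As a quick sanity check on the data pipeline, we can pull one batch and inspect its shapes and statistics. This is a minimal illustrative sketch; the `images`/`labels` names are not part of the tutorial code:

# Pull a single batch to verify shapes and normalization (illustrative check).
images, labels = next(iter(train_loader))
print(images.shape)   # torch.Size([64, 1, 28, 28]) -> batch x channels x height x width
print(labels.shape)   # torch.Size([64])
# With Normalize((0.1307,), (0.3081,)), the batch mean is roughly 0 and std roughly 1.
print(images.mean().item(), images.std().item())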
Here we use a 4-layer CNN (convolutional neural network) with the structure Conv-Conv-FC-FC.
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        # Perform the usual forward pass
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)

model = Net().to(device)
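The 320 passed to `fc1` follows from the layer arithmetic: a 28x28 input shrinks to 24x24 after conv1 (kernel 5, no padding), to 12x12 after 2x2 max-pooling, to 8x8 after conv2, and to 4x4 after the second pooling, giving 20 * 4 * 4 = 320 flattened features. A quick sanity check with a dummy input (not part of the original code):

# Trace the shapes through the conv/pool stages with a dummy batch.
x = torch.randn(1, 1, 28, 28).to(device)
x = F.relu(F.max_pool2d(model.conv1(x), 2))  # -> torch.Size([1, 10, 12, 12])
x = F.relu(F.max_pool2d(model.conv2(x), 2))  # -> torch.Size([1, 20, 4, 4])
print(x.shape, x.view(-1, 320).shape)        # confirms fc1's in_features of 320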
Now we train the model with SGD (stochastic gradient descent), learning the classification task in a supervised fashion.
optimizer = optim.SGD(model.parameters(), lr=0.01)

def train(epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % 1 == 0:  # % 1 == 0 is always true, so progress is printed every batch
            print('\rTrain Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()), end='')

def test():
    with torch.no_grad():
        model.eval()
        test_loss = 0
        correct = 0
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target).item()  # sum of per-batch mean losses
            pred = output.max(1, keepdim=True)[1]           # index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()
        test_loss /= len(test_loader.dataset)
        print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'
              .format(test_loss, correct, len(test_loader.dataset),
                      100. * correct / len(test_loader.dataset)))
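Since the model already returns `F.log_softmax`, pairing it with `F.nll_loss` is numerically equivalent to calling `F.cross_entropy` on raw logits. The short sketch below, using dummy tensors, just demonstrates that equivalence:

# F.cross_entropy(logits, t) == F.nll_loss(F.log_softmax(logits, dim=1), t)
logits = torch.randn(4, 10)          # dummy scores: 4 samples, 10 classes
target = torch.tensor([0, 3, 7, 9])  # dummy labels
a = F.nll_loss(F.log_softmax(logits, dim=1), target)
b = F.cross_entropy(logits, target)
print(torch.allclose(a, b))          # True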
Start training, testing the model after each epoch. Within 20 epochs, the model's accuracy can reach 98.7%.
epochs = 20
for epoch in range(1, epochs + 1):
    train(epoch)
    test()
Train Epoch: 1 [29984/60000 (100%)]	Loss: 0.130790
Test set: Average loss: 0.0033, Accuracy: 9370/10000 (94%)

Train Epoch: 2 [29984/60000 (100%)]	Loss: 0.212607
Test set: Average loss: 0.0020, Accuracy: 9594/10000 (96%)

Train Epoch: 3 [29984/60000 (100%)]	Loss: 0.054339
Test set: Average loss: 0.0016, Accuracy: 9673/10000 (97%)

Train Epoch: 4 [29984/60000 (100%)]	Loss: 0.085429
Test set: Average loss: 0.0012, Accuracy: 9766/10000 (98%)

Train Epoch: 5 [29984/60000 (100%)]	Loss: 0.084620
Test set: Average loss: 0.0010, Accuracy: 9800/10000 (98%)

Train Epoch: 6 [29984/60000 (100%)]	Loss: 0.053965
Test set: Average loss: 0.0009, Accuracy: 9826/10000 (98%)

Train Epoch: 7 [29984/60000 (100%)]	Loss: 0.098088
Test set: Average loss: 0.0008, Accuracy: 9826/10000 (98%)

Train Epoch: 8 [29184/60000 (49%)]	Loss: 0.008589