As the saying goes, "practice makes perfect": we prepare for a big exam by working through problems from many different areas. What if the same idea could be applied to classification, regression, or clustering problems? Transfer learning is exactly such a technique: it lets us reuse the weights of a model trained on a standard dataset, such as ImageNet, to improve performance on a specific task.
Before diving into how transfer learning works, consider what it buys us. Training is fast: an ordinary convolutional neural network can take days or even weeks to train from scratch, and transfer learning shortens that dramatically. A transferred model also tends to reach noticeably higher accuracy than a comparable model built from scratch, and it needs far less training data: having already been trained on a large dataset, the network can detect generic features out of the box, so only a modest amount of additional data is needed to adapt it.
import time
import copy

import numpy as np
import matplotlib.pyplot as plt

import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR

import torchvision
from torchvision import models, transforms
# Preprocessing and augmentation for the training set
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.RandomHorizontalFlip(),
    transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.1, hue=0.1),
    transforms.RandomAffine(degrees=40, translate=None, scale=(1, 2), shear=15, fill=0),
    transforms.ToTensor(),
    # Normalize with the ImageNet channel means and standard deviations
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])
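The random flips, color jitter, and affine warps above are appropriate for training only. For validation and inference, a deterministic counterpart is the usual choice; a minimal sketch (eval_transform is our own name, not part of the original script):

# Deterministic preprocessing for validation/test: no random augmentation
eval_transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])

For simplicity, the walkthrough below reuses the training transform for the validation split; swapping in eval_transform there is the stricter choice.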
# Load the datasets ('path_to_val_dataset' is a placeholder, analogous to the training path)
from torchvision.datasets import ImageFolder
train_dataset = ImageFolder('path_to_train_dataset', transform=transform)
val_dataset = ImageFolder('path_to_val_dataset', transform=transform)
dataloaders = {'train': torch.utils.data.DataLoader(train_dataset, batch_size=8, shuffle=True),
               'val': torch.utils.data.DataLoader(val_dataset, batch_size=8, shuffle=False)}
dataset_sizes = {'train': len(train_dataset), 'val': len(val_dataset)}
class_names = train_dataset.classes
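ImageFolder infers class labels from the subdirectory names under each dataset root. A quick sanity check of the mapping and split sizes (the mapping shown in the comment is hypothetical):

# Confirm the label mapping and the number of samples per split
print(train_dataset.class_to_idx)  # e.g. {'ants': 0, 'bees': 1}
print(dataset_sizes)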
# Visualize the dataset
def imshow(inp, title=None):
    """Imshow for Tensor."""
    inp = inp.numpy().transpose((1, 2, 0))
    # Undo the normalization so the image displays with natural colors
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    inp = std * inp + mean
    inp = np.clip(inp, 0, 1)
    plt.imshow(inp)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # pause a bit so that plots are updated
# Grab one batch and display it as a grid with its class labels
inputs, classes = next(iter(dataloaders['train']))
out = torchvision.utils.make_grid(inputs)
imshow(out, title=[class_names[x] for x in classes])
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Load VGG16 pre-trained on ImageNet (on torchvision < 0.13, use pretrained=True instead)
model_ft = models.vgg16(weights=models.VGG16_Weights.DEFAULT)
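Before replacing anything, it helps to look at the head we are about to swap out. A quick inspection (not part of the original walkthrough) shows that VGG16's classifier ends in a Linear layer with 1000 ImageNet outputs:

# The head is a Sequential; its last entry, classifier[6], is Linear(4096, 1000)
print(model_ft.classifier)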
# Replace the last layer of the classifier head with a new 2-class output layer
num_ftrs = model_ft.classifier[6].in_features
model_ft.classifier[6] = nn.Linear(num_ftrs, 2)
model_ft = model_ft.to(device)
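Here every parameter remains trainable (full fine-tuning). When training data is scarce, a common variant is feature extraction: freeze the pretrained convolutional backbone and train only the new head. A minimal sketch, where optimizer_conv is our own name and would replace the optimizer defined below:

# Feature-extraction variant: freeze the pretrained convolutional backbone
for param in model_ft.features.parameters():
    param.requires_grad = False
# Optimize only the parameters that still require gradients (the new head)
optimizer_conv = optim.SGD(filter(lambda p: p.requires_grad, model_ft.parameters()),
                           lr=0.001, momentum=0.9)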
criterion = nn.CrossEntropyLoss()
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay the learning rate by a factor of 0.1 every 7 epochs
exp_lr_scheduler = StepLR(optimizer_ft, step_size=7, gamma=0.1)
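To see what this schedule does, here is a standalone trace with a throwaway optimizer (dummy_opt and dummy_sched are illustrative names only): the learning rate stays at 0.001 for epochs 0 through 6, then drops tenfold every 7 epochs.

# Illustration only: trace the StepLR schedule on a dummy optimizer
dummy_opt = optim.SGD([torch.zeros(1, requires_grad=True)], lr=0.001)
dummy_sched = StepLR(dummy_opt, step_size=7, gamma=0.1)
for epoch in range(9):
    print(epoch, dummy_sched.get_last_lr())  # [0.001] up to epoch 6, [0.0001] afterwards
    dummy_opt.step()
    dummy_sched.step()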
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training phase and a validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                optimizer.zero_grad()
                # Track gradients only during the training phase
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            if phase == 'train':
                scheduler.step()
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))
            # Keep a copy of the weights that achieve the best validation accuracy
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:.4f}'.format(best_acc))
    # Load the best weights seen during training
    model.load_state_dict(best_model_wts)
    return model
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler, num_epochs=25)
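Once fine-tuning finishes, the returned model carries the best validation weights and can be used for prediction. A minimal inference sketch, reusing the deterministic eval_transform sketched earlier ('some_image.jpg' is a placeholder path):

# Inference sketch: classify a single image with the fine-tuned model
from PIL import Image

img = Image.open('some_image.jpg').convert('RGB')  # placeholder path
x = eval_transform(img).unsqueeze(0).to(device)    # add a batch dimension
model_ft.eval()
with torch.no_grad():
    pred = model_ft(x).argmax(dim=1).item()
print('Predicted class:', class_names[pred])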