A Summary of Using the PyTorch Neural Network Toolbox for Deep Learning

Jishi Editor's Note
This article introduces the use of the PyTorch neural network toolbox, covering its core components, a neural network example, construction methods, a comparison of optimizers, and more.
1 Core Components of a Neural Network
Layer: the basic building block of a neural network; it transforms an input tensor into an output tensor
Model: a network composed of layers
Loss function: the objective of parameter learning; the parameters are learned by minimizing the loss
Optimizer: the algorithm that makes the loss function as small as possible (the sketch below maps these four components onto PyTorch classes)
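The following minimal sketch (illustrative only, not from the original code) shows how the four components correspond to PyTorch classes:
import torch
from torch import nn, optim

model = nn.Sequential(        # model: a network composed of layers
    nn.Linear(28*28, 100),    # layer: transforms an input tensor into an output tensor
    nn.ReLU(),
    nn.Linear(100, 10))
criterion = nn.CrossEntropyLoss()                   # loss function: the objective that learning minimizes
optimizer = optim.SGD(model.parameters(), lr=0.01)  # optimizer: the rule that drives the loss down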
2 A Neural Network Example
If you are a beginner, it is recommended to go straight to Section 3 to avoid erroneous results.

2.1 Background

2.2 Preparing the Data
## (1) Import the required modules
import numpy as np
import torch
# Import the built-in MNIST dataset
from torchvision.datasets import mnist
# Import the preprocessing module
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
# Import nn and the optimizers
import torch.nn.functional as F
import torch.optim as optim
from torch import nn
## (2) Define some hyperparameters
train_batch_size = 64
test_batch_size = 128
num_epoches = 20
lr = 0.01
momentum = 0.5
## (3) Download the data and preprocess it
# Define the preprocessing steps; they are applied in order inside Compose
# Normalize([0.5], [0.5]) maps the [0, 1] pixel range to [-1, 1]
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.5], [0.5])])
# Download the data and apply the preprocessing
train_dataset = mnist.MNIST('./data', train=True, transform=transform, download=True)
test_dataset = mnist.MNIST('./data', train=False, transform=transform)
# A DataLoader is an iterable object and can be used like an iterator
train_loader = DataLoader(train_dataset, batch_size=train_batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=test_batch_size, shuffle=False)

2.3 Visualizing the Source Data
import matplotlib.pyplot as plt
%matplotlib inline
examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)
fig = plt.figure()
for i in range(6):
    plt.subplot(2, 3, i + 1)
    plt.tight_layout()
    plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
    plt.title("Ground Truth: {}".format(example_targets[i]))
    plt.xticks([])
    plt.yticks([])

2.4 Building the Model
## (1) Build the network
class Net(nn.Module):
    """
    Build the network with Sequential; Sequential() combines the network's layers together
    """
    def __init__(self, in_dim, n_hidden_1, n_hidden_2, out_dim):
        super(Net, self).__init__()
        self.layer1 = nn.Sequential(nn.Linear(in_dim, n_hidden_1), nn.BatchNorm1d(n_hidden_1))
        self.layer2 = nn.Sequential(nn.Linear(n_hidden_1, n_hidden_2), nn.BatchNorm1d(n_hidden_2))
        self.layer3 = nn.Sequential(nn.Linear(n_hidden_2, out_dim))
    def forward(self, x):
        x = F.relu(self.layer1(x))
        x = F.relu(self.layer2(x))
        x = self.layer3(x)
        return x
## (2) Instantiate the network
# Use the GPU if one is available, otherwise fall back to the CPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Instantiate the network
model = Net(28*28, 300, 100, 10)
model.to(device)
# Define the loss function and the optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum)
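As a quick sanity check, printing the instantiated model lists layer1 through layer3 with their Linear and BatchNorm1d sublayers (nn.Module provides this representation):
print(model)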
2.5 Training the Model
## Train the model
# Start training
losses = []
acces = []
eval_losses = []
eval_acces = []
print("Starting the loop, please be patient .....")
for epoch in range(num_epoches):
    train_loss = 0
    train_acc = 0
    model.train()
    # Dynamically adjust the learning rate
    if epoch % 5 == 0:
        optimizer.param_groups[0]['lr'] *= 0.1
    for img, label in train_loader:
        img = img.to(device)
        label = label.to(device)
        img = img.view(img.size(0), -1)
        # Forward propagation
        out = model(img)
        loss = criterion(out, label)
        # Backward propagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Accumulate the loss
        train_loss += loss.item()
        # Compute the classification accuracy
        _, pred = out.max(1)
        num_correct = (pred == label).sum().item()
        acc = num_correct / img.shape[0]
        train_acc += acc
    print("First loop finished, please keep waiting ....")
    losses.append(train_loss / len(train_loader))
    acces.append(train_acc / len(train_loader))
    # Evaluate on the test set
    eval_loss = 0
    eval_acc = 0
    # Switch the model to evaluation mode
    model.eval()
    for img, label in test_loader:
        img = img.to(device)
        label = label.to(device)
        img = img.view(img.size(0), -1)
        out = model(img)
        loss = criterion(out, label)
        # Accumulate the loss
        eval_loss += loss.item()
        # Accumulate the accuracy
        _, pred = out.max(1)
        num_correct = (pred == label).sum().item()
        acc = num_correct / img.shape[0]
        eval_acc += acc
    print("Second loop finished, wrapping up")
    eval_losses.append(eval_loss / len(test_loader))
    eval_acces.append(eval_acc / len(test_loader))
    print('epoch: {}, Train Loss: {:.4f}, Train Acc: {:.4f}, Test Loss: {:.4f}, Test Acc: {:.4f}'.format(
        epoch, train_loss / len(train_loader), train_acc / len(train_loader),
        eval_loss / len(test_loader), eval_acc / len(test_loader)))
## Visualize the training results
plt.title('train loss')
plt.plot(np.arange(len(losses)), losses)
plt.legend(['Train Loss'], loc='upper right')
print("開始循環(huán),請耐心等待.....")
3 MNIST Recognition with a Fully Connected Neural Network
3.1 Data
import numpy as np
import torch
from torchvision.datasets import mnist
from torch import nn
from torch.autograd import Variable  # Variable is deprecated in modern PyTorch; plain tensors work the same way
def data_tf(x):
    x = np.array(x, dtype="float32") / 255
    x = (x - 0.5) / 0.5
    x = x.reshape((-1))  # flatten the image into a 1-D vector
    x = torch.from_numpy(x)
    return x
# Load the dataset (set download=True if it has not been downloaded yet)
train_set = mnist.MNIST("./data", train=True, transform=data_tf, download=False)
test_set = mnist.MNIST("./data", train=False, transform=data_tf, download=False)
a, a_label = train_set[0]
print(a.shape)  # torch.Size([784])
print(a_label)
3.2 Visualizing the Data
import matplotlib.pyplot as plt
for i in range(1, 37):
    plt.subplot(6, 6, i)
    plt.xticks([])  # hide the axis ticks
    plt.yticks([])
    plt.imshow(train_set.data[i].numpy(), cmap="gray")
    plt.title("%i" % train_set.targets[i])
plt.subplots_adjust(wspace=0, hspace=1)  # adjust the spacing between subplots
plt.show()

from torch.utils.data import DataLoader
train_data = DataLoader(train_set, batch_size=64, shuffle=True)
test_data = DataLoader(test_set, batch_size=128, shuffle=False)
a, a_label = next(iter(train_data))
print(a.shape)        # torch.Size([64, 784])
print(a_label.shape)  # torch.Size([64])
3.3 Defining the Neural Network
net = nn.Sequential(
    nn.Linear(784, 400),
    nn.ReLU(),
    nn.Linear(400, 200),
    nn.ReLU(),
    nn.Linear(200, 100),
    nn.ReLU(),
    nn.Linear(100, 10),
    nn.ReLU()  # note: a ReLU after the last Linear layer clips negative logits; it is usually omitted before CrossEntropyLoss
)
if torch.cuda.is_available():
    net = net.cuda()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), 1e-1)
3.4 Training
losses = []
acces = []
eval_losses = []
eval_acces = []
# Train for 20 epochs in total
for e in range(20):
    train_loss = 0
    train_acc = 0
    net.train()
    for im, label in train_data:
        if torch.cuda.is_available():
            im = Variable(im).cuda()
            label = Variable(label).cuda()
        else:
            im = Variable(im)
            label = Variable(label)
        # Forward propagation
        out = net(im)
        loss = criterion(out, label)
        # Backward propagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Accumulate the loss
        train_loss += loss.item()
        # Compute the classification accuracy
        # max(1) takes the maximum along each row; the first return value is the value, the second the index
        # pred holds one predicted class per sample in the batch
        _, pred = out.max(1)
        num_correct = (pred == label).sum().item()
        acc = num_correct / im.shape[0]
        train_acc += acc
    # One training epoch is finished at this point
    losses.append(train_loss / len(train_data))
    acces.append(train_acc / len(train_data))
    # Evaluate on the test set
    eval_loss = 0
    eval_acc = 0
    net.eval()
    for im, label in test_data:
        if torch.cuda.is_available():
            im = Variable(im).cuda()
            label = Variable(label).cuda()
        else:
            im = Variable(im)
            label = Variable(label)
        # Forward propagation
        out = net(im)
        # Compute the loss
        loss = criterion(out, label)
        eval_loss += loss.item()
        # Compute the accuracy
        _, pred = out.max(1)
        num_correct = (pred == label).sum().item()
        acc = num_correct / im.shape[0]
        eval_acc += acc
    eval_losses.append(eval_loss / len(test_data))
    eval_acces.append(eval_acc / len(test_data))
    print('epoch: {}, Train Loss: {:.6f}, Train Acc: {:.6f}, Eval Loss: {:.6f}, Eval Acc: {:.6f}'.format(
        e, train_loss / len(train_data), train_acc / len(train_data),
        eval_loss / len(test_data), eval_acc / len(test_data)))
3.5 Displaying the Results
%matplotlib inline
plt.subplot(2, 2, 1)
plt.title("train loss")
plt.plot(np.arange(len(losses)), losses)
plt.grid()
plt.subplot(2, 2, 2)
plt.title("train acc")
plt.plot(np.arange(len(acces)), acces)
plt.grid()
plt.subplot(2, 2, 3)
plt.title("test loss")
plt.plot(np.arange(len(eval_losses)), eval_losses)
plt.grid()
plt.subplot(2, 2, 4)
plt.title("test acc")
plt.plot(np.arange(len(eval_acces)), eval_acces)
plt.grid()
plt.subplots_adjust(wspace=0.5, hspace=0.5)

for i in range(1, 5):
    im = test_set.data[i]
    label = test_set.targets[i]
    plt.subplot(2, 2, i)
    plt.imshow(im.numpy(), cmap="gray")
    plt.xticks([])
    plt.yticks([])
    im = data_tf(im)
    im = Variable(im).cuda()  # assumes a GPU is available; drop .cuda() on a CPU-only machine
    out = net(im)
    _, pred = out.max(0)
    plt.title("outcome=%i" % pred.item())
plt.show()

4 How to Build a Neural Network?
Building a neural network mainly involves: choosing the network layers, assembling the network, and choosing the loss function and optimizer.
4.1 Building the Network Layers
On top of Sequential, each layer can be added via add_module(), giving every layer its own name (a sketch of this variant follows the example below).
Each layer can also be added in dictionary form, with an individual name per layer:
from collections import OrderedDict

class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv = torch.nn.Sequential(
            OrderedDict(
                [
                    ("conv1", torch.nn.Conv2d(3, 32, 3, 1, 1)),
                    ("relu1", torch.nn.ReLU()),
                    ("pool", torch.nn.MaxPool2d(2))
                ]
            ))
        self.dense = torch.nn.Sequential(
            OrderedDict([
                ("dense1", torch.nn.Linear(32*3*3, 128)),
                ("relu2", torch.nn.ReLU()),
                ("dense2", torch.nn.Linear(128, 10))
            ])
        )
    def forward(self, x):
        x = self.conv(x)
        x = x.view(x.size(0), -1)  # flatten before the fully connected layers
        x = self.dense(x)
        return x
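For comparison, here is a minimal sketch of the add_module() variant mentioned above, assuming the same layer shapes:
class Net2(torch.nn.Module):
    def __init__(self):
        super(Net2, self).__init__()
        self.conv = torch.nn.Sequential()
        self.conv.add_module("conv1", torch.nn.Conv2d(3, 32, 3, 1, 1))
        self.conv.add_module("relu1", torch.nn.ReLU())
        self.conv.add_module("pool", torch.nn.MaxPool2d(2))
        self.dense = torch.nn.Sequential()
        self.dense.add_module("dense1", torch.nn.Linear(32*3*3, 128))
        self.dense.add_module("relu2", torch.nn.ReLU())
        self.dense.add_module("dense2", torch.nn.Linear(128, 10))
    def forward(self, x):
        x = self.conv(x)
        x = x.view(x.size(0), -1)
        x = self.dense(x)
        return x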
4.2 Forward and Backward Propagation
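Calling a module instance runs its forward method to produce the output, and calling backward() on the resulting loss lets autograd compute the gradients of all parameters. A minimal sketch, reusing the model and criterion names defined in Section 2:
out = model(img)              # forward propagation: invokes model.forward(img)
loss = criterion(out, label)
loss.backward()               # backward propagation: autograd fills in the parameter gradients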
4.3 Training the Model
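Training chains these pieces together epoch by epoch; a minimal skeleton under the same assumptions (model, criterion, optimizer, and train_loader as defined in Section 2):
model.train()
for epoch in range(num_epoches):
    for img, label in train_loader:
        out = model(img)              # forward pass
        loss = criterion(out, label)  # compute the loss
        optimizer.zero_grad()         # clear the accumulated gradients
        loss.backward()               # backward pass
        optimizer.step()              # update the parameters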
5 The Neural Network Toolbox nn
5.2 nn.functional
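The practical difference: layers from nn (such as nn.Linear) carry learnable parameters and are therefore created in __init__, while their nn.functional counterparts (such as F.relu) are stateless functions that can simply be called in forward. A minimal sketch of this contrast:
import torch.nn.functional as F
from torch import nn

class TinyNet(nn.Module):
    def __init__(self):
        super(TinyNet, self).__init__()
        self.fc = nn.Linear(784, 10)  # nn.Linear holds learnable weights, so it lives in __init__
    def forward(self, x):
        return F.relu(self.fc(x))     # F.relu has no state, so it is just called in forward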
6 Optimizers
import torch.optim as optim
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum)
Passing the input data into model, an instance of the network Net, automatically executes the forward function and yields the output out; out and the label are then used to compute the loss:
out = model(img)
loss = criterion(out, label)
By default gradients are accumulated, so they must be zeroed before backpropagation:
optimizer.zero_grad()
loss.backward()
Update the parameters based on the current gradients:
optimizer.step()
7 Dynamically Adjusting the Learning Rate
optimizer.param_groups is a list of length 1; optimizer.param_groups[0] is a dict of six entries, including the weights, lr, momentum, and so on.
for epoch in range(num_epoches):
    ## Dynamically adjust the learning rate
    if epoch % 5 == 0:
        optimizer.param_groups[0]['lr'] *= 0.1
        print(optimizer.param_groups[0]['lr'])
    for img, label in train_loader:
        ...  # training steps as in Section 2.5
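PyTorch's built-in schedulers express the same idea more declaratively; a minimal sketch using torch.optim.lr_scheduler.StepLR, which multiplies the learning rate by gamma every step_size epochs:
from torch.optim import lr_scheduler

scheduler = lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)
for epoch in range(num_epoches):
    for img, label in train_loader:
        ...  # training steps as in Section 2.5
    scheduler.step()  # advance the schedule once per epoch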
8 Comparing Optimizers
## (1) Import the required modules
import torch
import torch.utils.data as Data
import torch.nn.functional as F
import matplotlib.pyplot as plt
%matplotlib inline
# Hyperparameters
LR = 0.01
BATCH_SIZE = 32
EPOCH = 12
## (2) Generate the data
# Generate the training data
# torch.unsqueeze() turns the 1-D tensor into a 2-D (1000, 1) tensor, since nn.Linear expects (batch, features) input
x = torch.unsqueeze(torch.linspace(-1, 1, 1000), dim=1)
# 0.1 * torch.normal(torch.zeros(*x.size())) adds Gaussian noise
y = x.pow(2) + 0.1 * torch.normal(torch.zeros(*x.size()))
torch_dataset = Data.TensorDataset(x, y)
# An iterable batch generator
loader = Data.DataLoader(dataset=torch_dataset, batch_size=BATCH_SIZE, shuffle=True)
## (3) Build the neural network
class Net(torch.nn.Module):
    # Initialization
    def __init__(self):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(1, 20)
        self.predict = torch.nn.Linear(20, 1)
    # Forward pass
    def forward(self, x):
        x = F.relu(self.hidden(x))
        x = self.predict(x)
        return x
## (4) Use several optimizers
net_SGD = Net()
net_Momentum = Net()
net_RMSProp = Net()
net_Adam = Net()
nets = [net_SGD, net_Momentum, net_RMSProp, net_Adam]
opt_SGD = torch.optim.SGD(net_SGD.parameters(), lr=LR)
opt_Momentum = torch.optim.SGD(net_Momentum.parameters(), lr=LR, momentum=0.9)
opt_RMSProp = torch.optim.RMSprop(net_RMSProp.parameters(), lr=LR, alpha=0.9)
opt_Adam = torch.optim.Adam(net_Adam.parameters(), lr=LR, betas=(0.9, 0.99))
optimizers = [opt_SGD, opt_Momentum, opt_RMSProp, opt_Adam]
## (5) Train the models
loss_func = torch.nn.MSELoss()
loss_his = [[], [], [], []]  # one loss history per optimizer
for epoch in range(EPOCH):
    for step, (batch_x, batch_y) in enumerate(loader):
        for net, opt, l_his in zip(nets, optimizers, loss_his):
            output = net(batch_x)
            loss = loss_func(output, batch_y)
            opt.zero_grad()
            loss.backward()
            opt.step()
            l_his.append(loss.item())  # record the scalar loss
labels = ['SGD', 'Momentum', 'RMSProp', 'Adam']
## (6) Visualize the results
for i, l_his in enumerate(loss_his):
    plt.plot(l_his, label=labels[i])
plt.legend(loc='best')
plt.xlabel('Steps')
plt.ylabel('Loss')
plt.ylim((0, 0.2))
plt.show()
