- Manually implement a feedforward neural network for regression, binary classification, and multi-class classification; analyse the results and plot the loss curves on the training and test sets;
- Implement the same regression, binary, and multi-class networks with torch.nn; analyse the results and plot the training and test loss curves;
- On top of the multi-class experiment, try at least three different activation functions and compare the results;
- For the multi-class model, evaluate how the number of hidden layers and the number of hidden units affect the results: run comparison experiments with different depths and widths and analyse the outcomes;
- In the multi-class experiment, implement dropout both by hand and with torch.nn, and study how different drop probabilities affect the results (loss curves may be used for the comparison);
- In the multi-class experiment, implement L2 regularisation both by hand and with torch.nn, and study how the penalty weight affects the results (loss curves may be used for the comparison);
- For the regression, binary, and multi-class tasks, take the best model from the experiments above and evaluate it with 10-fold cross-validation; besides the final result, report each fold's result in a table (a minimal sketch of the dropout, L2, and fold-splitting pieces follows this list).
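Items 5–7 rely on three building blocks that do not appear in the code later in this post: dropout implemented by hand, an L2 penalty added to the data loss, and fold splitting for cross-validation. The sketch below is a minimal illustration of each, assuming plain tensors and the manual training loop used throughout this post; the function names (dropout_layer, l2_penalty, k_fold_split) are illustrative assumptions, not the exact code behind the reported results.
import torch

def dropout_layer(x, drop_prob):
    # Inverted dropout: zero each activation with probability drop_prob,
    # then rescale by 1/(1 - drop_prob) so the expected value is unchanged.
    assert 0 <= drop_prob < 1
    if drop_prob == 0:
        return x
    mask = (torch.rand(x.shape) > drop_prob).float()
    return mask * x / (1.0 - drop_prob)

def l2_penalty(weights, lamb):
    # L2 regularisation: lamb/2 times the sum of squared weights,
    # typically applied to the weight matrices only, not the biases.
    return lamb * sum((w ** 2).sum() for w in weights) / 2

def k_fold_split(k, i, features, labels):
    # Use the i-th of k folds as the validation set and the rest for training.
    fold_size = features.shape[0] // k
    start, end = i * fold_size, (i + 1) * fold_size
    valid_features, valid_labels = features[start:end], labels[start:end]
    train_features = torch.cat((features[:start], features[end:]), dim=0)
    train_labels = torch.cat((labels[:start], labels[end:]), dim=0)
    return train_features, train_labels, valid_features, valid_labels
In the training loop, dropout_layer would wrap the hidden activation during training only, and the penalty would be added as l = loss_func(y_hat, y) + l2_penalty([W1, W2], lamb).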
This experiment was run with Python 3.9.7 and PyTorch 1.10, installed under a PyCharm environment.
(1) Regression dataset
The regression task uses a single dataset of 10,000 samples, split into a training set of 7,000 and a test set of 3,000. Each sample has p = 500 features, and the labels follow the high-dimensional linear function y = 0.0056 · Σ_{i=1}^{500} x_i + 0.028 + ε, where ε is Gaussian noise with standard deviation 0.01. The code below constructs this dataset.
import torch
import numpy as np
num_inputs = 500
num_examples = 10000
x_features = torch.tensor(np.random.normal(0, 1, (num_examples, num_inputs)), dtype=torch.float)
y_labels = torch.mm(x_features,torch.ones(500,1)*0.0056) + 0.028
y_labels += torch.tensor(np.random.normal(0, 0.01, size=y_labels.size()), dtype=torch.float)
# training set
trainfeatures =x_features[:7000]
trainlabels = y_labels[:7000]
# test set
testfeatures =x_features[7000:]
testlabels = y_labels[7000:]
(2) Binary classification dataset
The binary classification task combines two datasets, each with 10,000 samples split into 7,000 for training and 3,000 for testing. In both datasets the feature vector x has 200 dimensions; the two datasets are drawn from normal distributions with the same variance and opposite means, and carry labels 1 and 0, respectively. The following code constructs the dataset.
import torch
from matplotlib import pyplot as plt
num_inputs = 200
# class 1
x1 = torch.normal(1,1,(10000, num_inputs))
y1 = torch.ones(10000,1)
x1_train = x1[:7000]
x1_test = x1[7000:]
# class 0
x2 = torch.normal(-1,1,(10000, num_inputs))
y2 = torch.zeros(10000,1)
x2_train = x2[:7000]
x2_test = x2[7000:]
# merge the training set
train_features = torch.cat((x1_train,x2_train), 0).type(torch.FloatTensor)
train_labels = torch.cat((y1[:7000], y2[:7000]), 0).type(torch.FloatTensor)
# merge the test set
test_features = torch.cat((x1_test,x2_test), 0).type(torch.FloatTensor)
test_labels = torch.cat((y1[7000:], y2[7000:]), 0).type(torch.FloatTensor)
# visualise the first two of the 200 feature dimensions
plt.scatter(train_features.numpy()[:, 0], train_features.numpy()[:, 1], c=train_labels.numpy().ravel(), s=100, lw=0, cmap='RdYlGn')
plt.show()
(3) Multi-class dataset
The multi-class dataset is the MNIST handwritten-digit dataset, which contains 60,000 training images and 10,000 test images. Each image has a fixed size of 28×28 pixels with values in [0, 1], and each image is flattened into a one-dimensional array of 784 features before entering the network. The following code downloads MNIST.
import torch  # import the PyTorch framework
import torchvision
import torchvision.transforms as transforms
## Load the data
# Download the MNIST dataset and convert every sample to a Tensor
mnist_train = torchvision.datasets.MNIST(root='~/Datasets/MNIST',
train=True, download=True, transform=transforms.ToTensor())
mnist_test = torchvision.datasets.MNIST(root='~/Datasets/MNIST',
train=False, transform=transforms.ToTensor())
# Read mini-batches through DataLoader
batch_size = 128
train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True,num_workers=0)
test_iter = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False,num_workers=0)
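As a quick sanity check (an optional addition, not part of the lab handout), one mini-batch can be drawn from the iterator to confirm the shapes the network will receive:
X, y = next(iter(train_iter))
print(X.shape, y.shape)  # torch.Size([128, 1, 28, 28]) torch.Size([128])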
1. Manually implement a feedforward neural network for the regression, binary classification, and multi-class classification tasks; analyse the results and plot the training and test loss curves
(1) Regression
## Import the packages used in this experiment
import numpy as np
import torch
from torch.utils import data
from matplotlib import pyplot as plt
## Part 1: build the regression dataset
n_train, n_test, num_inputs = 7000, 3000, 500
true_w, true_b = torch.ones(num_inputs, 1) * 0.0056, 0.028  # true weights and bias, matching the dataset definition above
features = torch.randn((n_train + n_test, num_inputs))
labels = torch.matmul(features, true_w) + true_b
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)  # cast the noise to float32 so the in-place add type-checks
train_features, test_features = features[:n_train, :], features[n_train:, :]
train_labels, test_labels = labels[:n_train], labels[n_train:]
print(train_features.shape)  # check the shapes
print(test_features.shape)
## Part 2: define the data iterators
dataset_train = data.TensorDataset(train_features, train_labels)
dataset_test = data.TensorDataset(test_features, test_labels)
batch_size = 50
train_iter = data.DataLoader(dataset=dataset_train, batch_size=batch_size, shuffle=True, num_workers=0)
test_iter = data.DataLoader(dataset=dataset_test, batch_size=batch_size, shuffle=False, num_workers=0)  # no need to shuffle the test set
## Part 3: define the model and its forward pass
class Net():
    def __init__(self):
        # Define and initialise the model parameters
        num_inputs, num_outputs, num_hiddens = 500, 1, 256
        W1 = torch.tensor(np.random.normal(0, 0.01, (num_hiddens, num_inputs)), dtype=torch.float32)
        b1 = torch.zeros(1, dtype=torch.float32)
        W2 = torch.tensor(np.random.normal(0, 0.01, (num_outputs, num_hiddens)), dtype=torch.float32)
        b2 = torch.zeros(1, dtype=torch.float32)
        # Enable gradient tracking for all four parameters
        self.params = [W1, b1, W2, b2]
        for param in self.params:
            param.requires_grad_(requires_grad=True)
        # Define the model structure
        self.inputs_layer = lambda x: x.view(x.shape[0], -1)  # flatten each sample
        self.hiddens_layer = lambda x: self.my_ReLU(torch.matmul(x, W1.t()) + b1)
        self.outputs_layer = lambda x: torch.matmul(x, W2.t()) + b2
    @staticmethod
    def my_ReLU(x):
        return torch.max(input=x, other=torch.tensor(0.0))
    def forward(self, x):
        flatten_input = self.inputs_layer(x)
        hidden_output = self.hiddens_layer(flatten_input)
        final_output = self.outputs_layer(hidden_output)
        return final_output
## Part 4: define the loss function and the optimisation algorithm
loss_func = torch.nn.MSELoss()
def SGD(params, lr):
    # mini-batch SGD: move each parameter along the negative gradient
    for param in params:
        param.data -= lr * param.grad
## Part 5: define the evaluation function
def test(data_iter, net, loss_func):
    test_loss_sum, c = 0.0, 0
    for X, y in data_iter:
        result = net.forward(X)
        test_loss_sum += loss_func(result, y).item()
        c += 1
    return test_loss_sum / c  # mean loss over mini-batches
## Part 6: define the training function
def train(net, train_iter, loss_func, num_epochs, batch_size, lr=None, optimizer=None):
    train_loss_list = []
    test_loss_list = []
    for epoch in range(num_epochs):
        train_l_sum, c = 0.0, 0
        for X, y in train_iter:  # X and y hold the features and labels of one mini-batch
            y_hat = net.forward(X)
            l = loss_func(y_hat, y)
            l.backward()
            optimizer(net.params, lr)
            for param in net.params:
                param.grad.data.zero_()  # clear the gradients after each update
            train_l_sum += l.item()
            c += 1
        test_loss = test(test_iter, net, loss_func)
        train_loss_list.append(train_l_sum / c)
        test_loss_list.append(test_loss)
        print('epoch %d, train_loss %.4f, test_loss %.4f' % (epoch + 1, train_l_sum / c, test_loss))
    return train_loss_list, test_loss_list
## Part 7: visualise the results
def draw_loss(train_loss, test_loss, valid_loss=None):
    x = np.linspace(0, len(train_loss), len(train_loss))  # all curves share the epoch axis
    plt.plot(x, train_loss, label="Train_Loss", linewidth=1.5)
    plt.plot(x, test_loss, label="Test_Loss", linewidth=1.5)
    if valid_loss is not None:
        plt.plot(x, valid_loss, label="Valid_Loss", linewidth=1.5)
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.legend()
    plt.show()
## Part 8: train the model
net=Net()
num_epochs=100
lr = 0.003
optimizer=SGD
train_loss,test_loss = train(net, train_iter,loss_func,num_epochs,batch_size,lr,optimizer)
draw_loss(train_loss,test_loss)
(2) Binary classification
## Import the packages used in this experiment
import numpy as np
import torch
from torch.utils import data
from matplotlib import pyplot as plt
## Part 1: build the binary classification dataset
n_train, n_test, num_inputs = 7000, 3000, 200
# Dataset 1: label 0
x0 = torch.normal(1, 1, (n_train + n_test, num_inputs))
y0 = torch.zeros(n_train + n_test, 1)
# Dataset 2: label 1
x1 = torch.normal(-1, 1, (n_train + n_test, num_inputs))
y1 = torch.ones(n_train + n_test, 1)
# Merge the two datasets with torch.cat
train_features = torch.cat((x0[:n_train], x1[:n_train]), 0).type(torch.FloatTensor)
train_labels = torch.cat((y0[:n_train], y1[:n_train]), 0).type(torch.FloatTensor)
test_features = torch.cat((x0[n_train:], x1[n_train:]), 0).type(torch.FloatTensor)
test_labels = torch.cat((y0[n_train:], y1[n_train:]), 0).type(torch.FloatTensor)
print(train_features.shape, train_labels.shape, test_features.shape, test_labels.shape)
## Part 2: define the data iterators
dataset_train = data.TensorDataset(train_features, train_labels)
dataset_test = data.TensorDataset(test_features, test_labels)
batch_size = 50
train_iter = data.DataLoader(dataset=dataset_train, batch_size=batch_size, shuffle=True, num_workers=0)
test_iter = data.DataLoader(dataset=dataset_test, batch_size=batch_size, shuffle=False, num_workers=0)  # no need to shuffle the test set
## Part 3: define the model and its forward pass
class Net():
    def __init__(self):
        # Define and initialise the model parameters
        num_inputs, num_outputs, num_hiddens = 200, 1, 256
        W1 = torch.tensor(np.random.normal(0, 0.01, (num_hiddens, num_inputs)), dtype=torch.float32)
        b1 = torch.zeros(1, dtype=torch.float32)
        W2 = torch.tensor(np.random.normal(0, 0.01, (num_outputs, num_hiddens)), dtype=torch.float32)
        b2 = torch.zeros(1, dtype=torch.float32)
        # Enable gradient tracking for all four parameters
        self.params = [W1, b1, W2, b2]
        for param in self.params:
            param.requires_grad_(requires_grad=True)
        # Define the model structure
        self.inputs_layer = lambda x: x.view(x.shape[0], -1)  # flatten each sample
        self.hiddens_layer = lambda x: self.my_ReLU(torch.matmul(x, W1.t()) + b1)
        self.outputs_layer = lambda x: torch.matmul(x, W2.t()) + b2
    @staticmethod
    def my_ReLU(x):
        return torch.max(input=x, other=torch.tensor(0.0))
    def forward(self, x):
        flatten_input = self.inputs_layer(x)
        hidden_output = self.hiddens_layer(flatten_input)
        final_output = self.outputs_layer(hidden_output)
        return final_output
## Part 4: define the loss function and the optimisation algorithm
loss_func = torch.nn.BCEWithLogitsLoss()  # applies the sigmoid internally, so the network outputs raw logits
def SGD(params, lr):
    # mini-batch SGD: move each parameter along the negative gradient
    for param in params:
        param.data -= lr * param.grad
## Part 5: define the evaluation function
def test(data_iter, net, loss_func):
    test_loss_sum, c = 0.0, 0
    for X, y in data_iter:
        result = net.forward(X)
        test_loss_sum += loss_func(result, y).item()
        c += 1
    return test_loss_sum / c  # mean loss over mini-batches
## Part 6: define the training function
def train(net, train_iter, loss_func, num_epochs, batch_size, lr=None, optimizer=None):
    train_loss_list = []
    test_loss_list = []
    for epoch in range(num_epochs):
        train_l_sum, c = 0.0, 0
        for X, y in train_iter:  # X and y hold the features and labels of one mini-batch
            y_hat = net.forward(X)
            l = loss_func(y_hat, y)
            l.backward()
            optimizer(net.params, lr)
            for param in net.params:
                param.grad.data.zero_()  # clear the gradients after each update
            train_l_sum += l.item()
            c += 1
        test_loss = test(test_iter, net, loss_func)
        train_loss_list.append(train_l_sum / c)
        test_loss_list.append(test_loss)
        print('epoch %d, train_loss %.4f, test_loss %.4f' % (epoch + 1, train_l_sum / c, test_loss))
    return train_loss_list, test_loss_list
## Part 7: visualise the results
def draw_loss(train_loss, test_loss, valid_loss=None):
    x = np.linspace(0, len(train_loss), len(train_loss))  # all curves share the epoch axis
    plt.plot(x, train_loss, label="Train_Loss", linewidth=1.5)
    plt.plot(x, test_loss, label="Test_Loss", linewidth=1.5)
    if valid_loss is not None:
        plt.plot(x, valid_loss, label="Valid_Loss", linewidth=1.5)
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.legend()
    plt.show()
## Part 8: train the model
net=Net()
num_epochs=100
lr = 0.003
optimizer=SGD
train_loss,test_loss = train(net, train_iter,loss_func,num_epochs,batch_size,lr,optimizer)
draw_loss(train_loss,test_loss)
(3) Multi-class classification
## Import the packages used in this experiment
import numpy as np
import torch
from torch.utils import data
from matplotlib import pyplot as plt
import torchvision
import torchvision.transforms as transforms
## Part 1: load the multi-class dataset (MNIST)
mnist_train = torchvision.datasets.MNIST(root='~/Datasets/MNIST', train=True, download=True, transform=transforms.ToTensor())
mnist_test = torchvision.datasets.MNIST(root='~/Datasets/MNIST', train=False, transform=transforms.ToTensor())
## Part 2: define the data iterators
# Read mini-batches through DataLoader
batch_size = 128
train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=0)
test_iter = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False, num_workers=0)
## Part 3: define the model and its forward pass
class Net():
    def __init__(self):
        # Define and initialise the model parameters
        num_inputs, num_outputs, num_hiddens = 784, 10, 256
        W1 = torch.tensor(np.random.normal(0, 0.01, (num_hiddens, num_inputs)), dtype=torch.float32)
        b1 = torch.zeros(1, dtype=torch.float32)
        W2 = torch.tensor(np.random.normal(0, 0.01, (num_outputs, num_hiddens)), dtype=torch.float32)
        b2 = torch.zeros(1, dtype=torch.float32)
        # Enable gradient tracking for all four parameters
        self.params = [W1, b1, W2, b2]
        for param in self.params:
            param.requires_grad_(requires_grad=True)
        # Define the model structure
        self.inputs_layer = lambda x: x.view(x.shape[0], -1)  # flatten 1x28x28 images into 784-dim vectors
        self.hiddens_layer = lambda x: self.my_ReLU(torch.matmul(x, W1.t()) + b1)
        self.outputs_layer = lambda x: torch.matmul(x, W2.t()) + b2
    @staticmethod
    def my_ReLU(x):
        return torch.max(input=x, other=torch.tensor(0.0))
    def forward(self, x):
        flatten_input = self.inputs_layer(x)
        hidden_output = self.hiddens_layer(flatten_input)
        final_output = self.outputs_layer(hidden_output)
        return final_output
## Part 4: define the loss function and the optimisation algorithm
loss_func = torch.nn.CrossEntropyLoss()  # combines log-softmax and NLL loss, so the network outputs raw logits
def SGD(params, lr):
    # mini-batch SGD: move each parameter along the negative gradient
    for param in params:
        param.data -= lr * param.grad
## Part 5: define the evaluation function
def test(data_iter, net, loss_func):
    test_loss_sum, c = 0.0, 0
    for X, y in data_iter:
        result = net.forward(X)
        test_loss_sum += loss_func(result, y).item()
        c += 1
    return test_loss_sum / c  # mean loss over mini-batches
## Part 6: define the training function
def train(net, train_iter, loss_func, num_epochs, batch_size, lr=None, optimizer=None):
    train_loss_list = []
    test_loss_list = []
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n, c = 0.0, 0.0, 0, 0
        for X, y in train_iter:  # X and y hold the features and labels of one mini-batch
            y_hat = net.forward(X)
            l = loss_func(y_hat, y)
            l.backward()
            optimizer(net.params, lr)
            for param in net.params:
                param.grad.data.zero_()  # clear the gradients after each update
            train_l_sum += l.item()
            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()  # running count of correct predictions (tracked but not reported here)
            n += y.shape[0]
            c += 1
        test_loss = test(test_iter, net, loss_func)
        train_loss_list.append(train_l_sum / c)
        test_loss_list.append(test_loss)
        print('epoch %d, train_loss %.4f, test_loss %.4f' % (epoch + 1, train_l_sum / c, test_loss))
    return train_loss_list, test_loss_list
## Part 7: visualise the results
def draw_loss(train_loss, test_loss, valid_loss=None):
    x = np.linspace(0, len(train_loss), len(train_loss))  # all curves share the epoch axis
    plt.plot(x, train_loss, label="Train_Loss", linewidth=1.5)
    plt.plot(x, test_loss, label="Test_Loss", linewidth=1.5)
    if valid_loss is not None:
        plt.plot(x, valid_loss, label="Valid_Loss", linewidth=1.5)
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.legend()
    plt.show()
## Part 8: train the model
net=Net()
num_epochs=100
lr = 0.03
optimizer=SGD
train_loss,test_loss = train(net,train_iter,loss_func,num_epochs,batch_size,lr,optimizer)
draw_loss(train_loss,test_loss)
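Item 2 of the task list asks for the same models built with torch.nn. As a point of comparison, the following is a minimal sketch of the multi-class version, reusing the train_iter and num_epochs defined above; the 784-256-10 architecture mirrors the manual model, but this sketch is illustrative rather than the exact code behind the reported results.
import torch
from torch import nn

# The same architecture as the manual model, expressed with torch.nn
net = nn.Sequential(
    nn.Flatten(),          # flatten 1x28x28 images into 784-dim vectors
    nn.Linear(784, 256),
    nn.ReLU(),
    nn.Linear(256, 10),
)
loss_func = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), lr=0.03)
for epoch in range(num_epochs):
    train_l_sum, c = 0.0, 0
    for X, y in train_iter:
        y_hat = net(X)
        l = loss_func(y_hat, y)
        optimizer.zero_grad()  # the built-in optimizer handles gradient clearing and the update step
        l.backward()
        optimizer.step()
        train_l_sum += l.item()
        c += 1
    print('epoch %d, train_loss %.4f' % (epoch + 1, train_l_sum / c))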