import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
# Define the network architecture
class SimpleNN(nn.Module):
    def __init__(self, input_size, hidden_size, num_classes):
        super(SimpleNN, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        return out
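# A quick sanity check of the architecture (an illustrative addition, not part
# of the original tutorial): feed a dummy batch through the model and confirm
# the output has one logit per class.
_check = SimpleNN(784, 784, 10)
_dummy = torch.randn(4, 784)   # a fake batch of 4 flattened 28x28 images
print(_check(_dummy).shape)    # expected: torch.Size([4, 10])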
# Set the hyperparameters
input_size = 784  # each MNIST image is 28x28 = 784 pixels
hidden_size = 784
num_classes = 10
learning_rate = 0.01
num_epochs = 10
# Load the MNIST dataset
train_dataset = torchvision.datasets.MNIST(root='./data', train=True, transform=transforms.ToTensor(), download=True)
test_dataset = torchvision.datasets.MNIST(root='./data', train=False, transform=transforms.ToTensor())
# Data loaders
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=100, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=100, shuffle=False)
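# Illustrative check (not in the original code): each batch from the loader has
# shape (100, 1, 28, 28), which is why the training loop below flattens it to
# (100, 784) before the first linear layer.
sample_images, sample_labels = next(iter(train_loader))
print(sample_images.shape)                     # torch.Size([100, 1, 28, 28])
print(sample_images.reshape(-1, 28*28).shape)  # torch.Size([100, 784])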
# Instantiate the model
model = SimpleNN(input_size, hidden_size, num_classes)
# Define the loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=learning_rate)
# Train the model
total_step = len(train_loader)
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # Flatten each 28x28 image into a 784-dimensional vector
        images = images.reshape(-1, 28*28)
        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)
        # Backward pass and optimization
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if (i+1) % 100 == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                  .format(epoch+1, num_epochs, i+1, total_step, loss.item()))
# Test the model
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.reshape(-1, 28*28)
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
    print('Accuracy of the network on the 10000 test images: {} %'.format(100 * correct / total))
# Retrieve the model parameters (an iterator over the weight and bias tensors)
params = model.parameters()
# Print each parameter's name and value
for name, param in model.named_parameters():
    print(f'Parameter name: {name}')
    print(f'Parameter value: {param}')
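Beyond printing the parameters, a trained model is usually persisted via its state_dict. A minimal sketch (the filename mnist_fc.pth is an illustrative choice, not from the original article):

# Save only the learned weights, the recommended PyTorch pattern
torch.save(model.state_dict(), 'mnist_fc.pth')
# To reuse them later: rebuild the architecture, then load the weights
restored = SimpleNN(input_size, hidden_size, num_classes)
restored.load_state_dict(torch.load('mnist_fc.pth'))
restored.eval()  # switch to evaluation mode before inference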
The following code achieves a test accuracy of 99.37%.
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
# Define a CNN model suited to the MNIST dataset
class MNISTCNN(nn.Module):
    def __init__(self):
        super(MNISTCNN, self).__init__()
        # Convolutional block 1
        self.conv_block1 = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=32, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2)
        )
        # Convolutional block 2
        self.conv_block2 = nn.Sequential(
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2)
        )
        # Fully connected layers
        self.fc_layer = nn.Sequential(
            nn.Linear(64 * 7 * 7, 512),  # the two conv+pool blocks reduce the 28x28 input to 7x7
            nn.ReLU(),
            nn.Dropout(p=0.5),
            nn.Linear(512, 10)  # MNIST has 10 classes
        )

    def forward(self, x):
        x = self.conv_block1(x)
        x = self.conv_block2(x)
        # Flatten the convolutional output into one vector per sample
        x = x.view(x.size(0), -1)
        # Pass through the fully connected layers
        x = self.fc_layer(x)
        return x
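# The 64 * 7 * 7 input size of the first linear layer can be verified with a
# dummy forward pass (an illustrative check, not in the original code): each
# Conv2d here uses kernel_size=3 with padding=1, which preserves spatial size,
# and each MaxPool2d halves it, so 28 -> 14 -> 7.
_net = MNISTCNN()
_x = torch.randn(1, 1, 28, 28)
print(_net.conv_block1(_x).shape)                    # torch.Size([1, 32, 14, 14])
print(_net.conv_block2(_net.conv_block1(_x)).shape)  # torch.Size([1, 64, 7, 7])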
# Create the model instance
model = MNISTCNN()
# Define the loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
# Load and preprocess the MNIST dataset
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
train_dataset = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
test_dataset = datasets.MNIST(root='./data', train=False, download=True, transform=transform)
# Load the data in batches with DataLoader
batch_size = 64
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
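# The constants 0.1307 and 0.3081 passed to Normalize are the mean and standard
# deviation of the MNIST training pixels. They can be recomputed from the raw
# data (an illustrative sketch; torchvision's MNIST exposes the images as a
# uint8 tensor via its .data attribute):
raw = datasets.MNIST(root='./data', train=True, download=True)
pixels = raw.data.float() / 255.0                 # scale uint8 images to [0, 1]
print(pixels.mean().item(), pixels.std().item())  # roughly 0.1307 and 0.3081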
# Start training
num_epochs = 10
for epoch in range(num_epochs):
    for inputs, labels in train_loader:
        # Forward pass
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        # Backward pass and optimization
        optimizer.zero_grad()  # clear the accumulated gradients
        loss.backward()        # compute gradients
        optimizer.step()       # update parameters
    # Print the loss of the last batch at the end of each epoch
    print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')
# Test the model
model.eval()  # switch to evaluation mode (disables Dropout; layers like BatchNorm use their running statistics)
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print(f'Test Accuracy: {100 * correct / total}%')
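Both examples run on the CPU. To train on a GPU when one is available, the usual pattern is to move the model once and each batch inside the loop. A minimal sketch (an addition for illustration, not part of the original article; it simply runs one more epoch of the same loop on the chosen device):

# Pick a GPU if available; Module.to() moves the parameters in place,
# so the existing optimizer keeps working
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device)
model.train()  # back to training mode after the evaluation above
for inputs, labels in train_loader:
    # each batch must live on the same device as the model
    inputs, labels = inputs.to(device), labels.to(device)
    outputs = model(inputs)
    loss = criterion(outputs, labels)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()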
That concludes this introduction to PyTorch neural-network starter code.