When training a deep network we often run into the following problem: AttributeError: 'NoneType' object has no attribute 'data'. The fix is usually to check whether something is wrong in the way the network itself is constructed.
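The error typically surfaces at the first line that touches the model output, along these lines (a minimal illustration, not tied to any particular model):

output = model(data)                        # if forward() has no return statement, output is None
pred = output.data.max(1, keepdim=True)[1]  # AttributeError: 'NoneType' object has no attribute 'data'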
Rather than talk in the abstract, let's learn by doing. Below is a lightweight neural-network training example whose source code has three parts: network construction, data preprocessing and loading, and network training. (The training data is CIFAR-10, which torchvision can download directly, so the example is easy to reproduce.)
Network construction
import torch
import torch.nn as nn
use_cuda = torch.cuda.is_available()
class dw_conv(nn.Module):
    # depthwise convolution
    def __init__(self, in_channels, out_channels, stride):
        super(dw_conv, self).__init__()
        self.dw_conv_3 = nn.Conv2d(
            in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride, groups=in_channels, bias=False
        )
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.dw_conv_3(x)
        out = self.bn(out)
        out = self.relu(out)
class point_conv(nn.Module):
    # pointwise (1x1) convolution
    def __init__(self, in_channels, out_channels):
        super(point_conv, self).__init__()
        self.conv_1x1 = nn.Conv2d(
            in_channels=in_channels, out_channels=out_channels, kernel_size=1
        )
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(True)

    def forward(self, x):
        out = self.conv_1x1(x)
        out = self.bn(out)
        out = self.relu(out)
        return out
class My_Mobilenet(nn.Module):
    def __init__(self, num_classes):
        super(My_Mobilenet, self).__init__()
        self.num_classes = num_classes
# if large_img:
# self.features = nn.Sequential(
# nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, stride=2),
# # nn.BatchNorm2d(32),
# nn.ReLU(inplace=True),
# dw_conv(32, 32, 1),
# point_conv(32, 64),
# dw_conv(64, 64, 2),
# point_conv(64, 128),
# dw_conv(128, 128, 1),
# point_conv(128, 128),
# dw_conv(128, 128, 2),
# point_conv(128, 256),
# dw_conv(256, 256, 1),
# point_conv(256, 256),
# dw_conv(256, 256, 2),
# point_conv(256, 512),
#
# dw_conv(512, 512, 1),
# point_conv(512, 512),
# dw_conv(512, 512, 1),
# point_conv(512, 512),
# dw_conv(512, 512, 1),
# point_conv(512, 512),
# dw_conv(512, 512, 1),
# point_conv(512, 512),
# dw_conv(512, 512, 1),
# point_conv(512, 512),
#
# dw_conv(512, 512, 2),
# point_conv(512, 1024),
# dw_conv(1024, 1024, 2),
# point_conv(1024, 1024),
# nn.AvgPool2d(7),
# )
# else:
        self.features = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, stride=1),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            dw_conv(32, 32, 1),
            point_conv(32, 64),
            dw_conv(64, 64, 1),
            point_conv(64, 128),
            dw_conv(128, 128, 1),
            point_conv(128, 128),
            dw_conv(128, 128, 1),
            point_conv(128, 256),
            dw_conv(256, 256, 1),
            point_conv(256, 256),
            dw_conv(256, 256, 1),
            point_conv(256, 512),
            dw_conv(512, 512, 1),
            point_conv(512, 512),
            dw_conv(512, 512, 1),
            point_conv(512, 512),
            dw_conv(512, 512, 1),
            point_conv(512, 512),
            dw_conv(512, 512, 1),
            point_conv(512, 512),
            dw_conv(512, 512, 1),
            point_conv(512, 512),
            dw_conv(512, 512, 1),
            point_conv(512, 1024),
            dw_conv(1024, 1024, 1),
            point_conv(1024, 1024),
            nn.AvgPool2d(4),
        )
        self.fc = nn.Linear(1024, self.num_classes)

    def forward(self, x):
        out = self.features(x)
        out = out.view(-1, 1024)
        out = self.fc(out)
def mobilenet(num_classes):
    """
    Model has been designed to work on either ImageNet or CIFAR-10.
    :param num_classes: 1000 for ImageNet, 10 for CIFAR-10
    :return: model
    """
    model = My_Mobilenet(num_classes)
    if use_cuda:
        model = model.cuda()
    return model

# from torchsummary import summary
# model = mobilenet(10)
# print(summary(model, (3, 32, 32)))  # CIFAR-10-sized input
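Before wiring the network into a training loop, a quick smoke test with a dummy CIFAR-10-sized batch is a cheap way to surface construction problems early. A minimal sketch, run for example at the bottom of this same file (mobilenet_v1.py, per the import used later):

# smoke test: push one dummy CIFAR-10-sized batch through the model
model = mobilenet(num_classes=10)
dummy = torch.randn(1, 3, 32, 32)
if use_cuda:
    dummy = dummy.cuda()
out = model(dummy)
# a healthy model returns a (1, 10) tensor; None here means some forward() is missing a return
print(type(out), out.shape if out is not None else 'forward returned None')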
Data preprocessing and loading
import numpy as np
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
from torchvision import datasets, transforms
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from sklearn.metrics import accuracy_score
from mobilenet_v1 import mobilenet
# data preprocessing: torchvision.transforms
# data loading: torchvision.datasets
# transforms for the dataset
train_transform = transforms.Compose([
    transforms.RandomCrop(32, padding=4),  # the size can also be given as a tuple (32, 32)
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
valid_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
test_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
# load the dataset
valid_size = 0.1
train_dataset = datasets.CIFAR10(root='cifar10', train=True, download=False, transform=train_transform)
# if the 'cifar10' directory does not exist under root yet, set download=True to fetch the dataset into it
valid_dataset = datasets.CIFAR10(root='cifar10', train=True, download=False, transform=valid_transform)
# random train/validation split via samplers; optional, depending on your needs
num_train = len(train_dataset)
# print(num_train)
indices = list(range(num_train))
split = int(valid_size * num_train)
np.random.shuffle(indices)
train_idx, valid_idx = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
train_loader = DataLoader(dataset=train_dataset, batch_size=16, sampler=train_sampler)
valid_loader = DataLoader(dataset=valid_dataset, batch_size=16, sampler=valid_sampler)
test_dataset = datasets.CIFAR10(root='cifar10', train=False, download=False, transform=test_transform)
test_loader = DataLoader(dataset=test_dataset, batch_size=16, shuffle=False)
# print('len(train_loader):{}\tlen(valid_loader):{}\tlen(test_loader):{}'.format(
# len(train_loader), len(valid_loader), len(test_loader)))
#
# print(train_loader.dataset)
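A quick way to confirm the loaders are wired up correctly is to pull a single batch and inspect the shapes. A small sketch; the expected sizes assume the batch_size of 16 and the 32x32 CIFAR-10 images used above:

# fetch one batch from the training loader and check tensor shapes
images, labels = next(iter(train_loader))
print(images.shape, labels.shape)  # expected: torch.Size([16, 3, 32, 32]) torch.Size([16])
print(len(train_loader), len(valid_loader), len(test_loader))  # number of batches per loader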
Network training
"""
Model training and evaluation is organised around three parts: loss function design, model training, and model validation/testing.
"""
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
from torchvision import datasets, transforms
from torch.autograd import Variable
from torch.utils.data.sampler import SubsetRandomSampler
from sklearn.metrics import accuracy_score
from mobilenet_v1 import mobilenet
from data_process_load import train_loader, valid_loader, test_loader
# import cifar10
use_cuda = torch.cuda.is_available()
model = mobilenet(num_classes=10)  # start with CIFAR-10, a small dataset
optimizer = optim.Adam(model.parameters(), lr=0.01)
scheduler = StepLR(optimizer=optimizer, step_size=10, gamma=0.5)
criterion = nn.CrossEntropyLoss()
# training function
def train(epoch):
    model.train()  # training mode: all parameters are updated, dropout/batchnorm use training behaviour
    for batch_idx, (data, target) in enumerate(train_loader):
        if use_cuda:
            data, target = data.cuda(), target.cuda()
        # data, target = Variable(data), Variable(target)
        optimizer.zero_grad()
        output = model(data)
        correct = 0
        # print(output)
        pred = output.data.max(1, keepdim=True)[1]
        # print(target.data.view_as(pred))
        # print(target, target.data)
        correct += pred.eq(target.data.view_as(pred)).sum().item()  # .eq() counts matching predictions
        loss = criterion(output, target)
        loss.backward()
        accuracy = 100.0 * correct / len(data)
        optimizer.step()
        if batch_idx % 10 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}, Accuracy: {:.2f}'.format(
                epoch, batch_idx * len(data), len(train_loader.sampler), 100.0 * batch_idx / len(train_loader), loss.item(), accuracy
            ))
    scheduler.step()
# validation function
def validate(epoch):
    model.eval()
    valid_loss = 0
    correct = 0
    with torch.no_grad():  # no gradients needed during evaluation
        for data, target in valid_loader:
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            # data, target = Variable(data), Variable(target)
            output = model(data)
            valid_loss += F.cross_entropy(output, target, reduction='sum').item()
            pred = output.data.max(1, keepdim=True)[1]
            correct += pred.eq(target.data.view_as(pred)).sum().item()  # number of correctly classified samples
    num_samples = len(valid_loader.sampler)  # number of validation samples, not batches
    valid_loss /= num_samples
    accuracy = 100.0 * correct / num_samples
    print('\nValidation set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
        valid_loss, correct, num_samples, accuracy
    ))
    return valid_loss, accuracy
def test(epoch):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():  # no gradients needed during evaluation
        for data, target in test_loader:
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            # data, target = Variable(data, volatile=True), Variable(target)
            output = model(data)
            test_loss += F.cross_entropy(output, target, reduction='sum').item()
            pred = output.data.max(1, keepdim=True)[1]
            correct += pred.eq(target.data.view_as(pred)).cpu().sum().item()
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset), 100.0 * correct / len(test_loader.dataset)
    ))
for epoch in range(50):
    train(epoch)
    loss, accuracy = validate(epoch)
Running the training loop then produces the error below:

AttributeError: 'NoneType' object has no attribute 'data'
Debugging shows that the forward methods defined in the network never return their result; adding return out as the last line of each forward resolves the problem.
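For reference, the corrected forward methods look like this (only the final return line of each changes; everything else in the classes stays as above):

class dw_conv(nn.Module):
    ...
    def forward(self, x):
        out = self.dw_conv_3(x)
        out = self.bn(out)
        out = self.relu(out)
        return out  # was missing, so the module silently produced None

class My_Mobilenet(nn.Module):
    ...
    def forward(self, x):
        out = self.features(x)
        out = out.view(-1, 1024)
        out = self.fc(out)
        return out  # was missing, so model(...) returned None and output.data raised the AttributeError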
That concludes this walkthrough of AttributeError: 'NoneType' object has no attribute 'data'.