Liz*_*Liz 79 python machine-learning python-3.x deep-learning pytorch
I am trying to train the following CNN, but I keep getting the same error about .cuda() and I don't know how to fix it. Here is my code so far.
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
import torchvision
from torchvision import datasets, transforms, models
from torch.utils.data.sampler import SubsetRandomSampler
data_dir = "/home/ubuntu/ML2/ExamII/train2/"
valid_size = .2
# Normalize the test and train sets with torchvision
train_transforms = transforms.Compose([transforms.Resize(224),
                                       transforms.ToTensor(),
                                       ])
test_transforms = transforms.Compose([transforms.Resize(224),
                                      transforms.ToTensor(),
                                      ])
# ImageFolder class to load the train and test images
train_data = datasets.ImageFolder(data_dir, transform=train_transforms)
test_data = datasets.ImageFolder(data_dir, transform=test_transforms)
# Number of train images
num_train = len(train_data)
indices = list(range(num_train))
# Split = 20% of train images
split = int(np.floor(valid_size * num_train))
# Shuffle indices of train images
np.random.shuffle(indices)
# Subset indices for test and train
train_idx, test_idx = indices[split:], indices[:split]
# Samples elements randomly from a given list of indices
train_sampler = SubsetRandomSampler(train_idx)
test_sampler = SubsetRandomSampler(test_idx)
# Batch and load the images
trainloader = torch.utils.data.DataLoader(train_data, sampler=train_sampler, batch_size=1)
testloader = torch.utils.data.DataLoader(test_data, sampler=test_sampler, batch_size=1)
#print(trainloader.dataset.classes)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = models.resnet50(pretrained=True)
model.fc = nn.Sequential(nn.Linear(2048, 512),
                         nn.ReLU(),
                         nn.Dropout(0.2),
                         nn.Linear(512, 10),
                         nn.LogSigmoid())
# nn.LogSoftmax(dim=1))
# criterion = nn.NLLLoss()
criterion = nn.BCELoss()
optimizer = optim.Adam(model.fc.parameters(), lr=0.003)
model.to(device)
# Train the network
for epoch in range(2):  # loop over the dataset multiple times
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # get the inputs; data is a list of [inputs, labels]
        inputs, labels = data
        # zero the parameter gradients
        optimizer.zero_grad()
        # forward + backward + optimize
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        # print statistics
        running_loss += loss.item()
        if i % 2000 == 1999:  # print every 2000 mini-batches
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0
print('Finished Training')
However, I keep getting this error in the console:
RuntimeError: Input type (torch.FloatTensor) and weight type (torch.cuda.FloatTensor) should be the same
Any ideas on how to solve it? I read that the model may not have been pushed onto my GPU, but I don't know how to fix that. Thanks!
Nic*_*ais 135
You are getting this error because your model is on the GPU while your data is still on the CPU. You therefore need to send the input tensors to the GPU as well.
inputs, labels = data # this is what you had
inputs, labels = inputs.cuda(), labels.cuda() # add this line
Or like this, to stay consistent with the rest of your code:
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
inputs, labels = inputs.to(device), labels.to(device)
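Applied to the training loop from the question, the fix would look roughly like this (a minimal sketch reusing the question's trainloader, model, criterion, and optimizer; only the device handling is new):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)  # put the model's weights on the GPU (falls back to CPU)

for epoch in range(2):
    for i, data in enumerate(trainloader, 0):
        inputs, labels = data
        # move the batch to the same device as the model before the forward pass
        inputs, labels = inputs.to(device), labels.to(device)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()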
The same error is raised in the reverse case, when your input tensors are on the GPU but your model weights are not. In that case, you need to send your model weights to the GPU.
model = MyModel()
if torch.cuda.is_available():
    model.cuda()
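If you are unsure which device the model's weights currently live on, you can also inspect one of its parameters (a quick check, not part of the original snippet):
print(next(model.parameters()).device)   # e.g. cuda:0 or cpu
print(next(model.parameters()).is_cuda)  # True when the weights are on a GPU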
pro*_*sti 13
Prefer the .to() method. The advantage is clear and important: your device may not be "cuda" tomorrow, so try to avoid model.cuda().
It is not wrong to check for the device:
dev = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
Or hard-code it:
dev=torch.device("cuda")
which is the same as:
dev="cuda"
In general, you can use the following code:
model.to(dev)
data = data.to(dev)
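Putting it together, a device-agnostic skeleton might look like this (a minimal sketch; the nn.Linear layer and the random tensors are placeholders for whatever model and data you actually use):
import torch
from torch import nn

dev = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

model = nn.Linear(10, 2)              # placeholder model for illustration
model.to(dev)                         # move the weights once, up front

data = torch.randn(4, 10).to(dev)     # move each batch as it is loaded
target = torch.randint(0, 2, (4,)).to(dev)

output = model(data)
loss = nn.functional.cross_entropy(output, target)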