Uma*_*aid — python, deep-learning, pytorch, tensor
I have a very large tensor of shape (512, 3, 224, 224). I feed it to the model in batches of 32 and save the scores corresponding to the target label, which is 2. On each iteration, after the slicing step, the shape of scores changes, which leads to the error below. What am I doing wrong, and how do I fix it?
label = torch.ones(1) * 2

def sub_forward(self, x):
    x = self.vgg16(x)
    x = self.bn1(x)
    x = self.linear1(x)
    x = self.linear2(x)
    return x
def get_scores(self, imgs, targets):
    b, _, _, _ = imgs.shape
    batch_size = 32
    total_scores = []
    for i in range(0, b, batch_size):
        scores = self.sub_forward(imgs[i:i+batch_size, :, :, :])
        scores = F.softmax(scores)
        labels = targets[i:i+batch_size]
        labels = labels.long()
        scores = scores[:, labels]
        print(i, " scores: ", scores)
        total_scores.append(scores)
        print(i, " total_scores: ", total_scores)
    total_scores = torch.stack(total_scores)
    return scores
0 scores: tensor([[0.0811],
[0.0918],
[0.0716],
[0.1680],
[0.1689],
[0.1319],
[0.1556],
[0.2966],
[0.0913],
[0.1238],
[0.1480],
[0.1215],
[0.2524],
[0.1283],
[0.1603],
[0.1282],
[0.2668],
[0.1146],
[0.2043],
[0.2475],
[0.0865],
[0.1869],
[0.0860],
[0.1979],
[0.1677],
[0.1983],
[0.2623],
[0.1975],
[0.1894],
[0.3299],
[0.1970],
[0.1094]], device='cuda:0')
0 total_scores: [tensor([[0.0811],
[0.0918],
[0.0716],
[0.1680],
[0.1689],
[0.1319],
[0.1556],
[0.2966],
[0.0913],
[0.1238],
[0.1480],
[0.1215],
[0.2524],
[0.1283],
[0.1603],
[0.1282],
[0.2668],
[0.1146],
[0.2043],
[0.2475],
[0.0865],
[0.1869],
[0.0860],
[0.1979],
[0.1677],
[0.1983],
[0.2623],
[0.1975],
[0.1894],
[0.3299],
[0.1970],
[0.1094]], device='cuda:0')]
32 scores: tensor([], device='cuda:0', size=(32, 0))
32 total_scores: [tensor([[0.0811],
[0.0918],
[0.0716],
[0.1680],
[0.1689],
[0.1319],
[0.1556],
[0.2966],
[0.0913],
[0.1238],
[0.1480],
[0.1215],
[0.2524],
[0.1283],
[0.1603],
[0.1282],
[0.2668],
[0.1146],
[0.2043],
[0.2475],
[0.0865],
[0.1869],
[0.0860],
[0.1979],
[0.1677],
[0.1983],
[0.2623],
[0.1975],
[0.1894],
[0.3299],
[0.1970],
[0.1094]], device='cuda:0'), tensor([], device='cuda:0', size=(32, 0))]
> RuntimeError: stack expects each tensor to be equal size, but got [32, 1] at entry 0 and [32, 0] at entry 1
I don't know what is going on in your code, but honestly you shouldn't be doing the batching by hand. Use a Dataset instead:
import torch

class MyDataloader(torch.utils.data.Dataset):
    def __init__(self):
        self.images = torch.Tensor(512, 3, 224, 224)

    def __len__(self):
        return 512

    def __getitem__(self, idx):
        return self.images[idx, :, :, :], torch.ones(1) * 2

train_data = MyDataloader()
train_loader = torch.utils.data.DataLoader(train_data,
                                           shuffle=True,
                                           num_workers=2,
                                           batch_size=32)

for batch_images, targets in train_loader:
    print(batch_images.shape)  # should be 32 * 3 * 224 * 224
    ...  # train your model here
    logits = model(batch_images, targets)
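For completeness, here is a minimal sketch of collecting the per-image target-class probabilities with this loader. Two things in it are assumptions, not part of the original post: first, that the (32, 0) tensor above came from targets holding a single label (label = torch.ones(1) * 2 has length 1, so the slice targets[32:64] is empty and scores[:, labels] ends up with zero columns); second, that model takes a batch of images and returns raw class logits.

import torch
import torch.nn.functional as F

total_scores = []
for batch_images, targets in train_loader:
    logits = model(batch_images)               # assumed: (B, num_classes) logits
    probs = F.softmax(logits, dim=1)           # normalize across classes
    idx = targets.long().view(-1, 1)           # one target label per image: (B, 1)
    total_scores.append(probs.gather(1, idx))  # pick each image's target-class probability
total_scores = torch.cat(total_scores)         # (512, 1)

torch.cat is used instead of torch.stack so that a final batch smaller than 32 cannot trigger the "stack expects each tensor to be equal size" error.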