Nic*_*ais 6 python conv-neural-network pytorch tensor
我用随机像素做了一个可复现的例子。我想在卷积层之后、进入全连接(密集)层之前把张量展平。问题出在卷积层与全连接层的衔接处:我不知道该如何确定正确的神经元数量。
tl;dr:我正在寻找 keras.layers.Flatten() 的手动等效做法,因为 PyTorch 中没有现成的这个层。
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
# Synthetic dataset: 100 random RGB "images" (3x100x100) with binary labels.
x = np.random.rand(1_00, 3, 100, 100)
y = np.random.randint(0, 2, 1_00)
# BUG FIX: convert to float32 tensors unconditionally — the model weights are
# float32, so leaving x/y as float64 numpy arrays on CPU-only machines breaks
# the forward pass and BCELoss. Only the device transfer depends on CUDA.
x = torch.from_numpy(x.astype('float32'))
y = torch.from_numpy(y.astype('float32'))
if torch.cuda.is_available():
    x = x.cuda()
    y = y.cuda()
class ConvNet(nn.Module):
    """Three-stage CNN binary classifier for 3x100x100 inputs.

    Each stage is a 3x3 conv (no padding) followed by a 2x2 max-pool, so the
    spatial width shrinks 100 -> 49 -> 23 -> 10. The flattened feature count
    entering fc1 is therefore 128 * 10 * 10 = 12800.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 32, 3)
        self.conv2 = nn.Conv2d(32, 64, 3)
        self.conv3 = nn.Conv2d(64, 128, 3)
        # BUG FIX: was nn.Linear(128, 1024); the input features must match
        # the flattened conv output: channels * height * width = 128*10*10.
        self.fc1 = nn.Linear(128 * 10 * 10, 1024)
        self.fc2 = nn.Linear(1024, 1)

    def forward(self, x):
        """Return a (batch, 1) tensor of sigmoid probabilities."""
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))
        x = F.max_pool2d(F.relu(self.conv3(x)), (2, 2))
        x = x.view(x.size(0), -1)  # flatten everything except the batch dim
        x = F.relu(self.fc1(x))
        x = torch.sigmoid(self.fc2(x))
        return x
net = ConvNet()
# BUG FIX: guard the device move — the data transfer above is conditional on
# CUDA, but net.cuda() was called unconditionally and crashes on CPU-only
# machines. Keep model and data on the same device.
if torch.cuda.is_available():
    net.cuda()
optimizer = optim.Adam(net.parameters(), lr=0.03)
loss_function = nn.BCELoss()  # expects sigmoid outputs in [0, 1]
class Train:
    """Minimal map-style dataset over feature/label tensors.

    By default wraps the module-level `x`/`y`, so existing `Train()` calls
    keep working; explicit tensors may be passed for reuse elsewhere.
    """

    def __init__(self, features=None, targets=None):
        # Defaults resolve to the module-level data, preserving old behavior.
        self.x_train = x if features is None else features
        self.y_train = y if targets is None else targets
        self.len = self.x_train.shape[0]

    def __getitem__(self, index):
        # BUG FIX: index the stored tensors — the original read the globals
        # `x`/`y` directly, ignoring what __init__ had stored.
        return self.x_train[index], self.y_train[index].unsqueeze(0)

    def __len__(self):
        return self.len
train = Train()
train_loader = DataLoader(dataset=train, batch_size=64, shuffle=True)

epochs = 1
train_losses = list()
for e in range(epochs):
    running_loss = 0
    for images, labels in train_loader:
        optimizer.zero_grad()
        log_ps = net(images)
        loss = loss_function(log_ps, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    # BUG FIX: train_losses was created but never populated; record the
    # per-epoch mean batch loss so the history is actually usable.
    train_losses.append(running_loss / len(train_loader))
print('It\'s working.')
Run Code Online (Sandbox Code Playgroud)
这是我制作的一个函数,用于在展平卷积张量的同时自动适应正确数量的神经元:
def flatten(w, k=3, s=1, p=0, m=True):
    """
    Return the spatial width after one conv layer (and optional 2x2 max-pool).

    :param w: input width (pixels)
    :param k: kernel size
    :param s: stride
    :param p: padding
    :param m: whether a 2x2 max-pool follows the convolution
    :return: (new_width, k, s, p, m) — the trailing params are echoed back so
             calls can be chained with *args; use new_width**2 * out_channels
             as the flattened feature count.

    Example:
        r = flatten(*flatten(*flatten(w=100, k=3, s=1, p=0, m=True)))[0]
        self.fc1 = nn.Linear(r * r * 128, 1024)
    """
    size = (w - k + 2 * p) // s + 1  # standard conv output-width formula
    # BUG FIX: the original `int(A / 2 if m else 1)` bound the ternary to the
    # `/ 2` operand, so m=False returned 1 instead of the conv output size.
    return (size // 2 if m else size), k, s, p, m
Run Code Online (Sandbox Code Playgroud)
在你的情况下:
# Answer snippet: ConvNet.__init__ with fc1 sized via the flatten() helper.
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 32, 3)
self.conv2 = nn.Conv2d(32, 64, 3)
self.conv3 = nn.Conv2d(64, 128, 3)
# r is the spatial width after three conv(k=3)+pool stages of a 100-wide
# input (10 here), so the flattened feature count is r*r*128.
r = flatten(*flatten(*flatten(w=100, k=3, s=1, p=0, m=True)))[0]
self.fc1 = nn.Linear(r*r*128, 1024)
self.fc2 = nn.Linear(1024, 1)
def forward(self, x): ...  # body unchanged from the question
Run Code Online (Sandbox Code Playgroud)