Ilv*_*ico 8 python deep-learning torchvision
I am trying to run the following code, which reportedly runs fine for other users, but I hit the error shown below.
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils import data
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
import cv2
import numpy as np
import csv
samples = []
with open('data/driving_log.csv') as csvfile:
    reader = csv.reader(csvfile)
    next(reader, None)
    for line in reader:
        samples.append(line)
train_len = int(0.8*len(samples))
valid_len = len(samples) - train_len
train_samples, validation_samples = data.random_split(samples, lengths=[train_len, valid_len])
def augment(imgName, angle):
    name = 'data/IMG/' + imgName.split('/')[-1]
    current_image = cv2.imread(name)
    current_image = current_image[65:-25, :, :]
    if np.random.rand() < 0.5:
        current_image = cv2.flip(current_image, 1)
        angle = angle * -1.0
    return current_image, angle
class Dataset(data.Dataset):
    def __init__(self, samples, transform=None):
        self.samples = samples
        self.transform = transform

    def __getitem__(self, index):
        batch_samples = self.samples[index]
        steering_angle = float(batch_samples[3])
        center_img, steering_angle_center = augment(batch_samples[0], steering_angle)
        left_img, steering_angle_left = augment(batch_samples[1], steering_angle + 0.4)
        right_img, steering_angle_right = augment(batch_samples[2], steering_angle - 0.4)
        center_img = self.transform(center_img)
        left_img = self.transform(left_img)
        right_img = self.transform(right_img)
        return (center_img, steering_angle_center), (left_img, steering_angle_left), (right_img, steering_angle_right)

    def __len__(self):
        return len(self.samples)
transformations = transforms.Compose([transforms.Lambda(lambda x: (x / 255.0) - 0.5)])
params = {'batch_size': 32,
          'shuffle': True,
          'num_workers': 4}
training_set = Dataset(train_samples, transformations)
training_generator = data.DataLoader(training_set, **params)
validation_set = Dataset(validation_samples, transformations)
validation_generator = data.DataLoader(validation_set, **params)
class NetworkDense(nn.Module):
    def __init__(self):
        super(NetworkDense, self).__init__()
        self.conv_layers = nn.Sequential(
            nn.Conv2d(3, 24, 5, stride=2),
            nn.ELU(),
            nn.Conv2d(24, 36, 5, stride=2),
            nn.ELU(),
            nn.Conv2d(36, 48, 5, stride=2),
            nn.ELU(),
            nn.Conv2d(48, 64, 3),
            nn.ELU(),
            nn.Conv2d(64, 64, 3),
            nn.Dropout(0.25)
        )
        self.linear_layers = nn.Sequential(
            nn.Linear(in_features=64 * 2 * 33, out_features=100),
            nn.ELU(),
            nn.Linear(in_features=100, out_features=50),
            nn.ELU(),
            nn.Linear(in_features=50, out_features=10),
            nn.Linear(in_features=10, out_features=1)
        )

    def forward(self, input):
        input = input.view(input.size(0), 3, 70, 320)
        output = self.conv_layers(input)
        output = output.view(output.size(0), -1)
        output = self.linear_layers(output)
        return output
class NetworkLight(nn.Module):
    def __init__(self):
        super(NetworkLight, self).__init__()
        self.conv_layers = nn.Sequential(
            nn.Conv2d(3, 24, 3, stride=2),
            nn.ELU(),
            nn.Conv2d(24, 48, 3, stride=2),
            nn.MaxPool2d(4, stride=4),
            nn.Dropout(p=0.25)
        )
        self.linear_layers = nn.Sequential(
            nn.Linear(in_features=48*4*19, out_features=50),
            nn.ELU(),
            nn.Linear(in_features=50, out_features=10),
            nn.Linear(in_features=10, out_features=1)
        )

    def forward(self, input):
        input = input.view(input.size(0), 3, 70, 320)
        output = self.conv_layers(input)
        output = output.view(output.size(0), -1)
        output = self.linear_layers(output)
        return output
model = NetworkLight()
optimizer = optim.Adam(model.parameters(), lr=0.0001)
criterion = nn.MSELoss()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('device is: ', device)
def toDevice(datas, device):
    imgs, angles = datas
    return imgs.float().to(device), angles.float().to(device)
max_epochs = 22
for epoch in range(max_epochs):
    model.to(device)

    # Training
    train_loss = 0
    model.train()
    for local_batch, (centers, lefts, rights) in enumerate(training_generator):
        # Transfer to GPU
        centers, lefts, rights = toDevice(centers, device), toDevice(lefts, device), toDevice(rights, device)

        # Model computations
        optimizer.zero_grad()
        datas = [centers, lefts, rights]
        for data in datas:
            imgs, angles = data
            # print("training image: ", imgs.shape)
            outputs = model(imgs)
            loss = criterion(outputs, angles.unsqueeze(1))
            loss.backward()
            optimizer.step()

            train_loss += loss.data[0].item()

        if local_batch % 100 == 0:
            print('Loss: %.3f '
                  % (train_loss/(local_batch+1)))

    # Validation
    model.eval()
    valid_loss = 0
    with torch.set_grad_enabled(False):
        for local_batch, (centers, lefts, rights) in enumerate(validation_generator):
            # Transfer to GPU
            centers, lefts, rights = toDevice(centers, device), toDevice(lefts, device), toDevice(rights, device)

            # Model computations
            optimizer.zero_grad()
            datas = [centers, lefts, rights]
            for data in datas:
                imgs, angles = data
                # print("Validation image: ", imgs.shape)
                outputs = model(imgs)
                loss = criterion(outputs, angles.unsqueeze(1))

                valid_loss += loss.data[0].item()

            if local_batch % 100 == 0:
                print('Valid Loss: %.3f '
                      % (valid_loss/(local_batch+1)))
state = {
    'model': model.module if device == 'cuda' else model,
}
torch.save(state, 'model.h5')
Here is the error message:
"D:\VICO\Back up\venv\Scripts\python.exe" "D:/VICO/Back up/venv/Scripts/self_driving_car.py"
device is: cpu
Traceback (most recent call last):
File "D:/VICO/Back up/venv/Scripts/self_driving_car.py", line 163, in <module>
for local_batch, (centers, lefts, rights) in enumerate(training_generator):
File "D:\VICO\Back up\venv\lib\site-packages\torch\utils\data\dataloader.py", line 291, in __iter__
return _MultiProcessingDataLoaderIter(self)
File "D:\VICO\Back up\venv\lib\site-packages\torch\utils\data\dataloader.py", line 737, in __init__
w.start()
File "C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\process.py", line 112, in start
self._popen = self._Popen(self)
File "C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\context.py", line 223, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\context.py", line 322, in _Popen
return Popen(process_obj)
File "C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\popen_spawn_win32.py", line 89, in __init__
reduction.dump(process_obj, to_child)
File "C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
_pickle.PicklingError: Can't pickle <function <lambda> at 0x0000002F2175B048>: attribute lookup <lambda> on __main__ failed
Process finished with exit code 1
I'm not sure what the next step is to resolve this.
pickle does not pickle function objects. It expects to recreate a function by importing its module and looking it up by name. A lambda is an anonymous function (it has no name), so that lookup fails. The solution is to name the function at module level. The only lambda I found in your code is
transformations = transforms.Compose([transforms.Lambda(lambda x: (x / 255.0) - 0.5)])
Assuming this is the troublesome function, you can do
def _my_normalization(x):
    return x / 255.0 - 0.5

transformations = transforms.Compose([transforms.Lambda(_my_normalization)])
You may run into other problems, because it looks like you are doing all of your work at module level. Since this involves multiprocessing and you are running on Windows, each new worker process imports this file and runs all of that module-level code again. That is not a problem on Linux/macOS, where the forked process already has the module loaded from the parent.
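The usual remedy on Windows is to keep only definitions (imports, functions, classes) at module level and move everything else behind an if __name__ == '__main__': guard. Here is a minimal sketch of how the script could be arranged; the main name and the trimmed-down body are illustrative, not part of your original code:

def main():
    # everything that currently runs at module level, in the same order:
    # reading driving_log.csv, the random_split, building the Dataset and
    # DataLoader objects, creating the model/optimizer, and the epoch loop
    ...

if __name__ == '__main__':
    # On Windows, DataLoader worker processes are started with "spawn" and
    # re-import this file; the guard keeps them from re-running the training
    # code when they do.
    main()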