Ilv*_*ico 10 python deep-learning torchvision
After running the following script, I get an error:
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils import data
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
import cv2
import numpy as np
import csv
samples = []
with open('data/driving_log.csv') as csvfile:
    reader = csv.reader(csvfile)
    next(reader, None)
    for line in reader:
        samples.append(line)
train_len = int(0.8*len(samples))
valid_len = len(samples) - train_len
train_samples, validation_samples = data.random_split(samples, lengths=[train_len, valid_len])
def augment(imgName, angle):
    name = 'data/IMG/' + imgName.split('/')[-1]
    current_image = cv2.imread(name)
    current_image = current_image[65:-25, :, :]
    if np.random.rand() < 0.5:
        current_image = cv2.flip(current_image, 1)
        angle = angle * -1.0
    return current_image, angle
class Dataset(data.Dataset):
    def __init__(self, samples, transform=None):
        self.samples = samples
        self.transform = transform

    def __getitem__(self, index):
        batch_samples = self.samples[index]
        steering_angle = float(batch_samples[3])
        center_img, steering_angle_center = augment(batch_samples[0], steering_angle)
        left_img, steering_angle_left = augment(batch_samples[1], steering_angle + 0.4)
        right_img, steering_angle_right = augment(batch_samples[2], steering_angle - 0.4)
        center_img = self.transform(center_img)
        left_img = self.transform(left_img)
        right_img = self.transform(right_img)
        return (center_img, steering_angle_center), (left_img, steering_angle_left), (right_img, steering_angle_right)

    def __len__(self):
        return len(self.samples)
def _my_normalization(x):
    return x/255.0 - 0.5

transformations = transforms.Compose([transforms.Lambda(_my_normalization)])

params = {'batch_size': 32,
          'shuffle': True,
          'num_workers': 4}

training_set = Dataset(train_samples, transformations)
training_generator = data.DataLoader(training_set, **params)

validation_set = Dataset(validation_samples, transformations)
validation_generator = data.DataLoader(validation_set, **params)
class NetworkDense(nn.Module):
    def __init__(self):
        super(NetworkDense, self).__init__()
        self.conv_layers = nn.Sequential(
            nn.Conv2d(3, 24, 5, stride=2),
            nn.ELU(),
            nn.Conv2d(24, 36, 5, stride=2),
            nn.ELU(),
            nn.Conv2d(36, 48, 5, stride=2),
            nn.ELU(),
            nn.Conv2d(48, 64, 3),
            nn.ELU(),
            nn.Conv2d(64, 64, 3),
            nn.Dropout(0.25)
        )
        self.linear_layers = nn.Sequential(
            nn.Linear(in_features=64 * 2 * 33, out_features=100),
            nn.ELU(),
            nn.Linear(in_features=100, out_features=50),
            nn.ELU(),
            nn.Linear(in_features=50, out_features=10),
            nn.Linear(in_features=10, out_features=1)
        )

    def forward(self, input):
        input = input.view(input.size(0), 3, 70, 320)
        output = self.conv_layers(input)
        output = output.view(output.size(0), -1)
        output = self.linear_layers(output)
        return output


class NetworkLight(nn.Module):
    def __init__(self):
        super(NetworkLight, self).__init__()
        self.conv_layers = nn.Sequential(
            nn.Conv2d(3, 24, 3, stride=2),
            nn.ELU(),
            nn.Conv2d(24, 48, 3, stride=2),
            nn.MaxPool2d(4, stride=4),
            nn.Dropout(p=0.25)
        )
        self.linear_layers = nn.Sequential(
            nn.Linear(in_features=48*4*19, out_features=50),
            nn.ELU(),
            nn.Linear(in_features=50, out_features=10),
            nn.Linear(in_features=10, out_features=1)
        )

    def forward(self, input):
        input = input.view(input.size(0), 3, 70, 320)
        output = self.conv_layers(input)
        output = output.view(output.size(0), -1)
        output = self.linear_layers(output)
        return output
model = NetworkLight()
optimizer = optim.Adam(model.parameters(), lr=0.0001)
criterion = nn.MSELoss()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('device is: ', device)
def toDevice(datas, device):
    imgs, angles = datas
    return imgs.float().to(device), angles.float().to(device)
max_epochs = 22
for epoch in range(max_epochs):
    model.to(device)

    # Training
    train_loss = 0
    model.train()
    for local_batch, (centers, lefts, rights) in enumerate(training_generator):
        # Transfer to GPU
        centers, lefts, rights = toDevice(centers, device), toDevice(lefts, device), toDevice(rights, device)

        # Model computations
        optimizer.zero_grad()
        datas = [centers, lefts, rights]
        for data in datas:
            imgs, angles = data
            # print("training image: ", imgs.shape)
            outputs = model(imgs)
            loss = criterion(outputs, angles.unsqueeze(1))
            loss.backward()
            optimizer.step()

            train_loss += loss.data[0].item()

        if local_batch % 100 == 0:
            print('Loss: %.3f '
                  % (train_loss/(local_batch+1)))

    # Validation
    model.eval()
    valid_loss = 0
    with torch.set_grad_enabled(False):
        for local_batch, (centers, lefts, rights) in enumerate(validation_generator):
            # Transfer to GPU
            centers, lefts, rights = toDevice(centers, device), toDevice(lefts, device), toDevice(rights, device)

            # Model computations
            optimizer.zero_grad()
            datas = [centers, lefts, rights]
            for data in datas:
                imgs, angles = data
                # print("Validation image: ", imgs.shape)
                outputs = model(imgs)
                loss = criterion(outputs, angles.unsqueeze(1))

                valid_loss += loss.data[0].item()

            if local_batch % 100 == 0:
                print('Valid Loss: %.3f '
                      % (valid_loss/(local_batch+1)))
state = {
    'model': model.module if device == 'cuda' else model,
}
torch.save(state, 'model.h5')
Here is the error message (the output contains two tracebacks, one from the main process and one from the spawned DataLoader worker process):

"D:\VICO\Back up\venv\Scripts\python.exe" "D:/VICO/Back up/venv/Scripts/self_driven_car.py"
device is:  cpu
device is:  cpu
Traceback (most recent call last):
  File "D:/VICO/Back up/venv/Scripts/self_driven_car.py", line 165, in <module>
    for local_batch, (centers, lefts, rights) in enumerate(training_generator):
  File "D:\VICO\Back up\venv\lib\site-packages\torch\utils\data\dataloader.py", line 291, in __iter__
    return _MultiProcessingDataLoaderIter(self)
  File "D:\VICO\Back up\venv\lib\site-packages\torch\utils\data\dataloader.py", line 737, in __init__
    w.start()
  File "C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\process.py", line 112, in start
    self._popen = self._Popen(self)
  File "C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\context.py", line 223, in _Popen
    return _default_context.get_context().Process._Popen(process_obj)
  File "C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\context.py", line 322, in _Popen
    return Popen(process_obj)
  File "C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\popen_spawn_win32.py", line 89, in __init__
    reduction.dump(process_obj, to_child)
  File "C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\reduction.py", line 60, in dump
    ForkingPickler(file, protocol).dump(obj)
BrokenPipeError: [Errno 32] Broken pipe

Traceback (most recent call last):
  File "<string>", line 1, in <module>
  File "C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\spawn.py", line 105, in spawn_main
    exitcode = _main(fd)
  File "C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\spawn.py", line 114, in _main
    prepare(preparation_data)
  File "C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\spawn.py", line 225, in prepare
    _fixup_main_from_path(data['init_main_from_path'])
  File "C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\spawn.py", line 277, in _fixup_main_from_path
    run_name="__mp_main__")
  File "C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\runpy.py", line 263, in run_path
    pkg_name=pkg_name, script_name=fname)
  File "C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\runpy.py", line 96, in _run_module_code
    mod_name, mod_spec, pkg_name, script_name)
  File "C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\runpy.py", line 85, in _run_code
    exec(code, run_globals)
  File "D:\VICO\Back up\venv\Scripts\self_driven_car.py", line 165, in <module>
    for local_batch, (centers, lefts, rights) in enumerate(training_generator):
  File "D:\VICO\Back up\venv\lib\site-packages\torch\utils\data\dataloader.py", line 291, in __iter__
    return _MultiProcessingDataLoaderIter(self)
  File "D:\VICO\Back up\venv\lib\site-packages\torch\utils\data\dataloader.py", line 737, in __init__
    w.start()
  File "C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\process.py", line 112, in start
    self._popen = self._Popen(self)
  File "C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\context.py", line 223, in _Popen
    return _default_context.get_context().Process._Popen(process_obj)
  File "C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\context.py", line 322, in _Popen
    return Popen(process_obj)
  File "C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\popen_spawn_win32.py", line 46, in __init__
    prep_data = spawn.get_preparation_data(process_obj._name)
  File "C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\spawn.py", line 143, in get_preparation_data
    _check_not_importing_main()
  File "C:\Users\isonata\AppData\Local\Programs\Python\Python37\lib\multiprocessing\spawn.py", line 136, in _check_not_importing_main
    is not going to be frozen to produce an executable.''')
RuntimeError:
        An attempt has been made to start a new process before the
        current process has finished its bootstrapping phase.

        This probably means that you are not using fork to start your
        child processes and you have forgotten to use the proper idiom
        in the main module:

            if __name__ == '__main__':
                freeze_support()
                ...

        The "freeze_support()" line can be omitted if the program
        is not going to be frozen to produce an executable.
Process finished with exit code 1
I am not sure what the next step is to fix this.
Ilv*_*ico 13
Solved it. In short, the top-level code has to be wrapped in a main guard:
if __name__ == "__main__":
    main()
This prevents the training code from being re-executed every time a DataLoader worker process re-imports the module.
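A minimal, self-contained sketch of the pattern (ToyDataset here is only a stand-in for the Dataset in the question; the point is that everything that creates DataLoader workers runs inside main(), behind the guard):

import torch
from torch.utils.data import Dataset, DataLoader

class ToyDataset(Dataset):
    # Stand-in for the question's Dataset: returns a dummy image tensor and target.
    def __len__(self):
        return 64
    def __getitem__(self, index):
        return torch.zeros(3, 70, 320), torch.tensor(0.0)

def main():
    # Everything that spawns worker processes stays inside main().
    loader = DataLoader(ToyDataset(), batch_size=32, shuffle=True, num_workers=4)
    for imgs, targets in loader:
        print(imgs.shape, targets.shape)

if __name__ == '__main__':
    # On Windows, DataLoader workers are started with "spawn", which re-imports
    # this module; the guard keeps that re-import free of side effects.
    main()

Applied to the script in the question, this means moving the CSV reading, Dataset/DataLoader construction, model setup, and the training loop into main() (or directly under the guard).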
I ran into a similar problem and fixed it by setting the "num_workers" parameter of the DataLoader back to zero:
DataLoader(num_workers=0)
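Applied to the params dict from the question, that workaround would look like the sketch below (data then loads in the main process, which is slower, but no worker processes are spawned at all):

params = {'batch_size': 32,
          'shuffle': True,
          'num_workers': 0}   # 0 = load batches in the main process, no workers spawned

training_generator = data.DataLoader(training_set, **params)
validation_generator = data.DataLoader(validation_set, **params)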