Here is a simple neural network in which I am trying to penalize the norm of the activation gradients:
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=5)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=5)
        self.pool = nn.MaxPool2d(2, 2)
        self.relu = nn.ReLU()
        self.linear = nn.Linear(64 * 5 * 5, 10)

    def forward(self, input):
        conv1 = self.conv1(input)
        pool1 = self.pool(conv1)
        self.relu1 = self.relu(pool1)
        self.relu1.retain_grad()
        conv2 = self.conv2(self.relu1)
        pool2 = self.pool(conv2)
        relu2 = self.relu(pool2)
        self.relu2 = relu2.view(relu2.size(0), -1)
        self.relu2.retain_grad()
        return self.linear(self.relu2)
model = Net()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)

for i in range(1000):
    output = model(input)
    loss = nn.CrossEntropyLoss()(output, label)
    optimizer.zero_grad()
    loss.backward(retain_graph=True)

    grads = torch.autograd.grad(loss, [model.relu1, model.relu2], create_graph=True)

    grad_norm = 0
    for grad in grads:
        grad_norm += grad.pow(2).sum()

    grad_norm.backward()
    optimizer.step()
However, it does not produce the desired regularization effect. If I do the same thing with the weights (instead of the activations), it works well. Am I doing this correctly (in terms of PyTorch mechanics)? Specifically, what happens in the grad_norm.backward() call? I just want to make sure that the weight gradients are updated, not the activation gradients. Currently, when I print out the gradients of the weights and of the activations immediately before and after that line, both change, so I am not sure what is going on.
I think your code ends up computing some of the gradients twice in each step. I also suspect it never actually zeroes the activation gradients, so they accumulate across steps.

In general:

x.backward() computes the gradient of x w.r.t. the leaves of the computation graph (e.g. weight tensors and other Variables), as well as w.r.t. nodes explicitly marked with retain_grad(). It accumulates the computed gradients in the tensors' .grad attributes.

autograd.grad(x, [y, z]) returns the gradient of x w.r.t. y and z, regardless of whether they would ordinarily retain grad. By default it also accumulates the gradients in the .grad attributes of all leaves. You can prevent this by passing only_inputs=True.

I prefer to use backward() only for the optimization step, and autograd.grad() whenever my goal is to obtain "materialized" gradients as intermediate values in another computation. That way I can be sure that no unwanted gradients are left behind in tensors' .grad attributes after I am done with them.
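To make that distinction concrete, here is a minimal toy sketch (my own illustration, not part of the original post) showing both calls on a scalar function of a single leaf tensor:

import torch

w = torch.randn(3, requires_grad=True)   # a leaf, analogous to a weight tensor
x = (w ** 2).sum()                       # a scalar computed from it

# backward() accumulates dx/dw into w.grad
x.backward(retain_graph=True)
print(w.grad)                            # equals 2 * w

# autograd.grad() returns the gradient instead of storing it;
# create_graph=True makes the result differentiable, so it can feed a penalty term
(g,) = torch.autograd.grad(x, w, create_graph=True)
penalty = g.pow(2).sum()                 # a differentiable function of the gradient
penalty.backward()                       # adds d(penalty)/dw on top of what is already in w.grad
print(w.grad)                            # now 2 * w + 8 * w

Here the gradient penalty is differentiated back into the same leaf, which is what the full example below does with the activation gradients.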
import torch
from torch import nn


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=5)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=5)
        self.pool = nn.MaxPool2d(2, 2)
        self.relu = nn.ReLU()
        self.linear = nn.Linear(64 * 5 * 5, 10)

    def forward(self, input):
        conv1 = self.conv1(input)
        pool1 = self.pool(conv1)
        self.relu1 = self.relu(pool1)
        conv2 = self.conv2(self.relu1)
        pool2 = self.pool(conv2)
        self.relu2 = self.relu(pool2)
        relu2 = self.relu2.view(self.relu2.size(0), -1)
        return self.linear(relu2)


model = Net()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
grad_penalty_weight = 10.

for i in range(1000000):
    # Random input and labels; we're not really learning anything
    input = torch.rand(1, 3, 32, 32)
    label = torch.randint(0, 10, (1,))

    output = model(input)
    loss = nn.CrossEntropyLoss()(output, label)

    # This is where the activation gradients are computed
    # only_inputs is optional here, since we're going to call optimizer.zero_grad() later
    # But it makes clear that we're *only* interested in the activation gradients at this point
    grads = torch.autograd.grad(loss, [model.relu1, model.relu2], create_graph=True, only_inputs=True)

    grad_norm = 0
    for grad in grads:
        grad_norm += grad.pow(2).sum()

    optimizer.zero_grad()
    loss = loss + grad_norm * grad_penalty_weight
    loss.backward()
    optimizer.step()
This code seems to work, in the sense that the activation gradients do get smaller. I cannot comment on the viability of this technique as a regularization method.
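If you want to watch that effect yourself, one simple option (my addition, not part of the original answer) is to log the penalty term inside the training loop above, for example:

    # Inside the loop above, after grad_norm has been computed:
    if i % 1000 == 0:
        print(f"step {i}: activation-gradient penalty = {grad_norm.item():.6f}")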