I wrote a small script in Python to solve various Gym environments with policy gradients.
import gym, os
import numpy as np
#create environment
env = gym.make('CartPole-v0')
state = env.reset()
s_size = len(state)
a_size = 2
#import my neural network code
os.chdir(r'C:\---\---\---\Python Code')
import RLPolicy
policy = RLPolicy.NeuralNetwork([s_size,a_size],learning_rate=0.000001,activations=['softmax']) #a 3-layer network might be ([s_size, 5, a_size],learning_rate=1,activations=['tanh','softmax'])
#it also supports the sigmoid activation function
print(policy.weights)
DISCOUNT = 0.95 #parameter for discounting future rewards
#first step
probs = policy.feedforward(state)[-1]
action = np.random.choice(a_size,p=probs)
state,reward,done,info = env.step(action)
for t in range(3000):
    done = False
    states = [] #lists for recording the episode
    probs2 = []
    rewards = []
    while not done:
        #env.render() #to visualize learning
        probs = policy.feedforward(state)[-1] #calculate probabilities of actions
        action = np.random.choice(a_size,p=probs) #choose an action from probs
        #record and update state
        probs2.append(probs)
        states.append(state)
        state,reward,done,info = env.step(action)
        rewards.append(reward) #should reward be recorded before updating the state?
    #calculate gradients
    gradients_w = []
    gradients_b = []
    for i in range(len(rewards)):
        #discounted return from step i onward (a reverse-pass version is sketched after the script)
        totalReward = sum(rewards[i+k]*DISCOUNT**k for k in range(len(rewards)-i))
        ## !! this is the line that I need help with
        gradient = policy.backpropagation(states[i],totalReward*(probs2[i])) #what should be backpropagated through the network
        ## !!
        ##record gradients
        gradients_w.append(gradient[0])
        gradients_b.append(gradient[1])
    #combine gradients and update the weights and biases
    gradients_w = np.array(gradients_w,object)
    gradients_b = np.array(gradients_b,object)
    policy.weights += policy.learning_rate * np.flip(np.sum(gradients_w,0),0) #np.flip because the gradients are calculated backwards
    policy.biases += policy.learning_rate * np.flip(np.sum(gradients_b,0),0)
    #reset and record
    state = env.reset()
    if t%100==0:
        print('t'+str(t),'r',sum(rewards))
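The per-step discounted return that the inner loop above is meant to compute can also be written as a single reverse pass over the rewards. This standalone sketch is only for clarity and is not part of the original script; returns_to_go is a name used only here.

import numpy as np

def returns_to_go(rewards, discount=0.95):
    #discounted return from each time step to the end of the episode,
    #computed with one backward pass instead of a nested sum
    G = np.zeros(len(rewards))
    running = 0.0
    for i in reversed(range(len(rewards))):
        running = rewards[i] + discount * running
        G[i] = running
    return G

#example: returns_to_go([1, 1, 1], 0.95) -> [2.8525, 1.95, 1.]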
What should be passed backward to calculate the gradient? I am using gradient ascent, but I could switch it to descent. Some people define the reward function as totalReward*log(probabilities). Would that make the score derivative totalReward*(1/probs), or log(probs), or something else (a small finite-difference check of this derivative is sketched after the list of attempts below)? Do you use a cost function like cross-entropy? I have tried
totalReward*np.log(probs)
totalReward*(1/probs)
totalReward*(probs**2)
totalReward*probs
probs = np.zeros(a_size)
probs[action] = 1
totalReward*probs
and several others. The last one is the only one that ever solved any of the environments, and it only worked on CartPole. I have tested the various loss/score functions with gradient ascent and descent over thousands of episodes on CartPole, Pendulum, and MountainCar. Sometimes it improves a small amount, but it never solves the environment. What am I doing wrong?
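As a reference for the derivative question above, here is a small finite-difference check, independent of my RLPolicy code, of what the gradient of totalReward*log(probs[action]) is with respect to the inputs of a softmax output layer. The softmax and objective helpers are local to this check only.

import numpy as np

def softmax(z):
    z = z - np.max(z)
    e = np.exp(z)
    return e / e.sum()

def objective(z, action, totalReward):
    #score-function objective for one step: totalReward * log pi(action)
    return totalReward * np.log(softmax(z)[action])

z = np.array([0.2, -0.5, 0.1])   #example pre-softmax outputs (logits)
action, totalReward, eps = 1, 3.0, 1e-6

#numerical gradient of the objective with respect to the logits
num_grad = np.zeros_like(z)
for k in range(len(z)):
    dz = np.zeros_like(z)
    dz[k] = eps
    num_grad[k] = (objective(z + dz, action, totalReward)
                   - objective(z - dz, action, totalReward)) / (2 * eps)

#analytic form: totalReward * (one_hot(action) - softmax(z))
one_hot = np.eye(len(z))[action]
print(np.allclose(num_grad, totalReward * (one_hot - softmax(z))))  #True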
Here is the RLPolicy code. It is badly written, half pseudocode, but I don't think that is the problem, because I checked it many times with gradient checking (a sketch of the gradient-check pattern I mean follows the code). Still, it would help even if the problem could be narrowed down to the neural network or to somewhere else in my code.
#Neural Network
import numpy as np
import random, math, time, os
from matplotlib import pyplot as plt
def activation(x,function):
    if function=='sigmoid':
        return(1/(1+math.e**(-x))) #sigmoid
    if function=='relu':
        x[x<0]=0
        return(x)
    if function=='tanh':
        return(np.tanh(x.astype(float))) #tanh
    if function=='softmax':
        z = np.exp(np.array((x-max(x)),float))
        y = np.sum(z)
        return(z/y)
def activationDerivative(x,function):
    if function=='sigmoid':
        return(x*(1-x))
    if function=='relu':
        x[x<0]=0 #= (assignment), not == (comparison)
        x[x>0]=1
        return(x)
    if function=='tanh':
        return(1-x**2)
    if function=='softmax':
        s = x.reshape(-1,1)
        return(np.diagflat(s) - np.dot(s, s.T))
class NeuralNetwork():
    def __init__ (self,layers,learning_rate,momentum=0,regularization=0,activations=None):
        #momentum and regularization are accepted but not used below
        self.learning_rate = learning_rate
        if (isinstance(layers[1],list)):
            h = layers[1][:]
            del layers[1]
            for i in h:
                layers.insert(-1,i)
        self.layers = layers
        self.weights = [2*np.random.rand(self.layers[i]*self.layers[i+1])-1 for i in range(len(self.layers)-1)]
        self.biases = [2*np.random.rand(self.layers[i+1])-1 for i in range(len(self.layers)-1)]
        self.weights = np.array(self.weights,object)
        self.biases = np.array(self.biases,object)
        self.activations = activations
    def feedforward(self, input_array):
        layer = input_array
        neuron_outputs = [layer]
        for i in range(len(self.layers)-1):
            layer = np.tile(layer,self.layers[i+1])
            layer = np.reshape(layer,[self.layers[i+1],self.layers[i]])
            weights = np.reshape(self.weights[i],[self.layers[i+1],self.layers[i]])
            layer = weights*layer
            layer = np.sum(layer,1)#,self.layers[i+1]-1)
            layer = layer+self.biases[i]
            layer = activation(layer,self.activations[i])
            neuron_outputs.append(np.array(layer,float))
        return(neuron_outputs)
    def neuronErrors(self,l,neurons,layerError,n_os):
        if (l==len(self.layers)-2):
            return(layerError)
        totalErr = [] #total error
        for e in range(len(layerError)): #-layers
            e = e*self.layers[l+2]
            a_ws = self.weights[l+1][e:e+self.layers[l+1]]
            e = int(e/self.layers[l+2])
            err = layerError[e]*a_ws #error
            totalErr.append(err)
        return(sum(totalErr))
    def backpropagation(self,state,loss):
        weights_gradient = [np.zeros(self.layers[i]*self.layers[i+1]) for i in range(len(self.layers)-1)]
        biases_gradient = [np.zeros(self.layers[i+1]) for i in range(len(self.layers)-1)]
        neuron_outputs = self.feedforward(state)
        grad = self.individualBackpropagation(loss, neuron_outputs)
        return(grad)
    def individualBackpropagation(self, difference, neuron_outputs): #number of output
        lr = self.learning_rate
        n_os = neuron_outputs[:]
        w_o = self.weights[:]
        b_o = self.biases[:]
        w_n = self.weights[:]
        b_n = self.biases[:]
        gradient_w = []
        gradient_b = []
        error = difference[:] #error for the output neurons
        for l in range(len(self.layers)-2,-1,-1):
            p_n = np.tile(n_os[l],self.layers[l+1]) #previous neurons
            neurons = np.arange(self.layers[l+1])
            error = (self.neuronErrors(l,neurons,error,n_os))
            if not self.activations[l]=='softmax':
                error = error*activationDerivative(neuron_outputs[l+1],self.activations[l])
            else:
                error = error @ activationDerivative(neuron_outputs[l+1],self.activations[l]) #because the softmax derivative returns different dimensions
            w_grad = np.repeat(error,self.layers[l]) #weights gradient
            b_grad = np.ravel(error) #biases gradient
            w_grad = w_grad*p_n
            b_grad = b_grad
            gradient_w.append(w_grad)
            gradient_b.append(b_grad)
        return(gradient_w,gradient_b)
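For what it is worth, this is the gradient-checking pattern I mean above: compare the analytic gradient from backpropagation against central finite differences of a scalar loss. The forward and loss_fn names here are placeholders rather than the RLPolicy API, and the sketch assumes the weights are given as a single NumPy array.

import numpy as np

def gradient_check(forward, loss_fn, weights, x, analytic_grad, eps=1e-5):
    #numerical gradient: bump one weight at a time and take central differences
    num_grad = np.zeros_like(weights)
    for k in range(weights.size):
        bump = np.zeros_like(weights)
        bump.flat[k] = eps
        plus = loss_fn(forward(weights + bump, x))
        minus = loss_fn(forward(weights - bump, x))
        num_grad.flat[k] = (plus - minus) / (2 * eps)
    #relative error; roughly 1e-7 to 1e-5 usually indicates a correct gradient
    denom = np.abs(num_grad).max() + np.abs(analytic_grad).max() + 1e-12
    return np.abs(num_grad - analytic_grad).max() / denom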
Thank you for the answers; this is my first question here.
mprouveur's answer was half correct, but I felt I needed to explain what the right thing to backpropagate is. The answer to my question on ai.stackexchange.com is how I came to understand this. The correct error to backpropagate is the log probability of the action taken, multiplied by the target reward. This can also be calculated as the cross-entropy loss between the output probabilities and a zero array with the taken action set to one. Because of the derivative of the cross-entropy loss, this has the effect of pushing only the probability of the taken action closer to one. The multiplication by the total reward then pushes better actions toward higher probabilities. So, with the label being a one-hot encoded vector, the correct equation is label/probs * totalReward, because that is the derivative of the cross-entropy loss and the derivative of the log of the probabilities. I got this working in other code, but even with this equation I think something else in my code is wrong. It probably has to do with how I made the softmax derivative too complicated, instead of calculating it the usual way by combining the cross-entropy derivative and the softmax derivative. I will update this answer soon with correct code and more information.
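A minimal sketch of that error term, assuming a softmax output layer; one_hot and the returned names are local to this sketch.

import numpy as np

def policy_gradient_error(probs, action, totalReward):
    #error to backpropagate for one step, written two equivalent ways
    one_hot = np.zeros_like(probs)
    one_hot[action] = 1.0
    #(1) derivative with respect to the output probabilities:
    #    label/probs * totalReward (only the taken action's entry is nonzero)
    dL_dprobs = totalReward * one_hot / probs
    #(2) the same gradient pushed through the softmax, i.e. with respect to the
    #    pre-softmax inputs; this is the usual combined cross-entropy + softmax form
    dL_dlogits = totalReward * (one_hot - probs)
    return dL_dprobs, dL_dlogits

With gradient ascent (weights += learning_rate * gradient, as in my script), the first form is what gets handed to backpropagation at the probability outputs, and the second is what you get when the cross-entropy and softmax derivatives are combined instead of multiplying by the full softmax Jacobian.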