Neural network in JavaScript not learning properly

Łuk*_*.pl 13 javascript artificial-intelligence backpropagation neural-network

I'm trying to rewrite the neural network found here in JavaScript. My JavaScript code looks like this:

function NeuralFactor(weight) {
    var self = this;
    this.weight = weight;
    this.delta =  0;
}

function Sigmoid(value) {
    return 1 / (1 + Math.exp(-value));
}

function Neuron(isInput) {
    var self = this;
    this.pulse = function() {
        self.output = 0;
        self.input.forEach(function(item) {
            self.output += item.signal.output * item.factor.weight;
        });

        self.output += self.bias.weight;
        self.output = Sigmoid(self.output);
    };

    this.bias = new NeuralFactor(isInput ? 0 : Math.random());
    this.error = 0;
    this.input = [];
    this.output = 0;

    this.findInput = function(signal) {
        var input = self.input.filter(function(input) {
            return signal == input.signal;
        })[0];
        return input;
    };
}

function NeuralLayer() {
    var self = this;
    this.pulse = function() {
        self.neurons.forEach(function(neuron) {
            neuron.pulse();
        });
    };
    this.neurons = [];
    this.train = function(learningRate) {
        self.neurons.forEach(function(neuron) {
            neuron.bias.weight += neuron.bias.delta * learningRate;
            neuron.bias.delta = 0;
            neuron.input.forEach(function(input) {
                input.factor.weight += input.factor.delta * learningRate;
                input.factor.delta = 0;
            })
        })
    }
}

function NeuralNet(inputCount, hiddenCount, outputCount) {
    var self = this;
    this.inputLayer = new NeuralLayer();
    this.hiddenLayer = new NeuralLayer();
    this.outputLayer = new NeuralLayer();
    this.learningRate = 0.5;

    for(var i = 0; i < inputCount; i++)
        self.inputLayer.neurons.push(new Neuron(true));

    for(var i = 0; i < hiddenCount; i++)
        self.hiddenLayer.neurons.push(new Neuron());

    for(var i = 0; i < outputCount; i++)
        self.outputLayer.neurons.push(new Neuron());

    for (var i = 0; i < hiddenCount; i++)
        for (var j = 0; j < inputCount; j++)
            self.hiddenLayer.neurons[i].input.push({
                signal: self.inputLayer.neurons[j],
                factor: new NeuralFactor(Math.random())
            });

    for (var i = 0; i < outputCount; i++)
        for (var j = 0; j < hiddenCount; j++)
            self.outputLayer.neurons[i].input.push({
                signal: self.hiddenLayer.neurons[j],
                factor: new NeuralFactor(Math.random())
            });

    this.pulse = function() {
        self.hiddenLayer.pulse();
        self.outputLayer.pulse();
    };

    this.backPropagation = function(desiredResults) {
        for(var i = 0; i < self.outputLayer.neurons.length; i++) {
            var outputNeuron = self.outputLayer.neurons[i];
            var output = outputNeuron.output;
            outputNeuron.error = (desiredResults[i] - output) * output * (1.0 - output);
        }
        for(var i = 0; i < self.hiddenLayer.neurons.length; i++) {
            var hiddenNeuron = self.hiddenLayer.neurons[i];
            var error = 0;
            for(var j = 0; j < self.outputLayer.neurons.length; j++) {
                var outputNeuron = self.outputLayer.neurons[j];
                error += outputNeuron.error * outputNeuron.findInput(hiddenNeuron).factor.weight * hiddenNeuron.output * (1.0 - hiddenNeuron.output);
            }
            hiddenNeuron.error = error;
        }
        for(var j = 0; j < self.outputLayer.neurons.length; j++) {
            var outputNeuron = self.outputLayer.neurons[j];
            for(var i = 0; i < self.hiddenLayer.neurons.length; i++) {
                var hiddenNeuron = self.hiddenLayer.neurons[i];
                outputNeuron.findInput(hiddenNeuron).factor.delta += outputNeuron.error * hiddenNeuron.output;
            }
            outputNeuron.bias.delta += outputNeuron.error * outputNeuron.bias.weight;
        }
        for(var j = 0; j < self.hiddenLayer.neurons.length; j++) {
            var hiddenNeuron = self.hiddenLayer.neurons[j];
            for(var i = 0; i < self.inputLayer.neurons.length; i++) {
                var inputNeuron = self.inputLayer.neurons[i];
                hiddenNeuron.findInput(inputNeuron).factor.delta += hiddenNeuron.error * inputNeuron.output;
            }
            hiddenNeuron.bias.delta += hiddenNeuron.error * hiddenNeuron.bias.weight;
        }
    };
    this.train = function(input, desiredResults) {
        for(var i = 0; i < self.inputLayer.neurons.length; i++) {
            var neuron = self.inputLayer.neurons[i];
            neuron.output = input[i];
        }

        self.pulse();
        self.backPropagation(desiredResults);

        self.hiddenLayer.train(self.learningRate);
        self.outputLayer.train(self.learningRate);
    };

}

Now I'm trying to get it to learn the XOR problem. I train it like this:

var net = new NeuralNet(2,2,1);

var testInputs = [[0,0], [0,1], [1,0], [1,1]];
var testOutputs = [[1],[0],[0],[1]];

for (var i = 0; i < 1000; i++)
    for(var j = 0; j < 4; j++)
        net.train(testInputs[j], testOutputs[j]);

function UseNet(a, b) {
    net.inputLayer.neurons[0].output = a;
    net.inputLayer.neurons[1].output = b;
    net.pulse();

    return net.outputLayer.neurons[0].output;
}

The problem is that no matter what parameters I use, every result I get is close to 0.5 and fairly random. For example:

UseNet(0,0) => 0.5107701166677714
UseNet(0,1) => 0.4801498747476413
UseNet(1,0) => 0.5142463167153447
UseNet(1,1) => 0.4881829364416052

What is wrong with my code?

Jor*_*ray 2

This network is plenty big enough for the XOR problem, and I don't see any obvious mistakes, so I suspect it is getting stuck in a local minimum.
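A quick way to check is to log the network's average squared error while it trains; if it flattens out well above zero, it has stalled in a minimum rather than still learning. A minimal sketch against the NeuralNet from the question (averageError is just a helper made up for this check):

function averageError(net, inputs, outputs) {
    // Average squared error of the single output neuron over all training patterns.
    var total = 0;
    for (var p = 0; p < inputs.length; p++) {
        for (var k = 0; k < inputs[p].length; k++)
            net.inputLayer.neurons[k].output = inputs[p][k];
        net.pulse();
        var diff = outputs[p][0] - net.outputLayer.neurons[0].output;
        total += diff * diff;
    }
    return total / inputs.length;
}

// Inside the training loop from the question:
//     if (i % 100 === 0) console.log(i, averageError(net, testInputs, testOutputs));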


Try running through the training set 10,000 times instead of 1,000; that makes it much more likely to break out of the minimum and converge. You can also improve convergence considerably by increasing the number of hidden neurons, adjusting η (the learning rate), or adding momentum. To implement the latter, try using this as your training function:

this.train = function(learningRate) {
    var momentum = 0 /* Some value, probably fairly small. */;
    self.neurons.forEach(function(neuron) {
        neuron.bias.weight += neuron.bias.delta * learningRate;
        neuron.bias.delta = 0;
        neuron.input.forEach(function(input) {
            input.factor.weight += (input.factor.delta * learningRate) + (input.factor.weight * momentum);
            input.factor.delta = 0;
        })
    })
}

I got good results by changing the learning rate to 1.5 (fairly high) and the momentum to 0.000001 (fairly small).
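Wired into the code from the question, that comes out roughly as follows (a sketch, not a drop-in fix: momentum stays hard-coded inside NeuralLayer.train as in the snippet above, and testInputs/testOutputs are the arrays from the question; increasing the hidden layer, e.g. new NeuralNet(2, 4, 1), is another knob worth trying):

var net = new NeuralNet(2, 2, 1);
net.learningRate = 1.5;             // fairly high
// momentum = 0.000001 inside NeuralLayer.train (fairly small)

for (var i = 0; i < 10000; i++)     // 10,000 passes instead of 1,000
    for (var j = 0; j < 4; j++)
        net.train(testInputs[j], testOutputs[j]);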


(Incidentally, have you tried running the .NET implementation with a few different seeds? It can take quite a while to converge, too!)
