-
Notifications
You must be signed in to change notification settings - Fork 9
/
neuron.js
184 lines (168 loc) · 5.98 KB
/
neuron.js
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
(function() {
'use strict';
const activations = require('./activations');
function Neuron() {
this.activation = null;
this.weights = null,
this.bias = null,
this.weightedInput = null,
this.error = null;
this.gradientsW = null;
this.gradientB = null;
this.output = null;
}
/**
 * @description
 * Initialise the neuron's type of activation function. If the passed type
 * is not supported, defaults to the SIGMOID function (see activations.js
 * for the supported activation function types).
 *
 * @param {String} activation The name of the activation fn (eg. SIGMOID)
 */
Neuron.prototype.initialiseActivationFn = function(activation='SIGMOID') {
  // Fall back to SIGMOID for unrecognised names, as the contract above
  // promises; previously an unsupported name left this.activation undefined.
  this.activation = activations[activation.toUpperCase()] || activations.SIGMOID;
};
/**
 * @description
 * Initialise the weights of the neuron.
 *
 * Each weight is a random number between 0 and 1 and corresponds to one of
 * the neuron's inputs, so the number of weights equals the number of inputs.
 * When a non-empty `values` array is supplied it is used as-is instead of
 * generating random weights.
 *
 * @param {Number} numInputs The number of inputs of the neuron
 * @param {Array} [values] Optional explicit weight values
 */
Neuron.prototype.initialiseWeights = function(numInputs, values) {
  if (values && values.length) {
    this.weights = values;
  } else {
    this.weights = Array.from({ length: numInputs }, () => Math.random());
  }
};
/**
 * @description
 * Initialise the bias of the neuron.
 *
 * Uses the supplied value when one is given; otherwise picks a random
 * number between 0 and 1.
 *
 * @param {Number} [value] Optional explicit bias value (0 is allowed)
 */
Neuron.prototype.initialiseBias = function(value) {
  // `value == null` (not `value ||`) so an explicit bias of 0 is honoured
  // instead of being silently replaced by a random number.
  this.bias = value == null ? Math.random() : value;
};
/**
 * @description
 * Reset the neuron's error (the "delta" in the academic literature) to zero.
 *
 * In backpropagation this error is what the gradient of the cost function
 * with respect to the neuron's weights and bias is computed from, so it must
 * be zeroed before each backward pass.
 */
Neuron.prototype.initialiseError = function() {
  this.error = 0;
};
/**
 * @description
 * Compute the weighted input of the neuron.
 *
 * The weighted input is the dot product of the previous layer's outputs with
 * this neuron's weights, plus the bias. For inputs x1..x3 with weights
 * w1..w3 and bias b:
 *
 *   WI(n) = (x1*w1 + x2*w2 + x3*w3) + b
 *
 * For hidden/output layers the "inputs" are the outputs of the previous
 * layer's neurons
 * (see http://i.stack.imgur.com/76Kuo.png for a visualization).
 *
 * @param {Object} prevLayer The previous layer
 */
Neuron.prototype.updateWeightedInput = function(prevLayer) {
  // Fold the dot product over the previous layer, seeded with the bias.
  this.weightedInput = prevLayer.nodes.reduce(
    (sum, prevNode, index) => sum + prevNode.output * this.weights[index],
    this.bias
  );
};
/**
 * @description
 * Compute the output of the neuron by applying the activation function to
 * its weighted input (so updateWeightedInput must have run first).
 */
Neuron.prototype.updateOutput = function() {
  const weightedInput = this.weightedInput;
  this.output = this.activation.fn(weightedInput);
};
/**
 * @description
 * Set the error (delta) for a neuron in the output layer.
 *
 * Implements the first backpropagation equation (BP1, see
 * http://neuralnetworksanddeeplearning.com/chap2.html): the difference
 * between actual and expected output, scaled by the derivative of the
 * activation function at the weighted input. Only valid for output-layer
 * neurons.
 *
 * @param {Number} expectedOutput The expectedOutput
 */
Neuron.prototype.updateOutputError = function(expectedOutput) {
  const difference = this.output - expectedOutput;
  const slope = this.activation.fnPrime(this.weightedInput);
  this.error = difference * slope;
};
/**
 * @description
 * Set the error (delta) for a neuron in a hidden layer.
 *
 * Implements the second backpropagation equation (BP2, see
 * http://neuralnetworksanddeeplearning.com/chap2.html): the weighted sum of
 * the next layer's errors (using the weights connecting this neuron to
 * them), scaled by the derivative of the activation function at this
 * neuron's weighted input. Only valid for hidden-layer neurons.
 *
 * @param {Number} crrNodeIndex The index of the current neuron
 * @param {Object} nextLayer The next layer
 */
Neuron.prototype.updateError = function(crrNodeIndex, nextLayer) {
  // Accumulate in a local so repeated calls cannot compound a stale
  // this.error; the old code added into this.error directly and silently
  // relied on initialiseError having been called immediately beforehand.
  let backPropagated = 0;
  nextLayer.nodes.forEach((nextNode) => {
    backPropagated += nextNode.error * nextNode.weights[crrNodeIndex];
  });
  this.error = backPropagated * this.activation.fnPrime(this.weightedInput);
};
/**
 * @description
 * Update the bias of the neuron with the gradient descent update rule for
 * the bias component: the gradient of the cost w.r.t. the bias is the
 * neuron's error, so the bias moves against it, scaled by the learning
 * rate. (see https://en.wikipedia.org/wiki/Gradient_descent for gradient
 * descent in general and
 * http://neuralnetworksanddeeplearning.com/chap1.html#learning_with_gradient_descent
 * for gradient descent applied in NN)
 *
 * @param {Number} learningRate The learning rate
 */
Neuron.prototype.updateBias = function(learningRate) {
  // TODO(review): should this be averaged over the mini-batch size?
  const biasGradient = this.error;
  this.bias -= learningRate * biasGradient;
};
/**
 * @description
 * Update the weights of the neuron with the gradient descent update rule
 * for the weight component: each weight's gradient is the corresponding
 * previous-layer output times this neuron's error; the weight moves against
 * it, scaled by the learning rate. The gradients are also recorded in
 * this.gradientsW. (see https://en.wikipedia.org/wiki/Gradient_descent for
 * gradient descent in general and
 * http://neuralnetworksanddeeplearning.com/chap1.html#learning_with_gradient_descent
 * for gradient descent applied in NN)
 *
 * @param {Number} learningRate The learning rate
 * @param {Object} prevLayer The previous layer
 */
Neuron.prototype.updateWeights = function(learningRate, prevLayer) {
  // First compute and record every gradient, then apply the updates.
  this.gradientsW = prevLayer.nodes.map((prevNode) => prevNode.output * this.error);
  this.gradientsW.forEach((gradient, index) => {
    this.weights[index] -= learningRate * gradient;
  });
};
module.exports = Neuron;
})();