backprop.m
function [W,b,mse] = backprop(p, t, L, s, alpha, epochs, output)
% p = input matrix, t = target class labels, L = number of layers,
% s = number of neurons per hidden layer, alpha = learning rate,
% epochs = number of epochs to train, output = number of neurons in the output layer
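% Returns the trained weights W and biases b (one cell entry per layer)
% and mse, the mean squared error for each epoch. The targets t must be
% integers in 0..output-1, since each t(i) indexes a one-hot vector below.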
[input_length, input_samples] = size(p);
% store all matrices, one cell entry per layer
W = cell(L,1); % weights
b = cell(L,1); % biases
n = cell(L,1); % net input (layer output before the transfer function)
a = cell(L,1); % layer output after the transfer function
S = cell(L,1); % sensitivities
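% layer indexing: m = 1 is the first hidden layer, m = L is the output layer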
% generate the weights and biases for each layer
for m = 1:L
    if (m == 1)
        % weights and biases for the first hidden layer
        [W{m},b{m}] = generate_weight_bias(s,input_length);
    elseif (m == L)
        % weights and biases for the output layer
        [W{m},b{m}] = generate_weight_bias(output,s);
    else
        % weights and biases between hidden layers
        [W{m},b{m}] = generate_weight_bias(s,s);
    end
end
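% resulting dimensions: W{1} is s x input_length, W{2}..W{L-1} are s x s,
% and W{L} is output x s; each b{m} has one entry per neuron in layer m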
tic
% training loop: feedforward, then propagate sensitivities backwards
x = 1;
mse = zeros(epochs,1);
while x < epochs+1
    label = zeros(output,1); % one-hot target vector, reset after each sample
    mse(x) = 0;
    for i = 1:input_samples
        % feedforward: compute every layer's output for sample i
        for m = 1:L
            if (m == 1)
                n{m} = W{m}*p(:,i)+b{m};
            else
                n{m} = W{m}*a{m-1}+b{m};
            end
            a{m} = logsig(n{m}); % log-sigmoid transfer function
        end
        % convert the expected class label into a one-hot target vector
        label(t(i)+1) = 1;
        % propagate sensitivities backwards:
        %   S{L} = -2*F'(n{L})*(label-a{L}),  S{m} = F'(n{m})*W{m+1}'*S{m+1}
        for m = L:-1:1
            diff_sig = diag((1-a{m}).*a{m}); % F'(n{m}): derivative of logsig
            if (m == L)
                mse(x) = mse(x) + sum((label-a{m}).^2)/output;
                S{m} = -2*diff_sig*(label-a{m});
            else
                S{m} = diff_sig*W{m+1}'*S{m+1};
            end
        end
        label(t(i)+1) = 0; % reset the one-hot entry for the next sample
        % update weights and biases (gradient descent step for this sample)
        for m = 1:L
            if (m == 1)
                W{m} = W{m}-alpha*S{m}*p(:,i)';
            else
                W{m} = W{m}-alpha*S{m}*a{m-1}';
            end
            b{m} = b{m}-alpha*S{m};
        end
    end
    mse(x) = mse(x)/input_samples; % average the squared error over all samples
    x = x+1;
end
toc
end
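
generate_weight_bias is called above but not defined in this file. The
following is a minimal sketch of a helper with a compatible interface,
assuming small uniform random initialization; the name matches the call
sites above, but the body is an assumption, not this repository's code:

function [W,b] = generate_weight_bias(rows, cols)
% Hypothetical sketch: a rows-by-cols weight matrix and a rows-by-1 bias
% vector, drawn uniformly from [-0.5, 0.5]. Not the repo's actual helper.
W = rand(rows, cols) - 0.5;
b = rand(rows, 1) - 0.5;
end

A minimal usage sketch, with made-up data purely for illustration (note
that logsig requires MATLAB's Neural Network / Deep Learning Toolbox):

% train a 3-layer network (two hidden layers of 10 neurons, 3 output
% neurons) on 100 random 4-dimensional samples with labels in 0..2
p = rand(4, 100);          % inputs: input_length x input_samples
t = randi([0 2], 1, 100);  % integer class labels, 0-based
[W, b, mse] = backprop(p, t, 3, 10, 0.1, 50, 3);
plot(mse), xlabel('epoch'), ylabel('mean squared error');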