% LRtemplate.m
%%
clear;
close all;
%% load data
load('spiral1_test.mat');
load('spiral1_train.mat');
[n,m] = size(inputs_train);
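% assumed layout (the .mat files are not included here): inputs_train is
% num_features x num_examples, which is why it is transposed before being
% passed to logistic in the training loop below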
% learning rate
parameters.learning_rate = 0.001;
% weight regularization parameter
parameters.weight_regularization = 0.5;
% number of gradient descent iterations
parameters.num_iterations = 200;
% logistic regression weights (constant initialization; a random
% alternative is commented out below)
weights = -ones(n+1, 1);
% weights = -rand(n+1, 1);
%% verify that your logistic function produces the right gradient;
% diff should be very close to 0. This creates small random data with
% 20 examples and 10 dimensions and checks the gradient on that data.
nexamples = 20;
ndimensions = 10;
diff = checkgrad('logistic', ...
    randn((ndimensions + 1), 1), ...   % weights
    0.001, ...                         % finite-difference perturbation
    randn(nexamples, ndimensions), ... % data
    rand(nexamples, 1), ...            % targets
    parameters)
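% optional sanity check (not in the original template): with a correct
% gradient, checkgrad's relative difference is typically far below 1e-6;
% the 1e-4 threshold here is an assumed tolerance, not part of checkgrad
if diff > 1e-4
    warning('gradient check failed: diff = %g', diff);
end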
%% begin learning with gradient descent
for t = 1:parameters.num_iterations
    % find the negative log-likelihood and its derivative w.r.t. the weights
    [f, df, frac_correct_train] = logistic(weights, inputs_train', target_train(1,:)', parameters);
    % find the fraction of correctly classified examples on the test set,
    % which doubles as a validation set in this template
    [~, ~, frac_correct_valid] = logistic(weights, inputs_test', target_test(1,:)', parameters);
    % guard against a diverging objective
    if isnan(f) || isinf(f)
        error('nan/inf error');
    end
    % gradient descent update of the weights
    weights = weights - parameters.learning_rate .* df;
    % print some stats
    fprintf(1, 'ITERATION:%4i LOGL:%4.2f TRAIN FRAC:%2.2f VALID FRAC:%2.2f\n', ...
        t, f, frac_correct_train*100, frac_correct_valid*100);
    % plot the negative log-likelihood vs iteration
    hold on;
    plot(t, f, 'b*');
end
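
% label the training curve plotted inside the loop above
xlabel('iteration');
ylabel('negative log-likelihood');

%% minimal sketch of the logistic function this template expects
% NOTE: logistic.m is not shown in this file, so the local function below
% is a hedged sketch, not the assignment's actual implementation. It
% assumes a sigmoid model with an appended bias input and an L2 penalty
% weighted by parameters.weight_regularization. If a separate logistic.m
% already exists on the path, delete this local function, since local
% functions shadow same-named files on the path.
function [f, df, frac_correct] = logistic(weights, data, targets, parameters)
    % append a column of ones so the last weight acts as a bias term
    X = [data, ones(size(data, 1), 1)];   % n x (d+1)
    z = X * weights;                      % n x 1 logits
    y = 1 ./ (1 + exp(-z));               % sigmoid outputs in (0, 1)
    lambda = parameters.weight_regularization;
    % negative log-likelihood plus L2 penalty (eps avoids log(0))
    f = -sum(targets .* log(y + eps) + (1 - targets) .* log(1 - y + eps)) ...
        + 0.5 * lambda * (weights' * weights);
    % gradient of f with respect to the weights
    df = X' * (y - targets) + lambda * weights;
    % fraction of examples classified correctly at threshold 0.5
    frac_correct = mean((y > 0.5) == (targets > 0.5));
end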