-
Notifications
You must be signed in to change notification settings - Fork 7
/
logistic_pen.m
40 lines (33 loc) · 1.23 KB
/
logistic_pen.m
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
%%%%
% Calculate penalized cross-entropy and derivatives with respect to weights.
% Inputs:
%   weights    - (M+1) by 1 vector of weights. The FIRST element is the bias:
%                a column of ones is prepended to data below, so weights(1)
%                multiplies that column. NOTE(review): the original header
%                claimed the bias was the LAST element, which contradicted
%                the code; confirm the convention against the caller.
%   data       - N by M data matrix where each row is one data point
%   targets    - N by 1 vector of 0/1 target class labels
%   parameters - struct with field weight_regularization (lambda)
% Outputs:
%   f            - penalized cross-entropy:
%                  CE + (lambda/2) * sum of squared non-bias weights
%   df           - (M+1) by 1 vector of derivatives of f w.r.t. weights
%   frac_correct - fraction of correctly classified examples (0.5 threshold)
%%%%
function [f, df, frac_correct] = logistic_pen(weights, data, targets, parameters)
    lambda = parameters.weight_regularization;
    % Prepend a column of ones so weights(1) acts as the bias term.
    data = [ones(size(data, 1), 1), data];
    % Predicted probability of the positive class for each example.
    p = sigmoid(data * weights);
    % Cross-entropy for 0/1 targets, plus the L2 penalty on the non-bias
    % weights. BUG FIX: the original omitted the penalty from f even though
    % df accounted for regularization, so f and df were inconsistent.
    f = -sum(targets .* log(p) + (1 - targets) .* log(1 - p)) ...
        + 0.5 * lambda * sum(weights(2:end) .^ 2);
    % Gradient: X' * (p - t), plus lambda * w on the non-bias weights.
    % BUG FIX: the original SUBTRACTED the penalty gradient, which pushes
    % weights away from zero instead of shrinking them. The original loop
    % bound 1:size(weights) also relied on the colon operator silently using
    % only the first element of size()'s vector result; the vectorized form
    % avoids both the loop and the unpreallocated growth of df.
    df = data' * (p - targets);
    df(2:end) = df(2:end) + lambda * weights(2:end);
    % Fraction of examples whose thresholded prediction matches the target.
    predictions = (p > 0.5);
    frac_correct = sum(targets == predictions) / size(targets, 1);
end