function [score iter correct falseNeg falsePos unsures] = TestVectorMachine( weight, kernel, labels, bias )
%Test function for vector machine -- takes a kernel matrix built from test
%data, its labels, and a trained set of weights (alphas) and a bias, and
%counts how many points the machine gets wrong
[xn xm] = size(kernel);
falseNeg = 0;
falsePos = 0;
unsures = 0;
iter = 0; %number of misclassified points
for i = 1:xn
    %decision value f(x_i); assumes weight and labels are column vectors
    fXi = kernel(i,:) * (weight .* labels) + bias;
    if (fXi * labels(i)) <= 0
        if fXi > 0
            falsePos = falsePos + 1;
        elseif fXi < 0
            falseNeg = falseNeg + 1;
        else
            unsures = unsures + 1;
        end
        iter = iter + 1;
    end
end
score = (xn - iter) / xn * 100; %percent classified correctly
correct = xn - iter;            %number classified correctly
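Here is a minimal sketch of how this fits together with the kernel and training functions from the other posts in this series. The data, C value, and scale are illustrative only, and it assumes the Statistics Toolbox iris data is available.

% end-to-end sketch: build a kernel, train, then self-test
% (this tests on the training kernel; a real test set would need
% kernel rows between the test points and the training points)
load fisheriris                         % two easy iris classes
X = meas(1:100, :);                     % 100 points, one per row
y = [ones(50,1); -ones(50,1)];          % +/-1 column vector of labels
[K H] = KernelKreator(X, y, 1, 0);      % toggle 0 = plain dot product
[weights bias] = TannSchmidVectorMachineSoftMarginUno(K, H, y, 10);
score = TestVectorMachine(weights, K, y, bias);
fprintf('training accuracy: %.1f%%\n', score);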
Thursday, April 28, 2011
Soft Margin one-norm SVM
function [ weights bias ] = TannSchmidVectorMachineSoftMarginUno( K, H, labels, C)
%Trains a one-norm soft margin SVM given a kernel matrix K, the
%label-weighted matrix H (H(i,j) = y_i * y_j * K(i,j)), a column vector of
%labels, and a box constraint C
[xm xn] = size(K);
[ym yn] = size(labels);
%check to make sure kernel & labels have the same dimension
if xm ~= ym
    display('Sorry, this is an idiot proof function. Try feeding in valid parameters next time, doof!');
    return;
end
%scale C down
C = C / xm;
%allocate space for the different parts of the QP
f = zeros(xm, 1);
A = zeros(2 * xm + 4, xm);
%build constraints matrix: rows 1-2 force labels' * alpha = 0, rows 3-4
%force sum(alpha) = 1, and the remaining rows force 0 <= alpha_i <= C
A(1,:) = labels';
A(2,:) = -labels';
A(3,:) = ones(1, xm);
A(4,:) = -ones(1, xm);
for i = 1:xm
    A(i+4, i) = 1;
end
for i = 1:xm
    A(i+4+xm, i) = -1;
end
b = [0; 0; 1; -1; (C - 10^(-7)) * ones(xm,1); zeros(xm, 1)];
[weights v] = quadprog(H, f, A, b);
%find the bias and normalize it by ||w|| in feature space
bias = GetSoftBiasUno(weights, K, labels, C);
bias = bias / sqrt(weights' * H * weights);
save('recordedResults0', 'weights', 'bias', 'K');
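For reference, here is the quadratic program the constraint matrix encodes, as I read it (alpha is the weight vector quadprog solves for):

\min_{\alpha}\ \tfrac{1}{2}\,\alpha^{T} H \alpha \quad \text{subject to} \quad y^{T}\alpha = 0, \quad \textstyle\sum_{i} \alpha_{i} = 1, \quad 0 \le \alpha_{i} \le C

The equality constraints are fed to quadprog as pairs of opposing inequalities, which is why rows 1-4 of A come in +/- pairs.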
Soft Margin 2-norm SVM
function [ weights bias ] = TannSchmidVectorMachineSoftMarginDos( K, H, labels, C)
%Trains a 2-norm soft margin SVM given a kernel matrix K, the
%label-weighted matrix H (H(i,j) = y_i * y_j * K(i,j)), a column vector of
%labels, and a box constraint C
[xm xn] = size(K);
[ym yn] = size(labels);
%check to make sure kernel & labels have the same dimension
if xm ~= ym
    display('Sorry, this is an idiot proof function. Try feeding in valid parameters next time, doof!');
    return;
end
%scale C down, then fold the 2-norm slack penalty into the Hessian
C = C / xm;
H = (1/2) * (H + (1 / C) * eye(xm));
%allocate space for the different parts of the QP
f = zeros(xm, 1);
A = zeros(xm + 4, xm);
%build constraints matrix: rows 1-2 force labels' * alpha = 0, rows 3-4
%force sum(alpha) = 1, and the remaining rows force alpha_i >= 0
A(1,:) = labels';
A(2,:) = -labels';
A(3,:) = ones(1, xm);
A(4,:) = -ones(1, xm);
for i = 1:xm
    A(i+4, i) = -1;
end
b = [0; 0; 1; -1; zeros(xm, 1)];
[weights v] = quadprog(H, f, A, b);
%find the bias and normalize it by ||w|| in feature space
bias = GetSoftBiasDos(weights, K, labels, C);
bias = bias / sqrt(weights' * H * weights);
save('recordedResults0', 'weights', 'bias', 'K');
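The only structural change from the one-norm version is that the upper box constraint on alpha disappears and the slack penalty moves into the Hessian. If I recall the standard derivation correctly, the 2-norm soft margin dual is just the hard margin dual run on a shifted kernel:

H \leftarrow H + \tfrac{1}{C} I \quad \Longleftrightarrow \quad K \leftarrow K + \tfrac{1}{C} I

so the diagonal term above is doing all of the regularization work.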
Hard Margin one-norm SVM
function [ weights bias ] = TannSchmidVectorMachineHardMarginUno( K, H, labels)
%Trains a hard margin SVM given a kernel matrix K, the label-weighted
%matrix H (H(i,j) = y_i * y_j * K(i,j)), and a column vector of labels
[xm xn] = size(K);
[ym yn] = size(labels);
%check to make sure kernel & labels have the same dimension
if xm ~= ym
    display('Sorry, this is an idiot proof function. Try feeding in valid parameters next time, doof!');
    return;
end
%allocate space for the different parts of the QP
f = -ones(xm, 1);
A = zeros(xm + 2, xm);
b = zeros(xm + 2, 1);
%build constraints matrix: rows 1-2 force labels' * alpha = 0, the
%remaining rows force alpha_i >= 0
A(1,:) = labels';
A(2,:) = -labels';
for i = 1:xm
    A(i+2, i) = -1;
end
[weights v] = quadprog(H, f, A, b);
%find the bias
bias = getHardMarginBias(weights, K, labels);
save('recordedResults0', 'weights', 'bias', 'K');
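With f = -ones and only the equality and nonnegativity constraints, this is the textbook hard margin dual:

\max_{\alpha}\ \textstyle\sum_{i} \alpha_{i} - \tfrac{1}{2}\,\alpha^{T} H \alpha \quad \text{subject to} \quad y^{T}\alpha = 0, \quad \alpha_{i} \ge 0

which quadprog handles in its equivalent minimization form \min \tfrac{1}{2}\alpha^{T} H \alpha + f^{T}\alpha.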
Soft Margin one-norm Bias Calculator
function [ bias ] = GetSoftBiasUno( weights, kernel, labels, C)
%Estimates the bias for the one-norm machine by averaging over pairs of
%oppositely-labeled support vectors that sit strictly inside the box
[xm xn] = size(kernel);
counter = 0;
bias = 0;
for i = 1:xm
    if weights(i) > 10^(-10) && weights(i) < (C - 10^(-10))
        sgnLastY = labels(i) > 0;
        %calculate first <w, x_i>
        partialSum = 0;
        for k = 1:xm
            partialSum = partialSum + labels(k) * kernel(i,k) * weights(k);
        end
        wXi = partialSum;
        %pair x_i with every later support vector of the opposite sign
        for j = i:xm
            if weights(j) > 10^(-10) && weights(j) < (C - 10^(-10)) && sgnLastY ~= (labels(j) > 0)
                %calculate second <w, x_j>
                partialSum = 0;
                for k = 1:xm
                    partialSum = partialSum + labels(k) * kernel(j,k) * weights(k);
                end
                wXj = partialSum;
                bias = bias + -(wXi + wXj) / 2;
                counter = counter + 1;
            end
        end
    end
end
bias = bias / counter;
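Each qualifying pair contributes a midpoint estimate of the bias: for a positive and a negative margin support vector x_i and x_j, the separating surface should pass halfway between them, giving

b \approx -\frac{\langle w, x_i \rangle + \langle w, x_j \rangle}{2}, \qquad \langle w, x_i \rangle = \textstyle\sum_{k} \alpha_{k} y_{k} K(x_k, x_i)

and the function averages this over all such pairs.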
Soft Margin 2-norm Bias Calculator
function [ bias ] = GetSoftBiasDos( weights, kernel, labels, C)
%Estimates the bias for the 2-norm machine by averaging over pairs of
%oppositely-labeled support vectors, with a slack correction term
[xm xn] = size(kernel);
counter = 0;
bias = 0;
for i = 1:xm
    if weights(i) > 10^(-10)
        sgnLastY = labels(i) > 0;
        %calculate first <w, x_i>
        partialSum = 0;
        for k = 1:xm
            partialSum = partialSum + labels(k) * kernel(i,k) * weights(k);
        end
        wXi = partialSum;
        %pair x_i with every later support vector of the opposite sign
        for j = i:xm
            if weights(j) > 10^(-10) && sgnLastY ~= (labels(j) > 0)
                %calculate second <w, x_j>
                partialSum = 0;
                for k = 1:xm
                    partialSum = partialSum + labels(k) * kernel(j,k) * weights(k);
                end
                wXj = partialSum;
                bias = bias + -(wXi + wXj) / 2 - (labels(i) * weights(i) - labels(j) * weights(j)) / (2 * C);
                counter = counter + 1;
            end
        end
    end
end
bias = bias / counter;
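If I follow the usual 2-norm derivation, the extra term relative to the one-norm version comes from the KKT condition for a 2-norm support vector,

y_i\left(\langle w, x_i \rangle + b\right) = 1 - \frac{\alpha_i}{C}

so each pair's midpoint estimate gets shifted by a multiple of the alphas over C instead of sitting exactly on the margin.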
Hard Margin Bias Calculator
function [ bias ] = getHardMarginBias(weights, kernel, labels)
%returns the bias, averaged over all support vectors
[xm xn] = size(kernel);
counter = 0;
bias = 0;
for i = 1:xm
    if weights(i, 1) > 10^(-12)
        partialSum = 0;
        for j = 1:xm
            partialSum = partialSum + labels(j) * kernel(i,j) * weights(j);
        end
        bias = bias + labels(i) - partialSum;
        counter = counter + 1;
    end
end
bias = bias / counter;
end
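This is the standard KKT estimate: every hard margin support vector satisfies y_i(\langle w, x_i \rangle + b) = 1, so

b = \frac{1}{|S|} \sum_{i \in S} \left( y_i - \sum_{j} \alpha_j y_j K(x_i, x_j) \right)

where S is the set of indices with \alpha_i > 0, averaged over all of S for numerical stability.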
Gaussian Kernel
function [ result ] = GaussKernel( x, y, sigma )
%Gaussian (RBF) kernel: exp(-||x - y||^2 / sigma); here sigma plays the
%role of the squared bandwidth
result = norm(x - y)^2;
result = result / sigma;
result = exp(-result);
end
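A quick sanity check of the kernel's behavior (the numbers are my own toy values): identical points give 1, and far-apart points decay toward 0.

GaussKernel([0 0], [0 0], 2)   % ans = 1
GaussKernel([0 0], [1 1], 2)   % ans = exp(-1), about 0.3679
GaussKernel([0 0], [10 0], 2)  % ans = exp(-50), effectively 0

Note that with this convention sigma plays the role of 2*sigma^2 in the more common exp(-||x - y||^2 / (2 sigma^2)) form.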
Default Kernel
function [ result ] = defaultKernel(x, y, A)
%One of many kernel functions. Takes row vectors x and y and returns the
%kernel value as a dot product weighted by a positive definite matrix A
[xm xn] = size(x);
[ym yn] = size(y);
%chol's second output is zero exactly when A is positive definite
[R isPosDef] = chol(A);
if isPosDef ~= 0 || xm ~= ym || xn ~= yn || xn == 1
    disp('sorry, this function is idiot proof. Please enter in a positive definite matrix A');
    result = -1;
    return;
end
result = x * (A * y');
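In symbols, this is the A-weighted inner product

K(x, y) = x A y^{T}

which reduces to the ordinary dot product when A is the identity, which is how the Kernel Creator below uses it.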
Kernel Creator
function [ K H ] = KernelKreator( training, labels, scale, toggle)
%Builds the kernel matrix K and the label-weighted matrix H
%(H(i,j) = y_i * y_j * K(i,j)) from the training data. Toggle details
%which kernel we use: 0 = scaled dot product, 1 = quadratic, 2 = cubic,
%3 = Gaussian (with scale as the bandwidth)
[xm xn] = size(training);
[ym yn] = size(labels);
%check to make sure training & labels have same dimension and toggle is
%valid
if xm ~= ym || toggle < 0 || toggle > 3
    display('Sorry, this is an idiot proof function. Try feeding in valid parameters next time, doof!');
    return;
end
K = zeros(xm, xm);
H = zeros(xm, xm);
%build kernel based on toggle used; only the upper triangle is computed
%and then mirrored, since K and H are symmetric
if toggle == 0 %use regular dot product
    for i = 1:xm
        for j = i:xm
            K(i,j) = (defaultKernel(training(i, :), training(j, :), eye(xn))) / scale;
            K(j,i) = K(i,j);
            H(i,j) = (K(i,j) * labels(i) * labels(j));
            H(j,i) = H(i,j);
        end
    end
elseif toggle == 1 %inhomogeneous quadratic kernel
    for i = 1:xm
        for j = i:xm
            K(i,j) = (defaultKernel(training(i, :), training(j, :), eye(xn))) / scale;
            K(i,j) = (K(i,j) + 1)^2;
            K(j,i) = K(i,j);
            H(i,j) = (K(i,j) * labels(i) * labels(j));
            H(j,i) = H(i,j);
        end
    end
elseif toggle == 2 %inhomogeneous cubic kernel
    for i = 1:xm
        for j = i:xm
            K(i,j) = (defaultKernel(training(i, :), training(j, :), eye(xn))) / scale;
            K(i,j) = (K(i,j) + 1)^3;
            K(j,i) = K(i,j);
            H(i,j) = (K(i,j) * labels(i) * labels(j));
            H(j,i) = H(i,j);
        end
    end
elseif toggle == 3 %Gaussian kernel
    for i = 1:xm
        for j = i:xm
            K(i,j) = GaussKernel(training(i, :), training(j, :), scale);
            K(j,i) = K(i,j);
            H(i,j) = (K(i,j) * labels(i) * labels(j));
            H(j,i) = H(i,j);
        end
    end
end
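A small illustration of the four toggles on toy data (the points and values here are my own, purely illustrative):

% the same three points run through each kernel toggle
X = [0 0; 1 0; 0 1];    % three points in the plane
y = [1; 1; -1];          % their labels
[Klin Hlin] = KernelKreator(X, y, 1, 0);    % plain dot product
[Kq Hq]     = KernelKreator(X, y, 1, 1);    % quadratic: (<x,y> + 1)^2
[Kc Hc]     = KernelKreator(X, y, 1, 2);    % cubic: (<x,y> + 1)^3
[Kg Hg]     = KernelKreator(X, y, 2, 3);    % Gaussian with sigma = 2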
Support Vector Machines
For our Applied Topics in Mathematics class we had to code up some basic versions of support vector machines. One of my classmates and I coded the following three: a hard margin one-norm maximal margin SVM and two soft margin maximal margin SVMs (one-norm & 2-norm versions). The next few posts will be the MATLAB code of those machines. Feel free to comment on them and offer any suggestions where appropriate.