diff --git a/CalculateOSPAMetric.m b/CalculateOSPAMetric.m new file mode 100644 index 0000000..a79c257 --- /dev/null +++ b/CalculateOSPAMetric.m @@ -0,0 +1,99 @@ +%CalculatePerformanceMetric +%Last modified 21 November 2013 +%Matlab code by Bryan Clarke b.clarke@acfr.usyd.edu.au + +%This is an implementation of the Optimal Subpattern Assignment +%(OSPA) metric proposed by Schuhmacher et al in +%Schuhmacher, D.; Ba-Tuong Vo; Ba-Ngu Vo, "A Consistent Metric for Performance Evaluation of Multi-Object Filters," Signal Processing, IEEE Transactions on , vol.56, no.8, pp.3447,3457, Aug. 2008 +%X is the estimated state in the form [ [x1; y1; vx1; vy1], [x2; y2; vx2; +%vy2] , ...] +%Y is the ground truth in the form [ [x1; y1; vx1; vy1], [x2; y2; vx2; +%vy2] , ...] +%This isn't actually important, as we will swap the labels so that X +%is the label of the shorter vector and Y is the label of the longer. +%cutoff_c and order_p are parameters that control the metric calculation; +%cutoff is a saturation threshold, order controls how punishing it is to +%larger errors versus smaller ones. See the paper by Schuhmacher et al to +%get a handle on these in more detail. +%NOTE: This implementation is still a work in progress and is a bit buggy. Use with caution. +%NOTE: We only use 2D OSPA, for position; we don't use the velocities. +function ospa = CalculateOSPAMetric(X, Y, cutoff_c, order_p) + + m = size(X, 2);%Length of vector X + n = size(Y, 2);%Length of vector Y + + alphas = cutoff_c * ones(1, n);%Initialise to cutoff, overwrite if there is a shorter value + bestOMATCost = -1; + bestOMATDataAssoc_i = []; + + %m (i.e. the length of X) needs to be less than or equal to n (the length of Y) + if(m > n)%Swap them if this is not the case. X and Y are just labels that can be applied to either vector; whichever one is estimate or truth is not important. + tmpX = X; + tmpm = m; + X = Y; + m = n; + Y = tmpX; + n = tmpm; + end + if(m > 0)%If there are other values, we need to find the best data association. + %We calculate all potential combinations (ie. subsampling without + %replacement) + comboSize = m; + valuesSampled = 1:n; + allCombos = combnk(valuesSampled, comboSize); + nCombos = size(allCombos, 1); + + %Then for each combination, we calculate every permutation (i.e. + %different ways to order the numbers) to find all possible data + %associations + for i = 1:nCombos + thisCombo = allCombos(i,:);%The combination we are using + allDataAssocs = perms(thisCombo); + nDataAssocs = size(allDataAssocs, 1); + %We check all the data associations for this combination + for j = 1:nDataAssocs + thisDataAssoc = allDataAssocs(j,:);%An ordered list of the indices of Y to match them against the values in X + thisY = Y(:,thisDataAssoc); + thisOMATCost = CalculateOMATMetric(X, thisY, order_p); + + if(bestOMATCost < 0) || (thisOMATCost < bestOMATCost) %If this is the first iteration, or the best so far, save + bestOMATCost = thisOMATCost; + bestOMATDataAssoc_i = thisDataAssoc; + end + end + end + + %Now that we have the best data association, we use it to calculate + %alphas for the matched points + for i = 1:m + thisX = X(:,i); + thisY = Y(:,bestOMATDataAssoc_i(i));%Use the data association array to pull out the corresponding value in Y + alphas(i) = min(cutoff_c, norm(thisX - thisY) ^ order_p);%New alpha is either the distance or the cutoff, whichever is lower. 
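+        %NOTE: In Schuhmacher et al. the per-point cost is the cutoff distance raised
+        %to the order, i.e. min(cutoff_c, d)^order_p, and each unmatched point
+        %contributes cutoff_c^order_p. The line above uses min(cutoff_c, d^order_p),
+        %and the unmatched points below contribute a plain cutoff_c, which agrees with
+        %the paper only for order_p = 1. This may be part of the "work in progress"
+        %caveat in the header, so treat results for order_p > 1 with caution.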
+ end + %Add cutoff for unmatched targets + for i = 1:(n - m) + alphas(m+i) = cutoff_c; + end + + end + alphasSum = sum(alphas) / n; + ospa = alphasSum ^ (1/order_p); +end + +%We assume that the length of X = the length of Y, since we only use this +%for finding the best data association for the OSPA metric. +%X and Y are 2d arrays [[x1; y1], [x2; y2] ...[xN; yN]] +%order_p is an integer greater than zero +function omat = CalculateOMATMetric(X, Y, order_p) + m = size(X, 2); + omat = 0; + for i = 1:m + thisX = X(:,i); + thisY = Y(:,i); + dx = thisX(1) - thisY(1); + dy = thisX(2) - thisY(2); + omat = omat + hypot(dx, dy) ^ order_p; + end + omat = omat / m; + omat = omat ^ (1/order_p);%Since length of X = length of Y, the OMAT distance simplifies to this +end \ No newline at end of file diff --git a/Calculate_Jacobian_H.m b/Calculate_Jacobian_H.m new file mode 100644 index 0000000..bf9c788 --- /dev/null +++ b/Calculate_Jacobian_H.m @@ -0,0 +1,57 @@ +%% Calculate_Jacobian_H +%Calculates the Jacobian of a range-bearing measurement model +%xS and yS are the position of the sensor +%xL, yL, vxL, vyL are the state of the landmark (position and velocities) +%Velocities are not used. +%This function is re-implemented as a one-line anonymous function in +%GM_EKF_PHD_Initialise_Jacobians but I kept this here in case you want more +%info on how it was calculated, or want to change it. +function H = Calculate_Jacobian_H(xS, yS, xL, yL, vxL, vyL) +%H = [d(f_r)/dX; d(f_theta)/dX ] +%H = [d(f_r) / dx d(f_r) / dy d(f_r) / dvx d(f_r) / dvy] +% [d(f_theta) / dx d(f_theta) / dy d(f_theta)/ dvx d(f_theta) / dvy] +%f_r = (delta_x^2 + delta_y^2)^1/2 = ((xL - xR)^2 + (yL - yR)^2)^1/2 +%f_theta = atan(delta_y/delta_x) +%d(f_r) / dx = (xL - xR) / range (where range = hypot(xL - xR, yL - yR)) +%d(f_r) / dy = (yL - yR) / range +%d(f_theta) / dx = (yR - yL) / range^2 +%d(f_theta) / dy = (xL - xR) / range^2 + +% We use wolfram alpha to differentiate things. 
+% "Derivative of ((x - a)^2 + (y - b)^2)^1/2 with respect to x" gives +% (x - a) / ((x - a)^2 + (y - b)^2)^1/2 +% "Derivative of ((x - a)^2 + (y - b)^2)^1/2 with respect to y" gives +% (y - b) / ((x - a)^2 + (y - b)^2)^1/2 +%"Derivative of atan2((y-a),(x-b)) with respect to x" gives +%(a - y) / ((x - a)^2 + (y - b)^2) <--------- NOTE that this is (a - y) on +%numerator, not (y - a) +%"Derivative of atan2((y-a),(x-b)) with respect to y" gives +%(x - b) / ((x - a)^2 + (y - b)^2) + +range = hypot(xL - xS, yL - yS); +delta_x = xL - xS; +delta_y = yL - yS; + +dfr_dx = delta_x / range; +dfr_dy = delta_y / range; +dfr_dvx = 0; +dfr_dvy = 0; + +dftheta_dx = -delta_y / range^2; +dftheta_dy = delta_x / range^2; +dftheta_dvx = 0; +dftheta_dvy = 0; + +dfvx_dx = 0; +dfvx_dy = 0; +dfvx_dvx = 1; +dfvx_dvy = 0; + +dfvy_dx = 0; +dfvy_dy = 0; +dfvy_dvx = 0; +dfvy_dvy = 1; + +H = [ [dfr_dx, dfr_dy, dfr_dvx, dfr_dvy]; [dftheta_dx, dftheta_dy, dftheta_dvx, dftheta_dvy]; [dfvx_dx, dfvx_dy, dfvx_dvx, dfvx_dvy ]; [dfvy_dx, dfvy_dy, dfvy_dvx, dfvy_dvy] ]; + +end \ No newline at end of file diff --git a/ConvertPlusMinusPi.m b/ConvertPlusMinusPi.m new file mode 100644 index 0000000..591f6de --- /dev/null +++ b/ConvertPlusMinusPi.m @@ -0,0 +1,8 @@ +function theta = ConvertPlusMinusPi(theta) + while(theta > pi) + theta = theta - 2 * pi; + end + while(theta < -pi) + theta = theta + 2 * pi; + end +end \ No newline at end of file diff --git a/GM_EKF_PHD_Construct_Update_Components.m b/GM_EKF_PHD_Construct_Update_Components.m new file mode 100644 index 0000000..d164b15 --- /dev/null +++ b/GM_EKF_PHD_Construct_Update_Components.m @@ -0,0 +1,62 @@ +%% GM_EKF_PHD_Construct_Update_Components +%Matlab code by Bryan Clarke b.clarke@acfr.usyd.edu.au + +%This file creates the components needed for performing an extended Kalman filter update on the +%targets using the measurement. +s = sprintf('Step 3: Constructing update components for all targets, new and existing.'); +disp(s); + +%We need to clear the data structures each iteration +eta = []; +S = []; +K = []; +P_k_k = []; + +xR = x_sensor(1); +yR = x_sensor(2); +hR = x_sensor(3); +R_polar = calculate_R_polar(sigma_r, sigma_theta);%Sensor covariance, polar + +for j = 1:numTargets_Jk_k_minus_1 + + m_j = mk_k_minus_1(:,j); + eta_j = zeros(4,1);%Expected observation + eta_j(1:2) = h(xR,yR,hR,m_j(1), m_j(2));%Observation model. + eta_j(3:4) = [m_j(3); m_j(4)];%Assume constant velocity. + P_range = calculateDataRange4(j); %4x4 array + + H = calculate_Jacobian_H(xR, yR, m_j(1), m_j(2)); + + PHt = Pk_k_minus_1(:,P_range) * H'; %Taken from Tim Bailey's EKF code. 4x2 array + + r = eta_j(1); + theta = eta_j(2); + %We need to extend the sensor covariance R to include the covariance of + %the speed estimation. See GM_PHD_Initialise_Jacobians for details on + %this calculation. + R_cartesian = calculate_R_cartesian(R_polar, r, theta, hR);%sensor covariance, cartesian + R_velocity = calculate_R_velocity_cartesian(Pk_k_minus_1(1:2,P_range(1:2)), R_cartesian, dt);%We need the landmark position uncertainty, the sensor uncertainty (in cartesian space) and the time delta + R = [ [R_polar, zeros(2,2)]; [zeros(2,2), R_velocity] ]; + + %Calculate K via Tim Bailey's method. + S_j = U * R * U' + H * PHt; %U is set in GM_PHD_Initialise_Jacobians. + + %At this point, Tim Bailey's code makes S_j symmetric. In this case, it leads to the matrix being non-positive definite a lot of the time and chol crashes. + %So we won't do that. 
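+    %If chol() does fail on your data (i.e. S_j is numerically non-positive-definite),
+    %one possible fallback - not used here, and the tolerance below is only a guess -
+    %is to add a small diagonal jitter before factorising:
+    %S_j = S_j + 1e-9 * eye(size(S_j,1));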
+ SChol= chol(S_j); + + SCholInv = SChol \ eye(size(SChol)); % triangular matrix, invert via left division + W1 = PHt * SCholInv; + + HPHt = H * PHt; + + K_j = W1 * SCholInv'; + + P_j = Pk_k_minus_1(:,P_range) - W1*W1';%4x4 array + %End Tim Bailey's code. + + eta = [eta, eta_j]; + S = [S, S_j]; + K = [K, K_j]; + P_k_k = [P_k_k, P_j]; +end \ No newline at end of file diff --git a/GM_EKF_PHD_Create_Birth.m b/GM_EKF_PHD_Create_Birth.m new file mode 100644 index 0000000..bf20373 --- /dev/null +++ b/GM_EKF_PHD_Create_Birth.m @@ -0,0 +1,156 @@ +%GM_EKF_PHD_Create_Birth +%Matlab code by Bryan Clarke b.clarke@acfr.usyd.edu.au + +%This file performs the processing on measurements to extract new targets +%and populates the birth lists (mean, weight, covariance) needed to instantiate them next iteration. + +%This is not formally spelt out in Vo&Ma so I have used my best judgement to +%come up with a method to initialise targets. Targets are initialised as +%static (using the position of current measurements) and/or dynamic (using +%two generations of measurements and calculating the movement between). +%If only static targets are added, the number of new targets = the number +%of measurements. +%If only dynamic targets are added, the number of new targets = the number of +%measurements this teration * the number of measurements last iteration +%If both static and dynamic targets are added, the number of new targets is +%equal to the sum of these. +disp('Step 7: Creating new targets from measurements, for birthing next iteration'); +w_birth = []; +m_birth = []; +P_birth = []; +w_spawn = []; +m_spawn = []; +P_spawn = []; +numBirthedTargets = 0; +numSpawnedTargets = 0; + +%We create targets using two generations of measurements. +%The first generation is used to calculate the velocity (dx/dt) to the +%second generation. +%The second generation gives the position. +%We also add a set of targets from the second generation but with velocity +%zero, since some may be unlinked to the first generation, but we have no +%velocity information for them. +if((addVelocityForNewTargets == true) && (k >= 2))%If we want to add targets with initial velocities.If only one iteration complete, cannot calculate velocity + %Each measurement consists of 2 rows + thisMeasRowRange = k; + prevMeasRowRange = k-1; + + thisMeas = simMeasurementHistory{thisMeasRowRange}; + prevMeas = simMeasurementHistory{prevMeasRowRange}; + + for j_this = 1:size(thisMeas,2) + for j_prev = 1:1:size(prevMeas,2)%Match every pair from previous to current + z_this = thisMeas(:,j_this); + z_prev = prevMeas(:, j_prev); + %Calculate and add the velocity. + m_i = inv_h(z_this(1), z_this(2), x_sensor(1), x_sensor(2), x_sensor(3)); + thisV = (m_i(1:2) - inv_h(z_prev(1), z_prev(2), x_sensor(1), x_sensor(2), x_sensor(3))) / dt; + if(abs(thisV(1)) > MAX_V) || (abs(thisV(2)) > MAX_V) + continue;%To reduce the number of targets added, we filter out the targets with unacceptable velocities. + end + %Augment with velocity + m_i = [m_i; thisV]; + + %Decide if the target is birthed (from birth position) + %or spawned (from an existing target) + %Initialise the weight to birth + birthWeight = birth_intensity(m_i); + %Targets can also spawn from existing targets. 
We will + %take whichever is a higher weight - birthing or + %spawning + nTargets = size(X_k, 2); + maxSpawnWeight = -1; + for targetI = 1:nTargets + thisWeight = spawn_intensity(m_i, X_k(:,targetI)) * X_k_w(targetI);%Spawn weight is a function of proximity to the existing target, and the weight of the existing target. + if(thisWeight > maxSpawnWeight) + maxSpawnWeight = thisWeight; + end + end + %Check if birthing had higher weight. + if(birthWeight > maxSpawnWeight) + %Birth the target + w_i = birthWeight; + %Initialise the covariance + P_i = covariance_birth; + w_birth = [w_birth, w_i]; + m_birth = [m_birth m_i]; + P_birth = [P_birth, P_i]; + numBirthedTargets = numBirthedTargets + 1; + else + %Spawn the target + w_i = maxSpawnWeight; + %Initialise the covariance + P_i = covariance_spawn; + w_spawn = [w_spawn, w_i]; + m_spawn = [m_spawn, m_i]; + P_spawn = [P_spawn, P_i]; + numSpawnedTargets = numSpawnedTargets + 1; + end + end + end +end +%If we want to add targets, treating them as if they are +%static. +if (addStaticNewTargets == true) + thisMeasRowRange = k; + thisMeas = simMeasurementHistory{thisMeasRowRange}; + for j_this = 1:size(thisMeas,2) %Each measurement consists of 2 rows + + %Add a static target + z_this = thisMeas(:,j_this); + m_i = inv_h(z_this(1), z_this(2), x_sensor(1), x_sensor(2), x_sensor(3)); + + m_i(3:4) = [0; 0]; + + %Decide if the target is birthed (from birth position) + %or spawned (from an existing target) + %Initialise the weight to birth + birthWeight = birth_intensity(m_i); + %Targets can also spawn from existing targets. We will + %take whichever is a higher weight - birthing or + %spawning + nTargets = size(X_k, 2); + maxSpawnWeight = -1; + for targetI = 1:nTargets + thisWeight = spawn_intensity(m_i, X_k(:,targetI)) * X_k_w(targetI);%Spawn weight is a function of proximity to the existing target, and the weight of the existing target. + if(thisWeight > maxSpawnWeight) + maxSpawnWeight = thisWeight; + end + end + %Check if birthing had higher weight. + if(birthWeight > maxSpawnWeight) + %Birth the target + w_i = birthWeight; + %Initialise the covariance + P_i = covariance_birth; + w_birth = [w_birth, w_i]; + m_birth = [m_birth m_i]; + P_birth = [P_birth, P_i]; + numBirthedTargets = numBirthedTargets + 1; + else + %Spawn the target + w_i = maxSpawnWeight; + %Initialise the covariance + P_i = covariance_spawn; + w_spawn = [w_spawn, w_i]; + m_spawn = [m_spawn m_i]; + P_spawn= [P_spawn, P_i]; + numSpawnedTargets = numSpawnedTargets + 1; + end + end +end + + +if VERBOSE == 1 + for j = 1:numBirthedTargets + thisM = m_birth(:,j); + s = sprintf('Target to birth %d: %3.4f %3.4f %3.4f %3.4f Weight %3.9f', j, thisM(1), thisM(2), thisM(3), thisM(4), w_birth(j)); + disp(s); + end + for j = 1:numSpawnedTargets + thisM = m_spawn(:,j); + s = sprintf('Target to spawn %d: %3.4f %3.4f %3.4f %3.4f Weight %3.9f', j, thisM(1), thisM(2), thisM(3), thisM(4), w_spawn(j)); + disp(s); + end +end diff --git a/GM_EKF_PHD_Initialise_Jacobians.m b/GM_EKF_PHD_Initialise_Jacobians.m new file mode 100644 index 0000000..5f81747 --- /dev/null +++ b/GM_EKF_PHD_Initialise_Jacobians.m @@ -0,0 +1,294 @@ +%GM_EKF_PHD_Initialise_Jacobians +%Matlab code by Bryan Clarke b.clarke@acfr.usyd.edu.au + +%In this file we calculate the Jacobians for the EKF in the PHD filter. +%These are for the motion and observation models, and the noise on each of +%these. 
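+%In summary, this script defines: the observation model h and its inverse inv_h, the
+%observation Jacobian calculate_Jacobian_H and measurement-noise Jacobian U, the
+%measurement noise terms (sigma_r, sigma_theta, calculate_R_polar, calculate_R_cartesian,
+%calculate_R_velocity_cartesian), and the prediction Jacobians calculate_Jacobian_F and
+%calculate_Jacobian_G together with the process noise covariance Q.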
+ +%Many of these are implemented as anonymous functions; if you are not familiar +%with these, see http://www.mathworks.com.au/help/matlab/matlab_prog/anonymous-functions.html + +%% SENSOR (MEASUREMENT) (OBSERVATION) MODELLING +%We use the terms sensor model, measurement model and observation model +%interchangeably throughout the code. + +%For a linear Kalman filter, the observation matrix is H. +%If we assume that we can directly observe all the dimensions of +%the state, H = eye(4) since the state X = [x; y; vx; vy] +%If we assume we can only observe position and not velocity, +%H = [eye(2), zeros(2)] + +%For a nonlinear extended Kalman filter: +% - the observation model is h. +% z = h(X) +% h maps from the state space to the measurement space. +% - the inverse model is h^-1 or inv_h. +% X = inv_h(z) +% inv_h maps from the measurement space to the state space. +% - the jacobian is h' or H +% H = h' = dh/dX + +%% Observation model h +%h converts a state to an expected observation +% z = h(X) +%It is a function of the target state AND sensor noise AND the sensor position. +%So the full function is z = h(X, epsilon, x_sensor) +%We only consider epsilon in Jacobian calculation, but we use x_sensor in he function h(). +%We are using a range-bearing sensor z = [r; theta] for r in m, theta in radians +%State-space X = [x; y; vx; vy] but we neglect velocity for now. +%Bearing 0 is straight ahead of the sensor, positive is left, negative is right +%We will treat the sensor as being oriented to point along the global +%X-axis (i.e. sensor points to right in global frame). +%As in this diagram: +% __O <--target +% ___/ | +% ___/ |delta_y +%/ t | t is theta +%S--------->| +% <-delta_x-> +%^sensor is in the bottom left corner of the triangle +%Therefore the mapping is: +%r = (delta_x^2 + delta_y^2)^1/2 (Pythagoras' theorem, where delta_x and delta_y are the range from the sensor to the target in X and Y in local cartesian coordinates) +%theta = atan2(delta_y / delta_x) (trigonometry) + +%xS and yS are the global coordinates of the sensor in X and Y +%hS is the heading of the sensor (relative to the global frame, 0 = to the right along the global X axis, positive left to pi, negative right to -pi). +%xL and yL are the global coordinates of the landmark in X and Y +%The returned range and bearing are in local sensor-centric coordinates +%(bearing 0 is directly in front of the sensor, positive is to the left) +h = @(xS, yS, hS, xL, yL) [hypot(xL - xS, yL - yS); atan2(yL - yS, xL - xS) - hS];%Subtract hS to convert to local coordinates, as the sensor would observe + +%% Jacobian H of observation model calculations +%H = dh(x_k,0)/dx_k where x_k = m_k|k-1 for observation model z = h(x_k, eps_k) where x_k is +%target state and eps_k is the noise. +%There are three ways of calculating this: +%1. Numerically, using GM_EKF_PHD_Numerical_Jacobian.m +%2. Analytically, using the function Calculate_Jacobian_H.m +%3. Analytically, using an anonymous function that's identical to +%Calculate_Jacobian_H.m but has less documentation. +%To compare performance of these three, see Test_Jacobian_Calculation.m +%(the run-and-time feature is particularly useful to see how the numerical +%method is slower and the other two are equivalent). +%The advantage of the numerical method is that you can change the +%observation model without having to re-calculate the Jacobian manually +%(but you need to make sure that it takes in arguments the same way, or +%change GM_EKF_PHD_Numerical_Jacobian to take them in however you want). 
+%The disadvantage of the numerical method is that it is much slower (about +%7 times slower when I tested it). +%For now, I'll use the anonymous method as it takes up less space, but if you +%want to know how I derived it (it involves plugging equations into Wolfram +%Alpha) see Calculate_Jacobian_H.m +%If you want to change which one is used, uncomment/recomment as +%appropriate. +%Anonymous Jacobian calculation function: +calculate_Jacobian_H = @(xR,yR,xL,yL)[ [(xL - xR) / hypot(xL - xR,yL - yR) , (yL - yR) / hypot(xL - xR,yL - yR), 0, 0]; [ (yR - yL)/((xL - xR)^2 + (yL - yR)^2), (xL - xR)/((xL - xR)^2 + (yL - yR)^2), 0, 0]; [0 0 1 0]; [0 0 0 1] ]; +%Jacobian calculation function: +% calculate_Jacobian_H = @(xR,yR,xL,yL) Calculate_Jacobian_H(xR, yR, xL, yL); +%Numerical Jacobian function: +% calculate_Jacobian_H = @(xR,yR,xL,yL) GM_PHD_Numerical_Jacobian(h, [xR,yR,0],[xL,yL, 0, 0]);%h is the observation model, defined above. + +%% Inverse observation model inv_h +%inv_h converts an observation to an expected state +% X = inv_h(z) +% z = [r; theta] in metres and radians. +%r >= 0 and is univseral for local or global coordinates +% -pi < theta <= pi and is in local coordinates (local = sensor relative). +%Positive theta is CCW (pi/2 is left, 0 is up, -pi/2 is right) +%Even though state X is four-dimensional (x, y, vx, vy) we cannot infer +%velocity information from a single observation; it requires at least two. +%Therefore we will return a shortened state vector [x; y] from inv_h and +%pad it with zeros later. +%x right is positive, y up is positive, and both are in global coordinates. +%Zero X at theta = +-pi/2, positive X at -pi/2 < theta < pi/2 negative X at abs(theta) > pi/2 +%x = r * cos(theta + pi/2) +%Zero Y at theta = 0 or pi, positive theta at 0 < theta < pi, negative, +%negative Y at theta < 0 +%y = r * sin(theta + pi/2) +%r is range to target (rad), theta is bearing to target (rad), hS is bearing of +%sensor in global coordinates (rad) +inv_h = @(r, theta, xS, yS, hS) [r .* cos(theta + hS) + xS; r .* sin(theta + hS) + yS]; + +%% Measurement covariance R calculation +%Measurement is 2x1 matrix +%Z = [r; theta] +%We augment this to include an implicit velocity observation, taken from a +%sequence of two consecutive sets of measurements, v = (newPos - oldPos)/dt +%for newPos and oldPos in global coordinates. +%Z = [r; theta; vx; vy]; +%Including noise (the covariance of which is R) +%Z = [r + sigma_r; theta + sigma_theta; vx + sigma_vx; vy + sigma_vy] + +%R = [ [sigma_r^2, 0]; [0, sigma_theta^2] ]; is the covariance of a range and bearing measurement in polar coordinates +%For sigma_r: For this simulation the value of sigma_r is kind of +%arbitrary. Let's just say 5m for simplicity. +sigma_r = 5; %Sensor range standard deviation in m +%For sigma_theta: Also kind of arbitrary, so we'll say 2 degrees +sigma_theta = deg2rad(2); %Sensor bearing standard deviation in radians +%NOTE: These values are also used in GM_EKF_PHD_Simulate_Measurements to +%generate noise for measurements, so will have an effect on the simulation. This is most +%noticeable at long ranges from the sensor, where the error can grow quite +%large due to bearing error. +calculate_R_polar = @(sigma_r, sigma_theta) [[sigma_r^2, 0]; [0, sigma_theta^2]]; + +%For sigma_vx and sigma_vy: +%V = (inv_h(Z) - m_i(1:2))/dt +% V = f(X, dt) = [vx; vy] where f is a function that calculates velocity. 
+% X = [X1; X2] +% X1 = m_i = [x1; y1] +% X2 = inv_h(z) = [x2; y2] +% z = [r; theta]; +% dt = either measured from a sensor or a constant, we assume it has +% covariance of zero (our clock is perfectly accurate) +% V = f(X, dt) = [ (x2 - x1)/dt; (y2 - y1)/dt] + +%The covariance of the velocity is dependent on the covariance of m_i which +%is stored in P_i) and the covariance of Z (which is R) + +% Jacobian_of_velocity_f = dV/dX = [dvx/dX; dvy/dX] +% Jacobian_of_velocity_f = [dvx/dx1, dvx/dx2 dvx/dy1 dvx/dy2] +% [dvy/dx1, dvy/dx2 dvy/dy1 dvy/dy2] +% dvx/dX = [-1/dt 1/dt 0 0] +% dvy/dX = [0 0 -1/dt 1/dt] +% Jacobian_of_velocity_f = @(dt) [ [-1/dt 1/dt 0 0]; [0 0 -1/dt 1/dt] +Jacobian_of_velocity_f = @(dt) [ [-1/dt 1/dt 0 0]; [0 0 -1/dt 1/dt] ]; + +% Covariance of V = sigma_v = Jacobian_of_velocity_f * P_x * Jacobian_of_velocity_f' where +% P_x = [covariance_of_x1; covariance_of_x2]; +% Covariance_of_x1 is stored in P (i.e. the uncertainty in target position) +% x2 = z = [r; theta] in polar coordinates +% Covariance_of_x2 = R, but it is in polar coordinates. + +% To convert R to Cartesian coordinates +% z_c = inv_h(x2) = [r * cos(theta); r * sin(theta)]; +% R_c = Jacobian_of_inv_h * R * Jacobian_of_h' +% Jacobian_of_inv_h = dinv_h/dx2 = dinv_h/dz since x2 = measurement vector +% dinv_h/dr = [cos(theta); sin(theta)]; +% dinv_h/dtheta = [-rsin(theta); rcos(theta)]; +% Jacobian_of_inv_h = [ [cos(theta), sin(theta)]; [-rsin(theta); rcos(theta)] ]; +% Jacobian_of_inv_h = [ dinv_h_x/dr dinv_h_x/dtheta] +% [ dinv_h_y/dr dinv_h_y/dtheta] +%dinv_h_x / dr = cos(theta) +%dinv_h_x / dtheta = -r*sin(theta) +%dinv_h_y / dr = sin(theta) +%dinv_h_y / dtheta = r*cos(theta) +% Jacobian_of_inv_h = [ [cos(theta), -rsin(theta)]; [sin(theta), r*cos(theta)]]; +calculate_Jacobian_of_inv_h = @(r, theta, rH) [ [cos(theta + rH), -r*sin(theta + rH)]; [sin(theta + rH), r*cos(theta + rH)] ]; +% Therefore +% R_c = Jacobian_of_inv_h * R * Jacobian_of_inv_h'; +% P_x = [P_x1; R_c]; +% sigma_v = Jacobian_of_f * P_x * Jacobian_of_f' +calculate_R_cartesian = @(R, r, theta, rH) calculate_Jacobian_of_inv_h(r, theta, rH) * R * calculate_Jacobian_of_inv_h(r, theta, rH)'; + +%To calculate sigma_v, calculate R_cartesian and pass it through along with +%dt and the 2x2 covariance of the landmark. +calculate_R_velocity_cartesian = @(P_landmark, R_c, dt) Jacobian_of_velocity_f(dt) * [[P_landmark, zeros(2,2)]; [zeros(2,2), R_c]] * Jacobian_of_velocity_f(dt)'; %2x4 * 4x4 * 4x2 = 2x2 matrix [[sigma_vx^2, 0]; [0, sigma_vy^2]] + +%% Jacobian U (Jacobian of measurement noise) calculation +%U = dh(m_k|k-1,eps_k)/deps_k where eps_k = 0 for observation model z = h(x_k, eps_k) where x_k is +%target state and eps_k is the noise. +%h = @(xR, yR, hR, xL, yL, vxL, vyL) [hypot(xL - xR, yL - yR) + eps_r; atan2(yL - yR, xL - xR) - pi/2 - hR + eps_theta; vxL + eps_vx; vyL + eps_vy]; +%We assume that the noise is independent of the state; range error is not +%proportional to range, velocity noise is not proportional to velocity, +%etc. +%dh1/deps_r = 1 +%dh1/deps_theta = 0 +%dh1/deps_vx = 0 +%dh1/deps_vy = 0 +%dh2/deps_r = 0 +%dh2/deps_theta = 1 +%dh2/deps_vx = 0 +%dh2/deps_vy = 0 +%etc; we end up with an identity matrix +%Jacobian U is dh/d_epsilon = eye(4) +U = eye(4); + +%% PREDICTION (MOTION) MODELLING +%% Jacobian F calculations +%We use the terms prediction model and motion model interchangeably +%throughout the code. 
+
+%For a linear Kalman filter, the prediction matrix is F_k-1
+%For a nonlinear extended Kalman filter, the prediction function is phi and
+%it takes the state as an argument.
+%phi(m_k, dt) = phi([x; y; vx; vy], dt) = [x + vx * dt; y + vy * dt; vx; vy]
+%This is for a point target moving with near-constant velocity, and neglects acceleration ax and ay, angular velocity omega, angular
+%acceleration alpha (and probably a bunch of other terms).
+%TODO: UPDATE. A constant velocity model is simple to implement but not
+%ideal.
+
+%State X = [x; y; vx; vy]
+%Prediction function phi(X,dt) = [x + vx * dt; y + vy * dt; vx; vy]
+%We take a Jacobian F = d_phi / dx |x = x_k-1
+%i.e. going across differentiate phi1 by x1, x2 ...
+%going down differentiate phi1, phi2 ... by x1
+%So row i is the gradient of the ith component function
+%F = [ d(phi_x) / dx d(phi_x) / dy d(phi_x) / dvx d(phi_x) / dvy ]
+% [ d(phi_y) / dx d(phi_y) / dy d(phi_y) / dvx d(phi_y) / dvy ]
+% [ d(phi_vx) / dx d(phi_vx) / dy d(phi_vx) / dvx d(phi_vx) / dvy ]
+% [ d(phi_vy) / dx d(phi_vy) / dy d(phi_vy) / dvx d(phi_vy) / dvy ]
+%F = [1 0 dt 0 ]
+% [0 1 0 dt ]
+% [0 0 1 0 ]
+% [0 0 0 1 ]
+%So, Jacobian F of prediction model
+calculate_Jacobian_F = @(dt) [ [1 0 dt 0]; [0 1 0 dt]; [0 0 1 0]; [0 0 0 1] ];
+
+%% Jacobian G (Jacobian of prediction covariance) calculations
+%The previous prediction function in the working for F was simplified to
+%not include noise. Now we include noise (upsilon) in our calculations.
+%Including noise, the prediction function is:
+
+%phi_x = x + upsilon_x + (vx + upsilon_vx) * dt
+%where estimated velocity is vx, true velocity is vx + upsilon_vx.
+%upsilon_x is some sort of disturbance in position unrelated to velocity.
+
+%phi_y = y + upsilon_y + (vy + upsilon_vy) * dt
+%Estimated velocity is vy, true velocity is vy + upsilon_vy.
+%upsilon_y is some sort of disturbance in position unrelated to velocity.
+
+%phi_vx = vx + upsilon_vx
+%Estimated velocity is vx, true velocity is vx + upsilon_vx where
+%upsilon_vx is some sort of disturbance in velocity; in our simplified
+%constant-velocity model it's probably largely due to acceleration
+
+%phi_vy = vy + upsilon_vy
+%Estimated velocity is vy, true velocity is vy + upsilon_vy, where
+%upsilon_vy is some sort of disturbance in velocity; in our simplified
+%constant-velocity model it's probably largely due to acceleration
+
+%We need GQG' to be a 4x4 matrix. Therefore we will make G a 4x4 matrix
+%Differentiating prediction model phi([x; y; vx; vy], dt) by the error
+%model [upsilon_x; upsilon_y; upsilon_vx; upsilon_vy]
+%phi = [phi_x; phi_y; phi_vx; phi_vy]
+%upsilon = [upsilon_x; upsilon_y; upsilon_vx; upsilon_vy]
+%phi_x, phi_y, phi_vx and phi_vy including upsilon terms are defined above.
+ +%G = d_phi / d_upsilon +%G = [dphi_x / dupsilon_x, dphi_x / dupsilon_y, dphi_x / dupsilon_vx, dphi_x / dupsilon_vy] +% [dphi_y / dupsilon_x, dphi_y / dupsilon_y, dphi_y / dupsilon_vx, dphi_y / dupsilon_vy] +% [dphi_vx / dupsilon_x, dphi_vx / dupsilon_y, dphi_vx / dupsilon_vx, dphi_vx / dupsilon_vy] +% [dphi_vy / dupsilon_x, dphi_vy / dupsilon_y, dphi_vy / dupsilon_vx, dphi_vy / dupsilon_vy] +%G = [1 0 dt 0] +% [0 1 0 dt] +% [0 0 1 0 ] +% [0 0 0 1 ] +calculate_Jacobian_G = @(dt) [[ 1, 0, dt, 0]; [0, 1, 0, dt]; [0, 0, 1, 0]; [0, 0, 0, 1] ]; + +%% Covariance matrix Q calculation +%Q is the covariance of the prediction. The one given by Vo & Ma for the +%linear problem doesn't seem to work with this EKF implementation; we end up with too many +%false targets. I might look into this eventually and come up with a better calculation. +%The values below are an effective hack and easy to modify if you want to. +upsilon_x = 1; +upsilon_y = 1; +upsilon_vx = 1; +upsilon_vy = 1; +Q = eye(4); +Q(1,1) = upsilon_x^2; +Q(2,2) = upsilon_y^2; +Q(3,3) = upsilon_vx^2; +Q(4,4) = upsilon_vy^2; \ No newline at end of file diff --git a/GM_EKF_PHD_Predict_Birth.m b/GM_EKF_PHD_Predict_Birth.m new file mode 100644 index 0000000..5ba91a9 --- /dev/null +++ b/GM_EKF_PHD_Predict_Birth.m @@ -0,0 +1,57 @@ +%GM_EKF_PHD_Predict_Birth +%Matlab code by Bryan Clarke b.clarke@acfr.usyd.edu.au + +%This file performs prediction for newly birthed and spawned targets +%This is necessary since their positions were initialised in a previous timestep and they may +%have moved between then and now +%We iterate through j = 1..J_b,k where J_b,k is number of birthed landmarks +%at time k (but the landmarks correspond to the measurement from time k-1) +%and j = 1...J_s,k where J_s,k is the number of spawned landmarks at time k +%(but the landmarks correspond to the measurement from time k-1). +%For this implementation, birthed and spawned targets are identical except +%they have weights that are calculated from different functions, and +%different starting covariances. A target is spawned or birthed depending +%on which weighting function will give it a higher starting weight. + +%The means of these will be the position in Cartesian space where they are detected +%The covariances & weights are calculated according to Vo&Ma +s = sprintf('Step 1: Prediction for birthed and spawned targets.'); +disp(s); + +m_birth_before_prediction = [m_birth, m_spawn]; %Need to store these BEFORE prediction for use in the update step. + +%Perform prediction for birthed targets using birthed velocities. +for j = 1:numBirthedTargets + i = i + 1; + + F = calculate_Jacobian_F(dt); + G = calculate_Jacobian_G(dt); + m_birth(:,j) = F * m_birth(:,j); + + P_range = calculateDataRange4(j); + P_birth(:,P_range) = G * Q * G' + F * P_birth(:,P_range) * F'; +end + +%Perform prediction for spawned targets using spawned velocities. 
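+%(This mirrors the birthed-target loop above: m_spawn <- F * m_spawn and
+%P_spawn <- G * Q * G' + F * P_spawn * F'.)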
+for j = 1:numSpawnedTargets + F = calculate_Jacobian_F(dt); + G = calculate_Jacobian_G(dt); + m_spawn(:,j) = F * m_spawn(:,j); + + P_range = calculateDataRange4(j); + P_spawn(:,P_range) = G * Q * G' + F * P_spawn(:,P_range) * F'; +end + +if(VERBOSE == 1) + for j = 1:numBirthedTargets + thisM = m_birth(:,j); + s = sprintf('Birthed target %d: %3.4f %3.4f %3.4f %3.4f Weight %3.9f', j, thisM(1), thisM(2), thisM(3), thisM(4), w_birth(j)); + disp(s); + end + for j = 1:numSpawnedTargets + thisM = m_spawn(:,j); + s = sprintf('Spawned target %d: %3.4f %3.4f %3.4f %3.4f Weight %3.9f', j, thisM(1), thisM(2), thisM(3), thisM(4), w_spawn(j)); + disp(s); + end +end + diff --git a/GM_EKF_PHD_Predict_Existing.m b/GM_EKF_PHD_Predict_Existing.m new file mode 100644 index 0000000..d073e5b --- /dev/null +++ b/GM_EKF_PHD_Predict_Existing.m @@ -0,0 +1,49 @@ +%GM_EKF_PHD_Predict_Existing +%Matlab code by Bryan Clarke b.clarke@acfr.usyd.edu.au + +%This file performs prediction for existing targets using a simple +%constant-velocity model. +s = sprintf('Step 2: Prediction for existing targets.'); +disp(s); + +mk_k_minus_1_before_prediction = mk_minus_1; + +for j = 1:size(mk_minus_1,2) + F = calculate_Jacobian_F(dt);%Jacobian of prediction + G = calculate_Jacobian_G(dt);% + + wk_minus_1(j) = prob_survival * wk_minus_1(j); + mk_minus_1(:,j) = F * mk_minus_1(:,j); %Assume constant velocity. + + P_range = calculateDataRange4(j); + + %Q is set in GM_PHD_Initialise_Jacobians + P_i = G * Q * G' + F * Pk_minus_1(:,P_range) * F'; + + prevState = mk_k_minus_1_before_prediction(:,j); + newState = mk_minus_1(:,j); + + Pk_minus_1(:,P_range) = P_i; + + if(VERBOSE == 1) + s = sprintf('\t\tExisting target %d. Previously at %3.4f %3.4f, now at %3.4f %3.4f.', j, prevState(1), prevState(2), newState(1), newState(2)); + disp(s); + + s = sprintf('\t\tP was %3.4f %3.4f, NOW %3.4f %3.4f', Pk_minus_1(1,P_range(1)), Pk_minus_1(2,P_range(2)), P_i(1,1), P_i(2,2)); + disp(s); + end +end + +%% Now we combine the birthed targets with the existing ones. +%Append newly birthed and spawned targets to back of old ones +wk_k_minus_1 = [wk_minus_1, w_birth, w_spawn]; +mk_k_minus_1 = [mk_minus_1, m_birth, m_spawn]; +Pk_k_minus_1 = [Pk_minus_1, P_birth, P_spawn]; +numTargets_Jk_k_minus_1 = numTargets_Jk_minus_1 + numBirthedTargets + numSpawnedTargets; +%Create a backup to allow for augmenting the measurement in the update +mk_k_minus_1_before_prediction = [mk_k_minus_1_before_prediction, m_birth_before_prediction];%m_birth_before_prediction also contains the spawned targets before prediction + +if(VERBOSE == 1) + s = sprintf('\tPerformed prediction for %d birthed and existing targets in total.', numTargets_Jk_k_minus_1); + disp(s); +end \ No newline at end of file diff --git a/GM_EKF_PHD_Simulate_Initialise.m b/GM_EKF_PHD_Simulate_Initialise.m new file mode 100644 index 0000000..d774da0 --- /dev/null +++ b/GM_EKF_PHD_Simulate_Initialise.m @@ -0,0 +1,79 @@ +%GM_EKF_PHD_Simulate_Initialise +%Matlab code by Bryan Clarke b.clarke@acfr.usyd.edu.au + +%We implement the simulation described in example 1 of Vo&Ma 2006, the one +%designed for the linear KF PHD filter. Although a second, different +%example is listed for the EKF version, I haven't gotten around to +%implementing it yet. + +%Most of this is unchanged from the example used in the linear KF PHD +%filter. The big changes for the EKF are in +%GM_EKF_PHD_Initialise_Jacobians. + +%% Control parameters +x_sensor = [0; 0; 0];%The position of the simulated range-bearing sensor. 
Heading 0 is along the positive X-axis. Counter clockwise is positive, clockwise is negative, bearings are bounded -pi < theta <= pi +noiseScaler = 1.0;%Adjust the strength of the noise on the measurements by adjusting this +nClutter = 50; %Assume constant 50 clutter measurements. Since clutter is Poisson distrbuted it might be more accurate to use nClutter = poissrnd(50) if you have the required Matlab toolbox. Constant 50 clutter works well enough for simulation purposes. + +%% Target parameters +endTime = 100;%Duration of main loop. +simTarget1Start = birth_mean1; +simTarget2Start = birth_mean2; + +simTarget1End = [500, -900]'; +simTarget2End = [900, -500]'; +simTarget3End = [-200, -750]'; + +simTarget1Vel = (simTarget1End(1:2) - simTarget1Start(1:2)) / endTime; +simTarget2Vel = (simTarget2End(1:2) - simTarget2Start(1:2)) / endTime; +simTarget3Vel = [-20; -4]; +simTarget1Start(3:4) = simTarget1Vel; +simTarget2Start(3:4) = simTarget2Vel; +simTarget3Start(3:4) = simTarget3Vel; + +simTarget1History = simTarget1Start; +simTarget2History = simTarget2Start; +simTarget3History = []; + +simTrueMeasurementHistory = []; +simNoisyMeasurementHistory = {};%We use a cell array so that we can have rows of varying length. +simVehicleStateHistory = {};%We store a history of vehicle positions at the same time as measurements are made. + +simTarget1State = simTarget1Start; +simTarget2State = simTarget2Start; +simTarget3State = []; + +simTarget3SpawnTime = 66;%Target 3 is spawned from target 1 at t = 66s + +%Set up for plot +%Measurements and targets plot +figure(1); +clf; +hold on; +axis([-1000 1000 -1000 1000]); +xlim([-1000 1000]); +ylim([-1000 1000]); + +%X and Y measurements plot +figure(2); +clf; +subplot(2,1,1); +hold on; +axis([0, 100, -1000, 1000]); +xlabel('Simulation step'); +ylabel('X position of measurement (m)'); +title('Measurement X coordinates'); +subplot(2,1,2); +hold on; +axis([0, 100, -1000, 1000]); +xlabel('Simulation step'); +ylabel('Y position of measurement (m)'); +title('Measurement Y coordinates'); + +%Performance metric plot +if(CALCULATE_OSPA_METRIC == 1) + figure(3); + clf; + xlabel('Simulation step'); + ylabel('OSPA error metric (higher is worse)'); +end \ No newline at end of file diff --git a/GM_EKF_PHD_Simulate_Measurements.m b/GM_EKF_PHD_Simulate_Measurements.m new file mode 100644 index 0000000..c1bb214 --- /dev/null +++ b/GM_EKF_PHD_Simulate_Measurements.m @@ -0,0 +1,98 @@ +%GM_PHD_Simulate_Measurements +%Matlab code by Bryan Clarke b.clarke@acfr.usyd.edu.au + +%This file generates simulated measurement data for the linear KF simulation +%described in Vo&Ma. There is a nonlinear simulation described in the paper +%but I have not gotten around to implementing it. +%The sensor measurements are range-bearing measurements, similar to those +%from an FMCW radar. The expected error magnitude is independent of +%measurement (i.e. range error is not proportional to range, bearing error +%is not proportional to bearing). + +%There will be gaussian noise on the measurement and Poisson-distributed clutter +%in the environment. 
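+%Each measurement is a 2x1 column z = [r; theta] in sensor-local polar coordinates.
+%Clutter is drawn uniformly over the [xrange, yrange] surveillance region in Cartesian
+%space and converted to range-bearing through the observation model h.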
+%Note: It is possible to get no measurements if the target is not detected
+%and there is no clutter.
+
+%If you want to use this PHD filter implementation for another problem, you
+%will need to replace this script with another one that populates Z,
+%zTrue, and simMeasurementHistory (Z is used in a lot of the update code,
+%zTrue and simMeasurementHistory are used in GM_PHD_Simulate_Plot, and
+%simMeasurementHistory is used in GM_EKF_PHD_Create_Birth)
+
+s = sprintf('Step Sim: Simulating measurements.');
+disp(s);
+
+%Simulate target movement
+F = calculate_Jacobian_F(dt);
+simTarget1State = F * simTarget1State;
+simTarget2State = F * simTarget2State;
+if(~isempty(simTarget3State))
+    simTarget3State = F * simTarget3State;
+end
+%Spawn target 3 when k = simTarget3SpawnTime (66)
+if(k == simTarget3SpawnTime)
+    simTarget3State = simTarget1State;
+    simTarget3State(3:4) = simTarget3Vel;
+end
+
+%Save target movement
+simTarget1History = [simTarget1History, simTarget1State];
+simTarget2History = [simTarget2History, simTarget2State];
+simTarget3History = [simTarget3History, simTarget3State];
+
+%First, we generate some clutter in the environment.
+clutter = zeros(2,nClutter);%Clutter positions are generated in Cartesian [x; y] and stored here as range-bearing [r; theta]
+for i = 1:nClutter
+    clutterX = rand * (xrange(2) - xrange(1)) + xrange(1); %Random number between xrange(1) and xrange(2), uniformly distributed.
+    clutterY = rand * (yrange(2) - yrange(1)) + yrange(1); %Random number between yrange(1) and yrange(2), uniformly distributed.
+
+    clutterRTheta = h(x_sensor(1), x_sensor(2), x_sensor(3), clutterX, clutterY);
+
+    clutter(:,i) = clutterRTheta;
+end
+
+%We are not guaranteed to detect the target - there is only a probability
+%that we will, controlled by prob_detection
+detect1 = rand;
+detect2 = rand;
+detect3 = rand;
+%Target 1
+if(detect1 > prob_detection)
+    measR1 = [];
+    measTheta1 = [];
+else
+    simMeas1 = h(x_sensor(1), x_sensor(2), x_sensor(3), simTarget1State(1), simTarget1State(2));%Generate measurement
+    measR1 = simMeas1(1) + sigma_r * randn * noiseScaler;%Add Gaussian noise. We could add the noise at the measurement-generating stage, by adding it to the simulated target state.
+    measTheta1 = simMeas1(2) + sigma_theta * randn * noiseScaler;%Add Gaussian noise. We could add the noise at the measurement-generating stage, by adding it to the simulated target state.
+end +%Target 2 +if(detect2 > prob_detection) + measR2 = []; + measTheta2 = []; +else + simMeas2 = h(x_sensor(1), x_sensor(2), x_sensor(3), simTarget2State(1), simTarget2State(2)); + measR2 = simMeas2(1) + sigma_r * randn * noiseScaler; + measTheta2 = simMeas2(2) + sigma_theta * randn * noiseScaler; +end +%Target 3 +if(k >= simTarget3SpawnTime) && (detect3 <= prob_detection) + simMeas3 = h(x_sensor(1), x_sensor(2), x_sensor(3), simTarget3State(1), simTarget3State(2)); + measR3 = simMeas3(1) + sigma_r * randn * noiseScaler; + measTheta3 = simMeas3(2) + sigma_theta * randn * noiseScaler; +else + measR3 = []; + measTheta3 = []; +end + +%Generate true measurement +Z = [ [measR1 measR2 measR3]; [measTheta1 measTheta2 measTheta3] ]; +zTrue = Z; + +%Append clutter +Z = [Z, clutter]; +simMeasurementHistory{k} = Z; \ No newline at end of file diff --git a/GM_EKF_PHD_Simulate_Plot.m b/GM_EKF_PHD_Simulate_Plot.m new file mode 100644 index 0000000..540db22 --- /dev/null +++ b/GM_EKF_PHD_Simulate_Plot.m @@ -0,0 +1,94 @@ +%GM_EKF_PHD_Simulate_Plot +%Matlab code by Bryan Clarke b.clarke@acfr.usyd.edu.au +%error_ellipse is by AJ Johnson, taken from Matlab Central http://www.mathworks.com.au/matlabcentral/fileexchange/4705-errorellipse + +%This file plots the current measurements, true target position and estimated target +%position, as well as a history of estimated target positions. + +%Measurements + +figure(1) +%Plot all measurements, including clutter, as black 'x' +if(~isempty(Z)) + Z_xy = inv_h(Z(1,:), Z(2,:), zeros(size(Z(1,:))) + x_sensor(1), zeros(size(Z(1,:))) + x_sensor(2), zeros(size(Z(1,:))));%Add x_sensor(1) and x_sensor(2) to convert to global coordinates for plotting + plot(Z_xy(1,:), Z_xy(2,:) , 'xk'); +end + +%Plot noisy measurements of true target position(s), as black 'o' +if(~isempty(zTrue)) + Z_xy_true = inv_h(zTrue(1,:), zTrue(2,:), zeros(size(zTrue(1,:))) + x_sensor(1), zeros(size(zTrue(1,:))) + x_sensor(2), zeros(size(zTrue(1,:))));%Add x_sensor(1) and x_sensor(2) to convert to global coordinates for plotting + plot(Z_xy_true(1,:), Z_xy_true(2,:) , 'ok'); +end + +%Plot target 1 true position as red dots +plot(simTarget1History(1,:), simTarget1History(2,:), '.-r'); +%Plot target 2 true position as blue dots +plot(simTarget2History(1,:), simTarget2History(2,:), '.-b'); +%Plot target 3 true position as green dots +if(~isempty(simTarget3History)) + plot(simTarget3History(1,:), simTarget3History(2,:), '.-g'); +end +%Plot tracked targets as magenta circles +if(~isempty(X_k_history)) + plot(X_k_history(1,:), X_k_history(2,:), 'om'); +end + +xlabel('X position'); +ylabel('Y position'); +title('Simulated targets and measurements'); +axis square; + +%For extracted targets, plot latest target as cyan triangle, and draw an +%error ellipse to show uncertaintyty +if(~isempty(X_k)) + plot(X_k(1,:), X_k(2,:), '^c'); + [nRows, nCols] = size(X_k); + for c = 1:nCols + thisMu = X_k(1:2, c); + covRange = calculateDataRange4(c); + thisCov = X_k_P(:,covRange); + thisCov = thisCov(1:2, 1:2); %We only care about position + error_ellipse(thisCov, thisMu); + end + if(DRAW_VELOCITIES == 1) + %Draw velocities of targets + quiver(X_k(1,:), X_k(2,:), X_k(3,:), X_k(4,:)) + end +end + +%Measurements +figure(2); +hold on; +subplot(2,1,1); +if(~isempty(Z)) + plot(k, Z_xy(1,:), 'xk');%X coord of clutter measurements +end +if(~isempty(zTrue)) + plot(k, Z_xy_true(1,:), 'ok');%X coord of true measurement +end +if(~isempty(X_k)) + plot(k, X_k(1,:), 'om');%Estimated target +end +subplot(2,1,2); +hold on; +if(~isempty(Z)) 
+ plot(k, Z_xy(2,:), 'xk');%Y coord of clutter measurements +end +if(~isempty(zTrue)) + plot(k, Z_xy_true(2,:), 'ok');%Y coord of true measurement +end +if(~isempty(X_k)) + plot(k, X_k(2,:), 'om');%Estimated target +end + +%OSPA error metric plot +if(CALCULATE_OSPA_METRIC == 1) + figure(3); + clf; + hold on; + plot(metric_history, 'x-b'); + axis([0 100 0 cutoff_c]); + xlabel('Simulation step'); + ylabel('OSPA error metric (higher is worse)'); + title('OSPA error metric for this test'); +end diff --git a/GM_EKF_PHD_Update.m b/GM_EKF_PHD_Update.m new file mode 100644 index 0000000..ea01f64 --- /dev/null +++ b/GM_EKF_PHD_Update.m @@ -0,0 +1,131 @@ +%% GM_EKF_PHD_Update +%Matlab code by Bryan Clarke b.clarke@acfr.usyd.edu.au + +%This file performs a PHD filter update on the targets +%This is basically a brute-force Kalman update of every target with every +%measurement and creating a new target from the update results. + +s = sprintf('Step 4: Performing update.'); +disp(s); + +%Set up matrices for post-update filter state +w_k = zeros(1, numTargets_Jk_k_minus_1 * size(Z, 2) + numTargets_Jk_k_minus_1); +m_k = zeros(4, numTargets_Jk_k_minus_1 * size(Z, 2) + numTargets_Jk_k_minus_1); +P_k = zeros(size(Pk_k_minus_1,1), size(Pk_k_minus_1,1) * (numTargets_Jk_k_minus_1 * size(Z, 2) + numTargets_Jk_k_minus_1)); + +%First we assume that we failed to detect all targets. +%We scale all weights by probability of missed detection +%We already did the prediction step for these so their position & +%covariance will have been updated. What remains is to rescale their +%weight. +for j = 1:numTargets_Jk_k_minus_1 + w_k(j) = (1 - prob_detection) * wk_k_minus_1(j); + m_k(:,j) = mk_k_minus_1(:,j); + P_range = calculateDataRange4(j); + newP = Pk_k_minus_1(:,P_range); + P_k(:,P_range) = newP; +end + +%Now we update all combinations of matching every observation with every target in the +%map. +%First we expand the observation to include velocity (by simple v = delta_x/delta_t for +%delta_x = measured_position - position_before_prediction. +%That is, how fast would the target have needed +%to move to get from where it was to where it was seen now? +L = 0; +for zi = 1:size(Z,2) + L = L + 1;%L is used to calculate an offset from previous updates. It maxes out at L = number_of_measurements. A more elegant solution would be to set L = zi but I retain this method for compatibility with Vo&Ma + if(VERBOSE == 1) + s = sprintf('Matching targets against measurement %d', zi); + disp(s); + end + + for j = 1:numTargets_Jk_k_minus_1 + + m_j = mk_k_minus_1(:,j); + %Augment the measurement of position with calculated velocity + thisZ = Z(:,zi);%This consists of only the observed position. But we need to extract the equivalent velocity observation + prevX = mk_k_minus_1_before_prediction(1:2,j);%Get the old (pre-prediction) position of the target + thisV = (inv_h(thisZ(1),thisZ(2),x_sensor(1), x_sensor(2), x_sensor(3)) - prevX(1:2)) / dt;%velocity = dx / dt. Since Z and x(1:2) are 2d, V = [Vx Vy] + thisZ = [thisZ; thisV];%So we pretend to observe velocity as well as position + thisAugmentedZ(:, j + (zi - 1) * numTargets_Jk_k_minus_1) = thisZ; + + %If this is the first time a target has been observed, it will have + %no velocity stored. + %Therefore it will be missing a prediction update, which will + %impact both its position and its covariance. 
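+            %Indexing: entries 1 to numTargets_Jk_k_minus_1 of w_k/m_k/P_k hold the
+            %missed-detection components created above; each measurement zi then appends
+            %a further block of numTargets_Jk_k_minus_1 updated components, so this
+            %target-measurement pairing is stored at index L * numTargets_Jk_k_minus_1 + j.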
+ thisIndex = L * numTargets_Jk_k_minus_1 + j; + + old_P_range = calculateDataRange4(j);%Returns 4 columns + new_P_range = 4 * L * numTargets_Jk_k_minus_1 + old_P_range; + S_range = calculateDataRange4(j); + + %Recalculate weight. + w_new = prob_detection * wk_k_minus_1(j) * mvnpdf(thisZ(weightDataRange), eta(weightDataRange,j), S(weightDataRange,S_range(weightDataRange)));%Hoping normpdf is the function I need + w_k(thisIndex) = w_new; + + %Update mean + delta = thisZ - eta(:,j); + K_range = calculateDataRange4(j); + m_new = m_j + K(:,K_range) * delta; + m_k(:,thisIndex) = m_new; + + %Update covariance + P_new = P_k_k(:,old_P_range); + P_k(:,new_P_range) = P_new; + + if(VERBOSE == 1) + s1 = sprintf('Observation: %3.4f %3.4f %3.4f %3.4f', thisZ(1), thisZ(2), thisZ(3), thisZ(4)); + disp(s1); + thisEta = eta(:,j); + s2 = sprintf('Expected Obs: %3.4f %3.4f %3.4f %3.4f', thisEta(1), thisEta(2), thisEta(3), thisEta(4)); + disp(s2); + + s1 = sprintf('Before Update: %3.4f %3.4f %3.4f %3.4f %3.4f %3.4f', m_k(1), m_k(2), m_k(3), m_k(4)); + s2 = sprintf('After Update: %3.4f %3.4f %3.4f %3.4f', m_new(1), m_new(2), m_new(3), m_new(4)); + s3 = sprintf('True State: %3.4f %3.4f %3.4f %3.4f', simTarget1State(1), simTarget1State(2), simTarget1Vel(1), simTarget1Vel(2)); + disp(s1); + disp(s2); + disp(s3); + end + + end + + %Sum up weights for use in reweighting + weight_tally = 0; + for i = 1:numTargets_Jk_k_minus_1 + thisIndex = L * numTargets_Jk_k_minus_1 + i; + weight_tally = weight_tally + w_k(thisIndex); + end + + %Recalculate weights + if(VERBOSE == 1) + s = sprintf('Calculating new weights for observation %d', zi); + disp(s); + end + for j = 1:numTargets_Jk_k_minus_1 + old_weight = w_k(L * numTargets_Jk_k_minus_1 + j); +% measZ = [Z(1,zi), Z(2,zi)]; +% new_weight = old_weight / (clutter_intensity(measZ) + weight_tally);%Normalise + measZ_cartesian = inv_h(Z(1,zi), Z(2,zi), x_sensor(1), x_sensor(2), x_sensor(3)); + new_weight = old_weight / (clutter_intensity(measZ_cartesian') + weight_tally);%Normalise + w_k(L * numTargets_Jk_k_minus_1 + j) = new_weight; + + S_range = calculateDataRange4(j);%Data range for S, which is 4x4 matrix + if(VERBOSE == 1) + s = sprintf('\tNew target %d: Matching old target %d at %3.4f %3.4f to measurement %d at %3.4f %3.4f. Prob detection %3.4f. Old Weight %3.8f New Weight %3.8f Normalised New Weight: %3.8f', L * numTargets_Jk_k_minus_1 + j, j, mk_k_minus_1(1, j), mk_k_minus_1(2, j), zi, measZ_cartesian(1), measZ_cartesian(2), mvnpdf(Z(1:2,zi), eta(1:2,j), S(1:2,S_range(1:2))), wk_k_minus_1(j), old_weight, new_weight); + disp(s); + end + end + +end +numTargets_Jk = L * numTargets_Jk_k_minus_1 + numTargets_Jk_k_minus_1; + +if(VERBOSE == 1) + for j = 1:numTargets_Jk + thisPos = m_k(:,j); + thisW = w_k(j); + s = sprintf('Target %d: %3.4f %3.4f %3.4f %3.4f, Weight %3.9f', j, thisPos(1), thisPos(2), thisPos(3), thisPos(4), thisW); + disp(s); + end +end \ No newline at end of file diff --git a/GM_PHD_Calculate_Performance_Metric.m b/GM_PHD_Calculate_Performance_Metric.m new file mode 100644 index 0000000..692da11 --- /dev/null +++ b/GM_PHD_Calculate_Performance_Metric.m @@ -0,0 +1,17 @@ +%GM_PHD_Calculate_Performance_Metric +%Matlab code by Bryan Clarke b.clarke@acfr.usyd.edu.au + +%This uses the Optimal Subpattern Assignment (OSPA) metric proposed by +%Schuhmacher, D.; Ba-Tuong Vo; Ba-Ngu Vo, "A Consistent Metric for Performance Evaluation of Multi-Object Filters," Signal Processing, IEEE Transactions on , vol.56, no.8, pp.3447,3457, Aug. 
2008 +%cutoff_c and order_p are tuning parameters set in GM_PHD_Initialisation +if(CALCULATE_OSPA_METRIC == 1) + X = X_k; + Y = [simTarget1History(:,k), simTarget2History(:,k)]; + if(k >= simTarget3SpawnTime) + Y = [Y, simTarget3History(:,k-simTarget3SpawnTime+1)]; + end + + metric = ospa_dist(X, Y, cutoff_c, order_p);%Use Ba-Ngu Vo's implementation + metric_history = [metric_history, metric]; + +end \ No newline at end of file diff --git a/GM_PHD_Construct_Update_Components.m b/GM_PHD_Construct_Update_Components.m new file mode 100644 index 0000000..c8ce565 --- /dev/null +++ b/GM_PHD_Construct_Update_Components.m @@ -0,0 +1,41 @@ +%GM_PHD_Construct_Update_Components +%Matlab code by Bryan Clarke b.clarke@acfr.usyd.edu.au + +%This file creates the components needed for performing a Kalman filter update on the +%targets using the measurement. +s = sprintf('Step 3: Constructing update components for all targets, new and existing.'); +disp(s); + +%We need to clear the data structures each iteration +eta = []; +S = []; +K = []; +P_k_k = []; + +for j = 1:numTargets_Jk_k_minus_1 + m_j = mk_k_minus_1(:,j); + eta_j = H2 * m_j;%Observation model. Assume we see position AND velocity of the target. + + P_range = calculateDataRange4(j); %4x4 array + + PHt = Pk_k_minus_1(:,P_range) * H2'; %Taken from Tim Bailey's EKF code. 4x4 array + + %Calculate K via Tim Bailey's method. + S_j = R2 + H2 * PHt; + %At this point, Tim Bailey's code makes S_j symmetric. In this case, it leads to the matrix being non-positive definite a lot of the time and chol() crashes. + %So we won't do that. + SChol= chol(S_j); + + SCholInv= SChol \ eye(size(SChol)); % triangular matrix, invert via left division + W1 = PHt * SCholInv; + + K_j = W1 * SCholInv'; + + P_j = Pk_k_minus_1(:,P_range) - W1*W1';%4x4 array + %End Tim Bailey's code. + + eta = [eta, eta_j]; + S = [S, S_j]; + K = [K, K_j]; + P_k_k = [P_k_k, P_j]; +end \ No newline at end of file diff --git a/GM_PHD_Create_Birth.m b/GM_PHD_Create_Birth.m new file mode 100644 index 0000000..abc2f38 --- /dev/null +++ b/GM_PHD_Create_Birth.m @@ -0,0 +1,155 @@ +%GM_PHD_Create_Birth +%Matlab code by Bryan Clarke b.clarke@acfr.usyd.edu.au + +%This file performs the processing on measurements to extract new targets +%and populates the birth lists (mean, weight, covariance) needed to instantiate them next iteration. + +%This is not formally spelt out in Vo&Ma so I have used my best judgement to +%come up with a method to initialise targets. Targets are initialised as +%static (using the position of current measurements) and/or dynamic (using +%two generations of measurements and calculating the movement between). +%If only static targets are added, the number of new targets = the number +%of measurements. +%If only dynamic targets are added, the number of new targets = the number of +%measurements this teration * the number of measurements last iteration +%If both static and dynamic targets are added, the number of new targets is +%equal to the sum of these. +disp('Step 7: Creating new targets from measurements, for birthing next iteration'); +w_birth = []; +m_birth = []; +P_birth = []; +w_spawn = []; +m_spawn = []; +P_spawn = []; +numBirthedTargets = 0; +numSpawnedTargets = 0; + +%We create targets using two generations of measurements. +%The first generation is used to calculate the velocity (dx/dt) to the +%second generation. +%The second generation gives the position. 
+%We also add a set of targets from the second generation but with velocity +%zero, since some may be unlinked to the first generation, but we have no +%velocity information for them. +if((addVelocityForNewTargets == true) && (k >= 2))%If we want to add targets with initial velocities.If only one iteration complete, cannot calculate velocity + %Each measurement consists of 2 rows + thisMeasRowRange = k; + prevMeasRowRange = k-1; + + thisMeas = simMeasurementHistory{thisMeasRowRange}; + prevMeas = simMeasurementHistory{prevMeasRowRange}; + + for j_this = 1:size(thisMeas,2) + for j_prev = 1:1:size(prevMeas,2)%Match every pair from previous to current + m_this = thisMeas(:,j_this); + m_prev = prevMeas(:, j_prev); + %Calculate and add the velocity. + m_i = m_this; + thisV = (m_this(1:2) - m_prev(1:2)) / dt; + if(abs(thisV(1)) > MAX_V) || (abs(thisV(2)) > MAX_V) + continue;%To reduce the number of targets added, we filter out the targets with unacceptable velocities. + end + + m_i = [m_i; thisV]; + + %Decide if the target is birthed (from birth position) + %or spawned (from an existing target) + %Initialise the weight to birth + birthWeight = birth_intensity(m_i); + %Targets can also spawn from existing targets. We will + %take whichever is a higher weight - birthing or + %spawning + nTargets = size(X_k, 2); + maxSpawnWeight = -1; + for targetI = 1:nTargets + thisWeight = spawn_intensity(m_i, X_k(:,targetI)) * X_k_w(targetI);%Spawn weight is a function of proximity to the existing target, and the weight of the existing target. + if(thisWeight > maxSpawnWeight) + maxSpawnWeight = thisWeight; + end + end + %Check if birthing had higher weight. + if(birthWeight > maxSpawnWeight) + %Birth the target + w_i = birthWeight; + %Initialise the covariance + P_i = covariance_birth; + w_birth = [w_birth, w_i]; + m_birth = [m_birth m_i]; + P_birth = [P_birth, P_i]; + numBirthedTargets = numBirthedTargets + 1; + else + %Spawn the target + w_i = maxSpawnWeight; + %Initialise the covariance + P_i = covariance_spawn; + w_spawn = [w_spawn, w_i]; + m_spawn = [m_spawn, m_i]; + P_spawn = [P_spawn, P_i]; + numSpawnedTargets = numSpawnedTargets + 1; + end + end + end +end + + +%If we want to add targets, treating them as if they are +%static. +if (addStaticNewTargets == true) + thisMeasRowRange = k; + thisMeas = simMeasurementHistory{thisMeasRowRange}; + for j_this = 1:size(thisMeas,2) %Each measurement consists of 2 rows + + %Add a static target + m_i = thisMeas(:,j_this); + m_i(3:4) = [0; 0]; + + %Decide if the target is birthed (from birth position) + %or spawned (from an existing target) + %Initialise the weight to birth + birthWeight = birth_intensity(m_i); + %Targets can also spawn from existing targets. We will + %take whichever is a higher weight - birthing or + %spawning + nTargets = size(X_k, 2); + maxSpawnWeight = -1; + for targetI = 1:nTargets + thisWeight = spawn_intensity(m_i, X_k(:,targetI)) * X_k_w(targetI);%Spawn weight is a function of proximity to the existing target, and the weight of the existing target. + if(thisWeight > maxSpawnWeight) + maxSpawnWeight = thisWeight; + end + end + %Check if birthing had higher weight. 
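+            %The birth/spawn distinction only affects the initial weight used
+            %(birthWeight vs maxSpawnWeight) and the starting covariance assigned
+            %below (covariance_birth vs covariance_spawn).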
+ if(birthWeight > maxSpawnWeight) + %Birth the target + w_i = birthWeight; + %Initialise the covariance + P_i = covariance_birth; + w_birth = [w_birth, w_i]; + m_birth = [m_birth m_i]; + P_birth = [P_birth, P_i]; + numBirthedTargets = numBirthedTargets + 1; + else + %Spawn the target + w_i = maxSpawnWeight; + %Initialise the covariance + P_i = covariance_spawn; + w_spawn = [w_spawn, w_i]; + m_spawn = [m_spawn m_i]; + P_spawn= [P_spawn, P_i]; + numSpawnedTargets = numSpawnedTargets + 1; + end + end +end + +if VERBOSE == 1 + for j = 1:numBirthedTargets + thisM = m_birth(:,j); + s = sprintf('Target to birth %d: %3.4f %3.4f %3.4f %3.4f Weight %3.9f', j, thisM(1), thisM(2), thisM(3), thisM(4), w_birth(j)); + disp(s); + end + for j = 1:numSpawnedTargets + thisM = m_spawn(:,j); + s = sprintf('Target to spawn %d: %3.4f %3.4f %3.4f %3.4f Weight %3.9f', j, thisM(1), thisM(2), thisM(3), thisM(4), w_spawn(j)); + disp(s); + end +end diff --git a/GM_PHD_Estimate.m b/GM_PHD_Estimate.m new file mode 100644 index 0000000..6e9093d --- /dev/null +++ b/GM_PHD_Estimate.m @@ -0,0 +1,61 @@ +%GM_PHD_Estimate +%Matlab code by Bryan Clarke b.clarke@acfr.usyd.edu.au + +%This file estimates the positions of targets tracked by the PHD filter. +%We need to extract the likely target positions from the PHD (i.e. we need to find the peaks of the PHD). +%This is actually fairly tricky. The naive approach is to pull out targets with the +%highest weights, but this is FAR from the best approach. A large covariance will pull down +%the peak size, and when targets are close together or have high covariances, there can be +%superposition effects which shift the peak. + +%This just implements the method in Vo&Ma, which is pulling out every target with a weight over +%weightThresholdToBeExtracted (defined in GM_PHD_Initialisation). There is +%the option of repeatedly printing out targets with rounded weights greater +%than 1 (i.e. if two or more strong targets are mergde and the weight +%rounds to 2/3/4/etc, display the target at that point multiple times when +%VERBOSE is set to 1). This will NOT change filter performance as the +%extracted state estimate is not fed back into the filter. +s = sprintf('Step 6: Estimate target states'); +disp(s); +X_k = []; +X_k_P = []; +X_k_w = []; + +%OUTPUT_MULTIPLE_HIGH_WEIGHT_TARGETS is set in GM_PHD_Initialisation +if(OUTPUT_MULTIPLE_HIGH_WEIGHT_TARGETS == 0) + i = find(w_bar_k > weightThresholdToBeExtracted); + X_k = m_bar_k(:,i); + X_k_w = w_bar_k(:,i); + for j = 1:length(i) + thisI = i(j); + P_range = calculateDataRange4(thisI); + + thisP = P_bar_k(:,P_range); + X_k_P = [X_k_P, thisP]; + end +else + %If a target has a rounded weight greater than 1, output it multiple + %times. VERBOSE must be set to 1 to see the effects of this. + for i = 1:size(w_bar_k,2) + for j = 1:round(w_bar_k(i)) + X_k = [X_k, m_bar_k(:,i)]; + X_k_w = [X_k_w, w_bar_k(i)]; + P_range = calculateDataRange4(i); + thisP = P_bar_k(:,P_range); + X_k_P = [X_k_P, thisP]; + end + end +end + +if(VERBOSE == 1) + s = sprintf('\t%d targets beleived to be valid:', size(X_k, 2)); + disp(s); + for i = 1:size(X_k, 2) + P_range = calculateDataRange4(i); + s = sprintf('\t\tTarget %d at %3.4f %3.4f, P %3.4f %3.4f, W %3.5f', i, X_k(1, i), X_k(2,i), X_k_P(1, P_range(1)), X_k_P(2, P_range(2)), X_k_w(i)); + disp(s); + end +end + +%Store history for plotting. 
+X_k_history = [X_k_history, X_k]; \ No newline at end of file diff --git a/GM_PHD_Filter.m b/GM_PHD_Filter.m new file mode 100644 index 0000000..a5430fc --- /dev/null +++ b/GM_PHD_Filter.m @@ -0,0 +1,131 @@ +%GM_PHD_Filter +%Version 1.10, last modified 7th January 2014 +%Matlab code by Bryan Clarke b.clarke@acfr.usyd.edu.au +%With: +%- some Kalman filter update code by Tim Bailey, taken from his website http://www-personal.acfr.usyd.edu.au/tbailey/software/ +%- error_ellipse by AJ Johnson, taken from Matlab Central http://www.mathworks.com.au/matlabcentral/fileexchange/4705-errorellipse + +%The GM-PHD algorithm is from Ba-Ngu Vo & Wing-Kin Ma in: +%B.-N. Vo, W.-K. Ma, "The Gaussian Mixture Probability Hypothesis Density +%Filter", IEEE Transactions on Signal Processing, Vol 54, No. 11, November 2006, pp4091-4104 + +%I have implemented both the linear Kalman filter and extended Kalman +%filter versions of this algorithm; switch between these by +%setting/clearing the variable USE_EKF in GM_PHD_Initialisation. + +%Ba-Ngu Vo has kindly allowed me to include his implementation of the Optimal Subpattern Assignment +%(OSPA) metric proposed by D. Schuhmacher, Ba-Tuong Vo & Ba-Ngu Vo in +% Schuhmacher, D.; Ba-Tuong Vo; Ba-Ngu Vo, "A Consistent Metric for Performance Evaluation of Multi-Object Filters," Signal Processing, IEEE Transactions on , vol.56, no.8, pp.3447,3457, Aug. 2008 +%This uses +%- ospa_dist by Ba-Ngu Vo, taken from http://ba-ngu.vo-au.com/vo/OSPA_for_Tracks.zip +%- Hungarian by Alex Melin (to whom I am also much obliged), also taken from http://ba-ngu.vo-au.com/vo/OSPA_for_Tracks.zip + +%The OSPA metric is not essential for the functioning of the filter but +%provides a nice way of analysing performance. + +%See the README.txt, the comments and the Vo & Ma paper for more +%information about what this code actually does. + +clear all; +close all; +clc; + +%Step 0: Initialisation +%The EKF version must initialise the Jacobian functions used to linearise +%the observation/prediction models. The simulated measurements are also very +%different; the linear KF is direct observations of target state, the EKF is +%range-bearing measurements. The observation and prediction covariances are +%also different. +GM_PHD_Initialisation; +if USE_EKF == 0 + GM_PHD_Simulate_Initialise; +else + GM_EKF_PHD_Simulate_Initialise; + GM_EKF_PHD_Initialise_Jacobians; +end + +%In Vo&Ma, the targets are known at filter initialisation. +%If we want to know about them, set KNOWN_TARGET to 1 in GM_PHD_Initialisation. +%Otherwise they should be initialised after being detected a few times. +%HOWEVER this is not guaranteed - sometimes due to noise or missed +%detections, one or both of the targets will not be tracked. This is just +%part of the filter and the birth_intensity function. +if KNOWN_TARGET == 1 + t1start = [simTarget1Start(1:2); simTarget1Vel]; + t2start = [simTarget2Start(1:2); simTarget2Vel]; + m_birth = [t1start, t2start]; + w_birth = [birth_intensity(t1start), birth_intensity(t2start)]; + P_birth = [covariance_birth, covariance_birth]; + numBirthedTargets = 2; +end + +%Main loop +while (k < endTime)%k = timestep + k = k + 1; + s = sprintf('======ITERATION %d======', k); + disp(s); + + %Step Sim: Generate sensor Measurements + %If you want to use this code with your own data or for a different problem, + %replace this function with your own. 
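+    %A minimal illustrative sketch (hypothetical, not part of this release): if your
+    %own detections were already stored in a cell array, say myMeasurementLog, with one
+    %2xN block of [x; y] columns per timestep, the two simulation scripts called below
+    %could be replaced by something like:
+    %    Z = myMeasurementLog{k};
+    %    zTrue = [];%No separate ground-truth measurements for real data
+    %    simMeasurementHistory{k} = Z;
+    %Z, zTrue and simMeasurementHistory are the variables the rest of the filter expects.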
+ if USE_EKF == 0 + GM_PHD_Simulate_Measurements; %Linear KF measurements are simulated direct observations [X; Y] of the target positions + else + GM_EKF_PHD_Simulate_Measurements; %EKF measurements are simuated range-bearing [r; theta] of the target position from a fixed sensor. + end + + %Linear KF use fixed matrices for prediction and update. + %EKF calculates Jacobians to linearise the prediction and observation + %models. There is need for an inverse sensor model to map from sensor + %to target space for some calculations. + if(USE_EKF == 0) + %Step 1: Prediction for birthed/spawned targets + GM_PHD_Predict_Birth; + %Step 2: Prediction for existing targets + GM_PHD_Predict_Existing; + %Step 3: Construction of PHD update components + GM_PHD_Construct_Update_Components; + %Step 4: Update targets with measurements + GM_PHD_Update; + else + %Step 1: Prediction for birthed/spawned targets + GM_EKF_PHD_Predict_Birth; %EKF prediction uses Jacobians to linearise the prediction model and predict the targets forward in time + %Step 2: Prediction for existing targets + GM_EKF_PHD_Predict_Existing; + %Step 3: Construction of PHD update components + GM_EKF_PHD_Construct_Update_Components; + %Step 4: Update targets with measurements + GM_EKF_PHD_Update; + end + + %Step 5: Prune targets + GM_PHD_Prune; + %Step 6: Estimate position of targets + GM_PHD_Estimate + + %Step 7: Create birthed-targets-list to add next iteration in Step 1. + %Not a formal part of Vo&Ma but an essential step! + %The EKF version uses an inverse sensor model. + if(USE_EKF == 0) + GM_PHD_Create_Birth; + else + GM_EKF_PHD_Create_Birth; + end + + %Step Metric: Calculate performance metric + GM_PHD_Calculate_Performance_Metric; + + %Step Plot: Generate graphs + %The EKF version uses an inverse sensor model. + if USE_EKF == 0 + GM_PHD_Simulate_Plot; + else + GM_EKF_PHD_Simulate_Plot; + end + + if(VERBOSE == true) + pause;%Pause to allow reading of the text + end + +end + diff --git a/GM_PHD_Initialisation.m b/GM_PHD_Initialisation.m new file mode 100644 index 0000000..21c4a88 --- /dev/null +++ b/GM_PHD_Initialisation.m @@ -0,0 +1,167 @@ +%GM_PHD_Initialisation +%Matlab code by Bryan Clarke b.clarke@acfr.usyd.edu.au + +%This file initialises most of the variables that we use for the filter. + +%If you want to use this GM-PHD filter for your own problem, you will need +%to replace a lot of this script with your own code. + +%% Control Variables +%These variables control some aspect of filter function and performance +VERBOSE = 0;%Set to 1 for much more text output. This can be used to give more information about the state of the filter, but slows execution and makes the code less neat by requiring disp() statements everywhere. +KNOWN_TARGET = 0;%Set to 1 to have the first two targets already known to the filter on initialisation. Otherwise tracks for them will need to be instantiated. +PLOT_ALL_MEASUREMENTS = 0;%Set to 1 to maintain the plot of the full measurement history, including clutter and error ellipses. Set to 0 to just plot the most recent measurement. +OUTPUT_MULTIPLE_HIGH_WEIGHT_TARGETS = 0;%Set to 1 to implement Vo & Ma's state extraction where targets with rounded weight greater than 1 are output multiple times. This does NOT change filter processing as extracted targets are not fed back into the filter, it is merely for display. VERBOSE must be set to 1 to see the effects of this. Personally I think this is a bit redundant but I've included it to match the Vo & Ma algorithm. 
+CALCULATE_OSPA_METRIC = 1; %Set to 1 to calculate the OSPA performance metric for each step. This is not essential for the GM-PHD filter but provides a rough indication of how well the filter is performing at any given timestep. +USE_EKF = 0;%Set to 1 to use extended Kalman filter. Set to 0 to use linear KF. +DRAW_VELOCITIES = 0;%Set to 1 to draw velocity arrows on the output plot. +USE_REAL_DATA = 1; +%Target initialisation: when we add a new target, we can use a two-step +%initialisation where every target is added after two observations, so it +%is added with a position (the second observation position) and a velocity +%(the velocity from the first to the second observation). Or we can add +%targets as static objects after one observation, and let the velocity be +%filled in by subsequent observations. The big difference is the first +%prediction step - with no velocity initially they will be expected to stay +%in the same location, as opposed to move in a straight line. +%We can add both types of target, but using a two-step initialisation: so a +%after the second observation, a static target is added and a target with +%velocity. +addVelocityForNewTargets = 1; +addStaticNewTargets = 0; +%When recalculating the weights of targets, we consider the match between +%observed and predicted position. We could use the observed and predicted +%velocity as well, but I haven't gotten around to implementing that. So for +%reweighting, we only use the first two values of the state. +weightDataRange = 1:2; +stateDataRange = 1:4; +temp = []; +%Filtering velocity: We throw away observed targets with velocities greater +%than this. +if USE_REAL_DATA == 0 + MAX_V = 100; +else + MAX_V = 400; +end + +%% Utility functions +calculateDataRange2 = @(j) (2*j-1):(2*j);%Used to calculate the indices of two-dimensional target j in a long list of two-dimensional targets +calculateDataRange4 = @(j) (4*(j-1)+1):(4*j);%Used to calculate the indices of four-dimensional target j in a long list of four-dimensional targets + +%% Data structures and variables +%Step 0: Initialisation +k = 0; %Time step + +%Step 1: Prediction for birthed targets +numBirthedTargets = 0; +numSpawnedTargets = 0; +m_birth_before_prediction = [];%We store the birth/spawn position from before the prediction. This is used in augmenting the measurement vector to calculate velocity, for the update. + +%Step 2: Prediction for existing targets +%We store the existing position from before the prediction. This is used in augmenting measurement vector to calculate velocity, for the update. +mk_k_minus_1_before_prediction = [];%Used in augmenting measurement vector to calculate velocity, for update. +numTargets_Jk_k_minus_1 = 0;%Number of targets given previous. J_k|k-1. Set at end of Step 2 (prediction of existing targets) +prob_survival = 0.99; %Probability of target survival. Used in GM_PHD_Predict_Existing for weight calculation + +%These are used for storing the state of existing targets after prediction. +wk_k_minus_1 = [];%Weights of gaussians, previous, predicted. w_k|k-1. +mk_k_minus_1 = []; %Means of gaussians, previous, predicted. m_k|k-1 +Pk_k_minus_1 = []; %Covariances of gaussians, previous, predicted. P_k|k-1 + +%Step 3: Construct update component. +%There is nothing we need to initialise here because the arrays are initialised +%each iteration. (This is actually true for a lot of the arrays being initialised in this script arrays, but not +%all, and rather than try to separate them it's easiest to initialise nearly all +%of them here). 
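+%(Illustrative note, not part of the original comments: throughout the filter the
+%4x4 covariances are stored side by side in one wide matrix, and calculateDataRange4
+%recovers the columns belonging to target j. For example, calculateDataRange4(3)
+%returns 9:12, so Pk_k_minus_1(:, calculateDataRange4(3)) is the 4x4 covariance of
+%the third predicted target.)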
+ +%Step 4: Update +%The posterior weight, mean and covariance after the update +w_k = [];%Weights of gaussians, updated. w_k|k +m_k = [];%Means of gaussians, updated. m_k|k +P_k = [];%covariances of gaussians, updated. P_k|k +score_k = []; %Score of target +numTargets_Jk = 0;%Number of targets after update. J_k. Set at end of step 4 (update) + +%Step 5: Prune +numTargets_J_pruned = 0;%Number of targets after pruning +numTargets_Jk_minus_1 = 0; %Number of targets, previous. J_k-1. Set in end of GM_PHD_Prune +%Merge and prune constants +%These numbers have a HUGE impact on the performance and unfortunately need to be +%manually tuned if we make changes. +%These particular values come from Vo&Ma +T = 10^-5;%Weight threshold. Value the weight needs to be above to be considered a target rather than be deleted immediately. +mergeThresholdU = 4; %Merge threshold. Points with Mahalanobis distance of less than this between them will be merged. +weightThresholdToBeExtracted = 0.5;%Value the weight needs to be above to be considered a 'real' target. +maxGaussiansJ = 100;%Maximum number of Gaussians after pruning. NOT USED in this implementation. + +%The previous iteration's mean/weight/covariance. Set in GM_PHD_Prune +%after pruning. Used as input for the prediction step. +wk_minus_1 = []; %Weights from previous iteration +mk_minus_1 = []; %Means from previous iteration +Pk_minus_1 = []; %Covariances from previous iteration + +%Step 6: Estimate/extract states +%We store the history of all points X_k (extracted states) for plotting +%purposes. This is updated in the end of GM_PHD_Estimate +X_k_history = []; + +%Step 7: Create birthed/spawned targets to append to list next iteration +%These are set in GM_PHD_Create_Birth +%Births occur at fixed positions; spawns occur at existing targets +w_birth = [];%New births' weights +m_birth = [];%New births' means +P_birth = [];%New births' covariances +w_spawn = [];%New spawns' weights +m_spawn = [];%New spawns' means +P_spawn = [];%New spawns' covariances + +%Step Sim: Generate simulated measurements +%Detected clutter is a Poisson RFS +xrange = [-1000 1000];%X range of measurements +yrange = [-1000 1000];%Y range of measurements +V = 4 * 10^6; %Volume of surveillance region +lambda_c = 12.5 * 10^-6; %average clutter returns per unit volume (50 clutter returns over the region) +clutter_intensity = @(z_cartesian) lambda_c * V * unifpdf_2d(xrange, yrange, z_cartesian);%Generate clutter function. There are caveats to its use for clutter outside of xrange or yrange - see the comments in unifpdf_2d.m + +%Step Metric: Calculate performance metric +%The order_p and cutoff_c control filter performance; read the paper by +%Schuhmacher et al to understand these in greater detail. The parameters +%given here by default are not particularly well tuned for this problem but +%they work alright. +order_p = 1;%The order determines how punishing the metric calculation is to larger errors; as p increases, outliers are more heavily penailsed +cutoff_c = 200;%Cutoff determines the maximum error for a point. 
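+%(Illustrative worked example, not from the paper: with order_p = 1 and cutoff_c = 200,
+%if the ground truth holds three targets and the filter reports two of them with
+%position errors of 10 m and 30 m, the OSPA distance is (10 + 30 + 200) / 3 = 80,
+%with the unmatched target contributing the full cutoff.)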
+metric_history = [];%History of the OSPA performance metric + +%% MODELS for prediction and observation +%Prediction models - used in steps 1 & 2 for prediction +I2 = eye(2);%2x2 identify matrix, used to construct matrices +Z2 = zeros(2);%2x2 zero matrix, used to construct matrices +dt = 1; %One-second sampling period +F = [ [I2, dt*I2]; [Z2 I2] ];%State transition matrix (motion model) +sigma_v = 5; %Standard deviation of process noise is 5 m/(s^2) +Q = sigma_v^2 * [ [1/4*dt^4*I2, 1/2*dt^3*I2]; [1/2*dt^3* I2, dt^2*I2] ]; %Process noise covariance, given in Vo&Ma. + +%% Birth model. This is a Poisson random finite set. +%We only use the first two elements as the initial velocities are unknown +%and it is not specified by Vo&Ma whether they are used (I am fairly sure +%they aren't, or they would have said otherwise) +%Birth and spawn models +birth_mean1 = [250, 250, 0, 0]';%Used in birth_intensity function +birth_mean2 = [-250, -250, 0, 0]';%Used in birth_intensity function +covariance_birth = diag([100, 100, 25, 25]');%Used in birth_intensity function +covariance_spawn = diag([100, 100, 400,400]');%Used in spawn_intensity function +covariance_spawn = max(covariance_spawn, 10^-6);%Used in spawn_intensity function +birth_intensity = @(x) (0.1 * mvnpdf(x(1:2)', birth_mean1(1:2)', covariance_birth(1:2,1:2)) + 0.1 * mvnpdf(x(1:2)', birth_mean2(1:2)', covariance_birth(1:2,1:2)));%Generate birth weight. This only takes into account the position, not the velocity, as Vo&Ma don't say if they use velocity and I assume that they don't. Taken from page 8 of their paper. +spawn_intensity = @(x, targetState) 0.05 * mvnpdf(x, targetState, covariance_spawn);%Spawn weight, from page 8 of Vo&Ma. + +prob_detection = 0.98; %Probability of target detection. Used in recalculating weights in GM_PHD_Update + +%Detection models for the linear Kalman filter. The extended Kalman filter +%uses different models. +if(USE_EKF == 0) + H = [I2, Z2];%Observation matrix for position. Not used, but if you wanted to cut back to just tracking position, might be useful. + H2 = eye(4);%Observation matrix for position and velocity. This is the one we actually use, in GM_PHD_Construct_Update_Components + sigma_r = 10; %Standard deviation of measurement noise is 10m. Used in creating R matrix (below) + R = sigma_r^2 * I2;%Sensor noise covariance. used in R2 (below) + R2 = [ [R, Z2]; [Z2, R] ];%Measurement covariance, expanded to both position & velocity. Used in GM_PHD_Construct_Update_Components. NOTE: This assumes that speed measurements have the same covariance as position measurements. I have no mathematical justification for this. +end \ No newline at end of file diff --git a/GM_PHD_Predict_Birth.m b/GM_PHD_Predict_Birth.m new file mode 100644 index 0000000..29e198a --- /dev/null +++ b/GM_PHD_Predict_Birth.m @@ -0,0 +1,51 @@ +%GM_PHD_Predict_Birth +%Matlab code by Bryan Clarke b.clarke@acfr.usyd.edu.au + +%This file performs prediction for newly birthed and spawned targets +%This is necessary since their positions were initialised in a previous timestep and they may +%have moved between then and now +%We iterate through j = 1..J_b,k where J_b,k is number of birthed landmarks +%at time k (but the landmarks correspond to the measurement from time k-1) +%and j = 1...J_s,k where J_s,k is the number of spawned landmarks at time k +%(but the landmarks correspond to the measurement from time k-1). 
+%For this implementation, birthed and spawned targets are identical except +%they have weights that are calculated from different functions, and +%different starting covariances. A target is spawned or birthed depending +%on which weighting function will give it a higher starting weight. + +%The means of these will be the position in Cartesian space where they are detected +%The covariances & weights are calculated according to Vo&Ma +s = sprintf('Step 1: Prediction for birthed and spawned targets.'); +disp(s); + +m_birth_before_prediction = [m_birth, m_spawn]; %Need to store these BEFORE prediction for use in the update step. + +%Perform prediction for birthed targets using birthed velocities. +for j = 1:numBirthedTargets + i = i + 1; + %w_birth was already instantiated in GM_PHD_Create_Birth + m_birth(:,j) = F * m_birth(:,j); + P_range = calculateDataRange4(j); + P_birth(:,P_range) = Q + F * P_birth(:,P_range) * F'; +end +%Perform prediction for spawned targets using spawned velocities. +for j = 1:numSpawnedTargets + i = i + 1; + %w_spawn was already instantiated in GM_PHD_Create_Birth + m_spawn(:,j) = F * m_spawn(:,j); + P_range = calculateDataRange4(j); + P_spawn(:,P_range) = Q + F * P_spawn(:,P_range) * F'; +end + +if(VERBOSE == 1) + for j = 1:numBirthedTargets + thisM = m_birth(:,j); + s = sprintf('Birthed target %d: %3.4f %3.4f %3.4f %3.4f Weight %3.9f', j, thisM(1), thisM(2), thisM(3), thisM(4), w_birth(j)); + disp(s); + end + for j = 1:numSpawnedTargets + thisM = m_spawn(:,j); + s = sprintf('Spawned target %d: %3.4f %3.4f %3.4f %3.4f Weight %3.9f', j, thisM(1), thisM(2), thisM(3), thisM(4), w_spawn(j)); + disp(s); + end +end diff --git a/GM_PHD_Predict_Existing.m b/GM_PHD_Predict_Existing.m new file mode 100644 index 0000000..4ad4918 --- /dev/null +++ b/GM_PHD_Predict_Existing.m @@ -0,0 +1,43 @@ +%GM_PHD_Predict_Existing +%Matlab code by Bryan Clarke b.clarke@acfr.usyd.edu.au + +%This file performs prediction for existing targets +s = sprintf('Step 2: Prediction for existing targets.'); +disp(s); + +mk_k_minus_1_before_prediction = mk_minus_1; + +for j = 1:size(mk_minus_1,2) + wk_minus_1(j) = prob_survival * wk_minus_1(j); + mk_minus_1(:,j) = F * mk_minus_1(:,j); %Assume constant velocity. + + P_range = calculateDataRange4(j); + P_i = Q + F * Pk_minus_1(:,P_range) * F'; + + prevState = mk_k_minus_1_before_prediction(:,j); + newState = mk_minus_1(:,j); + + Pk_minus_1(:,P_range) = P_i; + + if(VERBOSE == 1) + s = sprintf('\t\tExisting target %d. Previously at %3.4f %3.4f, now at %3.4f %3.4f.', j, prevState(1), prevState(2), newState(1), newState(2)); + disp(s); + + s = sprintf('\t\tP was %3.4f %3.4f, NOW %3.4f %3.4f', Pk_minus_1(1,P_range(1)), Pk_minus_1(2,P_range(2)), P_i(1,1), P_i(2,2)); + disp(s); + end +end + +%% Now we combine the birthed targets with the existing ones. 
+%Append newly birthed targets (in m_k_minus_1) to back of old ones +wk_k_minus_1 = [wk_minus_1, w_birth, w_spawn]; +mk_k_minus_1 = [mk_minus_1, m_birth, m_spawn]; +Pk_k_minus_1 = [Pk_minus_1, P_birth, P_spawn]; +numTargets_Jk_k_minus_1 = numTargets_Jk_minus_1 + numBirthedTargets + numSpawnedTargets; +%Create a backup to allow for augmenting the measurement in the update +mk_k_minus_1_before_prediction = [mk_k_minus_1_before_prediction, m_birth_before_prediction];%m_birth_before_prediction also contains the spawned targets before prediction + +if(VERBOSE == 1) + s = sprintf('\tPerformed prediction for %d birthed and existing targets in total.', numTargets_Jk_k_minus_1); + disp(s); +end \ No newline at end of file diff --git a/GM_PHD_Prune.m b/GM_PHD_Prune.m new file mode 100644 index 0000000..a86011b --- /dev/null +++ b/GM_PHD_Prune.m @@ -0,0 +1,99 @@ +%GM_PHD_Prune +%Matlab code by Bryan Clarke b.clarke@acfr.usyd.edu.au + +%This file performs merging and pruning after the PHD update. +%The PHD update creates a combinatorial explosion in the number of targets. +%We prune the low-weight ones and merge the close-together ones. The weight +%threshold T and distance threshold L that control this process are set in +%GM_PHD_Initialisation. +s = sprintf('Step 5: Prune and merge targets.'); +disp(s); + +%% Prune out the low-weight targets +I = find(w_k >= T);%Find targets with high enough weights +if(VERBOSE == 1) + s = sprintf('The only tracks with high enough weights are:'); + disp(s); + disp(I); +end + +%% Merge the close-together targets +l = 0;%Counts number of features +w_bar_k = []; +m_bar_k = []; +P_bar_k = []; +while isempty(I) == false %We delete from I as we merge + l = l + 1; + %Find j, which is i corresponding to highest w_k for all i in I + highWeights = w_k(:,I); + [maxW, j] = max(highWeights); + j = j(1); %In case of two targets with equal weight + %j is an index of highWeights (i.e. 
a position in I) + %We want the index in w_k + j = I(j); + + %Find all points with Mahalanobis distance less than U from point + %m_k(j) + L = [];%A vector of indexes of w_k + for iterateI = 1:length(I) + thisI = I(iterateI); + + delta_m = m_k(:,thisI) - m_k(:,j); + P_range = calculateDataRange4(thisI); + mahal_dist = delta_m' * (P_k(:,P_range) \ delta_m);%Invert covariance via left division + if(mahal_dist <= mergeThresholdU) + L = [L, thisI]; + end + end + if(VERBOSE == 1) + s = sprintf('\tMerging target %d with these targets:', j); + disp(s); + disp(L); + end + %The new weight is the sum of the old weights + w_bar_k_l = sum(w_k(L)); + + %The new mean is the weighted average of the merged means + m_bar_k_l = 0; + for i = 1:length(L) + thisI = L(i); + m_bar_k_l = m_bar_k_l + 1 / w_bar_k_l * (w_k(thisI) * m_k(:,thisI)); + end + + %Calculating covariance P_bar_k is a bit trickier + P_val = zeros(4,4); + for i = 1:length(L) + thisI = L(i); + delta_m = m_bar_k_l - m_k(:,thisI); + P_range = calculateDataRange4(thisI); + P_val = P_val + w_k(thisI) * (P_k(:,P_range) + delta_m * delta_m'); + tmpP = P_k(:,P_range); + end + P_bar_k_l = P_val / w_bar_k_l; + + old_P_range = calculateDataRange4(j); + oldP = P_k(:,old_P_range); + + %Now delete the elements in L from I + for i = 1:length(L) + iToRemove = find(I == L(i)); + I(iToRemove) = []; + end + + %Append the new values to the lists + w_bar_k = [w_bar_k, w_bar_k_l]; + m_bar_k = [m_bar_k, m_bar_k_l]; + P_bar_k = [P_bar_k, P_bar_k_l]; +end + +numTargets_J_pruned = size(w_bar_k,2);%The number of targets after pruning + +%Here you could do some check to see if numTargets_J_pruned > maxGaussiansJ +%and if needed delete some of the weaker gaussians. I haven't bothered but +%it might be useful for your implementation. + +numTargets_Jk_minus_1 = numTargets_J_pruned;%Number of targets in total, passed into the next filter iteration +%Store the weights, means and covariances for use next iteration. +wk_minus_1 = w_bar_k; %Weights from this iteration +mk_minus_1 = m_bar_k; %Means from this iteration +Pk_minus_1 = P_bar_k; %Covariances from this iteration diff --git a/GM_PHD_Simulate_Initialise.m b/GM_PHD_Simulate_Initialise.m new file mode 100644 index 0000000..c748a89 --- /dev/null +++ b/GM_PHD_Simulate_Initialise.m @@ -0,0 +1,79 @@ +%GM_PHD_Simulate_Initialise +%Matlab code by Bryan Clarke b.clarke@acfr.usyd.edu.au + +%This file initialises the simulation described in example 1 of Vo&Ma 2006. +%The velocity and starting position values are rough estimates obtained by visual +%inspection of the simulation. +%They can probably be changed without things breaking. + +%If you want to use this GM-PHD filter for your own problem, you will need +%to replace this script with your own. + +%%Control parameters +noiseScaler = 1.0; %Adjust the strength of the noise on the measurements by adjusting this. Useful for debugging. +nClutter = 50; %Assume constant 50 clutter measurements. Since clutter is Poisson distrbuted it might be more accurate to use nClutter = poissrnd(50) if you have the required Matlab toolbox. Constant 50 clutter works well enough for simulation purposes. 
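+%A possible alternative, sketched here only as a suggestion: since lambda_c * V = 50
+%with the default values in GM_PHD_Initialisation, the clutter count could instead be
+%drawn as
+%   nClutter = poissrnd(lambda_c * V);
+%if the Statistics Toolbox is available.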
+ +%I haven't included descriptions of every variable because their names are +%fairly self-explanatory +endTime = 100;%Duration of main loop +simTarget1Start = birth_mean1; +simTarget2Start = birth_mean2; +simTarget1End = [500, -900]'; +simTarget2End = [900, -500]'; +simTarget3End = [-200, -750]'; +simTarget1Vel = (simTarget1End - simTarget1Start(1:2)) / endTime; +simTarget2Vel = (simTarget2End - simTarget2Start(1:2)) / endTime; +simTarget3Vel = [-20; -4]; +simTarget1Start(3:4) = simTarget1Vel; +simTarget2Start(3:4) = simTarget2Vel; +simTarget3Start(3:4) = simTarget3Vel; + +%History arrays are mostly used for plotting. +simTarget1History = simTarget1Start; +simTarget2History = simTarget2Start; +simTarget3History = []; + +simMeasurementHistory = {};%We use a cell array so that we can have rows of varying length. + +simTarget1State = simTarget1Start; +simTarget2State = simTarget2Start; +simTarget3State = []; + +simTarget3SpawnTime = 66;%Target 3 is spawned from target 1 at t = 66s. + +%Set up for plot +%Measurements and targets plot +figure(1); +clf; +hold on; +axis([-1000 1000 -1000 1000]); +xlim([-1000 1000]); +ylim([-1000 1000]); + +%X and Y measurements plot +xlabel('X position'); +ylabel('Y position'); +title('Simulated targets and measurements'); +axis square; + +figure(2); +subplot(2,1,1); +hold on; +axis([0 100 -1000 1000]); +xlabel('Simulation step'); +ylabel('X position of measurement (m)'); +title('Measurement X coordinates'); +subplot(2,1,2); +hold on; +axis([0 100 -1000 1000]); +xlabel('Simulation step'); +ylabel('Y position of measurement (m)'); +title('Measurement Y coordinates'); + +%Performance metric plot +if(CALCULATE_OSPA_METRIC == 1) + figure(3); + clf; + xlabel('Simulation step'); + ylabel('OSPA error metric (higher is worse)'); +end \ No newline at end of file diff --git a/GM_PHD_Simulate_Measurements.m b/GM_PHD_Simulate_Measurements.m new file mode 100644 index 0000000..ffbcbf9 --- /dev/null +++ b/GM_PHD_Simulate_Measurements.m @@ -0,0 +1,83 @@ +%GM_PHD_Simulate_Measurements +%Matlab code by Bryan Clarke b.clarke@acfr.usyd.edu.au + +%This file generates simulated measurement data for the simulation +%described in Vo&Ma. +%There will be gaussian noise on the measurement and Poisson-distributed clutter +%in the environment. + +%If you want to use this PHD filter implementation for another problem, you +%will need to replace this script with another one that populates Z, +%zTrue, and simMeasurementHistory (Z is used in a lot of the update code, +%zTrue and simMeasurementHistory are used in GM_PHD_Simulate_Plot) + +%Note: It is possible to get no measurements if the target is not detected +%and there is no clutter +s = sprintf('Step Sim: Simulating measurements.'); +disp(s); + +%Simulate target movement +simTarget1State = F * simTarget1State; +simTarget2State = F * simTarget2State; +if(~isempty(simTarget3State)) + simTarget3State = F * simTarget3State; +end +%Spawn target 3 when k = 66 +if(k == simTarget3SpawnTime) + simTarget3State = simTarget1State; + simTarget3State(3:4) = simTarget3Vel; +end + +%Save target movement for plotting +simTarget1History = [simTarget1History, simTarget1State]; +simTarget2History = [simTarget2History, simTarget2State]; +simTarget3History = [simTarget3History, simTarget3State]; + +%First, we generate some clutter in the environment. 
+clutter = zeros(2,nClutter);%The observations are of the form [x; y] +for i = 1:nClutter + clutterX = rand * (xrange(2) - xrange(1)) + xrange(1); %Random number between xrange(1) and xrange(2), uniformly distributed. + clutterY = rand * (yrange(2) - yrange(1)) + yrange(1); %Random number between yrange(1) and yrange(2), uniformly distributed. + + clutter(1,i) = clutterX; + clutter(2,i) = clutterY; +end + +%We are not guaranteed to detect the target - there is only a probability + +detect1 = rand; +detect2 = rand; +detect3 = rand; + +if(detect1 > prob_detection) + measX1 = []; + measY1 = []; +else + measX1 = simTarget1State(1) + sigma_r * randn * noiseScaler; + measY1 = simTarget1State(2) + sigma_r * randn * noiseScaler; +end +if(detect2 > prob_detection) + measX2 = []; + measY2 = []; +else + measX2 = simTarget2State(1) + sigma_r * randn * noiseScaler; + measY2 = simTarget2State(2) + sigma_r * randn * noiseScaler; +end + +if(k >= simTarget3SpawnTime) && (detect3 <= prob_detection) + measX3 = simTarget3State(1) + sigma_r * randn * noiseScaler; + measY3 = simTarget3State(2) + sigma_r * randn * noiseScaler; +else + measX3 = []; + measY3 = []; +end + +%Generate true measurement +Z = [ [measX1 measX2 measX3]; [measY1 measY2 measY3] ]; +zTrue = Z;%Store for plotting + +%Append clutter +Z = [Z, clutter]; + +%Store history +simMeasurementHistory{k} = Z; \ No newline at end of file diff --git a/GM_PHD_Simulate_Plot.m b/GM_PHD_Simulate_Plot.m new file mode 100644 index 0000000..6cb0162 --- /dev/null +++ b/GM_PHD_Simulate_Plot.m @@ -0,0 +1,91 @@ +%GM_PHD_Simulate_Plot +%Matlab code by Bryan Clarke b.clarke@acfr.usyd.edu.au +%error_ellipse is by AJ Johnson, taken from Matlab Central http://www.mathworks.com.au/matlabcentral/fileexchange/4705-errorellipse + +%This file plots the current measurements, true target position and estimated target +%position, as well as a history of estimated target positions. + +disp('Plotting...') +%Measurements +figure(1); +if(PLOT_ALL_MEASUREMENTS == false) + clf;%Only plot the most recent measurement. 
+end +hold on; + +%Plot all measurements, including clutter, as black 'x' +if(~isempty(Z)) + plot(Z(1,:), Z(2,:), 'xk'); +end +%Plot noisy measurements of true target position(s), as black 'o' +if(~isempty(zTrue)) + plot(zTrue(1,:), zTrue(2,:), 'ok'); +end + +%Plot target 1 true position as red dots +plot(simTarget1History(1,:), simTarget1History(2,:), '.-r'); +%Plot target 2 true position as blue dots +plot(simTarget2History(1,:), simTarget2History(2,:), '.-b'); +%Plot target 3 true position as green dots +if(~isempty(simTarget3History)) + plot(simTarget3History(1,:), simTarget3History(2,:), '.-g'); +end +%Plot tracked targets as magenta circles +if(~isempty(X_k_history)) + plot(X_k_history(1,:), X_k_history(2,:), 'om'); +end +xlabel('X position'); +ylabel('Y position'); +title('Simulated targets and measurements'); +axis square; + +%For extracted targets, plot latest target(s) as cyan triangle, and draw an +%error ellipse to show uncertainty +if(~isempty(X_k)) + plot(X_k(1,:), X_k(2,:), '^c'); + [nRows, nCols] = size(X_k); + for c = 1:nCols + thisMu = X_k(1:2, c); + covRange = calculateDataRange4(c); + thisCov = X_k_P(:,covRange); + thisCov = thisCov(1:2, 1:2); %We only care about position + error_ellipse(thisCov, thisMu); + end + if(DRAW_VELOCITIES == 1) + %Draw velocities of targets + quiver(X_k(1,:), X_k(2,:), X_k(3,:), X_k(4,:)) + end +end + +%Individual X and Y components of measurements +figure(2); +subplot(2,1,1); +plot(k, Z(1,:), 'xk');%X coord of clutter measurements +if(~isempty(zTrue)) + plot(k, zTrue(1,:), 'ok'); +end +plot(k, zTrue(1,:), 'xk');%X coord of true measurement +if(~isempty(X_k)) + plot(k, X_k(1,:), 'om'); +end +subplot(2,1,2); +plot(k, Z(2,:), 'xk');%Y coord of clutter measurements +if(~isempty(zTrue)) + plot(k, zTrue(2,:), 'ok'); +end +if(~isempty(X_k)) + plot(k, X_k(2,:), 'om'); +end + +%OSPA error metric plot +if(CALCULATE_OSPA_METRIC == 1) + figure(3); + clf; + hold on; + plot(metric_history, 'x-b'); + + axis([0 100 0 cutoff_c]); + xlabel('Simulation step'); + ylabel('OSPA error metric (higher is worse)'); + title('OSPA error metric for this test'); +end \ No newline at end of file diff --git a/GM_PHD_Update.m b/GM_PHD_Update.m new file mode 100644 index 0000000..61c2fbc --- /dev/null +++ b/GM_PHD_Update.m @@ -0,0 +1,133 @@ +%GM_PHD_Update +%Matlab code by Bryan Clarke b.clarke@acfr.usyd.edu.au + +%This file performs a PHD filter update on the targets +%This is basically a brute-force Kalman update of every target with every +%measurement and creating a new target from the update results. + +s = sprintf('Step 4: Performing update.'); +disp(s); + +%Set up matrices for post-update filter state +w_k = zeros(1, numTargets_Jk_k_minus_1 * size(Z, 2) + numTargets_Jk_k_minus_1); +m_k = zeros(4, numTargets_Jk_k_minus_1 * size(Z, 2) + numTargets_Jk_k_minus_1); +P_k = zeros(size(Pk_k_minus_1,1), size(Pk_k_minus_1,1) * (numTargets_Jk_k_minus_1 * size(Z, 2) + numTargets_Jk_k_minus_1)); +score_k = zeros(1, numTargets_Kk_k_minus_1 * size(Z,2) + numTargets_Kk_k_minus_1); +%First we assume that we failed to detect all targets. +%We scale all weights by probability of missed detection +%We already did the prediction step for these so their position & +%covariance will have been updated. What remains is to rescale their +%weight. 
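+%(Illustrative example, not from the original comments: with prob_detection = 0.98,
+%a predicted component of weight 0.5 keeps only 0.01 of its weight under the
+%missed-detection hypothesis; the detection hypotheses created further down carry
+%the rest.)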
+for j = 1:numTargets_Jk_k_minus_1 + w_k(j) = (1 - prob_detection) * wk_k_minus_1(j); + m_k(:,j) = mk_k_minus_1(:,j); + P_range = calculateDataRange4(j); + newP = Pk_k_minus_1(:,P_range); + P_k(:,P_range) = newP; +end + +%Now we update all combinations of matching every observation with every target in the +%map. +%First we expand the observation to include velocity (by simple v = delta_x/delta_t for +%delta_x = measured_position - position_before_prediction. +%That is, how fast would the target have needed +%to move to get from where it was to where it was seen now? +L = 0; +for zi = 1:size(Z,2) + L = L + 1;%L is used to calculate an offset from previous updates. It maxes out at L = number_of_measurements. A more elegant solution would be to set L = zi but I retain this method for compatibility with Vo&Ma + if(VERBOSE == 1) + s = sprintf('Matching targets against measurement %d', zi); + disp(s); + end + + for j = 1:numTargets_Jk_k_minus_1 + + m_j = mk_k_minus_1(:,j); + %Augment the measurement of position with calculated velocity + thisZ = Z(:,zi);%This consists of only the observed position. But we need to extract the equivalent velocity observation + prevX = mk_k_minus_1_before_prediction(1:2,j);%Get the old (pre-prediction) position of the target + thisV = (thisZ - prevX) / dt;%velocity = dx / dt. Since Z and x are 2d, V = [Vx Vy] + thisZ = [thisZ; thisV];%So we pretend to observe velocity as well as position + + %If this is the first time a target has been observed, it will have + %no velocity stored. + %Therefore it will be missing a prediction update, which will + %impact both its position and its covariance. + thisIndex = L * numTargets_Jk_k_minus_1 + j; + + old_P_range = calculateDataRange4(j);%Returns 4 columns + new_P_range = 4 * L * numTargets_Jk_k_minus_1 + old_P_range; + S_range = calculateDataRange4(j); + + %Recalculate weight. + %weightDataRange is used to control which dimensions we want to + %reweight over. In my experience, reweighting over all four + %produces unacceptably low weights most of the time, so we reweight + %over 2D. 
+ w_new = prob_detection * wk_k_minus_1(j) * mvnpdf(thisZ(weightDataRange), eta(weightDataRange,j), S(weightDataRange,S_range(weightDataRange)));%Hoping normpdf is the function I need + w_k(thisIndex) = w_new; + + %Update mean + delta = thisZ - eta(:,j); + K_range = calculateDataRange4(j); + m_new = m_j + K(:,K_range) * delta; + m_k(:,thisIndex) = m_new; + + %Update covariance + P_new = P_k_k(:,old_P_range); + P_k(:,new_P_range) = P_new; + + if(VERBOSE == 1) + s1 = sprintf('Observation: %3.4f %3.4f %3.4f %3.4f', thisZ(1), thisZ(2), thisZ(3), thisZ(4)); + disp(s1); + thisEta = eta(:,j); + s2 = sprintf('Expected Obs: %3.4f %3.4f %3.4f %3.4f', thisEta(1), thisEta(2), thisEta(3), thisEta(4)); + disp(s2); + + s1 = sprintf('Before Update: %3.4f %3.4f %3.4f %3.4f %3.4f %3.4f', m_k(1), m_k(2), m_k(3), m_k(4)); + s2 = sprintf('After Update: %3.4f %3.4f %3.4f %3.4f', m_new(1), m_new(2), m_new(3), m_new(4)); + s3 = sprintf('True State: %3.4f %3.4f %3.4f %3.4f', simTarget1State(1), simTarget1State(2), simTarget1Vel(1), simTarget1Vel(2)); + disp(s1); + disp(s2); + disp(s3); + end + + end + + %Sum up weights for use in reweighting + weight_tally = 0; + for i = 1:numTargets_Jk_k_minus_1 + thisIndex = L * numTargets_Jk_k_minus_1 + i; + weight_tally = weight_tally + w_k(thisIndex); + end + + %Recalculate weights + if(VERBOSE == 1) + s = sprintf('Calculating new weights for observation %d', zi); + disp(s); + end + for j = 1:numTargets_Jk_k_minus_1 + old_weight = w_k(L * numTargets_Jk_k_minus_1 + j); + measZ = [Z(1,zi), Z(2,zi)]; + new_weight = old_weight / (clutter_intensity(measZ) + weight_tally);%Normalise + + w_k(L * numTargets_Jk_k_minus_1 + j) = new_weight; + + S_range = calculateDataRange4(j);%Data range for S, which is 4x4 matrix + if(VERBOSE == 1) + s = sprintf('\tNew target %d: Matching old target %d at %3.4f %3.4f to measurement %d at %3.4f %3.4f. Prob detection %3.4f. Old target weight %3.8f New target weight %3.8f Normalised new target weight: %3.8f', L * numTargets_Jk_k_minus_1 + j, j, mk_k_minus_1(1, j), mk_k_minus_1(2, j), zi, measZ(1), measZ(2), mvnpdf(Z(1:2,zi), eta(1:2,j), S(1:2,S_range(1:2))), wk_k_minus_1(j), old_weight, new_weight); + disp(s); + end + end + +end +numTargets_Jk = L * numTargets_Jk_k_minus_1 + numTargets_Jk_k_minus_1; + +if(VERBOSE == 1) + for j = 1:numTargets_Jk + thisPos = m_k(:,j); + thisW = w_k(j); + s = sprintf('Target %d: %3.4f %3.4f %3.4f %3.4f, Weight %3.9f', j, thisPos(1), thisPos(2), thisPos(3), thisPos(4), thisW); + disp(s); + end +end \ No newline at end of file diff --git a/Hungarian.m b/Hungarian.m new file mode 100644 index 0000000..cc4f4ce --- /dev/null +++ b/Hungarian.m @@ -0,0 +1,281 @@ +%This code is available online at http://ba-ngu.vo-au.com/vo/OSPA_for_Tracks.zip +function [Matching,Cost] = Hungarian(Perf) +% +% [MATCHING,COST] = Hungarian_New(WEIGHTS) +% +% A function for finding a minimum edge weight matching given a MxN Edge +% weight matrix WEIGHTS using the Hungarian Algorithm. +% +% An edge weight of Inf indicates that the pair of vertices given by its +% position have no adjacent edge. +% +% MATCHING return a MxN matrix with ones in the place of the matchings and +% zeros elsewhere. 
+% +% COST returns the cost of the minimum matching + +% Written by: Alex Melin 30 June 2006 + + + % Initialize Variables + Matching = zeros(size(Perf)); + +% Condense the Performance Matrix by removing any unconnected vertices to +% increase the speed of the algorithm + + % Find the number in each column that are connected + num_y = sum(~isinf(Perf),1); + % Find the number in each row that are connected + num_x = sum(~isinf(Perf),2); + + % Find the columns(vertices) and rows(vertices) that are isolated + x_con = find(num_x~=0); + y_con = find(num_y~=0); + + % Assemble Condensed Performance Matrix + P_size = max(length(x_con),length(y_con)); + P_cond = zeros(P_size); + P_cond(1:length(x_con),1:length(y_con)) = Perf(x_con,y_con); + if isempty(P_cond) + Cost = 0; + return + end + + % Ensure that a perfect matching exists + % Calculate a form of the Edge Matrix + Edge = P_cond; + Edge(P_cond~=Inf) = 0; + % Find the deficiency(CNUM) in the Edge Matrix + cnum = min_line_cover(Edge); + + % Project additional vertices and edges so that a perfect matching + % exists + Pmax = max(max(P_cond(P_cond~=Inf))); + P_size = length(P_cond)+cnum; + P_cond = ones(P_size)*Pmax; + P_cond(1:length(x_con),1:length(y_con)) = Perf(x_con,y_con); + +%************************************************* +% MAIN PROGRAM: CONTROLS WHICH STEP IS EXECUTED +%************************************************* + exit_flag = 1; + stepnum = 1; + while exit_flag + switch stepnum + case 1 + [P_cond,stepnum] = step1(P_cond); + case 2 + [r_cov,c_cov,M,stepnum] = step2(P_cond); + case 3 + [c_cov,stepnum] = step3(M,P_size); + case 4 + [M,r_cov,c_cov,Z_r,Z_c,stepnum] = step4(P_cond,r_cov,c_cov,M); + case 5 + [M,r_cov,c_cov,stepnum] = step5(M,Z_r,Z_c,r_cov,c_cov); + case 6 + [P_cond,stepnum] = step6(P_cond,r_cov,c_cov); + case 7 + exit_flag = 0; + end + end + +% Remove all the virtual satellites and targets and uncondense the +% Matching to the size of the original performance matrix. +Matching(x_con,y_con) = M(1:length(x_con),1:length(y_con)); +Cost = sum(sum(Perf(Matching==1))); + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% STEP 1: Find the smallest number of zeros in each row +% and subtract that minimum from its row +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +function [P_cond,stepnum] = step1(P_cond) + + P_size = length(P_cond); + + % Loop throught each row + for ii = 1:P_size + rmin = min(P_cond(ii,:)); + P_cond(ii,:) = P_cond(ii,:)-rmin; + end + + stepnum = 2; + +%************************************************************************** +% STEP 2: Find a zero in P_cond. If there are no starred zeros in its +% column or row start the zero. 
Repeat for each zero +%************************************************************************** + +function [r_cov,c_cov,M,stepnum] = step2(P_cond) + +% Define variables + P_size = length(P_cond); + r_cov = zeros(P_size,1); % A vector that shows if a row is covered + c_cov = zeros(P_size,1); % A vector that shows if a column is covered + M = zeros(P_size); % A mask that shows if a position is starred or primed + + for ii = 1:P_size + for jj = 1:P_size + if P_cond(ii,jj) == 0 && r_cov(ii) == 0 && c_cov(jj) == 0 + M(ii,jj) = 1; + r_cov(ii) = 1; + c_cov(jj) = 1; + end + end + end + +% Re-initialize the cover vectors + r_cov = zeros(P_size,1); % A vector that shows if a row is covered + c_cov = zeros(P_size,1); % A vector that shows if a column is covered + stepnum = 3; + +%************************************************************************** +% STEP 3: Cover each column with a starred zero. If all the columns are +% covered then the matching is maximum +%************************************************************************** + +function [c_cov,stepnum] = step3(M,P_size) + + c_cov = sum(M,1); + if sum(c_cov) == P_size + stepnum = 7; + else + stepnum = 4; + end + +%************************************************************************** +% STEP 4: Find a noncovered zero and prime it. If there is no starred +% zero in the row containing this primed zero, Go to Step 5. +% Otherwise, cover this row and uncover the column containing +% the starred zero. Continue in this manner until there are no +% uncovered zeros left. Save the smallest uncovered value and +% Go to Step 6. +%************************************************************************** +function [M,r_cov,c_cov,Z_r,Z_c,stepnum] = step4(P_cond,r_cov,c_cov,M) + +P_size = length(P_cond); + +zflag = 1; +while zflag + % Find the first uncovered zero + row = 0; col = 0; exit_flag = 1; + ii = 1; jj = 1; + while exit_flag + if P_cond(ii,jj) == 0 && r_cov(ii) == 0 && c_cov(jj) == 0 + row = ii; + col = jj; + exit_flag = 0; + end + jj = jj + 1; + if jj > P_size; jj = 1; ii = ii+1; end + if ii > P_size; exit_flag = 0; end + end + + % If there are no uncovered zeros go to step 6 + if row == 0 + stepnum = 6; + zflag = 0; + Z_r = 0; + Z_c = 0; + else + % Prime the uncovered zero + M(row,col) = 2; + % If there is a starred zero in that row + % Cover the row and uncover the column containing the zero + if sum(find(M(row,:)==1)) ~= 0 + r_cov(row) = 1; + zcol = find(M(row,:)==1); + c_cov(zcol) = 0; + else + stepnum = 5; + zflag = 0; + Z_r = row; + Z_c = col; + end + end +end + +%************************************************************************** +% STEP 5: Construct a series of alternating primed and starred zeros as +% follows. Let Z0 represent the uncovered primed zero found in Step 4. +% Let Z1 denote the starred zero in the column of Z0 (if any). +% Let Z2 denote the primed zero in the row of Z1 (there will always +% be one). Continue until the series terminates at a primed zero +% that has no starred zero in its column. Unstar each starred +% zero of the series, star each primed zero of the series, erase +% all primes and uncover every line in the matrix. Return to Step 3. 
+%************************************************************************** + +function [M,r_cov,c_cov,stepnum] = step5(M,Z_r,Z_c,r_cov,c_cov) + + zflag = 1; + ii = 1; + while zflag + % Find the index number of the starred zero in the column + rindex = find(M(:,Z_c(ii))==1); + if rindex > 0 + % Save the starred zero + ii = ii+1; + % Save the row of the starred zero + Z_r(ii,1) = rindex; + % The column of the starred zero is the same as the column of the + % primed zero + Z_c(ii,1) = Z_c(ii-1); + else + zflag = 0; + end + + % Continue if there is a starred zero in the column of the primed zero + if zflag == 1; + % Find the column of the primed zero in the last starred zeros row + cindex = find(M(Z_r(ii),:)==2); + ii = ii+1; + Z_r(ii,1) = Z_r(ii-1); + Z_c(ii,1) = cindex; + end + end + + % UNSTAR all the starred zeros in the path and STAR all primed zeros + for ii = 1:length(Z_r) + if M(Z_r(ii),Z_c(ii)) == 1 + M(Z_r(ii),Z_c(ii)) = 0; + else + M(Z_r(ii),Z_c(ii)) = 1; + end + end + + % Clear the covers + r_cov = r_cov.*0; + c_cov = c_cov.*0; + + % Remove all the primes + M(M==2) = 0; + +stepnum = 3; + +% ************************************************************************* +% STEP 6: Add the minimum uncovered value to every element of each covered +% row, and subtract it from every element of each uncovered column. +% Return to Step 4 without altering any stars, primes, or covered lines. +%************************************************************************** + +function [P_cond,stepnum] = step6(P_cond,r_cov,c_cov) +a = find(r_cov == 0); +b = find(c_cov == 0); +minval = min(min(P_cond(a,b))); + +P_cond(find(r_cov == 1),:) = P_cond(find(r_cov == 1),:) + minval; +P_cond(:,find(c_cov == 0)) = P_cond(:,find(c_cov == 0)) - minval; + +stepnum = 4; + +function cnum = min_line_cover(Edge) + + % Step 2 + [r_cov,c_cov,M,stepnum] = step2(Edge); + % Step 3 + [c_cov,stepnum] = step3(M,length(Edge)); + % Step 4 + [M,r_cov,c_cov,Z_r,Z_c,stepnum] = step4(Edge,r_cov,c_cov,M); + % Calculate the deficiency + cnum = length(Edge)-sum(r_cov)-sum(c_cov); \ No newline at end of file diff --git a/README.txt b/README.txt new file mode 100644 index 0000000..8fbde79 --- /dev/null +++ b/README.txt @@ -0,0 +1,49 @@ +GM_PHD_Filter +Version 1.09, 13th December 2013 + +Matlab code by Bryan Clarke b.clarke@acfr.usyd.edu.au with: +- some Kalman filter update code by Tim Bailey, taken from his website http://www-personal.acfr.usyd.edu.au/tbailey/software/ +- error_ellipse by AJ Johnson, taken from Matlab Central http://www.mathworks.com.au/matlabcentral/fileexchange/4705-errorellipse +Algorithm by Ba-Ngu Vo & Wing-Kin Ma in: +B.-N. Vo, W.-K. Ma, "The Gaussian Mixture Probability Hypothesis Density Filter", IEEE Transactions on Signal Processing, Vol 54, No. 11, November 2006, pp4091-4104 + +This is an implementation of a gaussian mixture probability hypothesis density filter (GM-PHD) for a simulated tracking problem. The problem specification is given in the above paper - in summary, two targets move through the environment, there is a lot of clutter on the measurement, and about halfway through a third target spawns off one of the two targets. + +I made a few changes, either because I couldn't understand how Vo&Ma did it, or because I wanted to make it closer to my target problem. I extended the measurement vector to include both target position AND velocity (the filter they describe tracks position and velocity but only observes position). 
Velocity is observed as just being dx/dt, change in position over time, between this new observation and the previous position of this target. Targets are either birthed or spawned depending on which initialisation weight function would give a higher weight; they are given the appropriate initialisation covariance, but apart from this there is no other difference in instantiation. Vo & Ma's state estimation involves repeatedly outputting targets that have a rounded weight greater than 1; I have this off by default but it can be turned on by setting OUTPUT_MULTIPLE_HIGH_WEIGHT_TARGETS and VERBOSE to 1 in GM_PHD_Initialisation. + +I've also implemented the Extended Kalman Filter version specified by Vo & Ma, but not the EKF simulation. To switch from linear KF to EKF, set USE_KF in GM_PHD_Initialisation to 1. Everything runs out of the same file, GM_PHD_Filter. The EKF uses either totally separate or totally identical files to the linear version. I considered intermingling the EKF code into the KF using a bunch of IF statements to make maintenance easier, but I was worried this would come at the cost of readability. Therefore the only place you will see IF(USE_EKF == 1) is in GM_PHD_Filter. + +I've also included Ba-Ngu Vo's implementation of the Optimal Subpattern Assignment (OSPA) metric proposed by Schuhmacher et al in +Schuhmacher, D.; Ba-Tuong Vo; Ba-Ngu Vo, "A Consistent Metric for Performance Evaluation of Multi-Object Filters," Signal Processing, IEEE Transactions on , vol.56, no.8, pp.3447,3457, Aug. 2008 +This uses +- ospa_dist by Ba-Ngu Vo, taken from http://ba-ngu.vo-au.com/vo/OSPA_for_Tracks.zip +- Hungarian by Alex Melin, also taken from http://ba-ngu.vo-au.com/vo/OSPA_for_Tracks.zip + +The files are mostly implemented as scripts. The main file is GM_PHD_Filter. The structure is pretty straightforward, as it mostly just calls the other scripts in a big loop. + +For the linear Kalman filter, there are two files that initialise everything - GM_PHD_Initialise and GM_PHD_Simulate_Initialise. For the extended Kalman filter, there is GM_PHD_Initialisation, GM_EKF_PHD_Simulate_Initialise, and GM_EKF_PHD_Initialise_Jacobains. Changing values in these will change how the filter runs. + +The other files are pretty easy to follow if you have a look in GM_PHD_Filter.m + +There isn't much output on the console by default, most of it is in the graphs. Have a read of GM_PHD_Simulate_Plot to understand and edit what the different coloured dots mean. In summary, black X's are measurements, black X's with a circle around them are measurements corresponding to true targets, red/green/blue dots are true target positions, magenta circles are tracked targets, cyan triangles are the targets at the current filter iteration. If you set DRAW_VELOCITIES to 1 in GM_PHD_Initialisation, velocity arrows will be drawn on. The arrow shows direction, the length indicates magnitude (long arrow = high speed). + +To see more filter output, set VERBOSE to 1 in GM_PHD_Initialisation. I can't remember exactly what this prints out, but it's more information about the inner working of the filter, and you can use it to add your own output statements wherever you want. I cut most of mine out because they were cluttering up the code. + +To see the performance metric, set CALCULATE_OSPA_METRIC to 1 in GM_PHD_Initialisation. It appears as an extra line graph. High values on the line graph = high error value. + +See license.txt for licensing information. + +See ReleaseNotes.txt for changes between versions. 
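+A minimal quick-start, assuming the default settings: add the GM_PHD_Filter folder to the Matlab path and run
+    >> GM_PHD_Filter
+from the Matlab prompt. Figures 1 and 2 show the simulated targets and measurements, and figure 3 shows the OSPA metric if CALCULATE_OSPA_METRIC is set to 1.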
+ +===A few remarks on the Kalman filter vs the extended Kalman filter=== +If you've gotten this far I'm going to assume you understand what a KF and EKF are, so I am going to focus on the specifics of my implementation. The extended Kalman filter uses Jacobians to linearise the nonlinear prediction/observation models. These Jacobians can be calculated analytically (by hand or using a computer program; I use Wolfram Alpha as it's very easy to use) or numerically (very easy to code up but it runs much slower). I've included code for both but commented out the ones not being used. You can play around with or replace them as you see fit; see GM_EKF_PHD_Initialise_Jacobians and Test_Jacobian_Calculation. + +The nonlinear observation model h() is used to map from the state space [x; y; vx; vy] to the measurement space [r; theta], primarily to generate expected observations of a target at a certain position. We also need to have an inverse observation inv_h() to perform the reverse, mapping from the measurement space [r; theta] to the state space [x; y; vx; vy] (but since we don't observe the velocity, it doesn't output velocity information and this has to be added later or not at all). The observation model is used in GM_EKF_PHD_Simulate_Measurements and GM_EKF_PHD_Construct_Update_Components. The inverse observation model is used in GM_EKF_PHD_Update, GM_EKF_PHD_Simulate_Plot and GM_PHD_Create_Birth. + +If you want to replace these with different observation models, you're going to have to change these occurrences as well as the respective Jacobians. + +I haven't implemented the EKF simulation given in Vo & Ma; both the EKF and KF run on the KF simulation. The prediction covariance Q given for this works well for the linear KF but not for the EKF. I set Q to eye(4) and it works; this value could probably use some further investigation and tweaking but this code is only a simulation, and a real application will probably have more grounded information on what values to set here. + +Just in case someone doesn't want to use the EKF version, I'm including a zip file containing the linear-Kalman-filter-only version of the GM-PHD (version 1.05). + +-- Bryan Clarke, b.clarke@acfr.usyd.edu.au \ No newline at end of file diff --git a/ReleaseNotes.txt b/ReleaseNotes.txt new file mode 100644 index 0000000..457f4f3 --- /dev/null +++ b/ReleaseNotes.txt @@ -0,0 +1,108 @@ +Version 1.10, 7th January 2014 +==Bug Fixes== +Bug fixes in GM_PHD_Create_Birth +As pointed out by Hannes Pessentheiner, in GM_PHD_Create_Birth the weight of the existing target was not being used to scale the weight of spawned targets in the static-spawn case. This is fixed now. +________________________________________________________________________________________________________________ +Version 1.09, 13th December 2013 +== Bug Fixes== +Ba-Ngu Vo has kindly allowed me to use his OSPA calculation code ospa_dist.m so CalculateOSPAMetric has been removed and replaced with ospa_dist.m. This uses Hungarian.m by Alex Melin, to whom I am also much obliged. This code is also available online at http://ba-ngu.vo-au.com/vo/OSPA_for_Tracks.zip +Apologies for putting up CalculateOSPAMetric while it was still buggy, I'll rush less in future. + +I've also made this change to the linear-KF-only version 1.05 that is also included in the zip, and documented it in the respective GM_PHD_Filter.m. For versioning convenience it's now version 1.05a. I hope that this is the last update I need to make to it. 
+
+________________________________________________________________________________________________________________
+Version 1.08, 12th December 2013
+==Bug Fixes==
+These fixes focus on the OSPA metric calculation. It's much less buggy now than it was before, but still not entirely correct. Use with caution. Hat tip to Abdulkadir Eryildirim for identifying something amiss.
+
+Bug fixes in GM_PHD_Calculate_Performance_Metric
+Corrected it so that data is passed from simTarget3History instead of simTarget2History.
+
+Bug fixes in CalculateOSPAMetric
+Added a loop to include the cost for elements of Y that are not matched in X (i.e. for (m+1:n)).
+________________________________________________________________________________________________________________
+Version 1.07, 10th December 2013
+==Bug Fixes==
+Bug fixes in GM_PHD_Create_Birth and GM_EKF_PHD_Create_Birth
+Restructured so that static targets will be added from the first measurement, not waiting until the second like the moving targets.
+________________________________________________________________________________________________________________
+Version 1.06, 2nd December 2013
+==Bug Fixes==
+Increased the speed of target 3 so that it more closely matches the trajectory in Vo & Ma's example. It's still not perfect (it's a straight line rather than curving) but it's better than it was before.
+
+==New Features==
+Extended Kalman Filter
+I have added an implementation of the extended Kalman filter algorithm from Vo & Ma's paper, for use with a simulated range-bearing sensor. The simulation is still the same, with a simulated range-bearing sensor at the origin pointing along the X-axis. The linear KF and EKF can be switched between by setting/clearing USE_EKF in GM_PHD_Initialisation. This will control which scripts are called in GM_PHD_Filter. The structure of the algorithm is still the same.
+
+Just in case someone doesn't want to use the EKF and wants just the linear version, I'll include a zipped version of the linear-Kalman-filter-only GM-PHD filter.
+
+Velocity arrows
+Velocity arrows can be drawn on the simulated target plot using quiver() in both the linear and EKF versions. To turn this on and off, set/clear DRAW_VELOCITIES in GM_PHD_Initialisation.
+________________________________________________________________________________________________________________
+Version 1.05, 21 November 2013
+Hat-tip to Abdulkadir Eryildirim and Kevin Nickels for identifying bugs and missing features in the code.
+
+==Bug Fixes==
+Bug fixes in GM_PHD_Predict_Birth
+Fixed a few comments and a print statement that had birth and spawn mixed up; this would have caused crashes when VERBOSE was set to 1.
+Changed variable v in GM_PHD_Create_Birth to thisV in case I ever use a variable named v elsewhere.
+
+Bug fixes in GM_PHD_Update
+Changed variable V in GM_PHD_Update to thisV. V was already being used for the monitored volume. This may have been degrading filter performance.
+
+Simulation plots
+Removed some extra plots from GM_PHD_Simulate_Initialise and GM_PHD_Simulate_Plot; I haven't kept them updated and they're not that useful.
+simTargetState3 was being predicted forward twice in GM_PHD_Simulate_Measurement. Now it is only happening once. All this means is that it moves at half the speed it did previously.
+
+==New Features==
+State Estimation in GM_PHD_Estimate
+Added the extra display in GM_PHD_Estimate to better match Vo & Ma's algorithm.
I'm not sure whether this is really necessary; it doesn't change filter performance, just output, but I want to match the pseudocode. This is controlled by setting the control variable OUTPUT_MULTIPLE_HIGH_WEIGHT_TARGETS in GM_PHD_Initialisation to 1. VERBOSE also needs to be set to 1 to see the output.
+
+Optimal Subpattern Assignment (OSPA) metric implementation
+I have added an implementation of the OSPA metric described by Schuhmacher et al in
+"A Consistent Metric for Performance Evaluation of Multi-Object Filters," Signal Processing, IEEE Transactions on , vol.56, no.8, pp.3447,3457, Aug. 2008
+It's not necessary for the Vo & Ma PHD filter to work but provides a nice way of visualising filter performance. It's enabled in GM_PHD_Initialisation by setting CALCULATE_OSPA_METRIC to 1. The calculated metric appears as a line graph. Be advised, it slows down the filter.
+
+==Comments==
+Added comments in unifpdf_2d.m about the risks involved in using my implementation of unifpdf_2d outside of its current operating conditions. It returns zero outside the region of interest, when it should probably return 100% clutter likelihood for measurements outside that region. This should have no effect with the current simulator, as all measurements are inside the region and the state estimate is never predicted or updated to be outside the region, but if you were using this code in another environment, or had a different/buggy prediction/update equation, this can and will cause problems for you. You do not want the weight of a target increasing when it is outside the observable range of the sensor.
+
+Added comments in GM_PHD_Simulate_Initialise regarding nClutter; it would probably be better to use poissrnd(50) rather than a constant 50, but the latter works well enough for this simulation.
+
+________________________________________________________________________________________________________________
+Version 1.04, 12 September 2013
+Bug fixes in GM_PHD_Prune
+Two issues kindly pointed out by Hannes Pessentheiner. The variable i was changed to thisI so that the correct values of P_k are accessed, and inv was changed to the slash operator for inverting the covariance.
+
+Bug fix in GM_PHD_Construct_Update_Components
+inv changed to the slash operator.
+
+Version 1.03, 2 September 2013
+Spawning
+Previously a spawned target had its weight calculated using the spawn function but was functionally identical to a birthed target in all other respects. Now a spawned target is stored in its own data structure, with the spawn covariance defined in Vo&Ma.
+Additionally, there was an error in the calculation of the spawn weight (it was not taking into account the weight of the target that the new target was spawned from); this has been fixed. These changes are to GM_PHD_Create_Birth, GM_PHD_Predict_Birth and GM_PHD_Estimate (to store the weights of the estimated features).
+
+Plotting
+Measurements corresponding to real targets now have a black 'o' as well as the black 'x' that all measurements have.
+An extra control flag, PLOT_ALL_MEASUREMENTS, has been added in GM_PHD_Initialisation.m to select whether to wipe the plot of all measurements or retain them. This allows all historical measurements and error ellipses to be seen at once, but can make the plot cluttered.
+
+VERBOSE
+A pause has been added to the bottom of the main loop when VERBOSE is set to true; this allows all the output data to be read easily.
+
+Version 1.02, 27 August 2013
+Renaming and deleting
+simNoisyMeasurementHistory is now simMeasurementHistory.
+
+Deleted a couple of variables that weren't being used.
+
+Documentation
+Added a few comments to hopefully make it clearer how to modify this code for different applications.
+
+Version 1.01, 31 July 2013
+Spawning
+Previously, all targets were treated as being birthed. Now they can also be spawned. The only difference is that a different weight is assigned to them on initialisation (the birth weight is high at certain fixed birth locations and low further away; the spawning weight is high at the location of existing targets and low further away). Whichever weight function gives a higher weight is the one assigned in GM_PHD_Create_Birth.m. Currently, there is no difference in prediction for spawned and birthed targets.
+This brings the implementation better in line with the Vo&Ma paper.
+This does not seem to have too great an impact on filter performance; target 3 seemed to be created fairly reliably as a fork off target 1 even though it should theoretically have been assigned a low birth weight. This may make the filter more reliable if the problem changes.
+
+Verbose
+Setting VERBOSE = 1 in GM_PHD_Initialisation produces more onscreen information from more parts of the filter. The added output blocks are generally at the bottom of the scripts so as not to clutter them up too much.
+
+Version 1.00, 22 July 2013
+Original version
\ No newline at end of file diff --git a/Test_Jacobian_Calculation.m b/Test_Jacobian_Calculation.m new file mode 100644 index 0000000..0ac703c --- /dev/null +++ b/Test_Jacobian_Calculation.m @@ -0,0 +1,80 @@
+%% Test_Jacobian_Calculation
+%Last modified 3rd September 2013
+
+%Tests observation Jacobians, and contains the working for calculating the
+%observation Jacobian by hand. To see performance differences
+%between the numerical and analytical implementations, use Run and Time. They
+%should all produce the same output, but GM_EKF_PHD_Numerical_Jacobian should
+%run slower.
+
+% The observation Jacobian is the derivative of the observation function with respect to the state.
+% Each matrix cell is the partial derivative of one observation function by one state variable.
+% Each matrix row is one observation function.
+% Each matrix column is one state variable.
+% Equivalently, each matrix row is the gradient of one observation function with respect to the state.
+% If state is X and observation function is F,
+% Jacobian H = dF/dX
+% If F = [f_r; f_theta]
+% and X = [xL; yL; vxL; vyL] or just [x; y; vx; vy]
+% and the sensor state is xR, yR, hR
+% H1 = [df_r/dx, df_r/dy, df_r/dvx, df_r/dvy]
+% H2 = [df_theta/dx, df_theta/dy, df_theta/dvx, df_theta/dvy]
+
+%f_r = ((xL - xR)^2 + (yL - yR)^2) ^ 1/2
+%f_theta = atan(delta_y/delta_x)
+%d(f_r) / dx = (xL - xR) / range (where range = hypot(xL - xR, yL - yR))
+%d(f_r) / dy = (yL - yR) / range
+%d(f_theta) / dx = (yR - yL) / range^2
+%d(f_theta) / dy = (xL - xR) / range^2
+
+% We use Wolfram Alpha to differentiate things.
+% "Derivative of ((x - a)^2 + (y - b)^2)^1/2 with respect to x" gives
+% (x - a) / ((x - a)^2 + (y - b)^2)^1/2
+% "Derivative of ((x - a)^2 + (y - b)^2)^1/2 with respect to y" gives
+% (y - b) / ((x - a)^2 + (y - b)^2)^1/2
+%"Derivative of atan2((y-a),(x-b)) with respect to x" gives
+%(a - y) / ((x - a)^2 + (y - b)^2) <--------- NOTE that this is (a - y) on
+%numerator, not (y - a)
+%"Derivative of atan2((y-a),(x-b)) with respect to y" gives
+%(x - b) / ((x - a)^2 + (y - b)^2)
+
+% When the target is straight ahead/to the side and moving towards/away, there is zero change in bearing and high change in range,
+% e.g. for a target straight ahead along the X-axis, H1 = [1 0 0 0].
+% And derivatives of anything with respect to vx and vy will be 0.
+%
+% What values do we expect?
+% When the sensor is pointing along the X-axis,
+% - any movement on the X-axis should have maximum change in range and minimum change in bearing
+% - any movement on the Y-axis should have less change in range than X (due to triangle inequality) and maximum change in bearing
+% - any change in vx or vy should have no effect
+% So when the radar is at 0,0,0 and the target is at [1, 0]
+% H1 = [high low 0 0]
+% H2 = [low high 0 0]
+
+GM_EKF_PHD_Initialise_Jacobians;%Initialise observation model h
+%Initialise sensor position
+xS = 0;%X position in m. X is positive right
+yS = 0;%Y position in m. Y is positive up
+hS = 0;%Heading in radians. 0 is to the right, positive is left.
+%Initialise landmark position. Ideally, don't put this at the exact same
+%position as the sensor
+xL = 1.5;%X position in m
+yL = 2.8;%Y position in m
+vxL = 0;%X velocity in m/s
+vyL = 0;%Y velocity in m/s
+
+x_sensor = [xS, yS, hS];
+x_landmark = [xL, yL, vxL, vyL];
+
+%Anonymous function
+calculate_Jacobian_H = @(xR,yR,xL,yL)[ [(xL - xR) / hypot(xL - xR,yL - yR) , (yL - yR) / hypot(xL - xR,yL - yR), 0, 0]; [ (yR - yL)/((xL - xR)^2 + (yL - yR)^2), (xL - xR)/((xL - xR)^2 + (yL - yR)^2), 0, 0]; [0 0 1 0]; [0 0 0 1] ];
+
+J1 = Calculate_Jacobian_H(xS, yS, xL, yL);%Analytical function
+J2 = calculate_Jacobian_H(xS,yS,xL,yL);%Anonymous analytical function
+J3 = GM_EKF_PHD_Numerical_Jacobian(h, x_sensor, x_landmark);%Numerical function
+J1
+J2
+J3
+
+
diff --git a/error_ellipse.m b/error_ellipse.m new file mode 100644 index 0000000..232bd84 --- /dev/null +++ b/error_ellipse.m @@ -0,0 +1,277 @@
+function h=error_ellipse(varargin)
+% ERROR_ELLIPSE - plot an error ellipse, or ellipsoid, defining confidence region
+% ERROR_ELLIPSE(C22) - Given a 2x2 covariance matrix, plot the
+% associated error ellipse, at the origin. It returns a graphics handle
+% of the ellipse that was drawn.
+%
+% ERROR_ELLIPSE(C33) - Given a 3x3 covariance matrix, plot the
+% associated error ellipsoid, at the origin, as well as its projections
+% onto the three axes. Returns a vector of 4 graphics handles, for the
+% three ellipses (in the X-Y, Y-Z, and Z-X planes, respectively) and for
+% the ellipsoid.
+%
+% ERROR_ELLIPSE(C,MU) - Plot the ellipse, or ellipsoid, centered at MU,
+% a vector whose length should match that of C (which is 2x2 or 3x3).
+%
+% ERROR_ELLIPSE(...,'Property1',Value1,'Name2',Value2,...) sets the
+% values of specified properties, including:
+% 'C' - Alternate method of specifying the covariance matrix
+% 'mu' - Alternate method of specifying the ellipse (-oid) center
+% 'conf' - A value between 0 and 1 specifying the confidence interval.
+% the default is 0.5 which is the 50% error ellipse.
+% 'scale' - Allow the plot to be scaled to different units.
+% 'style' - A plotting style used to format ellipses.
+% 'clip' - specifies a clipping radius. Portions of the ellipse, -oid, +% outside the radius will not be shown. +% +% NOTES: C must be positive definite for this function to work properly. + +default_properties = struct(... + 'C', [], ... % The covaraince matrix (required) + 'mu', [], ... % Center of ellipse (optional) + 'conf', 0.5, ... % Percent confidence/100 + 'scale', 1, ... % Scale factor, e.g. 1e-3 to plot m as km + 'style', '', ... % Plot style + 'clip', inf); % Clipping radius + +if length(varargin) >= 1 & isnumeric(varargin{1}) + default_properties.C = varargin{1}; + varargin(1) = []; +end + +if length(varargin) >= 1 & isnumeric(varargin{1}) + default_properties.mu = varargin{1}; + varargin(1) = []; +end + +if length(varargin) >= 1 & isnumeric(varargin{1}) + default_properties.conf = varargin{1}; + varargin(1) = []; +end + +if length(varargin) >= 1 & isnumeric(varargin{1}) + default_properties.scale = varargin{1}; + varargin(1) = []; +end + +if length(varargin) >= 1 & ~ischar(varargin{1}) + error('Invalid parameter/value pair arguments.') +end + +prop = getopt(default_properties, varargin{:}); +C = prop.C; + +if isempty(prop.mu) + mu = zeros(length(C),1); +else + mu = prop.mu; +end + +conf = prop.conf; +scale = prop.scale; +style = prop.style; + +if conf <= 0 | conf >= 1 + error('conf parameter must be in range 0 to 1, exclusive') +end + +[r,c] = size(C); +if r ~= c | (r ~= 2 & r ~= 3) + error(['Don''t know what to do with ',num2str(r),'x',num2str(c),' matrix']) +end + +x0=mu(1); +y0=mu(2); + +% Compute quantile for the desired percentile +k = sqrt(qchisq(conf,r)); % r is the number of dimensions (degrees of freedom) + +hold_state = get(gca,'nextplot'); + +if r==3 & c==3 + z0=mu(3); + + % Make the matrix has positive eigenvalues - else it's not a valid covariance matrix! + if any(eig(C) <=0) + error('The covariance matrix must be positive definite (it has non-positive eigenvalues)') + end + + % C is 3x3; extract the 2x2 matricies, and plot the associated error + % ellipses. They are drawn in space, around the ellipsoid; it may be + % preferable to draw them on the axes. + Cxy = C(1:2,1:2); + Cyz = C(2:3,2:3); + Czx = C([3 1],[3 1]); + + [x,y,z] = getpoints(Cxy,prop.clip); + h1=plot3(x0+k*x,y0+k*y,z0+k*z,prop.style);hold on + [y,z,x] = getpoints(Cyz,prop.clip); + h2=plot3(x0+k*x,y0+k*y,z0+k*z,prop.style);hold on + [z,x,y] = getpoints(Czx,prop.clip); + h3=plot3(x0+k*x,y0+k*y,z0+k*z,prop.style);hold on + + + [eigvec,eigval] = eig(C); + + [X,Y,Z] = ellipsoid(0,0,0,1,1,1); + XYZ = [X(:),Y(:),Z(:)]*sqrt(eigval)*eigvec'; + + X(:) = scale*(k*XYZ(:,1)+x0); + Y(:) = scale*(k*XYZ(:,2)+y0); + Z(:) = scale*(k*XYZ(:,3)+z0); + h4=surf(X,Y,Z); + colormap gray + alpha(0.3) + camlight + if nargout + h=[h1 h2 h3 h4]; + end +elseif r==2 & c==2 + % Make the matrix has positive eigenvalues - else it's not a valid covariance matrix! + if any(eig(C) <=0) + error('The covariance matrix must be positive definite (it has non-positive eigenvalues)') + end + + [x,y,z] = getpoints(C,prop.clip); + h1=plot(scale*(x0+k*x),scale*(y0+k*y),prop.style); + set(h1,'zdata',z+1) + if nargout + h=h1; + end +else + error('C (covaraince matrix) must be specified as a 2x2 or 3x3 matrix)') +end +%axis equal + +set(gca,'nextplot',hold_state); + +%--------------------------------------------------------------- +% getpoints - Generate x and y points that define an ellipse, given a 2x2 +% covariance matrix, C. z, if requested, is all zeros with same shape as +% x and y. 
+function [x,y,z] = getpoints(C,clipping_radius) + +n=100; % Number of points around ellipse +p=0:pi/n:2*pi; % angles around a circle + +[eigvec,eigval] = eig(C); % Compute eigen-stuff +xy = [cos(p'),sin(p')] * sqrt(eigval) * eigvec'; % Transformation +x = xy(:,1); +y = xy(:,2); +z = zeros(size(x)); + +% Clip data to a bounding radius +if nargin >= 2 + r = sqrt(sum(xy.^2,2)); % Euclidian distance (distance from center) + x(r > clipping_radius) = nan; + y(r > clipping_radius) = nan; + z(r > clipping_radius) = nan; +end + +%--------------------------------------------------------------- +function x=qchisq(P,n) +% QCHISQ(P,N) - quantile of the chi-square distribution. +if nargin<2 + n=1; +end + +s0 = P==0; +s1 = P==1; +s = P>0 & P<1; +x = 0.5*ones(size(P)); +x(s0) = -inf; +x(s1) = inf; +x(~(s0|s1|s))=nan; + +for ii=1:14 + dx = -(pchisq(x(s),n)-P(s))./dchisq(x(s),n); + x(s) = x(s)+dx; + if all(abs(dx) < 1e-6) + break; + end +end + +%--------------------------------------------------------------- +function F=pchisq(x,n) +% PCHISQ(X,N) - Probability function of the chi-square distribution. +if nargin<2 + n=1; +end +F=zeros(size(x)); + +if rem(n,2) == 0 + s = x>0; + k = 0; + for jj = 0:n/2-1; + k = k + (x(s)/2).^jj/factorial(jj); + end + F(s) = 1-exp(-x(s)/2).*k; +else + for ii=1:numel(x) + if x(ii) > 0 + F(ii) = quadl(@dchisq,0,x(ii),1e-6,0,n); + else + F(ii) = 0; + end + end +end + +%--------------------------------------------------------------- +function f=dchisq(x,n) +% DCHISQ(X,N) - Density function of the chi-square distribution. +if nargin<2 + n=1; +end +f=zeros(size(x)); +s = x>=0; +f(s) = x(s).^(n/2-1).*exp(-x(s)/2)./(2^(n/2)*gamma(n/2)); + +%--------------------------------------------------------------- +function properties = getopt(properties,varargin) +%GETOPT - Process paired optional arguments as 'prop1',val1,'prop2',val2,... +% +% getopt(properties,varargin) returns a modified properties structure, +% given an initial properties structure, and a list of paired arguments. +% Each argumnet pair should be of the form property_name,val where +% property_name is the name of one of the field in properties, and val is +% the value to be assigned to that structure field. +% +% No validation of the values is performed. 
+% +% EXAMPLE: +% properties = struct('zoom',1.0,'aspect',1.0,'gamma',1.0,'file',[],'bg',[]); +% properties = getopt(properties,'aspect',0.76,'file','mydata.dat') +% would return: +% properties = +% zoom: 1 +% aspect: 0.7600 +% gamma: 1 +% file: 'mydata.dat' +% bg: [] +% +% Typical usage in a function: +% properties = getopt(properties,varargin{:}) + +% Process the properties (optional input arguments) +prop_names = fieldnames(properties); +TargetField = []; +for ii=1:length(varargin) + arg = varargin{ii}; + if isempty(TargetField) + if ~ischar(arg) + error('Propery names must be character strings'); + end + f = find(strcmp(prop_names, arg)); + if length(f) == 0 + error('%s ',['invalid property ''',arg,'''; must be one of:'],prop_names{:}); + end + TargetField = arg; + else + % properties.(TargetField) = arg; % Ver 6.5 and later only + properties = setfield(properties, TargetField, arg); % Ver 6.1 friendly + TargetField = ''; + end +end +if ~isempty(TargetField) + error('Property names and values must be specified in pairs.'); +end diff --git a/ospa_dist.m b/ospa_dist.m new file mode 100644 index 0000000..c0b0b8f --- /dev/null +++ b/ospa_dist.m @@ -0,0 +1,72 @@ +%This code is available online at http://ba-ngu.vo-au.com/vo/OSPA_for_Tracks.zip +function [dist varargout]= ospa_dist(X,Y,c,p) +% +%B. Vo. 26/08/2007 +%Compute Schumacher distance between two finite sets X and Y +%as described in the reference +%[1] D. Schuhmacher, B.-T. Vo, and B.-N. Vo, "A consistent metric for performance evaluation in multi-object +%filtering," IEEE Trans. Signal Processing, Vol. 56, No. 8 Part 1, pp. 3447– 3457, 2008. +% +%Inputs: X,Y- matrices of column vectors +% c - cut-off parameter (see [1] for details) +% p - p-parameter for the metric (see [1] for details) +%Output: scalar distance between X and Y +%Note: the Euclidean 2-norm is used as the "base" distance on the region +% + +if nargout ~=1 & nargout ~=3 + error('Incorrect number of outputs'); +end + +if isempty(X) & isempty(Y) + dist = 0; + + if nargout == 3 + varargout(1)= {0}; + varargout(2)= {0}; + end + + return; +end + +if isempty(X) | isempty(Y) + dist = c; + + if nargout == 3 + varargout(1)= {0}; + varargout(2)= {c}; + end + + return; +end + + +%Calculate sizes of the input point patterns +n = size(X,2); +m = size(Y,2); + +%Calculate cost/weight matrix for pairings - fast method with vectorization +XX= repmat(X,[1 m]); +YY= reshape(repmat(Y,[n 1]),[size(Y,1) n*m]); +D = reshape(sqrt(sum((XX-YY).^2)),[n m]); +D = min(c,D).^p; + +% %Calculate cost/weight matrix for pairings - slow method with for loop +% D= zeros(n,m); +% for j=1:m +% D(:,j)= sqrt(sum( ( repmat(Y(:,j),[1 n])- X ).^2 )'); +% end +% D= min(c,D).^p; + +%Compute optimal assignment and cost using the Hungarian algorithm +[assignment,cost]= Hungarian(D); + +%Calculate final distance +dist= ( 1/max(m,n)*( c^p*abs(m-n)+ cost ) ) ^(1/p); + +%Output components if called for in varargout +if nargout == 3 + varargout(1)= {(1/max(m,n)*cost)^(1/p)}; + varargout(2)= {(1/max(m,n)*c^p*abs(m-n))^(1/p)}; +end + \ No newline at end of file diff --git a/unifpdf_2d.m b/unifpdf_2d.m new file mode 100644 index 0000000..dd15faf --- /dev/null +++ b/unifpdf_2d.m @@ -0,0 +1,42 @@ +%unifpdf_2d +%Last modified 21 November 2013 +%Matlab code by Bryan Clarke b.clarke@acfr.usyd.edu.au + +%A 2D uniform PDF function. +%xrange and yrange are inclusive limits of region +%z is a 2D sample point. +%This is used in the clutter function (i.e. we assume a uniform +%distribution of clutter). 
+%This implementation is fairly standard for a uniform distribution in X and +%Y. This can be problematic for our application if care is not taken. +%With a uniform distribution, we assume there is ZERO probability outside of the region of interest. +%This implies that any measurement outside the region of interest has ZERO probability of +%clutter, when it should have 100% probability of clutter (i.e. we cannot make good measurements +%outside that region). +%For this implementation we never have any measurements outside the region +%of interest, and the state estimates are never predicted/updated outside of that region +%but this might not be the case with other simulations or actual data. BEWARE! +function val = unifpdf_2d(xrange, yrange, z) + minX = xrange(1); + maxX = xrange(2); + minY = yrange(1); + maxY = yrange(2); + evalX = z(1); + evalY = z(2); + if(evalX < minX) + val = 0; + return; + elseif(evalX > maxX) + val = 0; + return + elseif (evalY < minY) + val = 0; + return; + elseif(evalY > maxY) + val = 0; + return; + else + val = 1 / ((maxX - minX) * (maxY - minY)); + end + +end \ No newline at end of file
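For illustration, a minimal usage sketch of unifpdf_2d; the region limits and the sample point below are made up, not values from the simulation.

xrange = [-1000 1000];                %inclusive X limits of the region of interest
yrange = [-1000 1000];                %inclusive Y limits of the region of interest
z = [250; 100];                       %a 2D measurement [x; y] inside the region
val = unifpdf_2d(xrange, yrange, z);  %returns 1/(2000*2000) here; points outside the region return 0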