(Notes) (7) AMCL Package Source Code Analysis | The Particle Filter Model and the pf Folder (Part 3)
The previous installment finished the analysis of pf.cpp, which fuses the Augmented-MCL and KLD-sampling algorithms. Particles are initialized in several ways: by drawing from a Gaussian pdf with pf_pdf_gaussian_sample(pdf), via an initialization-model callback of type pf_init_model_fn_t, or via pf->random_pose_fn.
Poses are inserted and stored in a kd-tree, and that same kd-tree also realizes the histogram that KLD-sampling speaks of: the histogram's k bins are represented by the tree's leaf nodes.
This installment focuses on the role the kd-tree plays in the particle filter model (pf_kdtree.cpp), the relationship between the probability density function (pdf) and eigenvalue decomposition (eig3.cpp, pf_vector.cpp), how random poses are generated from the pdf (pf_pdf.cpp), and the correspondence between the kd-tree and the histogram.
1. The probability density function (pdf)
1.1 The Gaussian pdf struct
Creating a probability density function is not hard. pf_pdf.h defines a Gaussian pdf struct, pf_pdf_gaussian_t, holding the mean (a vector), the covariance (a matrix), and the covariance decomposed into a rotation matrix times a diagonal matrix.
// Gaussian PDF info
typedef struct
{
// Mean, covariance and inverse covariance
pf_vector_t x;
pf_matrix_t cx;
//pf_matrix_t cxi;
double cxdet;
// Decomposed covariance matrix (rotation * diagonal)
pf_matrix_t cr;
pf_vector_t cd;
// A random number generator
//gsl_rng *rng;
} pf_pdf_gaussian_t;
1.2 Eigenvalue decomposition -> covariance decomposition: Householder reduction + the QL algorithm
So how is the covariance matrix decomposed? The concrete steps are in pf_vector.cpp.
// Decompose a covariance matrix [a] into a rotation matrix [r] and a diagonal
// matrix [d] such that a = r d r^T.
void pf_matrix_unitary(pf_matrix_t *r, pf_matrix_t *d, pf_matrix_t a)
{
int i, j;
double aa[3][3]; // input matrix A
double eval[3]; // eigenvalues
double evec[3][3]; // eigenvectors (in columns)
for (i = 0; i < 3; i++)
{
for (j = 0; j < 3; j++)
{
aa[i][j] = a.m[i][j];
}
}
// Compute eigenvectors/values
eigen_decomposition(aa,evec,eval);
*d = pf_matrix_zero();
for (i = 0; i < 3; i++)
{
d->m[i][i] = eval[i];
for (j = 0; j < 3; j++)
{
r->m[i][j] = evec[i][j];
}
}
return;
}
For the eigenvalue decomposition itself, see the description in eig3.h: a symmetric matrix A is decomposed into a matrix V whose columns are the eigenvectors and the corresponding eigenvalues in d.
/* Eigen-decomposition for symmetric 3x3 real matrices.
Public domain, copied from the public domain Java library JAMA. */
#ifndef _eig_h
/* Symmetric matrix A => eigenvectors in columns of V, corresponding
eigenvalues in d. */
void eigen_decomposition(double A[3][3], double V[3][3], double d[3]);
#endif
The concrete implementation is in eig3.cpp, starting with the Householder reduction:
/* Eigen decomposition code for symmetric 3x3 matrices, copied from the public
domain Java Matrix library JAMA. */
#define n 3
static double hypot2(double x, double y) {
return sqrt(x*x+y*y);
}
// Symmetric Householder reduction to tridiagonal form.
static void tred2(double V[n][n], double d[n], double e[n]) {
// This is derived from the Algol procedures tred2 by
// Bowdler, Martin, Reinsch, and Wilkinson, Handbook for
// Auto. Comp., Vol.ii-Linear Algebra, and the corresponding
// Fortran subroutine in EISPACK.
int i,j,k;
double f,g,h,hh;
for (j = 0; j < n; j++) {
d[j] = V[n-1][j];
}
// Householder reduction to tridiagonal form.
for (i = n-1; i > 0; i--) {
// Scale to avoid under/overflow.
double scale = 0.0;
double h = 0.0;
for (k = 0; k < i; k++) {
scale = scale + fabs(d[k]);
}
if (scale == 0.0) {
e[i] = d[i-1];
for (j = 0; j < i; j++) {
d[j] = V[i-1][j];
V[i][j] = 0.0;
V[j][i] = 0.0;
}
} else {
// Generate Householder vector.
for (k = 0; k < i; k++) {
d[k] /= scale;
h += d[k] * d[k];
}
f = d[i-1];
g = sqrt(h);
if (f > 0) {
g = -g;
}
e[i] = scale * g;
h = h - f * g;
d[i-1] = f - g;
for (j = 0; j < i; j++) {
e[j] = 0.0;
}
// Apply similarity transformation to remaining columns.
for (j = 0; j < i; j++) {
f = d[j];
V[j][i] = f;
g = e[j] + V[j][j] * f;
for (k = j+1; k <= i-1; k++) {
g += V[k][j] * d[k];
e[k] += V[k][j] * f;
}
e[j] = g;
}
f = 0.0;
for (j = 0; j < i; j++) {
e[j] /= h;
f += e[j] * d[j];
}
hh = f / (h + h);
for (j = 0; j < i; j++) {
e[j] -= hh * d[j];
}
for (j = 0; j < i; j++) {
f = d[j];
g = e[j];
for (k = j; k <= i-1; k++) {
V[k][j] -= (f * e[k] + g * d[k]);
}
d[j] = V[i-1][j];
V[i][j] = 0.0;
}
}
d[i] = h;
}
// Accumulate transformations.
for (i = 0; i < n-1; i++) {
V[n-1][i] = V[i][i];
V[i][i] = 1.0;
h = d[i+1];
if (h != 0.0) {
for (k = 0; k <= i; k++) {
d[k] = V[k][i+1] / h;
}
for (j = 0; j <= i; j++) {
g = 0.0;
for (k = 0; k <= i; k++) {
g += V[k][i+1] * V[k][j];
}
for (k = 0; k <= i; k++) {
V[k][j] -= g * d[k];
}
}
}
for (k = 0; k <= i; k++) {
V[k][i+1] = 0.0;
}
}
for (j = 0; j < n; j++) {
d[j] = V[n-1][j];
V[n-1][j] = 0.0;
}
V[n-1][n-1] = 1.0;
e[0] = 0.0;
}
Then the symmetric tridiagonal QL algorithm:
// Symmetric tridiagonal QL algorithm.
static void tql2(double V[n][n], double d[n], double e[n]) {
// This is derived from the Algol procedures tql2, by
// Bowdler, Martin, Reinsch, and Wilkinson, Handbook for
// Auto. Comp., Vol.ii-Linear Algebra, and the corresponding
// Fortran subroutine in EISPACK.
int i,j,m,l,k;
double g,p,r,dl1,h,f,tst1,eps;
double c,c2,c3,el1,s,s2;
for (i = 1; i < n; i++) {
e[i-1] = e[i];
}
e[n-1] = 0.0;
f = 0.0;
tst1 = 0.0;
eps = pow(2.0,-52.0);
for (l = 0; l < n; l++) {
// Find small subdiagonal element
tst1 = MAX(tst1,fabs(d[l]) + fabs(e[l]));
m = l;
while (m < n) {
if (fabs(e[m]) <= eps*tst1) {
break;
}
m++;
}
// If m == l, d[l] is an eigenvalue,
// otherwise, iterate.
if (m > l) {
int iter = 0;
do {
iter = iter + 1; // (Could check iteration count here.)
// Compute implicit shift
g = d[l];
p = (d[l+1] - g) / (2.0 * e[l]);
r = hypot2(p,1.0);
if (p < 0) {
r = -r;
}
d[l] = e[l] / (p + r);
d[l+1] = e[l] * (p + r);
dl1 = d[l+1];
h = g - d[l];
for (i = l+2; i < n; i++) {
d[i] -= h;
}
f = f + h;
// Implicit QL transformation.
p = d[m];
c = 1.0;
c2 = c;
c3 = c;
el1 = e[l+1];
s = 0.0;
s2 = 0.0;
for (i = m-1; i >= l; i--) {
c3 = c2;
c2 = c;
s2 = s;
g = c * e[i];
h = c * p;
r = hypot2(p,e[i]);
e[i+1] = s * r;
s = e[i] / r;
c = p / r;
p = c * d[i] - s * g;
d[i+1] = h + s * (c * g + s * d[i]);
// Accumulate transformation.
for (k = 0; k < n; k++) {
h = V[k][i+1];
V[k][i+1] = s * V[k][i] + c * h;
V[k][i] = c * V[k][i] - s * h;
}
}
p = -s * s2 * c3 * el1 * e[l] / dl1;
e[l] = s * p;
d[l] = c * p;
// Check for convergence.
} while (fabs(e[l]) > eps*tst1);
}
d[l] = d[l] + f;
e[l] = 0.0;
}
// Sort eigenvalues and corresponding vectors.
for (i = 0; i < n-1; i++) {
k = i;
p = d[i];
for (j = i+1; j < n; j++) {
if (d[j] < p) {
k = j;
p = d[j];
}
}
if (k != i) {
d[k] = d[i];
d[i] = p;
for (j = 0; j < n; j++) {
p = V[j][i];
V[j][i] = V[j][k];
V[j][k] = p;
}
}
}
}
Combining the two routines gives the complete eigenvalue decomposition:
// Eigenvalue decomposition: eigenvectors end up in V, eigenvalues in d
void eigen_decomposition(double A[n][n], double V[n][n], double d[n]) {
int i,j;
double e[n];
for (i = 0; i < n; i++) {
for (j = 0; j < n; j++) {
V[i][j] = A[i][j];
}
}
tred2(V, d, e);
tql2(V, d, e);
}
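As a quick sanity check, the factorization can be verified by reconstructing A from V and d, since A = V·diag(d)·V^T for a symmetric matrix. Below is a minimal sketch; the test harness and the test matrix are mine, not part of AMCL (compile it together with eig3.cpp):
#include <math.h>
#include <stdio.h>

void eigen_decomposition(double A[3][3], double V[3][3], double d[3]);

int main(void)
{
  // An illustrative symmetric, covariance-like test matrix
  double A[3][3] = {{4.0, 1.0, 0.0}, {1.0, 3.0, 0.5}, {0.0, 0.5, 2.0}};
  double V[3][3], d[3];
  int i, j, k;
  eigen_decomposition(A, V, d);
  // Reconstruct V * diag(d) * V^T and compare it to A entry by entry
  for (i = 0; i < 3; i++)
    for (j = 0; j < 3; j++)
    {
      double r = 0.0;
      for (k = 0; k < 3; k++)
        r += V[i][k] * d[k] * V[j][k];
      printf("A[%d][%d] = %f, reconstructed = %f\n", i, j, A[i][j], r);
    }
  return 0;
}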
1.3 Creating the Gaussian pdf
With the covariance decomposition covered, back to the function that actually creates the pdf:
// Create a Gaussian pdf from a mean x and covariance cx; the covariance is
// decomposed, and pdf->cd ends up holding the square roots of the diagonal
// entries, i.e. the standard deviations along the principal axes
// Create a gaussian pdf
pf_pdf_gaussian_t *pf_pdf_gaussian_alloc(pf_vector_t x, pf_matrix_t cx)
{
pf_matrix_t cd;
pf_pdf_gaussian_t *pdf;
pdf = calloc(1, sizeof(pf_pdf_gaussian_t));
pdf->x = x;
pdf->cx = cx;
// Decompose the covariance matrix into a rotation
// matrix and a diagonal matrix, such that cx = r d r^T; see
// pf_matrix_unitary(pf_matrix_t *r, pf_matrix_t *d, pf_matrix_t a) above.
// pdf->cr receives the rotation matrix, cd the diagonal matrix.
pf_matrix_unitary(&pdf->cr, &cd, pdf->cx);
pdf->cd.v[0] = sqrt(cd.m[0][0]);
pdf->cd.v[1] = sqrt(cd.m[1][1]);
pdf->cd.v[2] = sqrt(cd.m[2][2]);
// Initialize the random number generator
srand48(++pf_pdf_seed);
return pdf;
}
1.4 Generating random poses from the pdf
The next step is to use the pdf to generate random poses, which is the whole point of building it. pf_pdf.cpp defines pf_pdf_gaussian_sample:
// Generate a sample from the pdf.
pf_vector_t pf_pdf_gaussian_sample(pf_pdf_gaussian_t *pdf)
{
int i, j;
pf_vector_t r;
pf_vector_t x;
// Generate a random vector
for (i = 0; i < 3; i++)
{
//pdf->cd.v[i] (the std. dev. along axis i) is passed in as sigma; the call returns sigma * x2 * sqrt(-2.0*log(w)/w)
r.v[i] = pf_ran_gaussian(pdf->cd.v[i]);
}
for (i = 0; i < 3; i++)
{
x.v[i] = pdf->x.v[i];
for (j = 0; j < 3; j++)
x.v[i] += pdf->cr.m[i][j] * r.v[j];
}
return x;//the sampled pose: mean plus rotated, scaled noise
}
The helper pf_ran_gaussian draws from a zero-mean Gaussian with standard deviation sigma:
// Draw randomly from a zero-mean Gaussian distribution, with standard
// deviation sigma.
// We use the polar form of the Box-Muller transformation, explained here:
// http://www.taygeta.com/random/gaussian.html
double pf_ran_gaussian(double sigma)
{
double x1, x2, w, r;
do
{
do { r = drand48(); } while (r==0.0);
x1 = 2.0 * r - 1.0;
do { r = drand48(); } while (r==0.0);
x2 = 2.0 * r - 1.0;
w = x1*x1 + x2*x2;
} while(w > 1.0 || w==0.0);
return(sigma * x2 * sqrt(-2.0*log(w)/w));
}
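Putting the pieces together, typical usage looks like the sketch below. The mean and covariance values are made up for illustration; pf_pdf_gaussian_free is the matching cleanup call from pf_pdf.h:
// Sketch: draw random poses from a Gaussian pdf (illustrative values only)
pf_vector_t mean = pf_vector_zero();
pf_matrix_t cov = pf_matrix_zero();
cov.m[0][0] = 0.25; // var(x), i.e. sigma_x = 0.5 m
cov.m[1][1] = 0.25; // var(y), i.e. sigma_y = 0.5 m
cov.m[2][2] = 0.07; // var(theta) in rad^2
pf_pdf_gaussian_t *pdf = pf_pdf_gaussian_alloc(mean, cov);
for (int i = 0; i < 100; i++)
{
  pf_vector_t pose = pf_pdf_gaussian_sample(pdf);
  // pose is a random sample centered on mean and shaped by cov
}
pf_pdf_gaussian_free(pdf);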
2. The kd-tree data structure
2.1 Creating a kd-tree to represent the sample set
The relevant definitions are easy to find in the header pf_kdtree.h. First, the info for a tree node:
// Info for a node in the tree
typedef struct pf_kdtree_node
{
// Leaf flag and depth in the tree
int leaf, depth;
// Pivot dimension and value
int pivot_dim;
double pivot_value;
// The key for this node: 3 elements; in AMCL it is the binned pose
int key[3];
// The value for this node: in AMCL, the accumulated weight of the poses in this bin
double value;
// The cluster label (leaf nodes)
int cluster;
// Child nodes
struct pf_kdtree_node *children[2];
} pf_kdtree_node_t;
Next, the tree itself:
// A kd tree
typedef struct
{
// Cell size: the histogram bin size in each dimension
double size[3];
// The root node of the tree
pf_kdtree_node_t *root;
// The number of nodes in the tree
int node_count, node_max_count;
pf_kdtree_node_t *nodes;
// The number of leaf nodes in the tree
int leaf_count;
} pf_kdtree_t;
Initializing a kd-tree:
// Create a tree
pf_kdtree_t *pf_kdtree_alloc(int max_size)
{
pf_kdtree_t *self;
self = calloc(1, sizeof(pf_kdtree_t));
self->size[0] = 0.50;
self->size[1] = 0.50;
self->size[2] = (10 * M_PI / 180);
self->root = NULL;
self->node_count = 0;
self->node_max_count = max_size;
self->nodes = calloc(self->node_max_count, sizeof(pf_kdtree_node_t));
self->leaf_count = 0;
return self;
}
2.2 Inserting a new pose into the kd-tree
As AMCL's core data structure, the kd-tree's most important duty is absorbing new poses:
// Insert a pose into the tree.
void pf_kdtree_insert(pf_kdtree_t *self, pf_vector_t pose, double value)
{
int key[3];
key[0] = floor(pose.v[0] / self->size[0]);
key[1] = floor(pose.v[1] / self->size[1]);
key[2] = floor(pose.v[2] / self->size[2]);
self->root = pf_kdtree_insert_node(self, NULL, self->root, key, value);
return;
}
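For example, with the default cell sizes set in pf_kdtree_alloc, the pose (1.2 m, -0.3 m, 0.5 rad) maps to the key (floor(1.2/0.5), floor(-0.3/0.5), floor(0.5/0.1745)) = (2, -1, 2). Every pose falling into the same 0.5 m × 0.5 m × 10° cell gets the same key, so its weight accumulates in the same node.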
Inserting a new pose is, in tree terms, inserting a new node. When the new key lands on an existing leaf with a different key, the code computes max_split to find the dimension with the largest key difference, takes the mean of the two key values in that dimension as the pivot, and then routes keys below the pivot into the left subtree and keys at or above it into the right subtree.
// Insert a node into the tree
pf_kdtree_node_t *pf_kdtree_insert_node(pf_kdtree_t *self, pf_kdtree_node_t *parent,
pf_kdtree_node_t *node, int key[], double value)
{
int i;
int split, max_split;
// If the node doesnt exist yet...
if (node == NULL)
{
assert(self->node_count < self->node_max_count);
node = self->nodes + self->node_count++;
memset(node, 0, sizeof(pf_kdtree_node_t));
node->leaf = 1;
if (parent == NULL)
node->depth = 0;
else
node->depth = parent->depth + 1;
for (i = 0; i < 3; i++)
node->key[i] = key[i];
node->value = value;
self->leaf_count += 1;
}
// If the node exists, and it is a leaf node...
else if (node->leaf)
{
// If the keys are equal, increment the value
if (pf_kdtree_equal(self, key, node->key))
{
node->value += value;
}
// The keys are not equal, so split this node
else
{
// Find the dimension with the largest variance and do a mean
// split
max_split = 0;
node->pivot_dim = -1;
for (i = 0; i < 3; i++)
{
//the split dimension is chosen by the key difference |key[i] - node->key[i]|
split = abs(key[i] - node->key[i]);
if (split > max_split)
{
max_split = split;//largest key difference so far
node->pivot_dim = i;//the dimension along which to split
}
}
assert(node->pivot_dim >= 0);
node->pivot_value = (key[node->pivot_dim] + node->key[node->pivot_dim]) / 2.0;//the pivot: the mean of the two key values
if (key[node->pivot_dim] < node->pivot_value)
{
node->children[0] = pf_kdtree_insert_node(self, node, NULL, key, value);//left subtree
node->children[1] = pf_kdtree_insert_node(self, node, NULL, node->key, node->value);//right subtree
}
else
{
node->children[0] = pf_kdtree_insert_node(self, node, NULL, node->key, node->value);//left subtree
node->children[1] = pf_kdtree_insert_node(self, node, NULL, key, value);//right subtree
}
node->leaf = 0;
self->leaf_count -= 1;
}
}
// If the node exists, and it has children...
else
{
assert(node->children[0] != NULL);
assert(node->children[1] != NULL);
if (key[node->pivot_dim] < node->pivot_value)
pf_kdtree_insert_node(self, node, node->children[0], key, value);//left subtree
else
pf_kdtree_insert_node(self, node, node->children[1], key, value);//right subtree
}
return node;
}
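It is the leaf count that lets the tree double as a KLD-sampling histogram. Below is a minimal sketch of the bookkeeping; the poses and weights are made up for illustration:
// Sketch: leaf_count counts the occupied histogram bins (illustrative values)
pf_kdtree_t *tree = pf_kdtree_alloc(1000);
pf_vector_t p;
p.v[0] = 0.1; p.v[1] = 0.2; p.v[2] = 0.0;
pf_kdtree_insert(tree, p, 0.5); // first occupied bin: leaf_count == 1
p.v[0] = 0.3;                   // still in the same 0.5 m cell -> same key
pf_kdtree_insert(tree, p, 0.5); // weight accumulates, leaf_count stays 1
p.v[0] = 1.7;                   // a different cell -> the leaf is split
pf_kdtree_insert(tree, p, 1.0); // leaf_count is now 2
pf_kdtree_free(tree);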
2.3 Looking up a pose in the kd-tree
One more question remains: how is a node found?
// Recursive node search
pf_kdtree_node_t *pf_kdtree_find_node(pf_kdtree_t *self, pf_kdtree_node_t *node, int key[])
{
if (node->leaf)
{
//printf("find : leaf %p %d %d %d\n", node, node->key[0], node->key[1], node->key[2]);
// If the keys are the same...
if (pf_kdtree_equal(self, key, node->key))
return node;
else
return NULL;
}
else
{
//printf("find : brch %p %d %f\n", node, node->pivot_dim, node->pivot_value);
assert(node->children[0] != NULL);
assert(node->children[1] != NULL);
// If the keys are different...
if (key[node->pivot_dim] < node->pivot_value)
return pf_kdtree_find_node(self, node->children[0], key);
else
return pf_kdtree_find_node(self, node->children[1], key);
}
return NULL;
}
And how is the probability (weight) estimate for a given pose computed?
// Determine the probability estimate for the given pose. TODO: this
// should do a kernel density estimate rather than a simple histogram.
double pf_kdtree_get_prob(pf_kdtree_t *self, pf_vector_t pose)
{
int key[3];
pf_kdtree_node_t *node;
key[0] = floor(pose.v[0] / self->size[0]);
key[1] = floor(pose.v[1] / self->size[1]);
key[2] = floor(pose.v[2] / self->size[2]);
node = pf_kdtree_find_node(self, self->root, key);
if (node == NULL)
return 0.0;
return node->value;
}
2.4 Labeling poses with cluster ids
And how is the cluster label for a given pose determined?
// Determine the cluster label for the given pose
int pf_kdtree_get_cluster(pf_kdtree_t *self, pf_vector_t pose)
{
int key[3];
pf_kdtree_node_t *node;
key[0] = floor(pose.v[0] / self->size[0]);
key[1] = floor(pose.v[1] / self->size[1]);
key[2] = floor(pose.v[2] / self->size[2]);
node = pf_kdtree_find_node(self, self->root, key);
if (node == NULL)
return -1;
return node->cluster;
}
Labeling the nodes themselves:
// Recursively label nodes in this cluster
void pf_kdtree_cluster_node(pf_kdtree_t *self, pf_kdtree_node_t *node, int depth)
{
int i;
int nkey[3];
pf_kdtree_node_t *nnode;
// Visit the 3 x 3 x 3 = 27 neighboring cells: i is decoded into an
// offset in {-1, 0, +1} for each of the three dimensions
for (i = 0; i < 3 * 3 * 3; i++)
{
nkey[0] = node->key[0] + (i / 9) - 1;
nkey[1] = node->key[1] + ((i % 9) / 3) - 1;
nkey[2] = node->key[2] + ((i % 9) % 3) - 1;
nnode = pf_kdtree_find_node(self, self->root, nkey);
if (nnode == NULL)
continue;
assert(nnode->leaf);
// This node already has a label; skip it. The label should be
// consistent, however.
if (nnode->cluster >= 0)
{
assert(nnode->cluster == node->cluster);
continue;
}
// Label this node and recurse
nnode->cluster = node->cluster;
pf_kdtree_cluster_node(self, nnode, depth + 1);
}
return;
}
All the leaves in the tree are put into a queue and then labeled:
// Cluster the leaves in the tree
void pf_kdtree_cluster(pf_kdtree_t *self)
{
int i;
int queue_count, cluster_count;
pf_kdtree_node_t **queue, *node;
queue_count = 0;
queue = calloc(self->node_count, sizeof(queue[0]));
// Put all the leaves in a queue
for (i = 0; i < self->node_count; i++)
{
//iterate over all the nodes
node = self->nodes + i;
if (node->leaf)
{
node->cluster = -1;
assert(queue_count < self->node_count);
queue[queue_count++] = node;
// TESTING; remove
assert(node == pf_kdtree_find_node(self, self->root, node->key));
}
}
cluster_count = 0;
// Do connected components for each node
while (queue_count > 0)
{
node = queue[--queue_count];
// If this node has already been labelled, skip it
if (node->cluster >= 0)
continue;
// Assign a label to this cluster
node->cluster = cluster_count++;
// Recursively label nodes in this cluster
pf_kdtree_cluster_node(self, node, 0);
}
free(queue);
return;
}
2.5 Sample -> cluster -> set statistics: mean and covariance
What is this labeling good for? pf.cpp's pf_cluster_stats gives the answer: it recomputes the statistics of an entire sample set. One kd-tree represents exactly one sample set, and the function ends up with the mean and the linear/angular covariance of each cluster and of the whole set.
// Re-compute the cluster statistics for a sample set
void pf_cluster_stats(pf_t *pf, pf_sample_set_t *set)
{
int i, j, k, cidx;
pf_sample_t *sample;
pf_cluster_t *cluster;
// Workspace
double m[4], c[2][2];
size_t count;
double weight;
//the kd-tree gets its labels here; the elements being clustered are the samples
// Cluster the samples
pf_kdtree_cluster(set->kdtree);
// Initialize cluster stats
set->cluster_count = 0;
for (i = 0; i < set->cluster_max_count; i++)
{
cluster = set->clusters + i;
cluster->count = 0;
cluster->weight = 0;
cluster->mean = pf_vector_zero();
cluster->cov = pf_matrix_zero();
for (j = 0; j < 4; j++)
cluster->m[j] = 0.0;
for (j = 0; j < 2; j++)
for (k = 0; k < 2; k++)
cluster->c[j][k] = 0.0;
}
// Initialize overall filter stats
count = 0;
weight = 0.0;
set->mean = pf_vector_zero();
set->cov = pf_matrix_zero();
for (j = 0; j < 4; j++)
m[j] = 0.0;
for (j = 0; j < 2; j++)
for (k = 0; k < 2; k++)
c[j][k] = 0.0;
//each cluster's mean and its linear/angular covariance are accumulated
//below from the samples' weights and poses
// Compute cluster stats
for (i = 0; i < set->sample_count; i++)
{
sample = set->samples + i;
//printf("%d %f %f %f\n", i, sample->pose.v[0], sample->pose.v[1], sample->pose.v[2]);
// Get the cluster label for this sample
cidx = pf_kdtree_get_cluster(set->kdtree, sample->pose);
assert(cidx >= 0);
if (cidx >= set->cluster_max_count)
continue;
if (cidx + 1 > set->cluster_count)
set->cluster_count = cidx + 1;
cluster = set->clusters + cidx;
cluster->count += 1;
cluster->weight += sample->weight;
count += 1;
weight += sample->weight;
// Compute mean
cluster->m[0] += sample->weight * sample->pose.v[0];
cluster->m[1] += sample->weight * sample->pose.v[1];
cluster->m[2] += sample->weight * cos(sample->pose.v[2]);
cluster->m[3] += sample->weight * sin(sample->pose.v[2]);
m[0] += sample->weight * sample->pose.v[0];
m[1] += sample->weight * sample->pose.v[1];
m[2] += sample->weight * cos(sample->pose.v[2]);
m[3] += sample->weight * sin(sample->pose.v[2]);
// Compute covariance in linear components
for (j = 0; j < 2; j++)
for (k = 0; k < 2; k++)
{
cluster->c[j][k] += sample->weight * sample->pose.v[j] * sample->pose.v[k];
c[j][k] += sample->weight * sample->pose.v[j] * sample->pose.v[k];
}
}
// Normalize
for (i = 0; i < set->cluster_count; i++)
{
cluster = set->clusters + i;
cluster->mean.v[0] = cluster->m[0] / cluster->weight;
cluster->mean.v[1] = cluster->m[1] / cluster->weight;
cluster->mean.v[2] = atan2(cluster->m[3], cluster->m[2]);
cluster->cov = pf_matrix_zero();
// Covariance in linear components
for (j = 0; j < 2; j++)
for (k = 0; k < 2; k++)
cluster->cov.m[j][k] = cluster->c[j][k] / cluster->weight -
cluster->mean.v[j] * cluster->mean.v[k];
// Covariance in angular components; I think this is the correct
// formula for circular statistics.
cluster->cov.m[2][2] = -2 * log(sqrt(cluster->m[2] * cluster->m[2] +
cluster->m[3] * cluster->m[3]));
//printf("cluster %d %d %f (%f %f %f)\n", i, cluster->count, cluster->weight,
//cluster->mean.v[0], cluster->mean.v[1], cluster->mean.v[2]);
//pf_matrix_fprintf(cluster->cov, stdout, "%e");
}
// Compute overall filter stats
set->mean.v[0] = m[0] / weight;
set->mean.v[1] = m[1] / weight;
set->mean.v[2] = atan2(m[3], m[2]);
// Covariance in linear components
for (j = 0; j < 2; j++)
for (k = 0; k < 2; k++)
set->cov.m[j][k] = c[j][k] / weight - set->mean.v[j] * set->mean.v[k];
// Covariance in angular components; I think this is the correct
// formula for circular statistics.
set->cov.m[2][2] = -2 * log(sqrt(m[2] * m[2] + m[3] * m[3]));
return;
}
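A note on the angular terms: headings cannot be averaged directly, since angles wrap around, so the code uses circular statistics. With C = Σ w·cos(θ) and S = Σ w·sin(θ) (accumulated in m[2] and m[3]), the mean heading is atan2(S, C). For the overall set, where the normalized weights sum to 1, R = sqrt(C² + S²) is the mean resultant length, and the angular variance is estimated as -2·ln(R): R close to 1 means the headings are tightly clustered (variance near 0), while R close to 0 means they are spread around the circle.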
Why do means and covariances get computed in so many places? Besides the use at the start, where a mean and covariance are passed in to build the pdf that initializes the particle poses, at least one further use is deciding whether the particles have converged. In pf.cpp:
int pf_update_converged(pf_t *pf)
{
int i;
pf_sample_set_t *set;
pf_sample_t *sample;
double total;
set = pf->sets + pf->current_set;
double mean_x = 0, mean_y = 0;
for (i = 0; i < set->sample_count; i++){
sample = set->samples + i;
mean_x += sample->pose.v[0];
mean_y += sample->pose.v[1];
}
//the mean of the whole sample set (in x and y)
mean_x /= set->sample_count;
mean_y /= set->sample_count;
for (i = 0; i < set->sample_count; i++){
sample = set->samples + i;
//each sample's deviation from the mean in x and y is compared against the filter's threshold to decide convergence
if(fabs(sample->pose.v[0] - mean_x) > pf->dist_threshold ||
fabs(sample->pose.v[1] - mean_y) > pf->dist_threshold){
set->converged = 0;
pf->converged = 0;
return 0;
}
}
set->converged = 1;
pf->converged = 1;
return 1;
}
2.6 The kd-tree as a histogram
Finally, how does the kd-tree express the histogram? The answer is in pf_update_resample in pf.cpp:
// Add sample to histogram(kdtree)
pf_kdtree_insert(set_b->kdtree, sample_b->pose, sample_b->weight);
// See if we have enough samples yet: pf_resample_limit is called with
// the tree's leaf count as the number of occupied bins
if (set_b->sample_count > pf_resample_limit(pf, set_b->kdtree->leaf_count))
break;
pf_resample_limit itself is also in pf.cpp:
// Compute the required number of samples, given that there are k bins
// with samples in them (k is the number of occupied histogram bins).
// This is taken directly from Fox et al.
int pf_resample_limit(pf_t *pf, int k)
{
double a, b, c, x;
int n;
// Return max_samples in case k is outside the expected range; this shouldn't
// happen, but it prevents any runtime errors
if (k < 1 || k > pf->max_samples)
return pf->max_samples;
// Return the cached value if it is valid (i.e. non-zero positive)
if (pf->limit_cache[k-1] > 0)
return pf->limit_cache[k-1];
if (k == 1)
{
pf->limit_cache[k-1] = pf->max_samples;
return pf->max_samples;
}
...
}
3. Summary
The kd-tree stores the particles of a sample set and supports insertion and lookup. In the KLD-sampling algorithm it plays the role of the histogram, with the number of leaf nodes standing in for the number of occupied bins k. The Gaussian pdf is built from an externally supplied mean and covariance; its job is to generate random poses and thereby initialize the sample set, though this is not the only way to initialize particle poses.
The next installment will look at amcl_node.cpp, where the main concerns are the handling of the initial pose, the laser data, and the coordinate-frame transforms, plus, of course, the ever-present particle filter pf.
Reposted from: 7.AMCL包源码分析 | 粒子滤波器模型与pf文件夹(三) - 知乎 (zhihu.com)