A Summary of KITTI Dataset Evaluation Methods

It is best to read this post all the way through first, to understand what needs to be modified and how the files are organized, before trying it yourself, because some code changes are involved. You can either take the original route of the .cpp code on a Linux system, or evaluate directly with Python code, which is more convenient and elegant.
1 Platform and Resources
On Windows I tried compiling with Visual Studio 2022, downloading and installing CMake, working from cmd, and so on, but I kept running into either missing header errors or a missing Boost installation; I will spare you the details. Only later did I realize that many of those "missing" packages ship with Linux by default. In the end I bought a year of an Ubuntu server on Tencent Cloud, which happened to be discounted for the Spring Festival promotion.
Links: (1) the version that is common on GitHub and (2) the one I mainly used later on; either works.
2 Compiling and Running
To compile, run the following commands in the directory that holds the .cpp and .h files, mainly following another author's write-up:
cmake .
make
g++ -O3 -DNDEBUG -o evaluate_object evaluate_object.cpp
Take it step by step; almost every step will throw some error. Whatever error it reports or package it says is missing, just search online for how to install that package on Ubuntu (I am a beginner too).
Once compilation succeeds, an executable file named evaluate_object (no extension) is generated, and all that is left is preparing the data (which can be done beforehand) and running it. Also, I only used the cmake . and make commands the first time; when recompiling later I found that the third command (the g++ line) alone is enough.
3 File Format and Directory Structure
Before running, we still need to prepare the data: labels and results.
3.1 Format
The labels are simply label_2 from the KITTI dataset; pick the N files you want to evaluate, e.g. 3769 of them. The format is as follows:
type truncated occluded alpha bbox dimensions location rotation_y
Pedestrian -1 -1 0.29 873.70 152.10 933.44 256.07 1.87 0.50 0.90 5.42 1.50 13.43 0.67
The prediction results have the following format (note that the last number is the score, which only predictions have):
type truncated occluded alpha bbox dimensions location rotation_y score
Pedestrian -1 -1 0.29 873.70 152.10 933.44 256.07 1.87 0.50 0.90 5.42 1.50 13.43 0.67 0.99
The detection format should be similar to the KITTI label format, whose 15 columns (plus the extra score column for results) represent:
Values  Name        Description
1       type        Describes the type of object: 'Car', 'Van', 'Truck', 'Pedestrian', 'Person_sitting', 'Cyclist', 'Tram', 'Misc' or 'DontCare'
1       truncated   Float from 0 (non-truncated) to 1 (truncated); detection results may simply use -1
1       occluded    Integer (0,1,2,3) indicating occlusion state: 0 = fully visible, 1 = partly occluded, 2 = largely occluded, 3 = unknown; detection results may simply use -1
1       alpha       Observation angle of object, ranging [-pi..pi]
4       bbox        2D bounding box of object in the image (0-based index): contains left, top, right, bottom pixel coordinates
3       dimensions  3D object dimensions: height, width, length (in meters)
3       location    3D object location x, y, z in camera coordinates (in meters)
1       rotation_y  Rotation ry around Y-axis in camera coordinates [-pi..pi]
1       score       Only for results: Float, indicating confidence in detection, needed for p/r curves, higher is better.
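As a quick illustration (everything below is a made-up example, not part of the devkit), one line in the 16-column result format can be written with a single fprintf whose columns mirror the fscanf call in evaluate_object.cpp shown later in this post:
#include <cstdio>

// Hypothetical example: write one detection in the 16-column KITTI result format.
// The frame name and all numbers are placeholders, not real model output.
int main() {
    FILE *fp = std::fopen("000001.txt", "w");  // one result file per frame
    if (!fp) return 1;
    // columns: type trunc occl alpha | x1 y1 x2 y2 | h w l | x y z | ry | score
    std::fprintf(fp, "%s %.2f %d %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f\n",
                 "Pedestrian", -1.0, -1, 0.29,
                 873.70, 152.10, 933.44, 256.07,
                 1.87, 0.50, 0.90,
                 5.42, 1.50, 13.43,
                 0.67, 0.99);
    std::fclose(fp);
    return 0;
}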
3.2 File Structure
evaluate_object              (the compiled executable)
evaluate_object.cpp
mail.h
label
  -- <frame_id>.txt          (ground-truth label files, one per frame)
  -- <frame_id>.txt
  -- ...
results
  -- data
     -- <frame_id>.txt       (detection result files, named the same as the labels)
     -- <frame_id>.txt
     -- ...
4 Running
./evaluate_object label results
If it succeeds, a plot folder and several .txt files are generated under the results directory; I usually just look at those .txt files to record the results.
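For reference, the per-frame label and result files are matched by frame index and use zero-padded six-digit names (e.g. 000123.txt). Below is a minimal sketch, not taken from the devkit, of how such paths can be assembled; the helper name framePath is my own:
#include <cstdio>
#include <cstdint>
#include <string>

// Hypothetical helper: build the ground-truth and result paths for one frame index.
std::string framePath(const std::string &dir, int32_t idx) {
    char name[16];
    std::snprintf(name, sizeof(name), "%06d.txt", (int)idx);  // zero-padded six-digit frame id
    return dir + "/" + name;
}

int main() {
    int32_t idx = 123;
    std::printf("%s\n", framePath("label", idx).c_str());         // label/000123.txt
    std::printf("%s\n", framePath("results/data", idx).c_str());  // results/data/000123.txt
    return 0;
}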
5 Code
See the code below (the beginning of evaluate_object.cpp):
#include<iostream>
#include<algorithm>
#include<stdio.h>
#include<math.h>
#include<vector>
#include<numeric>
#include<strings.h>
#include<assert.h>
#include<dirent.h>
#include<boost/numeric/ublas/matrix.hpp>
#include<boost/numeric/ublas/io.hpp>
#include<boost/geometry.hpp>
#include<boost/geometry/geometries/point_xy.hpp>
#include<boost/geometry/geometries/polygon.hpp>
#include<boost/geometry/geometries/adapted/c_array.hpp>
#include"mail.h"
BOOST_GEOMETRY_REGISTER_C_ARRAY_CS(cs::cartesian)
typedef boost::geometry::model::polygon<boost::geometry::model::d2::point_xy<double>> Polygon;
using namespace std;
/*=======================================================================
STATIC EVALUATION PARAMETERS
=======================================================================*/
// holds the number of test images on the server
const int32_t N_TESTIMAGES = 7480;
// easy, moderate and hard evaluation level
enum DIFFICULTY{EASY=0, MODERATE=1, HARD=2};
// evaluation metrics: image, ground or 3D
enum METRIC{IMAGE=0, GROUND=1, BOX3D=2};
// evaluation parameter
const int32_t MIN_HEIGHT[3]     = {40, 25, 25};      // minimum height for evaluated groundtruth/detections
const int32_t MAX_OCCLUSION[3]  = {0, 1, 2};         // maximum occlusion level of the groundtruth used for evaluation
const double  MAX_TRUNCATION[3] = {0.15, 0.3, 0.5};  // maximum truncation level of the groundtruth used for evaluation
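// NOTE (explanatory comment, not in the original file): together these three arrays define
// the difficulty levels. Roughly, a ground-truth box counts as EASY if its image height is
// at least 40 px, it is fully visible (occlusion 0) and truncated by at most 15%; MODERATE
// allows 25 px, partial occlusion (1) and 30% truncation; HARD allows 25 px, heavy
// occlusion (2) and 50% truncation. Ground truth outside the selected level is ignored
// rather than counted as a false negative.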
// evaluated object class
enum CLASSES{CAR=0, PEDESTRIAN=1, CYCLIST=2};
const int NUM_CLASS =3;
// parameters varying per class
vector<string> CLASS_NAMES;
// the minimum overlap required for 2D evaluation on the image/ground plane and 3D evaluation
const double MIN_OVERLAP[3][3]={{0.7,0.5,0.5},{0.5,0.25,0.25},{0.5,0.25,0.25}};
// const double MIN_OVERLAP[3][3] = {{0.7, 0.5, 0.5}, {0.7, 0.5, 0.5}, {0.7, 0.5, 0.5}};
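// NOTE (explanatory comment, not in the original file): MIN_OVERLAP is indexed as
// [metric][class]: rows follow the METRIC enum (IMAGE, GROUND, BOX3D) and columns follow
// the CLASSES enum (CAR, PEDESTRIAN, CYCLIST). The active line relaxes the bird's-eye-view
// and 3D thresholds to 0.5/0.25/0.25; the commented-out line matches the stricter official
// KITTI thresholds of 0.7 (car) and 0.5 (pedestrian/cyclist) for all three metrics.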
// no. of recall steps that should be evaluated (discretized)
const double N_SAMPLE_PTS =41;
// initialize class names
void initGlobals() {
  CLASS_NAMES.push_back("car");
  CLASS_NAMES.push_back("pedestrian");
  CLASS_NAMES.push_back("cyclist");
}
/*=======================================================================
DATA TYPES FOR EVALUATION
=======================================================================*/
// holding data needed for precision-recall and precision-aos
struct tPrData {
  vector<double> v;          // detection score for computing score thresholds
  double         similarity; // orientation similarity
  int32_t        tp;         // true positives
  int32_t        fp;         // false positives
  int32_t        fn;         // false negatives
  tPrData() :
    similarity(0), tp(0), fp(0), fn(0) {}
};
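// NOTE (explanatory comment, not in the original file): one tPrData is accumulated per
// score threshold over all frames; precision is then tp/(tp+fp) and recall tp/(tp+fn).
// For AOS, the accumulated orientation similarity is normalized the same way as precision,
// i.e. similarity/(tp+fp).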
// holding bounding boxes for ground truth and detections
struct tBox {
  string  type;  // object type as car, pedestrian or cyclist,...
  double  x1;    // left corner
  double  y1;    // top corner
  double  x2;    // right corner
  double  y2;    // bottom corner
  double  alpha; // image orientation
  tBox(string type, double x1, double y1, double x2, double y2, double alpha) :
    type(type), x1(x1), y1(y1), x2(x2), y2(y2), alpha(alpha) {}
};
// holding ground truth data
struct tGroundtruth {
  tBox    box;        // object type, box, orientation
  double  truncation; // truncation 0..1
  int32_t occlusion;  // occlusion 0,1,2 (non, partly, fully)
  double  ry;
  double  t1, t2, t3;
  double  h, w, l;
  tGroundtruth() :
    box(tBox("invalid", -1, -1, -1, -1, -10)), truncation(-1), occlusion(-1) {}
  tGroundtruth(tBox box, double truncation, int32_t occlusion) :
    box(box), truncation(truncation), occlusion(occlusion) {}
  tGroundtruth(string type, double x1, double y1, double x2, double y2, double alpha,
               double truncation, int32_t occlusion) :
    box(tBox(type, x1, y1, x2, y2, alpha)), truncation(truncation), occlusion(occlusion) {}
};
// holding detection data
struct tDetection {
  tBox    box;    // object type, box, orientation
  double  thresh; // detection score
  double  ry;
  double  t1, t2, t3;
  double  h, w, l;
  tDetection() :
    box(tBox("invalid", -1, -1, -1, -1, -10)), thresh(-1000) {}
  tDetection(tBox box, double thresh) :
    box(box), thresh(thresh) {}
  tDetection(string type, double x1, double y1, double x2, double y2, double alpha, double thresh) :
    box(tBox(type, x1, y1, x2, y2, alpha)), thresh(thresh) {}
};
/*=======================================================================
FUNCTIONS TO LOAD DETECTION AND GROUND TRUTH DATA ONCE, SAVE RESULTS
=======================================================================*/
vector<int32_t> indices;
vector<tDetection> loadDetections(string file_name, bool &compute_aos,
                                  vector<bool> &eval_image, vector<bool> &eval_ground,
                                  vector<bool> &eval_3d, bool &success) {

  // holds all detections (ignored detections are indicated by an index vector)
  vector<tDetection> detections;
  FILE *fp = fopen(file_name.c_str(), "r");
  if (!fp) {
    success = false;
    return detections;
  }
  while (!feof(fp)) {
    tDetection d;
    double trash;
    char str[255];
    if (fscanf(fp, "%s %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf",
               str, &trash, &trash, &d.box.alpha, &d.box.x1, &d.box.y1,
               &d.box.x2, &d.box.y2, &d.h, &d.w, &d.l, &d.t1, &d.t2, &d.t3,
               &d.ry, &d.thresh) == 16) {

      // d.thresh = 1;
      d.box.type = str;
      detections.push_back(d);

      // orientation=-10 is invalid, AOS is not evaluated if at least one orientation is invalid
      if (d.box.alpha == -10)
        compute_aos = false;

      // a class is only evaluated if it is detected at least once
      for (int c = 0; c < NUM_CLASS; c++) {
        if (!strcasecmp(d.box.type.c_str(), CLASS_NAMES[c].c_str())) {
          if (!eval_image[c] && d.box.x1 >= 0)
            eval_image[c] = true;
          if (!eval_ground[c] && d.t1 != -1000)
            eval_ground[c] = true;
          if (!eval_3d[c] && d.t2 != -1000)
            eval_3d[c] = true;
          break;
        }
      }
    }
  }
  fclose(fp);
  success = true;
  return detections;
}
vector<tGroundtruth> loadGroundtruth(string file_name, bool &success) {

  // holds all ground truth (ignored ground truth is indicated by an index vector)
  vector<tGroundtruth> groundtruth;
  FILE *fp = fopen(file_name.c_str(), "r");
  if (!fp) {
    success = false;
    return groundtruth;
  }
  while (!feof(fp)) {
    tGroundtruth g;
    char str[255];
    if (fscanf(fp, "%s %lf %d %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf",
               str, &g.truncation, &g.occlusion, &g.box.alpha,
               &g.box.x1, &g.box.y1, &g.box.x2, &g.box.y2,
               &g.h, &g.w, &g.l, &g.t1,
               &g.t2, &g.t3, &g.ry) == 15) {
      g.box.type = str;
      groundtruth.push_back(g);
    }
  }
  fclose(fp);
  success = true;
  return groundtruth;
}
void saveStats(const vector<double> &precision, const vector<double> &aos, FILE *fp_det, FILE *fp_ori) {

  // save precision to file
  if (precision.empty())
    return;
  for (int32_t i = 0; i < precision.size(); i++)
    fprintf(fp_det, "%f ", precision[i]);
  fprintf(fp_det, "\n");

  // save orientation similarity, only if there were no invalid orientation entries in submission (alpha=-10)
  if (aos.empty())
    return;
  for (int32_t i = 0; i < aos.size(); i++)
    fprintf(fp_ori, "%f ", aos[i]);
  fprintf(fp_ori, "\n");
}
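// NOTE (explanatory comment, not in the original file): each call appends one row of
// N_SAMPLE_PTS values, sampled at equally spaced recall positions, to the stats .txt
// files mentioned in section 4; the reported AP numbers are computed from these sampled
// precision values.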
/*=======================================================================
EVALUATION HELPER FUNCTIONS
=======================================================================*/
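The listing above only covers the first part of evaluate_object.cpp (static parameters, data structures, and the file loaders); the geometry helpers, statistics computation and main routine follow in the full file. Purely as a hypothetical sketch of how the two loaders are driven per frame (only loadGroundtruth, loadDetections and the data types come from the listing above; the function name loadAllFrames, the directory arguments and the loop itself are my own illustration):
// Hypothetical driver sketch, not part of evaluate_object.cpp.
void loadAllFrames(const string &gt_dir, const string &result_dir, int32_t n_frames) {
  bool compute_aos = true;
  vector<bool> eval_image(NUM_CLASS, false), eval_ground(NUM_CLASS, false), eval_3d(NUM_CLASS, false);
  for (int32_t i = 0; i < n_frames; i++) {
    // per-frame files are named with zero-padded six-digit indices
    char file_name[256];
    sprintf(file_name, "%06d.txt", i);
    bool gt_ok = false, det_ok = false;
    vector<tGroundtruth> gt  = loadGroundtruth(gt_dir + "/" + file_name, gt_ok);
    vector<tDetection>   det = loadDetections(result_dir + "/data/" + file_name,
                                              compute_aos, eval_image, eval_ground, eval_3d, det_ok);
    if (!gt_ok || !det_ok)
      continue;  // a real run would report the missing file and abort
    // ... hand gt and det to the per-class, per-difficulty statistics computation ...
  }
}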
