From 19f7b65a7d2102c44d686f8d7c0d97927cc56967 Mon Sep 17 00:00:00 2001 From: Yezhen Cong <52420115+THU17cyz@users.noreply.github.com> Date: Thu, 10 Jun 2021 20:57:50 +0800 Subject: [PATCH] Fix Lyft test command in docs and made some refinements (#635) --- docs/1_exist_data_model.md | 24 +++++++++++++----------- docs/tutorials/waymo.md | 6 +++--- docs/useful_tools.md | 2 +- mmdet3d/datasets/lyft_dataset.py | 2 ++ 4 files changed, 19 insertions(+), 15 deletions(-) diff --git a/docs/1_exist_data_model.md b/docs/1_exist_data_model.md index 4db86af90..1d0acf687 100644 --- a/docs/1_exist_data_model.md +++ b/docs/1_exist_data_model.md @@ -46,7 +46,7 @@ Assume that you have already downloaded the checkpoints to the directory `checkp python tools/test.py configs/votenet/votenet_8x8_scannet-3d-18class.py \ checkpoints/votenet_8x8_scannet-3d-18class_20200620_230238-2cea9c3a.pth \ --eval mAP - --options 'show=True' 'out_dir=./data/scannet/show_results' + --eval-options 'show=True' 'out_dir=./data/scannet/show_results' ``` 3. Test votenet on ScanNet (without saving the test results) and evaluate the mAP. @@ -70,7 +70,7 @@ Assume that you have already downloaded the checkpoints to the directory `checkp ```shell ./tools/slurm_test.sh ${PARTITION} ${JOB_NAME} configs/pointpillars/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d.py \ checkpoints/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d_20200620_230405-2fa62f3d.pth \ - --format-only --options 'jsonfile_prefix=./pointpillars_nuscenes_results' + --format-only --eval-options 'jsonfile_prefix=./pointpillars_nuscenes_results' ``` The generated results be under `./pointpillars_nuscenes_results` directory. 
@@ -80,7 +80,7 @@ Assume that you have already downloaded the checkpoints to the directory `checkp ```shell ./tools/slurm_test.sh ${PARTITION} ${JOB_NAME} configs/second/hv_second_secfpn_6x8_80e_kitti-3d-3class.py \ checkpoints/hv_second_secfpn_6x8_80e_kitti-3d-3class_20200620_230238-9208083a.pth \ - --format-only --options 'pklfile_prefix=./second_kitti_results' 'submission_prefix=./second_kitti_results' + --format-only --eval-options 'pklfile_prefix=./second_kitti_results' 'submission_prefix=./second_kitti_results' ``` The generated results be under `./second_kitti_results` directory. @@ -90,29 +90,31 @@ Assume that you have already downloaded the checkpoints to the directory `checkp ```shell ./tools/slurm_test.sh ${PARTITION} ${JOB_NAME} configs/pointpillars/hv_pointpillars_fpn_sbn-2x8_2x_lyft-3d.py \ checkpoints/hv_pointpillars_fpn_sbn-2x8_2x_lyft-3d_latest.pth --out results/pp_lyft/results_challenge.pkl \ - --format-only --options 'jsonfile_prefix=results/pp_lyft/results_challenge' \ - 'csv_path=results/pp_lyft/results_challenge.csv' + --format-only --eval-options 'jsonfile_prefix=results/pp_lyft/results_challenge' \ + 'csv_savepath=results/pp_lyft/results_challenge.csv' ``` - **Notice**: To generate submissions on Lyft, `csv_path` must be given in the options. After generating the csv file, you can make a submission with kaggle commands given on the [website](https://www.kaggle.com/c/3d-object-detection-for-autonomous-vehicles/submit). + **Notice**: To generate submissions on Lyft, `csv_savepath` must be given in the `--eval-options`. After generating the csv file, you can make a submission with kaggle commands given on the [website](https://www.kaggle.com/c/3d-object-detection-for-autonomous-vehicles/submit). -7. Test PointPillars on waymo with 8 GPUs, and evaluate the mAP with waymo metrics. 
+ Note that in the [config of Lyft dataset](../configs/_base_/datasets/lyft-3d.py), the value of `ann_file` keyword in `test` is `data_root + 'lyft_infos_test.pkl'`, which is the official test set of Lyft without annotation. To test on the validation set, please change this to `data_root + 'lyft_infos_val.pkl'`. + +8. Test PointPillars on waymo with 8 GPUs, and evaluate the mAP with waymo metrics. ```shell ./tools/slurm_test.sh ${PARTITION} ${JOB_NAME} configs/pointpillars/hv_pointpillars_secfpn_sbn-2x16_2x_waymo-3d-car.py \ checkpoints/hv_pointpillars_secfpn_sbn-2x16_2x_waymo-3d-car_latest.pth --out results/waymo-car/results_eval.pkl \ - --eval waymo --options 'pklfile_prefix=results/waymo-car/kitti_results' \ + --eval waymo --eval-options 'pklfile_prefix=results/waymo-car/kitti_results' \ 'submission_prefix=results/waymo-car/kitti_results' ``` - **Notice**: For evaluation on waymo, please follow the [instruction](https://github.com/waymo-research/waymo-open-dataset/blob/master/docs/quick_start.md/) to build the binary file `compute_detection_metrics_main` for metrics computation and put it into `mmdet3d/core/evaluation/waymo_utils/`.(Sometimes when using bazel to build `compute_detection_metrics_main`, an error `'round' is not a member of 'std'` may appear. We just need to remove the `std::` before `round` in that file.) `pklfile_prefix` should be given in the options for the bin file generation. For metrics, `waymo` is the recommended official evaluation prototype. Currently, evaluating with choice `kitti` is adapted from KITTI and the results for each difficulty are not exactly the same as the definition of KITTI. Instead, most of objects are marked with difficulty 0 currently, which will be fixed in the future. The reasons of its instability include the large computation for evalution, the lack of occlusion and truncation in the converted data, different definition of difficulty and different methods of computing average precision. 
+ **Notice**: For evaluation on waymo, please follow the [instruction](https://github.com/waymo-research/waymo-open-dataset/blob/master/docs/quick_start.md/) to build the binary file `compute_detection_metrics_main` for metrics computation and put it into `mmdet3d/core/evaluation/waymo_utils/`. (Sometimes when using bazel to build `compute_detection_metrics_main`, an error `'round' is not a member of 'std'` may appear. We just need to remove the `std::` before `round` in that file.) `pklfile_prefix` should be given in the `--eval-options` for the bin file generation. For metrics, `waymo` is the recommended official evaluation prototype. Currently, evaluating with choice `kitti` is adapted from KITTI and the results for each difficulty are not exactly the same as the definition of KITTI. Instead, most of the objects are marked with difficulty 0 currently, which will be fixed in the future. The reasons of its instability include the large computation for evaluation, the lack of occlusion and truncation in the converted data, different definition of difficulty and different methods of computing average precision. -8. Test PointPillars on waymo with 8 GPUs, generate the bin files and make a submission to the leaderboard. +9. Test PointPillars on waymo with 8 GPUs, generate the bin files and make a submission to the leaderboard. 
```shell ./tools/slurm_test.sh ${PARTITION} ${JOB_NAME} configs/pointpillars/hv_pointpillars_secfpn_sbn-2x16_2x_waymo-3d-car.py \ checkpoints/hv_pointpillars_secfpn_sbn-2x16_2x_waymo-3d-car_latest.pth --out results/waymo-car/results_eval.pkl \ - --format-only --options 'pklfile_prefix=results/waymo-car/kitti_results' \ + --format-only --eval-options 'pklfile_prefix=results/waymo-car/kitti_results' \ 'submission_prefix=results/waymo-car/kitti_results' ``` diff --git a/docs/tutorials/waymo.md b/docs/tutorials/waymo.md index 8e7cdb25d..1b423691c 100644 --- a/docs/tutorials/waymo.md +++ b/docs/tutorials/waymo.md @@ -125,11 +125,11 @@ Then you can evaluate your models on waymo. An example to evaluate PointPillars ```shell ./tools/slurm_test.sh ${PARTITION} ${JOB_NAME} configs/pointpillars/hv_pointpillars_secfpn_sbn-2x16_2x_waymo-3d-car.py \ checkpoints/hv_pointpillars_secfpn_sbn-2x16_2x_waymo-3d-car_latest.pth --out results/waymo-car/results_eval.pkl \ - --eval waymo --options 'pklfile_prefix=results/waymo-car/kitti_results' \ + --eval waymo --eval-options 'pklfile_prefix=results/waymo-car/kitti_results' \ 'submission_prefix=results/waymo-car/kitti_results' ``` -`pklfile_prefix` should be given in the options if the bin file is needed to be generated. For metrics, `waymo` is the recommended official evaluation prototype. Currently, evaluating with choice `kitti` is adapted from KITTI and the results for each difficulty are not exactly the same as the definition of KITTI. Instead, most of objects are marked with difficulty 0 currently, which will be fixed in the future. The reasons of its instability include the large computation for evalution, the lack of occlusion and truncation in the converted data, different definition of difficulty and different methods of computing average precision. +`pklfile_prefix` should be given in the `--eval-options` if the bin file is needed to be generated. For metrics, `waymo` is the recommended official evaluation prototype. 
Currently, evaluating with choice `kitti` is adapted from KITTI and the results for each difficulty are not exactly the same as the definition of KITTI. Instead, most of the objects are marked with difficulty 0 currently, which will be fixed in the future. The reasons of its instability include the large computation for evaluation, the lack of occlusion and truncation in the converted data, different definition of difficulty and different methods of computing average precision. **Notice**: @@ -146,7 +146,7 @@ An example to test PointPillars on waymo with 8 GPUs, generate the bin files and ```shell ./tools/slurm_test.sh ${PARTITION} ${JOB_NAME} configs/pointpillars/hv_pointpillars_secfpn_sbn-2x16_2x_waymo-3d-car.py \ checkpoints/hv_pointpillars_secfpn_sbn-2x16_2x_waymo-3d-car_latest.pth --out results/waymo-car/results_eval.pkl \ - --format-only --options 'pklfile_prefix=results/waymo-car/kitti_results' \ + --format-only --eval-options 'pklfile_prefix=results/waymo-car/kitti_results' \ 'submission_prefix=results/waymo-car/kitti_results' ``` diff --git a/docs/useful_tools.md b/docs/useful_tools.md index 6714bbd68..97faafd94 100644 --- a/docs/useful_tools.md +++ b/docs/useful_tools.md @@ -68,7 +68,7 @@ After running this command, plotted results including input data and the output To see the prediction results during evaluation time, you can run the following command ```bash -python tools/test.py ${CONFIG_FILE} ${CKPT_PATH} --eval 'mAP' --options 'show=True' 'out_dir=${SHOW_DIR}' +python tools/test.py ${CONFIG_FILE} ${CKPT_PATH} --eval 'mAP' --eval-options 'show=True' 'out_dir=${SHOW_DIR}' ``` After running this command, you will obtain the input data, the output of networks and ground-truth labels visualized on the input (e.g. `***_points.obj`, `***_pred.obj`, `***_gt.obj`, `***_img.png` and `***_pred.png` in multi-modality detection task) in `${SHOW_DIR}`. When `show` is enabled, [Open3D](http://www.open3d.org/) will be used to visualize the results online. 
You need to set `show=False` while running test in remote server without GUI. diff --git a/mmdet3d/datasets/lyft_dataset.py b/mmdet3d/datasets/lyft_dataset.py index 20a598f89..365b839c2 100644 --- a/mmdet3d/datasets/lyft_dataset.py +++ b/mmdet3d/datasets/lyft_dataset.py @@ -1,5 +1,6 @@ import mmcv import numpy as np +import os import pandas as pd import tempfile from lyft_dataset_sdk.lyftdataset import LyftDataset as Lyft @@ -495,6 +496,7 @@ def json2csv(self, json_path, csv_savepath): idx = Id_list.index(token) pred_list[idx] = prediction_str df = pd.DataFrame({'Id': Id_list, 'PredictionString': pred_list}) + mmcv.mkdir_or_exist(os.path.dirname(csv_savepath)) df.to_csv(csv_savepath, index=False)