diff --git a/src/PaddleClas/benchmark/README.md b/src/PaddleClas/benchmark/README.md
new file mode 100644
index 0000000..2e892d9
--- /dev/null
+++ b/src/PaddleClas/benchmark/README.md
@@ -0,0 +1,27 @@
+# benchmark usage guide
+
+All shell scripts in this directory are used to measure the speed metrics of the different PaddleClas models, such as single-GPU and multi-GPU training speed.
+
+## Script overview
+
+There are three scripts in total:
+
+- `prepare_data.sh`: downloads the test data and sets up the data paths
+- `run_benchmark.sh`: runs a single training benchmark; see the comments in the script for the exact invocation
+- `run_all.sh`: entry script that runs all the training benchmarks
+
+## Usage
+
+**Note**: to keep the working directory consistent with the other PaddleClas modules, this module is run from the `PaddleClas` root directory.
+
+### 1. Prepare the data
+
+```shell
+bash benchmark/prepare_data.sh
+```
+
+### 2. Run the tests for all models
+
+```shell
+bash benchmark/run_all.sh
+```
diff --git a/src/PaddleClas/benchmark/prepare_data.sh b/src/PaddleClas/benchmark/prepare_data.sh
new file mode 100644
index 0000000..411459c
--- /dev/null
+++ b/src/PaddleClas/benchmark/prepare_data.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+dataset_url=$1
+
+package_check_list=(imageio tqdm Cython pycocotools tb_paddle scipy pandas wget h5py sklearn opencv-python visualdl)
+for package in ${package_check_list[@]}; do
+    if python -c "import ${package}" >/dev/null 2>&1; then
+        echo "${package} is already installed"
+    else
+        echo "${package} NOT FOUND"
+        pip install ${package}
+        echo "${package} installed"
+    fi
+done
+
+cd dataset
+rm -rf ILSVRC2012
+wget -nc ${dataset_url}
+tar xf ILSVRC2012_val.tar
+ln -s ILSVRC2012_val ILSVRC2012
+cd ILSVRC2012
+ln -s val_list.txt train_list.txt
+cd ../../
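For reference, a minimal sketch of how `prepare_data.sh` is meant to be called and what it leaves behind; the dataset URL is not part of this patch, so `<dataset_url>` below is only a placeholder for wherever `ILSVRC2012_val.tar` is hosted:

```shell
# Run from the PaddleClas root directory; <dataset_url> is a placeholder
# (the actual location of ILSVRC2012_val.tar is not defined in this patch).
bash benchmark/prepare_data.sh <dataset_url>

# Expected layout afterwards:
#   dataset/ILSVRC2012_val/                               extracted validation images
#   dataset/ILSVRC2012 -> ILSVRC2012_val                  symlink created by the script
#   dataset/ILSVRC2012/train_list.txt -> val_list.txt     val list reused as the train list
```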
diff --git a/src/PaddleClas/benchmark/run_all.sh b/src/PaddleClas/benchmark/run_all.sh
new file mode 100644
index 0000000..a6d6884
--- /dev/null
+++ b/src/PaddleClas/benchmark/run_all.sh
@@ -0,0 +1,31 @@
+# Scripts that reproduce performance numbers stably; by default they are executed with py37 inside the standard docker environment: paddlepaddle/paddle:latest-gpu-cuda10.1-cudnn7, paddle=2.1.2, py=37
+# Working directory: to be documented
+# cd **
+# 1. Install the dependencies this model needs (note it here if any optimization strategy is enabled)
+# pip install ...
+# 2. Copy the data and pretrained models this model needs
+# 3. Run everything in batch (if batch runs are inconvenient, steps 1 and 2 must be moved into each individual model)
+log_path=${LOG_PATH_INDEX_DIR:-$(pwd)}   # LOG_PATH_INDEX_DIR is set later by QA
+model_mode_list=(MobileNetV1 MobileNetV2 MobileNetV3_large_x1_0 ShuffleNetV2_x1_0 HRNet_W48_C SwinTransformer_tiny_patch4_window7_224 alt_gvt_base)   # models monitored by the benchmark
+#model_mode_list=(MobileNetV1 MobileNetV2 MobileNetV3_large_x1_0 EfficientNetB0 ShuffleNetV2_x1_0 DenseNet121 HRNet_W48_C SwinTransformer_tiny_patch4_window7_224 alt_gvt_base)   # full list supported by this script
+fp_item_list=(fp32)
+#bs_list=(32 64 96 128)
+for model_mode in ${model_mode_list[@]}; do
+    for fp_item in ${fp_item_list[@]}; do
+        if [ ${model_mode} = MobileNetV3_large_x1_0 ] || [ ${model_mode} = ShuffleNetV2_x1_0 ]; then
+            bs_list=(256)
+        else
+            bs_list=(64)
+        fi
+        for bs_item in ${bs_list[@]}; do
+            echo "index is speed, 1gpus, begin, ${model_mode}"
+            run_mode=sp
+            CUDA_VISIBLE_DEVICES=0 bash benchmark/run_benchmark.sh ${run_mode} ${bs_item} ${fp_item} 1 ${model_mode} 2>&1 | tee ${log_path}/clas_${model_mode}_${run_mode}_bs${bs_item}_${fp_item}_1gpus   # (5min)
+            sleep 10
+            echo "index is speed, 8gpus, run_mode is multi_process, begin, ${model_mode}"
+            run_mode=mp
+            CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash benchmark/run_benchmark.sh ${run_mode} ${bs_item} ${fp_item} 1 ${model_mode} 2>&1 | tee ${log_path}/clas_${model_mode}_${run_mode}_bs${bs_item}_${fp_item}_8gpus8p
+            sleep 10
+        done
+    done
+done
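For reference, one pass through the inner loop of `run_all.sh` expands to calls of roughly the following shape; MobileNetV1 with fp32 and batch size 64 is used purely as an example, and `log_path` defaults to the current directory:

```shell
# Single-GPU (sp) run: 1 GPU, batch size 64, fp32, 1 epoch
CUDA_VISIBLE_DEVICES=0 bash benchmark/run_benchmark.sh sp 64 fp32 1 MobileNetV1 2>&1 \
    | tee ${log_path}/clas_MobileNetV1_sp_bs64_fp32_1gpus

# Multi-GPU (mp) run on 8 GPUs with the same arguments
CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 bash benchmark/run_benchmark.sh mp 64 fp32 1 MobileNetV1 2>&1 \
    | tee ${log_path}/clas_MobileNetV1_mp_bs64_fp32_8gpus8p
```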
diff --git a/src/PaddleClas/benchmark/run_benchmark.sh b/src/PaddleClas/benchmark/run_benchmark.sh
new file mode 100644
index 0000000..8cee6fc
--- /dev/null
+++ b/src/PaddleClas/benchmark/run_benchmark.sh
@@ -0,0 +1,68 @@
+#!/usr/bin/env bash
+set -xe
+# Usage example: CUDA_VISIBLE_DEVICES=0 bash run_benchmark.sh ${run_mode} ${bs_item} ${fp_item} 500 ${model_mode}
+# Parameter description
+function _set_params(){
+    run_mode=${1:-"sp"}          # sp: single GPU | mp: multi GPU
+    batch_size=${2:-"64"}
+    fp_item=${3:-"fp32"}         # fp32|fp16
+    epochs=${4:-"2"}             # optional; modify the code if training needs to stop early
+    model_item=${5:-"model_item"}
+    run_log_path=${TRAIN_LOG_DIR:-$(pwd)}   # TRAIN_LOG_DIR is set later by QA
+
+    index=1
+    mission_name="图像分类"       # task the model belongs to (image classification); see scripts/config.ini (required)
+    direction_id=0               # task direction: 0: CV, 1: NLP, 2: Rec (required)
+    skip_steps=8                 # steps skipped when parsing the log; the first steps of some models are slow (required)
+    keyword="ips:"               # keyword of the log lines that carry the metric (required)
+    keyword_loss="loss:"         # optional
+    model_mode=-1                # log parsing mode; see scripts/analysis.py for details (required)
+    ips_unit="images/s"
+    base_batch_size=$batch_size
+# No changes needed below this point
+    device=${CUDA_VISIBLE_DEVICES//,/ }
+    arr=(${device})
+    num_gpu_devices=${#arr[*]}
+    log_file=${run_log_path}/clas_${model_item}_${run_mode}_bs${batch_size}_${fp_item}_${num_gpu_devices}
+    model_name=${model_item}_bs${batch_size}_${fp_item}   # model_item is used for yaml matching, model_name for storing results
+}
+function _train(){
+    echo "Train on ${num_gpu_devices} GPUs"
+    echo "current CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES, gpus=$num_gpu_devices, batch_size=$batch_size"
+
+    if [ ${fp_item} = "fp32" ];then
+        model_config=`find ppcls/configs/ImageNet -name ${model_item}.yaml`
+    else
+        model_config=`find ppcls/configs/ImageNet -name ${model_item}_fp16.yaml`
+    fi
+
+    train_cmd="-c ${model_config} -o DataLoader.Train.sampler.batch_size=${batch_size} -o Global.epochs=${epochs} -o Global.eval_during_train=False -o Global.print_batch_step=2"
+    case ${run_mode} in
+    sp) train_cmd="python -u tools/train.py ${train_cmd}" ;;
+    mp)
+        train_cmd="python -m paddle.distributed.launch --log_dir=./mylog --gpus=$CUDA_VISIBLE_DEVICES tools/train.py ${train_cmd}"
+        log_parse_file="mylog/workerlog.0" ;;
+    *) echo "choose run_mode(sp or mp)"; exit 1;
+    esac
+    rm -rf mylog
+# No changes needed below this point
+    timeout 5m ${train_cmd} > ${log_file} 2>&1
+    if [ $? -ne 0 ];then
+        echo -e "${model_name}, FAIL"
+        export job_fail_flag=1
+    else
+        echo -e "${model_name}, SUCCESS"
+        export job_fail_flag=0
+    fi
+    kill -9 `ps -ef|grep 'python'|awk '{print $2}'`
+
+    if [ $run_mode = "mp" -a -d mylog ]; then
+        rm ${log_file}
+        cp mylog/workerlog.0 ${log_file}
+    fi
+}
+
+source ${BENCHMARK_ROOT}/scripts/run_model.sh   # run_model.sh parses the performance data from benchmark-compliant logs with analysis.py; for the joint run it can be downloaded from the benchmark repo: https://github.com/PaddlePaddle/benchmark/blob/master/scripts/run_model.sh; if you only want to produce the training log without the joint run, comment this line out, but re-enable it before submitting
+_set_params $@
+_run
+#_train
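Putting the pieces together: for a single-GPU fp32 run of, say, MobileNetV1 with batch size 64 and 1 epoch, `_train` assembles and runs a command roughly like the sketch below. The yaml path is only illustrative; it is whatever `find ppcls/configs/ImageNet -name MobileNetV1.yaml` resolves to, and in `mp` mode the same `tools/train.py` arguments are launched through `python -m paddle.distributed.launch` instead.

```shell
# Sketch of the sp command built by _train (stdout/stderr go to ${log_file});
# the yaml path below is illustrative, the script locates it via `find`.
timeout 5m python -u tools/train.py \
    -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1.yaml \
    -o DataLoader.Train.sampler.batch_size=64 \
    -o Global.epochs=1 \
    -o Global.eval_during_train=False \
    -o Global.print_batch_step=2
```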