Merge pull request PaddlePaddle#56 from heavengate/add_metric_test
add test_metrics
heavengate authored Apr 28, 2020
2 parents 725536c + d649bd0 commit 4615853
Showing 13 changed files with 257 additions and 18 deletions.
6 changes: 4 additions & 2 deletions examples/tsm/README.md
@@ -39,8 +39,8 @@ The TSM model is a video classification model built by inserting the Temporal Shift Module into a ResNet network
```bash
git clone https://github.com/PaddlePaddle/hapi
cd hapi
export PYTHONPATH=$PYTHONPATH:`pwd`
cd tsm
export PYTHONPATH=`pwd`:$PYTHONPATH
cd examples/tsm
```

### Data Preparation
@@ -141,6 +141,8 @@ python infer.py --data=<path/to/dataset> --label_list=<path/to/label_list> --inf
2020-04-03 07:37:16,321-INFO: Sample ./kineteics/val_10/data_batch_10-042_6 predict label: 6, ground truth label: 6
```

**Note:** During inference, `--infer_file` must point to a pickle file path.
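
For reference, a hypothetical invocation following this note (all paths are placeholders; the flags are the ones shown in the command above, with `--infer_file` pointing at a single pickled sample rather than a directory):

```bash
# Hypothetical paths: --infer_file names one pickle sample file inside the dataset
python infer.py --data=<path/to/dataset> \
                --label_list=<path/to/label_list> \
                --infer_file=<path/to/dataset>/val_10/data_batch_10-042_6
```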

## References

- [Temporal Shift Module for Efficient Video Understanding](https://arxiv.org/abs/1811.08383v1), Ji Lin, Chuang Gan, Song Han
4 changes: 3 additions & 1 deletion examples/tsm/infer.py
@@ -26,6 +26,7 @@
from modeling import tsm_resnet50
from kinetics_dataset import KineticsDataset
from transforms import *
from utils import print_arguments

import logging
logger = logging.getLogger(__name__)
@@ -56,7 +57,7 @@ def main():
model.load(FLAGS.weights, reset_optimizer=True)

imgs, label = dataset[0]
pred = model.test([imgs[np.newaxis, :]])
pred = model.test_batch([imgs[np.newaxis, :]])
pred = labels[np.argmax(pred)]
logger.info("Sample {} predict label: {}, ground truth label: {}" \
.format(FLAGS.infer_file, pred, labels[int(label)]))
@@ -86,6 +87,7 @@ def main():
type=str,
help="weights path for evaluation")
FLAGS = parser.parse_args()
print_arguments(FLAGS)

check_gpu(str.lower(FLAGS.device) == 'gpu')
check_version()
2 changes: 1 addition & 1 deletion examples/tsm/kinetics_dataset.py
@@ -113,7 +113,7 @@ def __getitem__(self, idx):

if self.transform:
imgs, label = self.transform(imgs, label)
return imgs, np.array([label])
return imgs, np.array([label]).astype('int64')

@property
def num_classes(self):
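The label dtype change above (and the matching ones in `hapi/datasets/flowers.py` and `hapi/datasets/mnist.py` below) pins labels to int64. One likely motivation, stated here as an assumption: NumPy's default integer dtype is platform dependent (int32 on Windows), while integer label inputs to downstream loss and metric ops are expected as int64, so the explicit cast removes the ambiguity. A quick standalone illustration:

```python
import numpy as np

label = 3
# The default integer dtype follows the platform's C long: usually int64 on Linux/macOS, int32 on Windows.
print(np.array([label]).dtype)
# The explicit cast used in the dataset changes above always yields int64.
print(np.array([label]).astype('int64').dtype)  # int64
```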
10 changes: 9 additions & 1 deletion examples/tsm/main.py
@@ -31,6 +31,7 @@
from check import check_gpu, check_version
from kinetics_dataset import KineticsDataset
from transforms import *
from utils import print_arguments


def make_optimizer(step_per_epoch, parameter_list=None):
@@ -106,7 +107,7 @@ def main():
eval_data=val_dataset,
epochs=FLAGS.epoch,
batch_size=FLAGS.batch_size,
save_dir='tsm_checkpoint',
save_dir=FLAGS.save_dir or 'tsm_checkpoint',
num_workers=FLAGS.num_workers,
drop_last=True,
shuffle=True)
@@ -150,7 +151,14 @@ def main():
default=None,
type=str,
help="weights path for evaluation")
parser.add_argument(
"-s",
"--save_dir",
default=None,
type=str,
help="directory path for checkpoint saving, default ./tsm_checkpoint")
FLAGS = parser.parse_args()
print_arguments(FLAGS)

check_gpu(str.lower(FLAGS.device) == 'gpu')
check_version()
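
As a usage sketch for the new `--save_dir` option (the dataset path is a placeholder and the other flags are assumed to follow the existing README training command):

```bash
# Hypothetical launch: override the default ./tsm_checkpoint output directory
python main.py --data=<path/to/dataset> --batch_size=16 --save_dir=my_tsm_checkpoints
```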
44 changes: 44 additions & 0 deletions examples/tsm/utils.py
@@ -0,0 +1,44 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import six
import logging
logger = logging.getLogger(__name__)

__all__ = ['print_arguments']


def print_arguments(args):
"""Print argparse's arguments.
Usage:
.. code-block:: python
parser = argparse.ArgumentParser()
parser.add_argument("name", default="John", type=str, help="User name.")
args = parser.parse_args()
print_arguments(args)
:param args: Input argparse.Namespace for printing.
:type args: argparse.Namespace
"""
logger.info("----------- Configuration Arguments -----------")
for arg, value in sorted(six.iteritems(vars(args))):
logger.info("%s: %s" % (arg, value))
logger.info("------------------------------------------------")
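
One caveat when reusing this helper: `print_arguments` emits through the `logging` module at INFO level, and Python's root logger only shows WARNING and above by default, so the argument dump stays invisible unless logging is configured. A minimal sketch using only the standard library (the `--data` flag here is illustrative):

```python
import argparse
import logging

from utils import print_arguments  # the helper added in this file

# Without this, INFO-level records are dropped and print_arguments appears to do nothing.
logging.basicConfig(level=logging.INFO)

parser = argparse.ArgumentParser()
parser.add_argument("--data", type=str, default=None, help="dataset root path")
print_arguments(parser.parse_args())
```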
10 changes: 5 additions & 5 deletions examples/yolov3/README.md
@@ -53,8 +53,8 @@ The YOLOv3 network structure consists of a base feature extraction network, multi-scale feature fusion layers
```bash
git clone https://github.com/PaddlePaddle/hapi
cd hapi
export PYTHONPATH=$PYTHONPATH:`pwd`
cd tsm
export PYTHONPATH=`pwd`:$PYTHONPATH
cd examples/yolov3
```

#### Install COCO-API
@@ -126,13 +126,13 @@ CUDA_VISIBLE_DEVICES=0,1,2,3 python -m paddle.distributed.launch main.py --data=
Launch multi-GPU training as follows:

```bash
CUDA_VISIBLE_DEVICES=0,1,2,3 python main.py -m paddle.distributed.launch --data=<path/to/dataset> --batch_size=16 -d
CUDA_VISIBLE_DEVICES=0,1,2,3 python -m paddle.distributed.launch main.py --data=<path/to/dataset> --batch_size=16 -d
```


### Model Evaluation

The YOLOv3 model outputs a LoDTensor, so it only supports evaluation with batch_size 1. The model can be evaluated in either of the following two ways.
The YOLOv3 model outputs a LoDTensor, so it only supports evaluation on a single card with batch_size 1. The model can be evaluated in either of the following two ways.

1. Evaluate with the Paddle-released [YOLOv3-DarkNet53](https://paddlemodels.bj.bcebos.com/hapi/yolov3_darknet53.pdparams) weights, downloaded automatically

@@ -180,7 +180,7 @@ python infer.py --label_list=dataset/voc/label_list.txt --infer_image=image/dog.
2. Load a checkpoint for accuracy evaluation

```bash
python infer.py --label_list=dataset/voc/label_list.txt --infer_image=image/dog.jpg --weights=yolo_checkpoint/mo_mixup/final
python infer.py --label_list=dataset/voc/label_list.txt --infer_image=image/dog.jpg --weights=yolo_checkpoint/no_mixup/final
```

Visualized inference results are saved under the folder specified by `--output`, which defaults to the `./output` directory.
5 changes: 3 additions & 2 deletions examples/yolov3/infer.py
@@ -28,7 +28,7 @@

from modeling import yolov3_darknet53, YoloLoss
from transforms import *

from utils import print_arguments
from visualizer import draw_bbox

import logging
@@ -91,7 +91,7 @@ def main():
img_id = np.array([0]).astype('int64')[np.newaxis, :]
img_shape = np.array([h, w]).astype('int32')[np.newaxis, :]

_, bboxes = model.test([img_id, img_shape, img])
_, bboxes = model.test_batch([img_id, img_shape, img])

vis_img = draw_bbox(orig_img, cat2name, bboxes, FLAGS.draw_threshold)
save_name = get_save_image_name(FLAGS.output_dir, FLAGS.infer_image)
@@ -121,6 +121,7 @@ def main():
"-w", "--weights", default=None, type=str,
help="path to weights for inference")
FLAGS = parser.parse_args()
print_arguments(FLAGS)
assert os.path.isfile(FLAGS.infer_image), \
"infer_image {} not a file".format(FLAGS.infer_image)
assert os.path.isfile(FLAGS.label_list), \
14 changes: 12 additions & 2 deletions examples/yolov3/main.py
@@ -33,6 +33,7 @@
from coco import COCODataset
from coco_metric import COCOMetric
from transforms import *
from utils import print_arguments

NUM_MAX_BOXES = 50

@@ -171,16 +172,18 @@ def main():
if FLAGS.resume is not None:
model.load(FLAGS.resume)

save_dir = FLAGS.save_dir or 'yolo_checkpoint'

model.fit(train_data=loader,
epochs=FLAGS.epoch - FLAGS.no_mixup_epoch,
save_dir="yolo_checkpoint/mixup",
save_dir=os.path.join(save_dir, "mixup"),
save_freq=10)

# do not use image mixup transform in the last FLAGS.no_mixup_epoch epochs
dataset.mixup = False
model.fit(train_data=loader,
epochs=FLAGS.no_mixup_epoch,
save_dir="yolo_checkpoint/no_mixup",
save_dir=os.path.join(save_dir, "no_mixup"),
save_freq=5)


@@ -233,6 +236,13 @@ def main():
default=None,
type=str,
help="path to weights for evaluation")
parser.add_argument(
"-s",
"--save_dir",
default=None,
type=str,
help="directory path for checkpoint saving, default ./yolo_checkpoint")
FLAGS = parser.parse_args()
print_arguments(FLAGS)
assert FLAGS.data, "error: must provide data path"
main()
44 changes: 44 additions & 0 deletions examples/yolov3/utils.py
@@ -0,0 +1,44 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import six
import logging
logger = logging.getLogger(__name__)

__all__ = ['print_arguments']


def print_arguments(args):
"""Print argparse's arguments.
Usage:
.. code-block:: python
parser = argparse.ArgumentParser()
parser.add_argument("name", default="John", type=str, help="User name.")
args = parser.parse_args()
print_arguments(args)
:param args: Input argparse.Namespace for printing.
:type args: argparse.Namespace
"""
logger.info("----------- Configuration Arguments -----------")
for arg, value in sorted(six.iteritems(vars(args))):
logger.info("%s: %s" % (arg, value))
logger.info("------------------------------------------------")
2 changes: 1 addition & 1 deletion hapi/datasets/flowers.py
@@ -123,7 +123,7 @@ def __getitem__(self, idx):
if self.transform is not None:
image = self.transform(image)

return image, label
return image, label.astype('int64')

def __len__(self):
return len(self.indexes)
2 changes: 1 addition & 1 deletion hapi/datasets/mnist.py
@@ -144,7 +144,7 @@ def _parse_dataset(self, buffer_size=100):

for i in range(buffer_size):
self.images.append(images[i, :])
self.labels.append(np.array([labels[i]]))
self.labels.append(np.array([labels[i]]).astype('int64'))

def __getitem__(self, idx):
image, label = self.images[idx], self.labels[idx]
4 changes: 2 additions & 2 deletions hapi/metrics.py
@@ -116,7 +116,7 @@ def __init__(self, topk=(1, ), name=None, *args, **kwargs):
def add_metric_op(self, pred, label, *args):
pred = fluid.layers.argsort(pred, descending=True)[1][:, :self.maxk]
correct = pred == label
return correct
return fluid.layers.cast(correct, dtype='float32')

def update(self, correct, *args):
accs = []
@@ -143,7 +143,7 @@ def _init_name(self, name):
if self.maxk != 1:
self._name = ['{}_top{}'.format(name, k) for k in self.topk]
else:
self._name = ['acc']
self._name = [name]

def name(self):
return self._name
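
The two `Accuracy` changes are related: `add_metric_op` now returns a float32 tensor instead of a boolean one, so the per-sample correctness can be summed and averaged in `update`, and `_init_name` now respects a caller-supplied name in the top-1 case. A standalone NumPy sketch of the same top-k correctness computation (illustrative only, not the hapi API):

```python
import numpy as np

def topk_correct(pred, label, maxk=5):
    """Mirror of Accuracy.add_metric_op: 0/1 float matrix of top-k hits per sample."""
    # pred: (batch, num_classes) scores; label: (batch, 1) int64 class ids
    topk_idx = np.argsort(-pred, axis=1)[:, :maxk]   # indices of the maxk highest scores
    # Cast the boolean hit matrix to float32 so it can be summed/averaged downstream.
    return (topk_idx == label).astype('float32')

pred = np.array([[0.1, 0.7, 0.2],
                 [0.5, 0.3, 0.2]])
label = np.array([[1], [1]], dtype='int64')
print(topk_correct(pred, label, maxk=2))
# [[1. 0.]   sample 0: correct at top-1
#  [0. 1.]]  sample 1: correct only at top-2
```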
