mask2former_r50_lsj_8x2_50e_coco.py
_base_ = ['./mask2former_r50_lsj_8x2_50e_coco-panoptic.py']
num_things_classes = 80
num_stuff_classes = 0
num_classes = num_things_classes + num_stuff_classes
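# Override the heads inherited from the panoptic base config for instance-only
# training: all 80 COCO classes are treated as "things". The trailing 0.1 in
# class_weight is the down-weighted "no object" class, and panoptic_on=False
# skips panoptic fusion at test time so only instance results are produced.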
model = dict(
panoptic_head=dict(
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes,
loss_cls=dict(class_weight=[1.0] * num_classes + [0.1])),
panoptic_fusion_head=dict(
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes),
test_cfg=dict(panoptic_on=False))
# dataset settings
image_size = (1024, 1024)
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
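# Pad images with a mid-gray value (close to the per-channel mean), instance
# masks with 0, and semantic segmentation maps with 255 (the ignore index).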
pad_cfg = dict(img=(128, 128, 128), masks=0, seg=255)
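# Training pipeline: large-scale jittering resize followed by a fixed
# 1024x1024 crop; instances whose masks become empty after cropping are
# filtered out before padding and normalization.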
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(type='RandomFlip', flip_ratio=0.5),
# large scale jittering
dict(
type='Resize',
img_scale=image_size,
ratio_range=(0.1, 2.0),
multiscale_mode='range',
keep_ratio=True),
dict(
type='RandomCrop',
crop_size=image_size,
crop_type='absolute',
recompute_bbox=True,
allow_negative_crop=True),
dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-5, 1e-5), by_mask=True),
dict(type='Pad', size=image_size, pad_val=pad_cfg),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle', img_to_float=True),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
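# Test pipeline: single-scale (1333, 800) inference without flipping, padded
# to a size divisible by 32.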
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Pad', size_divisor=32, pad_val=pad_cfg),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
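# _delete_=True replaces, rather than merges with, the data settings inherited
# from the panoptic base config, switching to plain COCO instance annotations.
# 2 samples per GPU on 8 GPUs matches the "8x2" in the config name.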
data = dict(
_delete_=True,
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
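# Evaluate both box AP and mask AP on COCO val2017.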
evaluation = dict(metric=['bbox', 'segm'])
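
# Usage sketch (an assumption, not part of this config): with the standard
# MMDetection 2.x tools/ scripts and this file placed at
# configs/mask2former/mask2former_r50_lsj_8x2_50e_coco.py, training could be
# launched as follows.
#   # single-GPU training
#   python tools/train.py configs/mask2former/mask2former_r50_lsj_8x2_50e_coco.py
#   # 8-GPU training, matching the 8x2 schedule in the config name
#   bash tools/dist_train.sh configs/mask2former/mask2former_r50_lsj_8x2_50e_coco.py 8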