mmsegmentation使い方

・git clone
GitHub - open-mmlab/mmsegmentation: OpenMMLab Semantic Segmentation Toolbox and Benchmark.

・get_started.md

step0にmim install mmengineを追加。

mmsegmentation/get_started.md at master · open-mmlab/mmsegmentation · GitHub

・自前データの学習

MMSegmentationによる多数クラス画像(Multi Class)のセマンティックセグメンテーション(Semantic Segmentation). - Qiita
4節を参照。

https://github.com/alexgkendall/SegNet-Tutorial をmmsegmentation直下に置く。
以下の学習データ生成コードを実行。

import os.path as osp
import mmcv


def main():
data_root = 'SegNet-Tutorial/CamVid'
ann_dir = 'trainannot'

split_dir = 'splits_resnet50A'
mmcv.mkdir_or_exist(osp.join(data_root, split_dir))
filename_list = [osp.splitext(filename)[0] for filename in mmcv.scandir(osp.join(data_root, ann_dir), suffix='.png')]
with open(osp.join(data_root, split_dir, 'train.txt'), 'w') as f:
train_length = int(len(filename_list)*4/5)
f.writelines(line + '\n' for line in filename_list[:train_length])
with open(osp.join(data_root, split_dir, 'val.txt'), 'w') as f:
f.writelines(line + '\n' for line in filename_list[train_length:])


if __name__ == '__main__':
main()

CamVidフォルダには元々train.txtなどが含まれるが、mmsegmentationのフォーマットに合っていないのでそのままは使えない。
新たに生成したtrain.txtなどはフォルダ名や拡張子を除くbasenameのみの羅列。

その後、以下の学習コードを実行。

import os.path as osp
import mmcv
from mmcv import Config
from mmseg.datasets.builder import DATASETS
from mmseg.datasets.custom import CustomDataset
from mmseg.datasets import build_dataset
from mmseg.apis import set_random_seed
from mmseg.models import build_segmentor
from mmseg.apis import train_segmentor


# Location of the CamVid data from the SegNet tutorial repository,
# relative to the mmsegmentation checkout root.
data_root = 'SegNet-Tutorial/CamVid'
img_dir = 'train'          # 480x360 RGB images
ann_dir = 'trainannot'     # 480x360 label images (pixel value == class index)

# CamVid class names; tuple index == pixel value in the annotation images
# (e.g. pixel value 0 is 'sky').  Spelling fixed to the canonical CamVid
# labels ('Building', 'SignSymbol'); these strings are display metadata only.
classes = (
    'sky', 'Building', 'Pole', 'Road_marking', 'Road', 'Pavement', 'Tree',
    'SignSymbol', 'Fence', 'Car', 'Pedestrian', 'Bicyclist'
)

# One RGB display colour per class, same order as `classes`.
palette = [
    [128, 128, 128], [128, 0, 0], [192, 192, 128], [255, 69, 0], [128, 64, 128], [60, 40, 222],
    [128, 128, 0], [192, 128, 128], [64, 64, 128], [64, 0, 128], [64, 64, 0], [0, 128, 192]
]


@DATASETS.register_module()
class splits_resnet50A(CustomDataset):
    """CamVid-style dataset: .png images, .png label maps, basename split file.

    `split` is a text file listing one extension-less basename per line,
    relative to `data_root` (as generated by the split script above).
    """

    CLASSES = classes
    PALETTE = palette

    def __init__(self, split, **kwargs):
        super().__init__(img_suffix='.png', seg_map_suffix='.png', split=split, **kwargs)
        # Validate with explicit raises rather than `assert`: asserts are
        # stripped under `python -O`, which would silently skip this check.
        if not osp.exists(self.img_dir):
            raise FileNotFoundError(f'image directory not found: {self.img_dir}')
        if self.split is None:
            raise ValueError('splits_resnet50A requires an explicit split file')


def _configure_model(cfg):
    """Adapt the Cityscapes PSPNet heads/metadata to the 12 CamVid classes."""
    cfg.checkpoint_config.meta = dict(CLASSES=classes, PALETTE=palette)
    # Plain BN instead of the config's default norm (single-GPU training).
    cfg.norm_cfg = dict(type='BN', requires_grad=True)
    cfg.model.backbone.norm_cfg = cfg.norm_cfg
    cfg.model.decode_head.norm_cfg = cfg.norm_cfg
    cfg.model.auxiliary_head.norm_cfg = cfg.norm_cfg
    cfg.model.decode_head.num_classes = len(classes)
    cfg.model.auxiliary_head.num_classes = len(classes)


def _configure_data(cfg, width, height):
    """Point the config at the CamVid dataset and install its pipelines."""
    cfg.dataset_type = 'splits_resnet50A'
    cfg.data_root = data_root

    cfg.data.samples_per_gpu = 8
    cfg.data.workers_per_gpu = 8

    cfg.img_norm_cfg = dict(
        mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
    cfg.crop_size = (256, 256)

    cfg.train_pipeline = [
        dict(type='LoadImageFromFile'),
        dict(type='LoadAnnotations'),
        # dict(type='Resize', img_scale=(width, height), ratio_range=(0.5, 2.0)),
        dict(type='RandomCrop', crop_size=cfg.crop_size, cat_max_ratio=0.75),
        dict(type='RandomFlip', flip_ratio=0.5),
        # dict(type='PhotoMetricDistortion'),
        dict(type='Normalize', **cfg.img_norm_cfg),
        # dict(type='Pad', size=cfg.crop_size, pad_val=0, seg_pad_val=255),
        dict(type='DefaultFormatBundle'),
        dict(type='Collect', keys=['img', 'gt_semantic_seg']),
    ]
    cfg.test_pipeline = [
        dict(type='LoadImageFromFile'),
        dict(
            type='MultiScaleFlipAug',
            img_scale=(width, height),
            flip=False,
            transforms=[
                dict(type='Resize', keep_ratio=True),
                # dict(type='RandomFlip'),
                dict(type='Normalize', **cfg.img_norm_cfg),
                dict(type='ImageToTensor', keys=['img']),
                dict(type='Collect', keys=['img']),
            ])
    ]

    # train and val share everything except the pipeline and the split file.
    for subset, pipeline, split_file in (
        (cfg.data.train, cfg.train_pipeline, 'splits_resnet50A/train.txt'),
        (cfg.data.val, cfg.test_pipeline, 'splits_resnet50A/val.txt'),
    ):
        subset.type = cfg.dataset_type
        subset.data_root = cfg.data_root
        subset.img_dir = img_dir
        subset.ann_dir = ann_dir
        subset.pipeline = pipeline
        subset.split = split_file


def _configure_runtime(cfg):
    """Schedule, logging, seeding, and device options for a single-GPU run."""
    cfg.work_dir = './work_dirs/tutorial_pspnet_r50A'

    cfg.runner.max_iters = 40000
    cfg.log_config.interval = 10
    cfg.evaluation.interval = 200
    cfg.checkpoint_config.interval = 1000

    cfg.seed = 0
    set_random_seed(0, deterministic=False)
    cfg.gpu_ids = range(1)
    cfg.device = 'cuda'


def main():
    """Fine-tune PSPNet-R50 on CamVid starting from the Cityscapes config."""
    # Native CamVid image size (see the notes at the end of this file).
    width, height = 480, 360

    cfg = Config.fromfile('configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py')
    _configure_model(cfg)
    _configure_data(cfg, width, height)
    _configure_runtime(cfg)

    datasets = [build_dataset(cfg.data.train)]
    segmentor = build_segmentor(
        cfg.model, train_cfg=cfg.get('train_cfg'), test_cfg=cfg.get('test_cfg')
    )
    segmentor.CLASSES = datasets[0].CLASSES

    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    train_segmentor(segmentor, datasets, cfg, distributed=False, validate=True, meta=dict())


if __name__ == '__main__':
    main()


trainフォルダの画像は480x360のRGB画像。
trainannotフォルダの画像は480x360のラベル画像(画素値=ラベル)。
例えば、画素値0の場所はsky。