Segmentation with Detectron2

Posted at 2022-09-23

Image analysis with Detectron2 on a Jetson Xavier

Environment

Name         Version
Ubuntu       20.04
Detectron2   0.4
PyTorch      1.11.0+cu113
torchvision  0.12.0+cu113
CUDA         11.3
Python       3.8.10
JetPack      4.4
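To confirm the installed versions match this table, a minimal check (a sketch; these are the standard version attributes of each package):

import torch, torchvision, detectron2

print(detectron2.__version__)    # expected: 0.4
print(torch.__version__)         # expected: 1.11.0+cu113
print(torchvision.__version__)   # expected: 0.12.0+cu113
print(torch.version.cuda)        # expected: 11.3
print(torch.cuda.is_available())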

When checking out a specific tag of the repository, I referred to this article.

Annotation

  • Detectron2 appears to support the COCO format, so COCO-Annotator is used as the annotation tool (a quick check of the exported format follows below)
  • COCO-Annotator needs a Docker environment; for setting up Docker I referred to this article
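Before training, it is worth checking that the exported JSON really follows the COCO instance format. A minimal sketch, assuming the file is the one registered in the training script below (the 'person' category name is an assumption):

import json

# load the annotation file exported by COCO-Annotator
with open("/home/mananishigaki/d2/people_detection.json") as f:
    coco = json.load(f)

# a COCO instance file has these three top-level lists
print(len(coco["images"]), "images")
print(len(coco["annotations"]), "annotations")
print([c["name"] for c in coco["categories"]])  # expected: ['person'] (assumption)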

Training with Detectron2

import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()

# Model Zoo, the pretrained-model catalog (https://github.com/facebookresearch/detectron2/blob/main/MODEL_ZOO.md)
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
# other common libraries
import numpy as np
import os, json, cv2, random

from detectron2.data.datasets import register_coco_instances
register_coco_instances("leaf", {}, "/home/mananishigaki/d2/people_detection.json", "/home/mananishigaki/d2/people_detection")
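# (Optional sanity check, a sketch not in the original article: visualize one
#  registered sample to confirm the JSON and the image folder line up.)
# dataset_dicts = DatasetCatalog.get("leaf")
# d = random.choice(dataset_dicts)
# img = cv2.imread(d["file_name"])
# vis = Visualizer(img[:, :, ::-1], metadata=MetadataCatalog.get("leaf"), scale=0.5)
# cv2.imshow("ground truth", vis.draw_dataset_dict(d).get_image()[:, :, ::-1])
# cv2.waitKey(0)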

from detectron2.engine import DefaultTrainer

cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.DATASETS.TRAIN = ("leaf",)
cfg.DATASETS.TEST = ()
cfg.DATALOADER.NUM_WORKERS = 2
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")  # Let training initialize from model zoo
cfg.SOLVER.IMS_PER_BATCH = 2
cfg.SOLVER.BASE_LR = 0.00025  # learning rate
cfg.SOLVER.MAX_ITER = 300    # number of training iterations
cfg.SOLVER.STEPS = []        # do not decay learning rate
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128   # faster, and good enough for this toy dataset (default: 512)
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1  # only one class (person)
os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
trainer = DefaultTrainer(cfg) 
trainer.resume_or_load(resume=False)
trainer.train()
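The trained weights are written to cfg.OUTPUT_DIR (./output/model_final.pth by default). The article registers no test split, but as a sketch, if a validation set were registered with register_coco_instances under a hypothetical name "leaf_val", COCO-style evaluation could look like this:

from detectron2.evaluation import COCOEvaluator, inference_on_dataset
from detectron2.data import build_detection_test_loader

# "leaf_val" is a hypothetical validation split, registered just like "leaf" above
evaluator = COCOEvaluator("leaf_val", output_dir=cfg.OUTPUT_DIR)
val_loader = build_detection_test_loader(cfg, "leaf_val")
print(inference_on_dataset(trainer.model, val_loader, evaluator))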

Testing


import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()

# Model Zoo, the pretrained-model catalog (https://github.com/facebookresearch/detectron2/blob/main/MODEL_ZOO.md)
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
# import some common libraries
import numpy as np
import os, json, cv2, random
import torch
from PIL import Image
# on Colab, use cv2_imshow instead of cv2.imshow to display cv2 images

cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1  # only one class
cfg.MODEL.WEIGHTS = "/home/mananishigaki/d2/output/model_final.pth"  # trained weights (os.path.join would discard cfg.OUTPUT_DIR when given an absolute path, so pass the path directly)
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.6
cfg.MODEL.DEVICE = "cpu"
predictor = DefaultPredictor(cfg)


people_metadata = MetadataCatalog.get("leaf")
imgPath = "/home/mananishigaki/d2/3people.jpg"
im = cv2.imread(imgPath)
    
outputs = predictor(im)
v = Visualizer(im[:, :, ::-1],
               metadata=people_metadata, 
               scale=1.0
)
v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
#cv2.imshow("plan",v.get_image()[:, :, ::-1])
#result = cv2.imwrite("picture-gray.jpg",v.get_image()[:, :, ::-1])

print(outputs)
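outputs["instances"] is a detectron2 Instances object; the fields that the segmentation step below relies on can be inspected directly (a sketch of the standard Mask R-CNN output fields):

inst = outputs["instances"]
print(inst.pred_classes)      # category id per detection (0 = the single "person" class)
print(inst.scores)            # confidence score per detection
print(inst.pred_boxes)        # Boxes: one (x1, y1, x2, y2) per detection
print(inst.pred_masks.shape)  # (N, H, W): one boolean bitmap per detection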


Segmentation

For segmenting with the bitmaps, I referred to this article.

# The indices of classes_list correspond to the category IDs of the detected objects
# (after merge_from_file this is "coco_2017_train", whose class 0 is "person")
classes_list = MetadataCatalog.get(cfg.DATASETS.TRAIN[0]).thing_classes

objects = []
for id in outputs["instances"].pred_classes.to("cpu").numpy():
  objects.append(classes_list[id])
# list of (estimated object name, confidence score) pairs
object_est = [(k, i) for k, i in zip(objects, outputs["instances"].scores.to("cpu").numpy())]

print(object_est)

# # Get the bounding boxes -> [x1, y1, x2, y2] (top-left and bottom-right corners)
# bounding_boxes = outputs["instances"].pred_boxes.tensor.cpu().numpy()
# # copy the image so the original is not drawn on
# img_copy = im.copy()
# # iterate over the detected objects
# for i in range(len(bounding_boxes)):
#   # top-left corner (already a numpy array, so no detach() is needed)
#   left_pt = tuple(bounding_boxes[i][0:2].astype(int))
#   # bottom-right corner
#   right_pt = tuple(bounding_boxes[i][2:4].astype(int))
#   # cv2.putText(img_copy, f'{i}', left_pt, cv2.FONT_HERSHEY_PLAIN, 4, (255, 255, 255), 5, cv2.LINE_AA)
#   # outline the detection
#   cv2.rectangle(img_copy, left_pt, right_pt, (0, 0, 155), 1)

# cv2.imshow('image', img_copy)
# cv2.waitKey(0)

object_estimation = object_est
dct = {imgPath:object_estimation}
# Extract the bitmaps (one boolean mask per detected instance)
bool_array = outputs["instances"].pred_masks

for j, b_array in enumerate(bool_array):
    # convert the tensor to a numpy array
    array = b_array.cpu().numpy()
    # invert the mask: True wherever the instance is NOT present
    inv_bool_array = ~array
    # a cv2 image is a numpy array, so paint the inverted (True) region gray
    copy_img = im.copy()
    copy_img[inv_bool_array] = [128, 128, 128]
    print(type(copy_img))
    # cv2 images are BGR while PIL assumes RGB, so flip the channel order
    out = Image.fromarray(copy_img[:, :, ::-1])
    print(out.mode)
    # index the file name so each instance is saved to its own file
    out.save(f'save_{j}.jpg')


    # We can use `Visualizer` to draw the predictions on the image.
    #v = Visualizer(im[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.0)
    #out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
    # [:, :, ::-1] swaps the channel order back from RGB to BGR for cv2
    #cv2.imshow("result", out.get_image()[:, :, ::-1])
    #result2 = cv2.imwrite("result.jpg", out.get_image()[:, :, ::-1])
    