【Ubuntu】【Python】Facial landmark detection with dlib

Posted at 2016-10-11

Facial landmark detection is something dlib currently has and OpenCV does not.

For now I colored the points in a rather brute-force way, but there must be a smarter approach.
Or rather, I probably just haven't read the reference carefully enough...

The video is below.
Since I'm too embarrassed to show my face, it is hidden using the face detection result.
https://www.youtube.com/watch?v=s2YtXqcBuPY

The source code is below.
To run it, you need to place the trained facial landmark model in the same directory as the .py file:
→ shape_predictor_68_face_landmarks.dat (http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2)
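If you don't want to download and decompress the .bz2 by hand, something like the following also works. This is just a minimal sketch using only the Python 3 standard library (urllib.request and bz2); the script name download_landmark_model.py is my own, not part of the original article.

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# download_landmark_model.py -- minimal sketch (hypothetical helper, not from the article)

import bz2
import urllib.request

MODEL_URL = "http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2"
MODEL_PATH = "./shape_predictor_68_face_landmarks.dat"

# Fetch the compressed model to a temporary file, then write the decompressed
# .dat next to the script so face_landmark_detector.py can find it.
compressed_path, _ = urllib.request.urlretrieve(MODEL_URL)
with bz2.open(compressed_path, "rb") as src, open(MODEL_PATH, "wb") as dst:
    dst.write(src.read())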


#!/usr/bin/env python
# -*- coding: utf-8 -*-

'''
face_landmark_detector.py.

Usage:
  face_landmark_detector.py [<video source>] [<resize rate>] [<privacy mask>]
'''

import sys
import dlib
import cv2
import time
import copy

# Video source: device index or file path (defaults to camera 0)
try:
    fn = sys.argv[1]
    if fn.isdigit():
        fn = int(fn)
except IndexError:
    fn = 0

# Resize rate used to shrink frames before detection (defaults to 1 = no resize)
try:
    resize_rate = int(sys.argv[2])
except (IndexError, ValueError):
    resize_rate = 1

# Privacy mask: 1 hides detected faces with a filled rectangle (defaults to 0)
try:
    privacy_mask = int(sys.argv[3])
except (IndexError, ValueError):
    privacy_mask = 0

predictor_path = "./shape_predictor_68_face_landmarks.dat"

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)

video_input = cv2.VideoCapture(fn)

while video_input.isOpened():
    ret, frame = video_input.read()
    if not ret:
        break
    temp_frame = copy.deepcopy(frame)

    # Shrink the working frame to reduce processing load (when a resize rate is given)
    height, width = frame.shape[:2]
    temp_frame = cv2.resize(frame, (int(width / resize_rate), int(height / resize_rate)))

    # Face detection
    start = time.time()
    dets = detector(temp_frame, 1)
    elapsed_time = time.time() - start
    print("detector processing time:{0}[sec]".format(elapsed_time))

    for k, d in enumerate(dets):
        print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
            k, d.left(), d.top(), d.right(), d.bottom()))

        # Facial landmark detection
        start = time.time()
        shape = predictor(temp_frame, d)
        elapsed_time = time.time() - start
        print("predictor processing time:{0}[sec]".format(elapsed_time))

        # Drawing (privacy mask if enabled, then the landmark points)
        rect_offset = 20
        if privacy_mask == 1:
            cv2.rectangle(frame, (int(d.left() * resize_rate) - rect_offset, int(d.top() * resize_rate) - rect_offset), \
                (int(d.right() * resize_rate) + rect_offset, int(d.bottom() * resize_rate) + rect_offset), (255, 255, 255), -1)

        for shape_point_count in range(shape.num_parts):
            shape_point = shape.part(shape_point_count)
            if shape_point_count < 17: # [0-16] face contour
                cv2.circle(frame, (int(shape_point.x * resize_rate), int(shape_point.y * resize_rate)), 2, (0, 0, 255), -1)
            elif shape_point_count < 22: # [17-21] eyebrow (right)
                cv2.circle(frame, (int(shape_point.x * resize_rate), int(shape_point.y * resize_rate)), 2, (0, 255, 0), -1)
            elif shape_point_count < 27: # [22-26] eyebrow (left)
                cv2.circle(frame, (int(shape_point.x * resize_rate), int(shape_point.y * resize_rate)), 2, (255, 0, 0), -1)
            elif shape_point_count < 31: # [27-30] nose bridge
                cv2.circle(frame, (int(shape_point.x * resize_rate), int(shape_point.y * resize_rate)), 2, (0, 255, 255), -1)
            elif shape_point_count < 36: # [31-35] nostrils / nose tip
                cv2.circle(frame, (int(shape_point.x * resize_rate), int(shape_point.y * resize_rate)), 2, (255, 255, 0), -1)
            elif shape_point_count < 42: # [36-41] eye (right)
                cv2.circle(frame, (int(shape_point.x * resize_rate), int(shape_point.y * resize_rate)), 2, (255, 0, 255), -1)
            elif shape_point_count < 48: # [42-47] eye (left)
                cv2.circle(frame, (int(shape_point.x * resize_rate), int(shape_point.y * resize_rate)), 2, (0, 0, 128), -1)
            elif shape_point_count < 55: # [48-54] upper lip (outer edge)
                cv2.circle(frame, (int(shape_point.x * resize_rate), int(shape_point.y * resize_rate)), 2, (0, 128, 0), -1)
            elif shape_point_count < 60: # [55-59] lower lip (outer edge)
                cv2.circle(frame, (int(shape_point.x * resize_rate), int(shape_point.y * resize_rate)), 2, (128, 0, 0), -1)
            elif shape_point_count < 65: # [60-64] upper lip (inner edge)
                cv2.circle(frame, (int(shape_point.x * resize_rate), int(shape_point.y * resize_rate)), 2, (0, 128, 255), -1)
            elif shape_point_count < 68: # [65-67] lower lip (inner edge)
                cv2.circle(frame, (int(shape_point.x * resize_rate), int(shape_point.y * resize_rate)), 2, (128, 255, 0), -1)

    cv2.imshow('face landmark detector', frame)

    c = cv2.waitKey(50) & 0xFF

    if c==27: # ESC
        break

video_input.release()
cv2.destroyAllWindows()
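As for the "smarter approach" to coloring mentioned at the top: one option (my own sketch, not something taken from dlib or worked out in this article) is to keep the index ranges of the 68-point model and their colors in a single table and look the color up per point, which collapses the long if/elif chain into one cv2.circle() call. The LANDMARK_COLORS table and color_for() helper below are hypothetical names.

# Sketch of a table-driven coloring scheme (assumed alternative, not the article's code).
# Each entry is (exclusive upper index bound, BGR color) for one landmark group.
LANDMARK_COLORS = [
    (17, (0, 0, 255)),    # [0-16]  face contour
    (22, (0, 255, 0)),    # [17-21] eyebrow (right)
    (27, (255, 0, 0)),    # [22-26] eyebrow (left)
    (31, (0, 255, 255)),  # [27-30] nose bridge
    (36, (255, 255, 0)),  # [31-35] nostrils / nose tip
    (42, (255, 0, 255)),  # [36-41] eye (right)
    (48, (0, 0, 128)),    # [42-47] eye (left)
    (55, (0, 128, 0)),    # [48-54] upper lip (outer edge)
    (60, (128, 0, 0)),    # [55-59] lower lip (outer edge)
    (65, (0, 128, 255)),  # [60-64] upper lip (inner edge)
    (68, (128, 255, 0)),  # [65-67] lower lip (inner edge)
]

def color_for(index):
    # Return the color of the first group whose upper bound is still above the index.
    for upper, color in LANDMARK_COLORS:
        if index < upper:
            return color
    return (255, 255, 255)  # fallback; not reached for the 68-point model

# Inside the drawing loop, the whole if/elif chain then becomes:
#     cv2.circle(frame,
#                (int(shape_point.x * resize_rate), int(shape_point.y * resize_rate)),
#                2, color_for(shape_point_count), -1)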

That's all.
