
Recording from a Webcam with Python and OpenCV

What is OpenCV?

OpenCV (Open Source Computer Vision Library) is a library that implements a wide range of functions for processing images and video. It was originally developed and released by Intel; development moved to Willow Garage in 2009 and later to Itseez. Bindings are available for a variety of programming languages, including Python and C++.

Main modules

OpenCV covers both basic and more advanced image and video processing. The main modules are listed below; a short imgproc usage sketch follows the list.

  • Core functionality (core) - basic data structures and operations.
  • Image processing (imgproc) - linear/non-linear image filtering, geometric image transformations, and other image-processing routines.
  • Video analysis (video) - motion estimation, background subtraction, and other video-analysis routines.
  • Camera calibration and 3D reconstruction (calib3d) - basic multi-view geometry algorithms, elements of 3D reconstruction, and so on.
  • 2D features framework (features2d) - feature detectors, descriptors, and so on.
  • Object detection (objdetect) - detection of predefined objects.
  • Many other modules
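
As a quick illustration of the imgproc module, the sketch below blurs an image and runs Canny edge detection on it. It reuses the sample.jpg file from the example further down; the file name and thresholds are only placeholders.

import cv2 as cv

img = cv.imread("sample.jpg")                # placeholder file name
if img is None:
    raise SystemExit("Could not read the image.")
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)   # convert to grayscale
blurred = cv.GaussianBlur(gray, (5, 5), 0)   # linear filtering (imgproc)
edges = cv.Canny(blurred, 100, 200)          # edge detection (imgproc)
cv.imwrite("edges.png", edges)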

Installation

For Python, install opencv-python with pip. The module is imported as cv2.

pip install opencv-python
import cv2
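
To confirm the installation worked, importing the module and printing its version is a quick sanity check:

import cv2
print(cv2.__version__)  # prints the installed OpenCV version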

Basic image processing

import numpy as np
import cv2 as cv
import sys

### 1. Read an image, display it, and save a copy
img = cv.imread(cv.samples.findFile("sample.jpg"))
if img is None:
    sys.exit("Could not read the image.")
cv.imshow("Display window", img)
k = cv.waitKey(0)            # wait for a key press
if k == ord("s"):            # press 's' to save a copy
    cv.imwrite("output.jpg", img)
cv.destroyAllWindows()

### 2. Create a black image and draw shapes and text on it
import numpy as np
import cv2 as cv
# Create a black image
img = np.zeros((512,512,3), np.uint8)
cv.line(img,(0,0),(511,511),(255,0,0),5)
cv.rectangle(img,(384,0),(510,128),(0,255,0),3)
cv.circle(img,(447,63), 63, (0,0,255), -1)
cv.ellipse(img,(256,256),(100,50),0,0,180,255,-1)
pts = np.array([[10,5],[20,30],[70,20],[50,10]], np.int32)
pts = pts.reshape((-1,1,2))
cv.polylines(img,[pts],True,(0,255,255))
font = cv.FONT_HERSHEY_SIMPLEX
cv.putText(img,'Sample text',(10,500), font, 4,(255,255,255),2,cv.LINE_AA)
cv.imwrite("output.jpg", img)
cv.destroyAllWindows()

### 3. Draw a circle on mouse double-click
events = [i for i in dir(cv) if 'EVENT' in i]
def draw_circle(event,x,y,flags,param):
    if event == cv.EVENT_LBUTTONDBLCLK:
        cv.circle(img,(x,y),100,(255,0,0),-1)

#['EVENT_FLAG_ALTKEY', 'EVENT_FLAG_CTRLKEY', 'EVENT_FLAG_LBUTTON', 'EVENT_FLAG_MBUTTON', 
#'EVENT_FLAG_RBUTTON', 'EVENT_FLAG_SHIFTKEY', 'EVENT_LBUTTONDBLCLK', 'EVENT_LBUTTONDOWN', 
#'EVENT_LBUTTONUP', 'EVENT_MBUTTONDBLCLK', 'EVENT_MBUTTONDOWN', 'EVENT_MBUTTONUP', 'EVENT_MOUSEHWHEEL', 
#'EVENT_MOUSEMOVE', 'EVENT_MOUSEWHEEL', 'EVENT_RBUTTONDBLCLK', 'EVENT_RBUTTONDOWN', 'EVENT_RBUTTONUP']

img = np.zeros((512,512,3), np.uint8)
cv.namedWindow('image')
cv.setMouseCallback('image',draw_circle)
while(1):
    cv.imshow('image',img)
    if cv.waitKey(20) & 0xFF == 27: # Esc
        break
cv.imwrite("input/draw_circle.png", img)
cv.destroyAllWindows()

### 4. Show trackbars that change the image color
import numpy as np
import cv2 as cv
def nothing(x):
    pass
img = np.zeros((300,512,3), np.uint8)
cv.namedWindow('image')
cv.createTrackbar('R','image',0,255,nothing)
cv.createTrackbar('G','image',0,255,nothing)
cv.createTrackbar('B','image',0,255,nothing)
switch = '0 : OFF \n1 : ON'
cv.createTrackbar(switch, 'image',0,1,nothing)
while(1):
    cv.imshow('image',img)
    k = cv.waitKey(1) & 0xFF
    if k == 27:
        break
    # get current positions of four trackbars
    r = cv.getTrackbarPos('R','image')
    g = cv.getTrackbarPos('G','image')
    b = cv.getTrackbarPos('B','image')
    s = cv.getTrackbarPos(switch,'image')
    if s == 0:
        img[:] = 0
    else:
        img[:] = [b,g,r]
cv.destroyAllWindows()

Basic video processing

In video processing, image processing is applied to the stream one frame at a time.
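
The basic pattern is a loop that reads a frame, processes it, and displays (or writes) the result. A minimal sketch, assuming a webcam at device index 0, looks like this:

import cv2 as cv

cap = cv.VideoCapture(0)      # 0 = default webcam (assumption); pass a file path for a video file
while True:
    ret, frame = cap.read()   # grab one frame
    if not ret:
        break
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)  # per-frame image processing
    cv.imshow('gray', gray)
    if cv.waitKey(1) == ord('q'):
        break
cap.release()
cv.destroyAllWindows()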

### 1. Read a video file (or capture from a webcam) frame by frame, and save the frames and an output video
import numpy as np
import cv2 as cv
import glob
import re
import shutil
import os
cap = cv.VideoCapture('vtest.avi')
# To use a webcam instead:
#cap = cv.VideoCapture(0)
fourcc = cv.VideoWriter_fourcc(*'XVID')
w = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))    # match the writer size to the source frames
h = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT))
out = cv.VideoWriter('output.avi', fourcc, 20.0, (w, h))

if not cap.isOpened():
    print("Cannot open camera")
    exit()
if os.path.isdir('input/frame'):
    shutil.rmtree('input/frame')
os.makedirs('input/frame', exist_ok=True)
i = 0
while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    if not ret:
        print("Can't receive frame (stream end?). Exiting ...")
        break
    cv.imwrite("input/frame/image%s.png" % i, frame)  # save each frame as an image
    frame = cv.flip(frame, 0)
    out.write(frame)
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    cv.imshow('frame', frame)
    if cv.waitKey(1) == ord('q'):
        break
    i += 1
cap.release()
out.release()
cv.destroyAllWindows()

### 2. Use background subtraction to find the moving regions of the scene
import cv2 as cv
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--input', type=str, help='Path to a video or a sequence of image.', default='vtest.avi')
parser.add_argument('--algo', type=str, help='Background subtraction method (KNN, MOG2).', default='MOG2')
args = parser.parse_args()
if args.algo == 'MOG2':
    backSub = cv.createBackgroundSubtractorMOG2()
else:
    backSub = cv.createBackgroundSubtractorKNN()

capture = cv.VideoCapture('vtest.avi')
#capture = cv.VideoCapture(0)
fps = int(capture.get(cv.CAP_PROP_FPS))
w = int(capture.get(cv.CAP_PROP_FRAME_WIDTH))
h = int(capture.get(cv.CAP_PROP_FRAME_HEIGHT)) 
fourcc = cv.VideoWriter_fourcc(*'mp4v')
out1 = cv.VideoWriter('output1.mp4', fourcc, 20.0, (w, h))
out2 = cv.VideoWriter('output2.mp4', fourcc, 20.0, (w, h))

ret, frame = capture.read()
starttime = capture.get(cv.CAP_PROP_POS_MSEC)
time = 8                       # record for 8 seconds
n_frames = int(fps * time)
for i in range(n_frames):
    ret, frame = capture.read()
    if not ret:
        break
    fgMask = backSub.apply(frame)
    cv.rectangle(frame, (w - 80, h - 40), (w - 10, h - 10), (255,255,255), -1) # white box
    cv.putText(frame, '{:.1f}s'.format((capture.get(cv.CAP_PROP_POS_MSEC) - starttime)/1000), (w - 80, h - 20), 
               cv.FONT_HERSHEY_SIMPLEX, 0.5 , (0,0,255)) # elapsed time
    cv.imshow('Frame', frame)
    cv.imshow('FG Mask', fgMask)
    keyboard = cv.waitKey(30)
    #out1.write(frame)
    out2.write(cv.cvtColor(fgMask, cv.COLOR_GRAY2BGR))  # the mask is single-channel, so convert before writing
    if keyboard == ord('q') or keyboard == 27: # 'q' or Esc
        break

capture.release()
out1.release()
out2.release()
cv.destroyAllWindows()

### 3. Track feature points and follow their motion (Lucas-Kanade optical flow)
import numpy as np
import cv2 as cv
import argparse

cap = cv.VideoCapture('vtest.avi')
#cap = cv.VideoCapture(0)
feature_params = dict( maxCorners = 100,
                       qualityLevel = 0.3,
                       minDistance = 7,
                       blockSize = 7 )
lk_params = dict( winSize  = (15, 15),
                  maxLevel = 2,
                  criteria = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03))
color = np.random.randint(0, 255, (100, 3))
ret, old_frame = cap.read()
old_gray = cv.cvtColor(old_frame, cv.COLOR_BGR2GRAY)
p0 = cv.goodFeaturesToTrack(old_gray, mask = None, **feature_params)
mask = np.zeros_like(old_frame)
while(True):
    ret, frame = cap.read()
    if not ret:
        break
    frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    p1, st, err = cv.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
    if p1 is not None:
        good_new = p1[st==1]
        good_old = p0[st==1]
    for i, (new, old) in enumerate(zip(good_new, good_old)):
        a, b = new.ravel()
        c, d = old.ravel()
        mask = cv.line(mask, (int(a), int(b)), (int(c), int(d)), color[i].tolist(), 2)
        frame = cv.circle(frame, (int(a), int(b)), 5, color[i].tolist(), -1)
    img = cv.add(frame, mask)
    cv.imshow('frame', img)
    k = cv.waitKey(30) & 0xff
    if k == 27:
        break
    old_gray = frame_gray.copy()
    p0 = good_new.reshape(-1, 1, 2)

cap.release()
cv.destroyAllWindows()

### 4. Concatenate multiple videos

import os
import sys
from random import randint
import time
import datetime
import glob
import cv2 as cv

def comb_movie(movie_files, out_path):
    # Take the codec, fps, and frame size from the first file
    movie = cv.VideoCapture(movie_files[0])
    fourcc_int = int(movie.get(cv.CAP_PROP_FOURCC))
    fourcc_str = fourcc_int.to_bytes(4, 'little').decode('utf-8')
    print('Codec:', fourcc_str)
    fps = int(movie.get(cv.CAP_PROP_FPS))
    height = int(movie.get(cv.CAP_PROP_FRAME_HEIGHT))
    width = int(movie.get(cv.CAP_PROP_FRAME_WIDTH))
    print(fps, height, width)
    movie.release()
    fourcc = cv.VideoWriter_fourcc(*fourcc_str)
    out = cv.VideoWriter(out_path, fourcc, fps, (width, height))
    # Append every frame of every file, in order
    for movies in movie_files:
        print(movies)
        movie = cv.VideoCapture(movies)
        if movie.isOpened():
            ret, frame = movie.read()
        else:
            ret = False
        while ret:
            out.write(frame)
            ret, frame = movie.read()
        movie.release()
    out.release()

now = datetime.datetime.now()
today = now.strftime('%Y-%m-%d')
out_path = "./input/%s_merge.avi"%today

try:
    os.remove(out_path)
except OSError:
    pass

files = sorted(glob.glob("*.avi"))
comb_movie(files, out_path)