keras
Semantic segmentation is performed using Keras, a high-level wrapper around TensorFlow.
Training environment
| Component | Details |
|:----------|:--------|
| OS | Ubuntu 16.04 LTS |
| GPU | ELSA GeForce GTX 1070 8GB S.A.C |
Data
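Judging from the file paths in the code below, the data is the Hotel sequence of the ETH BIWI Walking Pedestrians dataset: Dataset_Hotel/seq.avi (the video), obsmat.txt (per-frame pedestrian observations), destinations.txt (destination points), move_ped.txt (IDs of pedestrians that actually move), and per-ID annotation files under ID_data/.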
Implementation
```python
# coding:utf-8
# Plan-Action model: particle-filter pedestrian tracking
import numpy as np
import cv2
import cnf
import math
import h      # local module: homography transform (not shown here)
import h_inv  # local module: inverse homography transform (not shown here)
from submain import Particle
from submain import Rect
import submain
import os.path
import argparse
import sys
from multiprocessing import Pool
import skvideo.io
# argvs = sys.argv  # list holding the command-line arguments
# argc = len(argvs)
# parser = argparse.ArgumentParser()
# parser.add_argument('--model', type=str, required=True)
# parser.add_argument('--prob', type=str, required=True)
# parser.add_argument('--fluc', type=float, required=True)
# parser.add_argument('--Dnum', type=int, required=True)
# args = parser.parse_args()
Dataset = 1
img_h = 576   # frame height of seq.avi [px]
img_w = 720   # frame width of seq.avi [px]
rect_h = 50   # tracking-window height [px]
rect_w = 50   # tracking-window width [px]
v_mean = 0.7  # mean pedestrian speed for the motion model
v_std = 0.1   # standard deviation of that speed
# ---------------- Test data (pedestrians standing still are excluded) ----------------
pedestrian_list = []
move_ped = np.loadtxt("Dataset_Hotel/move_ped.txt")
for i in range(len(move_ped)):
    if move_ped[i] > 320:
        pedestrian_list.append(move_ped[i])
    if move_ped[i] > 420:
        break
pedestrian_list = list(map(int, pedestrian_list))  # materialize the map for reuse under Python 3
# print(pedestrian_list)
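# Note: the loop keeps the IDs above 320 and stops after the first ID above 420,
# so that boundary ID itself is appended before the break.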
# ----------------------------------------------------------------
obs = np.loadtxt("Dataset_Hotel/obsmat.txt")         # pedestrian observation data
dest = np.loadtxt("Dataset_Hotel/destinations.txt")  # destination data
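# In the ETH BIWI release each obsmat.txt row is, to my knowledge,
# [frame, pedestrian_ID, pos_x, pos_z, pos_y, v_x, v_z, v_y];
# columns 2 and 4 (pos_x, pos_y) are read below as the ground-truth world position.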
def Tracking(pedestrian):
    proposed = []          # for drawing
    overlaprate_list = []  # overlap rates collected for this pedestrian
    print("=====ID" + str(pedestrian) + "=====")
    obs_true = np.loadtxt("Dataset_Hotel/ID_data/ID_%04d" % pedestrian + ".txt")  # per-ID pedestrian data
    rect = Rect()
    p = [Particle() for _ in range(Particle.PN)]  # the particle set
    p_mean = Particle()                           # mean-state estimate over the particles
    i = 0  # row index into the target ID's data
    fgbg = cv2.createBackgroundSubtractorMOG2()   # background-subtraction object
    cap = cv2.VideoCapture("Dataset_Hotel/seq.avi")
    # cap = skvideo.io.VideoCapture("Dataset_Hotel/seq.avi")
    ret, frame = cap.read()
    print(cap.isOpened())
    cap.set(cv2.CAP_PROP_POS_MSEC, obs_true[0, 0] / 25.0 * 1000.0)  # seek to the first annotated frame (video is 25 fps)
    while cap.isOpened():  # press q to release the capture, or it will not reopen next time
        frame_num = cap.get(cv2.CAP_PROP_POS_FRAMES)
        ret, img_src = cap.read()
        if not ret:  # end of video
            break
        # ------------------------------ background subtraction ------------------------------
        fgmask = fgbg.apply(img_src, learningRate=0.01)
        img_dst = cv2.bitwise_and(img_src, img_src, mask=fgmask)
        # -------------------------------------------------------------------------------------
        # project the ground-truth world position into pixel coordinates
        gt = h_inv.h_inv(obs_true[1, 2], obs_true[1, 4])
        rect.y, rect.x = int(np.round(gt[0])), int(np.round(gt[1]))
        rect.h = rect_h
        rect.w = rect_w
        y = rect.y - rect_h // 2  # top-left corner of the tracking window
        x = rect.x - rect_w // 2
        if frame_num >= obs_true[0, 0]:  # once the annotated span has started
            if i < obs_true.shape[0] and obs_true[i, 0] == frame_num:
                if rect.selectObject and obs_true[1, 0] == frame_num:
                    # build the HSV reference histogram from the initial window
                    hsv_img = cv2.cvtColor(img_dst, cv2.COLOR_BGR2HSV)
                    tmp_hist, hist_x, hist_y = rect.CalcHSVHist_tmp(x, y, rect.w, rect.h, hsv_img)
                    roi_img = img_dst[rect.y - rect.h // 2:rect.y + rect.h // 2,
                                      rect.x - rect.w // 2:rect.x + rect.w // 2]
                    rect.bx, rect.by = obs_true[0, 2], obs_true[0, 4]
                    rect.x, rect.y = obs_true[1, 2], obs_true[1, 4]
                    rect.selectObject = False
                if rect.trackObject and not rect.startTracking and frame_num == obs_true[1, 0]:
                    rect.InitParticleSet(p)
                    rect.startTracking = True
                if rect.startTracking:
                    # one particle-filter cycle: predict, weight, resample, estimate
                    rect.PredictionParticleSet(p, frame_num, img_dst, v_mean, v_std, obs_true)
                    rect.CalcParticleWeight(p, img_dst, tmp_hist, hist_x, hist_y, frame_num)
                    rect.ResampleParticleSet(p)
                    rect.CalcMeanParticleSet(p, p_mean)
                    # rect.DrawParticleSet(p, p_mean, obs_true[i], img_dst)
                    rect.CalcOverlap(p_mean, obs_true[i], overlaprate_list)  # overlap with ground truth
                    # rect.changeVelocity(p)
                    # rect.DrawLocus(img_dst, p_mean, obs_true[i], i, pmlist, pmlist_random, obslist, Model)
                i += 1
        # cv2.imshow("Movie", img_dst)
        # if cv2.waitKey(10) & 0xFF == ord('q'):
        #     break
        if frame_num == obs_true[-1, 0]:  # no more annotations for this ID
            break
    cap.release()
    cv2.destroyAllWindows()
    return overlaprate_list
if __name__ == "__main__":
    # track every test pedestrian in parallel across 18 worker processes
    pool = Pool(18)
    o = pool.map(Tracking, pedestrian_list)
    # flatten the per-pedestrian overlap-rate lists into one flat list
    ol = [rate for rates in o for rate in rates]
    # np.savetxt("overlap_test.txt", ol)
```
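The local modules h and h_inv are not shown in this post. Judging from the call h_inv.h_inv(world_x, world_y) above, they most likely wrap the homography matrix H.txt that ships with the ETH BIWI dataset. A minimal sketch, assuming H.txt maps image pixels to world coordinates (so its inverse maps world points back to pixels); the names H, H_inv, and world_to_image are mine:

```python
import numpy as np

H = np.loadtxt("Dataset_Hotel/H.txt")  # image -> world homography (assumption)
H_inv = np.linalg.inv(H)               # world -> image

def world_to_image(wx, wy):
    """Project a world point to pixel coordinates (what h_inv.h_inv seems to do)."""
    q = H_inv.dot(np.array([wx, wy, 1.0]))
    return q[0] / q[2], q[1] / q[2]    # normalize the homogeneous coordinates
```

Likewise, Rect.CalcOverlap is defined in submain and not shown. If it measures the usual intersection-over-union between the estimated window and the same-sized ground-truth window, it would look roughly like the sketch below; overlap_rate and its axis-aligned fixed-size assumption are mine, not the author's code:

```python
def overlap_rate(ax, ay, bx, by, w=50, h=50):
    """IoU of two axis-aligned w x h windows centred at (ax, ay) and (bx, by)."""
    ix = max(0, w - abs(ax - bx))  # width of the intersection
    iy = max(0, h - abs(ay - by))  # height of the intersection
    inter = ix * iy
    union = 2 * w * h - inter
    return inter / union           # 1.0 = perfect overlap, 0.0 = disjoint
```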