Moving a camera trajectory onto the xz plane after obtaining it with colmap in nerfstudio

Overview

When you want to build a 3D point cloud from a video with nerfstudio, you often use colmap to obtain the camera trajectory.
Even when the footage is shot around an object at a constant height, the camera trajectory estimated by colmap does not end up lying on the xz plane.
Because this is inconvenient when post-processing the point cloud, I wrote code that moves the camera trajectory onto the xz plane after it has been obtained with colmap.

Rough outline of the algorithm

  1. Estimate a plane from the camera trajectory positions using eigenvalues and eigenvectors
  2. Compute the rotation matrix from Rodrigues' rotation formula (see the formula right after this list)
  3. Use the rotation matrix to transform the camera positions (and, at the same time, the camera orientations and the sparse_pc.ply obtained from colmap)
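
Step 2 uses Rodrigues' rotation formula. Writing \theta for the angle between the estimated plane normal n and the target normal n_target = (0, 1, 0), and K for the skew-symmetric cross-product matrix of the normalized rotation axis k = (n \times n_target) / \|n \times n_target\|, the rotation matrix that maps n onto n_target is

R = I + \sin\theta \, K + (1 - \cos\theta) \, K^2

which is what find_rotation_matrix in the code below computes.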

Implementation

import json
import numpy as np
import copy
import open3d as o3d

def estimate_plane(points):
    com = np.sum(points, axis=0) / len(points)
    # compute the centroid of the camera positions
    q = points - com
    # translate all points so that the centroid sits at the origin (uses numpy broadcasting)
    Q = np.dot(q.T, q)
    # 3x3 scatter matrix; the matrix product sums the outer products over all points
    la, vectors = np.linalg.eig(Q)
    # eigenvalues and eigenvectors; the eigenvectors are returned as columns
    plane_v = vectors.T[np.argmin(la)]
    # the eigenvector with the smallest eigenvalue is the normal of the best-fit plane

    return plane_v

def find_rotation_matrix(n, n_target):
    # Rodrigues' rotation formula: rotation that maps n onto n_target
    # (n is the unit eigenvector from estimate_plane, n_target is the unit vector (0, 1, 0))
    axis = np.cross(n, n_target)
    sin_theta = np.linalg.norm(axis)
    cos_theta = np.dot(n, n_target) / (np.linalg.norm(n) * np.linalg.norm(n_target))
    if sin_theta < 1e-8:
        # n is already parallel (or anti-parallel) to n_target; avoid dividing by zero
        return np.eye(3) if cos_theta > 0 else np.diag([1.0, -1.0, -1.0])
    axis = axis / sin_theta
    K = np.array([
        [0, -axis[2], axis[1]], 
        [axis[2], 0, -axis[0]], 
        [-axis[1], axis[0], 0]
    ])
    R = np.eye(3) + sin_theta * K + (1 - cos_theta) * np.dot(K, K)
    return R

def project_points(points, R):
    # apply the rotation R to every row vector of points
    return np.dot(points, R.T)

def calc_new_points(points, R = None, average_point = None):
    if R is None:
        # estimate the plane normal and the rotation only on the first call
        normal_vector = estimate_plane(points)
        n_target = np.array([0., 1., 0.])
        R = find_rotation_matrix(normal_vector, n_target)

    projected_points = project_points(points, R)

    if average_point is None:
        average_point = np.average(projected_points, axis=0)
    projected_points -= average_point

    return projected_points, R, average_point

def calc_new_axis(axis, R):
    # the camera axes are direction vectors, so they are only rotated (not translated)
    projected_axis = project_points(axis, R)
    return projected_axis

if __name__ == "__main__":
    path = "transforms.json"
    new_path = "new_transforms.json"
    ply_path = "sparse_pc.ply"
    new_ply_path = "new_sparse_pc.ply"

    with open(path) as f:
        transform_json = json.load(f)
    
    ply = o3d.io.read_point_cloud(ply_path)
    ply_points = np.asarray(ply.points)

    points = []
    xaxis = []
    yaxis = []
    zaxis = []

    # each transform_matrix stores the camera axes in its first three columns
    # and the camera position in its last column
    for frame in transform_json["frames"]:
        points.append([frame["transform_matrix"][0][-1], frame["transform_matrix"][1][-1], frame["transform_matrix"][2][-1]])
        xaxis.append([frame["transform_matrix"][0][0], frame["transform_matrix"][1][0], frame["transform_matrix"][2][0]])
        yaxis.append([frame["transform_matrix"][0][1], frame["transform_matrix"][1][1], frame["transform_matrix"][2][1]])
        zaxis.append([frame["transform_matrix"][0][2], frame["transform_matrix"][1][2], frame["transform_matrix"][2][2]])


    points = np.array(points)
    xaxis = np.array(xaxis)
    yaxis = np.array(yaxis)
    zaxis = np.array(zaxis)

    projected_points, R, average_point = calc_new_points(points)
    projected_xaxis = calc_new_axis(xaxis, R)
    projected_yaxis = calc_new_axis(yaxis, R)
    projected_zaxis = calc_new_axis(zaxis, R)
    projected_ply_points, _, _ = calc_new_points(ply_points, R, average_point)


    new_transform_json = copy.deepcopy(transform_json)
    for frame, point, x, y, z in zip(new_transform_json["frames"], projected_points, projected_xaxis, projected_yaxis, projected_zaxis):
        frame["transform_matrix"][0] = [x[0], y[0], z[0], point[0]]
        frame["transform_matrix"][1] = [x[1], y[1], z[1], point[1]]
        frame["transform_matrix"][2] = [x[2], y[2], z[2], point[2]]
    
    with open(new_path, "w") as f:
        json.dump(new_transform_json, f, indent=4)
    
    ply.points = o3d.utility.Vector3dVector(projected_ply_points)
    o3d.io.write_point_cloud(new_ply_path, ply)

    # point cloud of the transformed camera positions (can be used for visualization)
    camera = o3d.geometry.PointCloud()
    camera.points = o3d.utility.Vector3dVector(projected_points)
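
As a quick sanity check (a minimal sketch reusing the variables from the script above, not part of the original script), the y coordinates of the transformed camera positions should now be close to zero, and the cameras can be viewed together with the transformed point cloud. The snippet below can be appended at the end of the main block:

    # the trajectory should now lie near the xz plane, so |y| should be small
    print("max |y| of camera positions:", np.max(np.abs(projected_points[:, 1])))
    # optional: show the camera positions together with the transformed sparse point cloud
    o3d.visualization.draw_geometries([ply, camera])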
