Porting lidar-centerpoint-tvm to Python

Posted at 2023-06-13

To debug the lidar-centerpoint-tvm module of autoware.universe (autowarefoundation), I ported it to Python. The source is at
https://github.com/tohmae/lidar-centerpoint-tvm-python
The individual steps are explained below.
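
Before going into each step, here is a rough sketch of how the functions below are chained end to end. pc_msg (the input point cloud) and the coordinate layout handed to the scatter kernel are placeholders; module_en, module_bk, and scatter are assumed to be loaded as in steps 3 and 4.

# End-to-end flow (sketch): preprocessing -> TVM inference -> postprocessing
df_pc, df_voxel, df_coordinate, df_num_points_per_voxel, voxel_cnt = pointsToVoxel(pc_msg)  # 1. voxelize
df_feature, features = generatefeature(df_pc)                                               # 2. per-point features
coordinates = df_coordinate[['coord_z', 'coord_y', 'coord_x']].values.astype('int32')       # voxel coordinates (assumed layout)
outputs = TSP_pipeline(features, coordinates)                                               # 5. encoder -> scatter -> backbone
df_det_boxes3d_nonms, df_det_boxes3d = generateBoxes3D(outputs)                             # 6. decode heads + circle NMS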

1. Compute voxels from the point cloud (preprocessing)

import numpy as np
import pandas as pd

# config is the parameter module from the repository (grid size, detection range, voxel size, etc.)
def pointsToVoxel(pc_msg):
    grid_size = config.grid_size_z_ * config.grid_size_y_ * config.grid_size_x_
    coord_to_voxel_idx = np.repeat(-1, grid_size)

    # pc_msg: N x 3 array of (x, y, z); timelag is the per-cloud time offset,
    # assumed to be defined at module scope
    df_pc = pd.DataFrame(pc_msg, columns=['x', 'y', 'z'])
    df_pc['timelag'] = timelag
    df_pc['coord_x'] = ((df_pc['x'] - config.range_[0]) * config.recip_voxel_size_[0]).astype('int32')
    df_pc['coord_y'] = ((df_pc['y'] - config.range_[1]) * config.recip_voxel_size_[1]).astype('int32')
    df_pc['coord_z'] = ((df_pc['z'] - config.range_[2]) * config.recip_voxel_size_[2]).astype('int32')
    
    df_pc = df_pc[(df_pc['coord_x']>=0) & (df_pc['coord_x'] < config.grid_size_[0])]
    df_pc = df_pc[(df_pc['coord_y']>=0) & (df_pc['coord_y'] < config.grid_size_[1])]
    df_pc = df_pc[(df_pc['coord_z']>=0) & (df_pc['coord_z'] < config.grid_size_[2])]
    
    df_pc['coord_idx'] = df_pc['coord_z'] * config.grid_size_y_ * config.grid_size_x_ +df_pc['coord_y'] * config.grid_size_x_ + df_pc['coord_x']
    
    np.random.seed(seed=32)
    df_pc['random'] = np.random.rand(len(df_pc))
    df_pc['rank'] = df_pc.groupby(['coord_idx'])['random'].rank(method='first')
    df_pc = df_pc[df_pc['rank']<=32]
    
    df_num_points_per_voxel = df_pc['coord_idx'].value_counts().reset_index()
    df_num_points_per_voxel.columns = ['coord_idx', 'point_cnt']
    df_num_points_per_voxel['voxel_idx'] = df_num_points_per_voxel.index
    df_num_points_per_voxel = df_num_points_per_voxel[['voxel_idx','coord_idx','point_cnt']]
    voxel_cnt = len(df_num_points_per_voxel)
    
    df_pc = pd.merge(df_pc, df_num_points_per_voxel, how='inner', on='coord_idx')
    df_coordinate_tmp = df_pc[~df_pc.duplicated(subset='voxel_idx')].sort_values('voxel_idx')[['voxel_idx', 'coord_z','coord_y', 'coord_x']].reset_index(drop=True)

    df_coordinate = pd.DataFrame(np.arange(0, config.max_voxel_size),columns=['voxel_idx'])
    df_coordinate = pd.merge(df_coordinate, df_coordinate_tmp, how='left', on='voxel_idx')
    df_coordinate = df_coordinate.fillna(-1)
    df_coordinate = df_coordinate.astype('int32')

    df_voxel = pd.DataFrame(np.repeat(np.arange(0,config.max_voxel_size), config.max_point_in_voxel_size),columns=['voxel_idx'])
    df_voxel['rank'] = np.tile(np.arange(1,config.max_point_in_voxel_size+1), config.max_voxel_size)
    df_voxel = pd.merge(df_voxel, df_pc, how='left', on=['voxel_idx', 'rank'])
    nancols = ['x','y','z','timelag']
    for nancol in nancols:
        df_voxel[nancol] = df_voxel[nancol].fillna(0)
    df_voxel = df_voxel[['voxel_idx','rank','x','y','z','timelag']]
    
    return df_pc, df_voxel, df_coordinate, df_num_points_per_voxel, voxel_cnt
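
A quick way to exercise the voxelization is to feed in a synthetic point cloud; the array below and the module-level timelag value are assumptions for illustration, not code from the repository.

import numpy as np

timelag = 0.0   # per-cloud time offset read inside pointsToVoxel (assumed module-level variable)
pc_msg = np.random.uniform(-50.0, 50.0, size=(100000, 3)).astype('float32')   # N x (x, y, z) in meters

df_pc, df_voxel, df_coordinate, df_num_points_per_voxel, voxel_cnt = pointsToVoxel(pc_msg)
print(voxel_cnt)        # number of non-empty voxels
print(df_voxel.shape)   # (config.max_voxel_size * config.max_point_in_voxel_size, 6)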

2. Compute per-point features (preprocessing)

def generatefeature(df_pc):
    df_pc_mean = df_pc[['x','y','z', 'voxel_idx']].groupby('voxel_idx').mean().reset_index()
    df_pc_mean.columns = ['voxel_idx','x_mean', 'y_mean', 'z_mean']
    
    df_pc['x_offset'] = df_pc['coord_x'] * config.voxel_size_x_ + config.offset_x_
    df_pc['y_offset'] = df_pc['coord_y'] * config.voxel_size_y_ + config.offset_y_
    
    df_feature = pd.merge(df_pc, df_pc_mean, on='voxel_idx')
    
    df_feature['feature_0'] = df_feature['x']
    df_feature['feature_1'] = df_feature['y']
    df_feature['feature_2'] = df_feature['z']
    df_feature['feature_3'] = df_feature['timelag']

    df_feature['feature_4'] = df_feature['x'] - df_feature['x_mean']
    df_feature['feature_5'] = df_feature['y'] - df_feature['y_mean']
    df_feature['feature_6'] = df_feature['z'] - df_feature['z_mean']

    df_feature['feature_7'] = df_feature['x'] - df_feature['x_offset']
    df_feature['feature_8'] = df_feature['y'] - df_feature['y_offset']
    
    df_feature_new = pd.DataFrame(np.repeat(np.arange(0,config.max_voxel_size), config.max_point_in_voxel_size),columns=['voxel_idx'])
    df_feature_new['rank'] = np.tile(np.arange(1,config.max_point_in_voxel_size+1), config.max_voxel_size)
    df_feature_new = pd.merge(df_feature_new, df_feature, how='left', on=['voxel_idx', 'rank'])

    feature_cols = ['feature_{}'.format(i) for i in range(9)]
    for nancol in feature_cols:
        df_feature_new[nancol] = df_feature_new[nancol].fillna(0)
    features = df_feature_new[['feature_{}'.format(i) for i in range(9)]].values.reshape(40000,32,9).astype('float32')
    return df_feature, features
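
The features array returned here is the fixed-shape tensor fed to the TVM encoder in step 5; a quick sanity check looks like this (the shape constants are the ones hard-coded in the reshape above).

df_feature, features = generatefeature(df_pc)
print(features.shape)   # (40000, 32, 9): max_voxel_size x max_point_in_voxel_size x 9 per-point features
print(features.dtype)   # float32, matching the encoder input 'input_features'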

3. Load the TVM model (model processing)

import tvm
from tvm.contrib import graph_runtime

def generatemodule(model_dir):
    loaded_lib = tvm.runtime.load_module(model_dir + '/deploy_lib.so')
    print("deploy loaded")
    loaded_json = open(model_dir + '/deploy_graph.json').read()
    print("json loaded")
    loaded_params = bytearray(open(model_dir + '/deploy_param.params', "rb").read())
    print("params loaded")
    
    module = graph_runtime.create(loaded_json, loaded_lib, tvm.cpu(0))
    module.load_params(loaded_params)
    
    return module

4. Load the TVM function (model processing)

def generatefunction(model_dir):
    lib = tvm.runtime.load_module(model_dir + '/preprocess.so')
    print("deploy loaded")
    fun = lib["scatter"]
    return fun
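
Steps 3 and 4 together are typically run once at start-up; the directory names below are placeholders for wherever the compiled TVM artifacts (deploy_lib.so, deploy_graph.json, deploy_param.params, preprocess.so) are stored.

encoder_dir = 'models/centerpoint_encoder'     # placeholder path
backbone_dir = 'models/centerpoint_backbone'   # placeholder path

module_en = generatemodule(encoder_dir)    # voxel encoder graph module
module_bk = generatemodule(backbone_dir)   # backbone + detection heads graph module
scatter = generatefunction(encoder_dir)    # 'scatter' function compiled in preprocess.so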

5. Run the TVM modules (model processing)

def encoder(features):
    # VE_IE
    module_en.set_input('input_features', tvm.nd.array(features))
    module_en.run()
    output_en = module_en.get_output(0)
    
    return output_en


def scatter_ie(input_0, coordinates):
    # Output buffer for the scatter kernel: (batch, channels, grid_y, grid_x)
    output_sc_np = np.zeros(shape=(1, 32, 560, 560), dtype=np.float32)
    
    input_1: tvm.runtime.NDArray = tvm.nd.array(coordinates)
    output_sc: tvm.runtime.NDArray = tvm.nd.array(output_sc_np)
    scatter(input_0, input_1, output_sc)
    
    return output_sc

def backbone(input_bk):
    module_bk.set_input('spatial_features', input_bk)
    module_bk.run()
    
    output_cols = ['heatmap','offset','z','dim','rot','vel']
    outputs = {}
    for i, output_col in enumerate(output_cols):
        outputs[output_col] = module_bk.get_output(i).asnumpy()[0]
        
    return outputs

def TSP_pipeline(features, coordinates):
    output_en = encoder(features)
    output_sc = scatter_ie(output_en, coordinates)
    output = backbone(output_sc)
    
    return output

6. Generate 3D boxes (postprocessing)

def generateBoxes3D(outputs):
    max_score = np.max(sigmoid(outputs['heatmap']), axis=0)
    label = np.argmax(sigmoid(outputs['heatmap']), axis=0)
    
    offset_x = outputs['offset'][0,:,:]
    offset_y = outputs['offset'][1,:,:]
    y_i = np.repeat(np.arange(0, config.grid_size_x_), config.grid_size_y_).reshape((config.grid_size_x_, config.grid_size_x_))
    x_i = y_i.T

    x = config.voxel_size_x_ * (offset_x + x_i) + config.range_min_x_
    y = config.voxel_size_y_ * (offset_y + y_i) + config.range_min_y_
    
    z = outputs['z'][0]
    
    w = outputs['dim'][0]
    l = outputs['dim'][1]
    h = outputs['dim'][2]
    yaw_sin = outputs['rot'][0]
    yaw_cos = outputs['rot'][1]
    yaw_norm = np.sqrt(yaw_sin * yaw_sin + yaw_cos * yaw_cos)
    vel_x = outputs['vel'][0]
    vel_y = outputs['vel'][1]
    
    score = np.where(yaw_norm >= config.yaw_norm_threshold, max_score, 0.0)
    length = np.exp(l)
    width = np.exp(w)
    height = np.exp(h)
    yaw = np.arctan2(yaw_sin, yaw_cos)
    
    boxes3d = np.stack([label, score, x, y, z, length, width, height, yaw, vel_x, vel_y])

    num_det_boxes3d = np.sum(max_score > config.score_threshold)
    
    box3d_columns = ['label','score','x','y','z','length','width','height','yaw','vel_x','vel_y']
    index = np.where(boxes3d[1,:,:] > config.score_threshold)
    df_det_boxes3d_nonms = pd.DataFrame(columns=box3d_columns)
    for i, colnm in enumerate(box3d_columns):
        df_det_boxes3d_nonms[colnm] = boxes3d[i,:,:][index]
    
    df_det_boxes3d_nonms = df_det_boxes3d_nonms.sort_values('score', ascending=False).reset_index(drop=True)
    
    num_to_keep, keep_mask = circleNMS(df_det_boxes3d_nonms)
    
    df_det_boxes3d = pd.DataFrame(columns=box3d_columns)
    for idx, mask in enumerate(keep_mask):
        if mask:
            df_det_boxes3d = pd.concat([df_det_boxes3d, pd.DataFrame(df_det_boxes3d_nonms.loc[idx]).T])
    
    return df_det_boxes3d_nonms, df_det_boxes3d
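
generateBoxes3D relies on sigmoid and circleNMS, which are not shown above. Below is a minimal sketch of what they can look like, assuming the circle NMS used by CenterPoint: a box is dropped when its BEV center lies within a distance threshold of an already-kept, higher-scoring box. config.circle_nms_dist_threshold is an assumed name for that threshold.

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def circleNMS(df_boxes):
    # df_boxes is already sorted by score in descending order.
    dist_threshold = config.circle_nms_dist_threshold   # assumed config name
    centers = df_boxes[['x', 'y']].values
    keep_mask = np.zeros(len(df_boxes), dtype=bool)
    kept_centers = []
    for i, (cx, cy) in enumerate(centers):
        # Keep the box only if no higher-scoring kept box is within the radius.
        if all(np.hypot(cx - kx, cy - ky) > dist_threshold for kx, ky in kept_centers):
            keep_mask[i] = True
            kept_centers.append((cx, cy))
    num_to_keep = int(keep_mask.sum())
    return num_to_keep, keep_mask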

7. Export the detected objects

import open3d as o3d

def dumpDetectionsAsMech(objs, output_path):
    index = 0
    num_detections = len(objs)
    vertices_stream = []
    faces_stream = []
    with open(output_path, 'w') as f:
        print("ply", file=f)
        print("format ascii 1.0", file=f)
        print("comment created by lidar_centerpoint", file=f)
        print("element vertex {}".format(8*num_detections), file=f)
        print("property float x", file=f)
        print("property float y", file=f)
        print("property float z", file=f)
        print("element face {}".format(12*num_detections), file=f)
        print("property list uchar uint vertex_indices", file=f)
        print("end_header", file=f)
        
        for obj in objs:
            position = obj.kinematics.pose_with_covariance.position
            orientation = obj.kinematics.pose_with_covariance.orientation
            shape = obj.shape
            
            pose_affine = np.eye(4)
            R = o3d.geometry.get_rotation_matrix_from_quaternion([orientation.w, orientation.x, orientation.y, orientation.z])

            pose_affine[:3,:3] = R
            pose_affine[0,3] = position.x
            pose_affine[1,3] = position.y
            pose_affine[2,3] = position.z
            
            vertices = getVertices(shape, pose_affine)

            for vertex in vertices:
                vertices_stream.append("{} {} {}".format(vertex[0], vertex[1], vertex[2]))
            
            
            faces_stream.append(streamFace(index + 1, index + 3, index + 4))
            faces_stream.append(streamFace(index + 3, index + 5, index + 6))
            faces_stream.append(streamFace(index + 0, index + 7, index + 5))
            faces_stream.append(streamFace(index + 7, index + 2, index + 4))
            faces_stream.append(streamFace(index + 5, index + 3, index + 1))
            faces_stream.append(streamFace(index + 7, index + 0, index + 2))
            faces_stream.append(streamFace(index + 2, index + 1, index + 4))
            faces_stream.append(streamFace(index + 4, index + 3, index + 6))
            faces_stream.append(streamFace(index + 5, index + 7, index + 6))
            faces_stream.append(streamFace(index + 6, index + 7, index + 4))
            faces_stream.append(streamFace(index + 0, index + 5, index + 1))
            index += 8
        print('\n'.join(vertices_stream),file=f)
        print('\n'.join(faces_stream),file=f)
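
getVertices and streamFace are helpers not shown in this article. streamFace only has to produce one ASCII-PLY face line (vertex count followed by vertex indices), so a minimal version is sketched below; getVertices is expected to return the eight box corners already transformed by pose_affine, in an order consistent with the face indices used above.

def streamFace(idx_1, idx_2, idx_3):
    # One triangular face in ASCII PLY: "<vertex count> <index> <index> <index>"
    return "3 {} {} {}".format(idx_1, idx_2, idx_3)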

8. Results

[Screenshot of the detection result (2023-06-15)]
