Introduction
Gait acquisition for Unitree's quadruped robot go2 is included in the Genesis examples, and the procedure is introduced in the article below.
In this post, I replace the demo's go2 with MIT's Mini Cheetah and train that instead.
Mini Cheetah
MIT's Mini Cheetah is reportedly the first quadruped robot to perform a backflip.
I chose the Mini Cheetah because I personally started studying quadruped robots with Cheetah Software, its open-source control software.
Environment
Ubuntu 24.04
Python 3.11
GPU: RTX 3070
I previously wrote an article about setting up Genesis, so please refer to that. (Shameless plug.)
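As a rough sketch, my setup amounted to the following. The package names are assumptions on my part; the linked article and the official Genesis documentation are the authoritative references, and the locomotion examples may pin a specific rsl-rl version.

```bash
# Genesis itself (PyPI package name as I understand it)
pip install genesis-world
# RL runner and logging used by the locomotion examples
pip install rsl-rl-lib tensorboard
```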
Training
The Mini Cheetah is trained by lightly modifying the go2 training demo.
The go2 demo consists of three files: the reinforcement learning environment `go2_env.py`, the training script `go2_train.py`, and the evaluation script `go2_eval.py`. These are the files we modify; the concrete copy-and-rename step is sketched below.
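Concretely, I copied and renamed the three demo files inside the Genesis examples/locomotion directory:

```bash
cd examples/locomotion   # inside the Genesis repository
cp go2_env.py mini_cheetah_env.py
cp go2_train.py mini_cheetah_train.py
cp go2_eval.py mini_cheetah_eval.py
```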
Changing the URDF
I used the Mini Cheetah URDF from the repository below.
The only change I made was to rewrite the mesh paths in `mini_cheetah.urdf` from `mesh/~` to `../mesh/~`.
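If there are many mesh references, the rewrite can be done in one pass, for example with sed. This is a sketch that assumes the references in the URDF look like `filename="mesh/..."`:

```bash
# rewrite the mesh paths in place (keep a backup of the original URDF if needed)
sed -i 's|filename="mesh/|filename="../mesh/|g' mini_cheetah.urdf
```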
To check that the modified URDF is valid, I used Genesis's URDF viewer feature.
gs view mini_cheetah.urdf
This confirmed that the URDF loads correctly.
Reference post
I rendered the URDF model of Roid1, the humanoid used in the robot OSS "Meridian", with Genesis's URDF viewer feature.
Joint axes can be manipulated from a GUI, much like ROS's rqt.
Tkinter needs to be installed first:
sudo apt-get install python3-tk
gs view <filename> #rosjp #Meridian計画 https://t.co/XDzdxrulx3 pic.twitter.com/schkRsAJMu
— holypong (@automo_emo) January 1, 2025
Next, copy `go2_env.py` to create a new `mini_cheetah_env.py`.
The changes are shown below.
gs.morphs.URDF(
# file="urdf/go2/urdf/go2.urdf",
file="Mini_Cheetah_test/mini_cheetah_urdf/urdf/mini_cheetah.urdf",
pos=self.base_init_pos.cpu().numpy(),
quat=self.base_init_quat.cpu().numpy(),
),
Change the `file` argument of `gs.morphs.URDF` to the path of your URDF.
(In the snippet above, `mini_cheetah.urdf` is placed under `Mini_Cheetah_test/mini_cheetah_urdf/urdf/`.)
The class name is also changed from `Go2Env` to `MiniCheetahEnv`.
The resulting `mini_cheetah_env.py`, the reinforcement learning environment for the mini_cheetah, is shown below.
import torch
import math
import genesis as gs
from genesis.utils.geom import quat_to_xyz, transform_by_quat, inv_quat, transform_quat_by_quat
def gs_rand_float(lower, upper, shape, device):
return (upper - lower) * torch.rand(size=shape, device=device) + lower
class MiniCheetahEnv:
def __init__(self, num_envs, env_cfg, obs_cfg, reward_cfg, command_cfg, show_viewer=False, device="cuda"):
self.device = torch.device(device)
self.num_envs = num_envs
self.num_obs = obs_cfg["num_obs"]
self.num_privileged_obs = None
self.num_actions = env_cfg["num_actions"]
self.num_commands = command_cfg["num_commands"]
self.simulate_action_latency = True # there is a 1 step latency on real robot
self.dt = 0.02 # control frequence on real robot is 50hz
self.max_episode_length = math.ceil(env_cfg["episode_length_s"] / self.dt)
self.env_cfg = env_cfg
self.obs_cfg = obs_cfg
self.reward_cfg = reward_cfg
self.command_cfg = command_cfg
self.obs_scales = obs_cfg["obs_scales"]
self.reward_scales = reward_cfg["reward_scales"]
# create scene
self.scene = gs.Scene(
sim_options=gs.options.SimOptions(dt=self.dt, substeps=2),
viewer_options=gs.options.ViewerOptions(
max_FPS=int(0.5 / self.dt),
camera_pos=(2.0, 0.0, 2.5),
camera_lookat=(0.0, 0.0, 0.5),
camera_fov=40,
),
vis_options=gs.options.VisOptions(n_rendered_envs=1),
rigid_options=gs.options.RigidOptions(
dt=self.dt,
constraint_solver=gs.constraint_solver.Newton,
enable_collision=True,
enable_joint_limit=True,
),
show_viewer=show_viewer,
)
# add plain
self.scene.add_entity(gs.morphs.URDF(file="urdf/plane/plane.urdf", fixed=True))
# add robot
self.base_init_pos = torch.tensor(self.env_cfg["base_init_pos"], device=self.device)
self.base_init_quat = torch.tensor(self.env_cfg["base_init_quat"], device=self.device)
self.inv_base_init_quat = inv_quat(self.base_init_quat)
self.robot = self.scene.add_entity(
gs.morphs.URDF(
# file="urdf/go2/urdf/go2.urdf",
file="Mini_Cheetah_test/mini_cheetah_urdf/urdf/mini_cheetah.urdf",
pos=self.base_init_pos.cpu().numpy(),
quat=self.base_init_quat.cpu().numpy(),
),
)
# build
self.scene.build(n_envs=num_envs)
# names to indices
self.motor_dofs = [self.robot.get_joint(name).dof_idx_local for name in self.env_cfg["dof_names"]]
# PD control parameters
self.robot.set_dofs_kp([self.env_cfg["kp"]] * self.num_actions, self.motor_dofs)
self.robot.set_dofs_kv([self.env_cfg["kd"]] * self.num_actions, self.motor_dofs)
# prepare reward functions and multiply reward scales by dt
self.reward_functions, self.episode_sums = dict(), dict()
for name in self.reward_scales.keys():
self.reward_scales[name] *= self.dt
self.reward_functions[name] = getattr(self, "_reward_" + name)
self.episode_sums[name] = torch.zeros((self.num_envs,), device=self.device, dtype=gs.tc_float)
# initialize buffers
self.base_lin_vel = torch.zeros((self.num_envs, 3), device=self.device, dtype=gs.tc_float)
self.base_ang_vel = torch.zeros((self.num_envs, 3), device=self.device, dtype=gs.tc_float)
self.projected_gravity = torch.zeros((self.num_envs, 3), device=self.device, dtype=gs.tc_float)
self.global_gravity = torch.tensor([0.0, 0.0, -1.0], device=self.device, dtype=gs.tc_float).repeat(
self.num_envs, 1
)
self.obs_buf = torch.zeros((self.num_envs, self.num_obs), device=self.device, dtype=gs.tc_float)
self.rew_buf = torch.zeros((self.num_envs,), device=self.device, dtype=gs.tc_float)
self.reset_buf = torch.ones((self.num_envs,), device=self.device, dtype=gs.tc_int)
self.episode_length_buf = torch.zeros((self.num_envs,), device=self.device, dtype=gs.tc_int)
self.commands = torch.zeros((self.num_envs, self.num_commands), device=self.device, dtype=gs.tc_float)
self.commands_scale = torch.tensor(
[self.obs_scales["lin_vel"], self.obs_scales["lin_vel"], self.obs_scales["ang_vel"]],
device=self.device,
dtype=gs.tc_float,
)
self.actions = torch.zeros((self.num_envs, self.num_actions), device=self.device, dtype=gs.tc_float)
self.last_actions = torch.zeros_like(self.actions)
self.dof_pos = torch.zeros_like(self.actions)
self.dof_vel = torch.zeros_like(self.actions)
self.last_dof_vel = torch.zeros_like(self.actions)
self.base_pos = torch.zeros((self.num_envs, 3), device=self.device, dtype=gs.tc_float)
self.base_quat = torch.zeros((self.num_envs, 4), device=self.device, dtype=gs.tc_float)
self.default_dof_pos = torch.tensor(
[self.env_cfg["default_joint_angles"][name] for name in self.env_cfg["dof_names"]],
device=self.device,
dtype=gs.tc_float,
)
self.extras = dict() # extra information for logging
def _resample_commands(self, envs_idx):
self.commands[envs_idx, 0] = gs_rand_float(*self.command_cfg["lin_vel_x_range"], (len(envs_idx),), self.device)
self.commands[envs_idx, 1] = gs_rand_float(*self.command_cfg["lin_vel_y_range"], (len(envs_idx),), self.device)
self.commands[envs_idx, 2] = gs_rand_float(*self.command_cfg["ang_vel_range"], (len(envs_idx),), self.device)
def step(self, actions):
self.actions = torch.clip(actions, -self.env_cfg["clip_actions"], self.env_cfg["clip_actions"])
exec_actions = self.last_actions if self.simulate_action_latency else self.actions
target_dof_pos = exec_actions * self.env_cfg["action_scale"] + self.default_dof_pos
self.robot.control_dofs_position(target_dof_pos, self.motor_dofs)
self.scene.step()
# update buffers
self.episode_length_buf += 1
self.base_pos[:] = self.robot.get_pos()
self.base_quat[:] = self.robot.get_quat()
self.base_euler = quat_to_xyz(
transform_quat_by_quat(torch.ones_like(self.base_quat) * self.inv_base_init_quat, self.base_quat)
)
inv_base_quat = inv_quat(self.base_quat)
self.base_lin_vel[:] = transform_by_quat(self.robot.get_vel(), inv_base_quat)
self.base_ang_vel[:] = transform_by_quat(self.robot.get_ang(), inv_base_quat)
self.projected_gravity = transform_by_quat(self.global_gravity, inv_base_quat)
self.dof_pos[:] = self.robot.get_dofs_position(self.motor_dofs)
self.dof_vel[:] = self.robot.get_dofs_velocity(self.motor_dofs)
# resample commands
envs_idx = (
(self.episode_length_buf % int(self.env_cfg["resampling_time_s"] / self.dt) == 0)
.nonzero(as_tuple=False)
.flatten()
)
self._resample_commands(envs_idx)
# check termination and reset
self.reset_buf = self.episode_length_buf > self.max_episode_length
self.reset_buf |= torch.abs(self.base_euler[:, 1]) > self.env_cfg["termination_if_pitch_greater_than"]
self.reset_buf |= torch.abs(self.base_euler[:, 0]) > self.env_cfg["termination_if_roll_greater_than"]
time_out_idx = (self.episode_length_buf > self.max_episode_length).nonzero(as_tuple=False).flatten()
self.extras["time_outs"] = torch.zeros_like(self.reset_buf, device=self.device, dtype=gs.tc_float)
self.extras["time_outs"][time_out_idx] = 1.0
self.reset_idx(self.reset_buf.nonzero(as_tuple=False).flatten())
# compute reward
self.rew_buf[:] = 0.0
for name, reward_func in self.reward_functions.items():
rew = reward_func() * self.reward_scales[name]
self.rew_buf += rew
self.episode_sums[name] += rew
# compute observations
self.obs_buf = torch.cat(
[
self.base_ang_vel * self.obs_scales["ang_vel"], # 3
self.projected_gravity, # 3
self.commands * self.commands_scale, # 3
(self.dof_pos - self.default_dof_pos) * self.obs_scales["dof_pos"], # 12
self.dof_vel * self.obs_scales["dof_vel"], # 12
self.actions, # 12
],
axis=-1,
)
self.last_actions[:] = self.actions[:]
self.last_dof_vel[:] = self.dof_vel[:]
return self.obs_buf, None, self.rew_buf, self.reset_buf, self.extras
def get_observations(self):
return self.obs_buf
def get_privileged_observations(self):
return None
def reset_idx(self, envs_idx):
if len(envs_idx) == 0:
return
# reset dofs
self.dof_pos[envs_idx] = self.default_dof_pos
self.dof_vel[envs_idx] = 0.0
self.robot.set_dofs_position(
position=self.dof_pos[envs_idx],
dofs_idx_local=self.motor_dofs,
zero_velocity=True,
envs_idx=envs_idx,
)
# reset base
self.base_pos[envs_idx] = self.base_init_pos
self.base_quat[envs_idx] = self.base_init_quat.reshape(1, -1)
self.robot.set_pos(self.base_pos[envs_idx], zero_velocity=False, envs_idx=envs_idx)
self.robot.set_quat(self.base_quat[envs_idx], zero_velocity=False, envs_idx=envs_idx)
self.base_lin_vel[envs_idx] = 0
self.base_ang_vel[envs_idx] = 0
self.robot.zero_all_dofs_velocity(envs_idx)
# reset buffers
self.last_actions[envs_idx] = 0.0
self.last_dof_vel[envs_idx] = 0.0
self.episode_length_buf[envs_idx] = 0
self.reset_buf[envs_idx] = True
# fill extras
self.extras["episode"] = {}
for key in self.episode_sums.keys():
self.extras["episode"]["rew_" + key] = (
torch.mean(self.episode_sums[key][envs_idx]).item() / self.env_cfg["episode_length_s"]
)
self.episode_sums[key][envs_idx] = 0.0
self._resample_commands(envs_idx)
def reset(self):
self.reset_buf[:] = True
self.reset_idx(torch.arange(self.num_envs, device=self.device))
return self.obs_buf, None
# ------------ reward functions----------------
def _reward_tracking_lin_vel(self):
# Tracking of linear velocity commands (xy axes)
lin_vel_error = torch.sum(torch.square(self.commands[:, :2] - self.base_lin_vel[:, :2]), dim=1)
return torch.exp(-lin_vel_error / self.reward_cfg["tracking_sigma"])
def _reward_tracking_ang_vel(self):
# Tracking of angular velocity commands (yaw)
ang_vel_error = torch.square(self.commands[:, 2] - self.base_ang_vel[:, 2])
return torch.exp(-ang_vel_error / self.reward_cfg["tracking_sigma"])
def _reward_lin_vel_z(self):
# Penalize z axis base linear velocity
return torch.square(self.base_lin_vel[:, 2])
def _reward_action_rate(self):
# Penalize changes in actions
return torch.sum(torch.square(self.last_actions - self.actions), dim=1)
def _reward_similar_to_default(self):
# Penalize joint poses far away from default pose
return torch.sum(torch.abs(self.dof_pos - self.default_dof_pos), dim=1)
def _reward_base_height(self):
# Penalize base height away from target
return torch.square(self.base_pos[:, 2] - self.reward_cfg["base_height_target"])
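Before training, the environment can be smoke-tested on its own. The snippet below is not part of the demo; it is a minimal sketch that assumes `get_cfgs()` from the `mini_cheetah_train.py` shown in the next section, builds a single environment, and steps it with zero actions just to confirm the robot loads and the simulation runs.

```python
import torch
import genesis as gs
from mini_cheetah_env import MiniCheetahEnv
from mini_cheetah_train import get_cfgs  # defined in the next section

gs.init(logging_level="warning")
env_cfg, obs_cfg, reward_cfg, command_cfg = get_cfgs()
env = MiniCheetahEnv(
    num_envs=1,
    env_cfg=env_cfg,
    obs_cfg=obs_cfg,
    reward_cfg=reward_cfg,
    command_cfg=command_cfg,
    show_viewer=True,
)
obs, _ = env.reset()
# hold the default pose (zero residual actions) for a few seconds of sim time
for _ in range(100):
    obs, _, rew, reset, extras = env.step(
        torch.zeros((1, env.num_actions), device=env.device)
    )
```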
Changing the joint names
The joint names in the Mini Cheetah URDF above differ from go2's, so they need to be updated as well.
Copy `go2_train.py` to create the training script `mini_cheetah_train.py`.
The correspondence between the go2 and mini_cheetah joint names is as follows (a dict version for programmatic use follows the table).
| go2 | mini_cheetah |
| --- | --- |
| FL_hip_joint | torso_to_abduct_fl_j |
| FR_hip_joint | torso_to_abduct_fr_j |
| RL_hip_joint | torso_to_abduct_hr_j |
| RR_hip_joint | torso_to_abduct_hl_j |
| FL_thigh_joint | abduct_fl_to_thigh_fl_j |
| FR_thigh_joint | abduct_fr_to_thigh_fr_j |
| RL_thigh_joint | abduct_hr_to_thigh_hr_j |
| RR_thigh_joint | abduct_hl_to_thigh_hl_j |
| FL_calf_joint | thigh_fl_to_knee_fl_j |
| FR_calf_joint | thigh_fr_to_knee_fr_j |
| RL_calf_joint | thigh_hr_to_knee_hr_j |
| RR_calf_joint | thigh_hl_to_knee_hl_j |
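For reference, the same mapping as a Python dict. This is purely illustrative and not used by the demo scripts, but it is handy if you want to rename config keys programmatically:

```python
# go2 joint name -> mini_cheetah joint name (same correspondence as the table above)
GO2_TO_MINI_CHEETAH = {
    "FL_hip_joint": "torso_to_abduct_fl_j",
    "FR_hip_joint": "torso_to_abduct_fr_j",
    "RL_hip_joint": "torso_to_abduct_hr_j",
    "RR_hip_joint": "torso_to_abduct_hl_j",
    "FL_thigh_joint": "abduct_fl_to_thigh_fl_j",
    "FR_thigh_joint": "abduct_fr_to_thigh_fr_j",
    "RL_thigh_joint": "abduct_hr_to_thigh_hr_j",
    "RR_thigh_joint": "abduct_hl_to_thigh_hl_j",
    "FL_calf_joint": "thigh_fl_to_knee_fl_j",
    "FR_calf_joint": "thigh_fr_to_knee_fr_j",
    "RL_calf_joint": "thigh_hr_to_knee_hr_j",
    "RR_calf_joint": "thigh_hl_to_knee_hl_j",
}

# example: convert go2-style default_joint_angles to mini_cheetah names
# new_angles = {GO2_TO_MINI_CHEETAH[k]: v for k, v in default_joint_angles.items()}
```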
Taking this mapping into account, update `default_joint_angles` and `dof_names` in `get_cfgs()` of `mini_cheetah_train.py`.
Since the environment class was renamed earlier, the import at the top of the file is also updated to the new module and class name.
The modified training script `mini_cheetah_train.py` is shown below.
import argparse
import os
import pickle
import shutil
from mini_cheetah_env import MiniCheetahEnv
from rsl_rl.runners import OnPolicyRunner
import genesis as gs
def get_train_cfg(exp_name, max_iterations):
train_cfg_dict = {
"algorithm": {
"clip_param": 0.2,
"desired_kl": 0.01,
"entropy_coef": 0.01,
"gamma": 0.99,
"lam": 0.95,
"learning_rate": 0.001,
"max_grad_norm": 1.0,
"num_learning_epochs": 5,
"num_mini_batches": 4,
"schedule": "adaptive",
"use_clipped_value_loss": True,
"value_loss_coef": 1.0,
},
"init_member_classes": {},
"policy": {
"activation": "elu",
"actor_hidden_dims": [512, 256, 128],
"critic_hidden_dims": [512, 256, 128],
"init_noise_std": 1.0,
},
"runner": {
"algorithm_class_name": "PPO",
"checkpoint": -1,
"experiment_name": exp_name,
"load_run": -1,
"log_interval": 1,
"max_iterations": max_iterations,
"num_steps_per_env": 24,
"policy_class_name": "ActorCritic",
"record_interval": -1,
"resume": False,
"resume_path": None,
"run_name": "",
"runner_class_name": "runner_class_name",
"save_interval": 100,
},
"runner_class_name": "OnPolicyRunner",
"seed": 1,
}
return train_cfg_dict
def get_cfgs():
env_cfg = {
"num_actions": 12,
# joint/link names
# "default_joint_angles": { # [rad]
# "FL_hip_joint": 0.0,
# "FR_hip_joint": 0.0,
# "RL_hip_joint": 0.0,
# "RR_hip_joint": 0.0,
# "FL_thigh_joint": 0.8,
# "FR_thigh_joint": 0.8,
# "RL_thigh_joint": 1.0,
# "RR_thigh_joint": 1.0,
# "FL_calf_joint": -1.5,
# "FR_calf_joint": -1.5,
# "RL_calf_joint": -1.5,
# "RR_calf_joint": -1.5,
# },
# "dof_names": [
# "FR_hip_joint",
# "FR_thigh_joint",
# "FR_calf_joint",
# "FL_hip_joint",
# "FL_thigh_joint",
# "FL_calf_joint",
# "RR_hip_joint",
# "RR_thigh_joint",
# "RR_calf_joint",
# "RL_hip_joint",
# "RL_thigh_joint",
# "RL_calf_joint",
# ],
"default_joint_angles": { # [rad]
"torso_to_abduct_fl_j": 0.0,
"torso_to_abduct_fr_j": 0.0,
"torso_to_abduct_hr_j": 0.0,
"torso_to_abduct_hl_j": 0.0,
"abduct_fl_to_thigh_fl_j": 0.8,
"abduct_fr_to_thigh_fr_j": 0.8,
"abduct_hr_to_thigh_hr_j": 1.0,
"abduct_hl_to_thigh_hl_j": 1.0,
"thigh_fl_to_knee_fl_j": -1.5,
"thigh_fr_to_knee_fr_j": -1.5,
"thigh_hr_to_knee_hr_j": -1.5,
"thigh_hl_to_knee_hl_j": -1.5,
},
"dof_names": [
"torso_to_abduct_fr_j",
"abduct_fr_to_thigh_fr_j",
"thigh_fr_to_knee_fr_j",
"torso_to_abduct_fl_j",
"abduct_fl_to_thigh_fl_j",
"thigh_fl_to_knee_fl_j",
"torso_to_abduct_hr_j",
"abduct_hr_to_thigh_hr_j",
"thigh_hr_to_knee_hr_j",
"torso_to_abduct_hl_j",
"abduct_hl_to_thigh_hl_j",
"thigh_hl_to_knee_hl_j",
],
# PD
"kp": 20.0,
"kd": 0.5,
# termination
"termination_if_roll_greater_than": 10, # degree
"termination_if_pitch_greater_than": 10,
# base pose
"base_init_pos": [0.0, 0.0, 0.42],
"base_init_quat": [1.0, 0.0, 0.0, 0.0],
"episode_length_s": 20.0,
"resampling_time_s": 4.0,
"action_scale": 0.25,
"simulate_action_latency": True,
"clip_actions": 100.0,
}
obs_cfg = {
"num_obs": 45,
"obs_scales": {
"lin_vel": 2.0,
"ang_vel": 0.25,
"dof_pos": 1.0,
"dof_vel": 0.05,
},
}
reward_cfg = {
"tracking_sigma": 0.25,
"base_height_target": 0.3,
"feet_height_target": 0.075,
"reward_scales": {
"tracking_lin_vel": 1.0,
"tracking_ang_vel": 0.2,
"lin_vel_z": -1.0,
"base_height": -50.0,
"action_rate": -0.005,
"similar_to_default": -0.1,
},
}
command_cfg = {
"num_commands": 3,
"lin_vel_x_range": [0.5, 0.5],
"lin_vel_y_range": [0, 0],
"ang_vel_range": [0, 0],
}
return env_cfg, obs_cfg, reward_cfg, command_cfg
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--exp_name", type=str, default="mini_cheetah-walking")
parser.add_argument("-B", "--num_envs", type=int, default=4096)
parser.add_argument("--max_iterations", type=int, default=100)
args = parser.parse_args()
gs.init(logging_level="warning")
log_dir = f"logs/{args.exp_name}"
env_cfg, obs_cfg, reward_cfg, command_cfg = get_cfgs()
train_cfg = get_train_cfg(args.exp_name, args.max_iterations)
if os.path.exists(log_dir):
shutil.rmtree(log_dir)
os.makedirs(log_dir, exist_ok=True)
env = MiniCheetahEnv(
num_envs=args.num_envs, env_cfg=env_cfg, obs_cfg=obs_cfg, reward_cfg=reward_cfg, command_cfg=command_cfg
)
runner = OnPolicyRunner(env, train_cfg, log_dir, device="cuda:0")
pickle.dump(
[env_cfg, obs_cfg, reward_cfg, command_cfg, train_cfg],
open(f"{log_dir}/cfgs.pkl", "wb"),
)
runner.learn(num_learning_iterations=args.max_iterations, init_at_random_ep_len=True)
if __name__ == "__main__":
main()
"""
# training
python examples/locomotion/mini_cheetah_train.py
"""
Running this script starts training.
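The experiment name, number of parallel environments, and iteration count can also be passed on the command line (the values below are just the defaults defined in the script):

```bash
python examples/locomotion/mini_cheetah_train.py -e mini_cheetah-walking -B 4096 --max_iterations 100
# training curves land in logs/<exp_name>/ and can be viewed with, e.g., tensorboard --logdir logs
```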
Evaluation
Copy go2's evaluation script `go2_eval.py` to create `mini_cheetah_eval.py`.
Here, the only change needed is the class (and module) name updated earlier.
The evaluation script `mini_cheetah_eval.py` is shown below.
import argparse
import os
import pickle
import torch
from mini_cheetah_env import MiniCheetahEnv
from rsl_rl.runners import OnPolicyRunner
import genesis as gs
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--exp_name", type=str, default="mini_cheetah-walking")
parser.add_argument("--ckpt", type=int, default=100)
args = parser.parse_args()
gs.init()
log_dir = f"logs/{args.exp_name}"
env_cfg, obs_cfg, reward_cfg, command_cfg, train_cfg = pickle.load(open(f"logs/{args.exp_name}/cfgs.pkl", "rb"))
reward_cfg["reward_scales"] = {}
env = MiniCheetahEnv(
num_envs=1,
env_cfg=env_cfg,
obs_cfg=obs_cfg,
reward_cfg=reward_cfg,
command_cfg=command_cfg,
show_viewer=True,
)
runner = OnPolicyRunner(env, train_cfg, log_dir, device="cuda:0")
resume_path = os.path.join(log_dir, f"model_{args.ckpt}.pt")
runner.load(resume_path)
policy = runner.get_inference_policy(device="cuda:0")
obs, _ = env.reset()
with torch.no_grad():
while True:
actions = policy(obs)
obs, _, rews, dones, infos = env.step(actions)
if __name__ == "__main__":
main()
"""
# evaluation
python examples/locomotion/mini_cheetah_eval.py -e mini_cheetah-walking --ckpt 100
"""
The result is shown below.
mini_cheetah walking after training in Genesis
(backwards) pic.twitter.com/zOwnb2JPDX
— タマル (@tamarn5G) January 3, 2025
Fixing the gait
Right now the robot walks backwards and its hind legs turn inward, so let's fix that.
Because the reward includes a penalty for deviating from the default pose, the incorrect default joint angles make the gait look like backwards walking.
(The robot is actually moving forward, but the joints bend in the wrong direction.)
So we adjust the default joint angles in `mini_cheetah_train.py`:
"default_joint_angles": { # [rad]
"torso_to_abduct_fl_j": 0.0,
"torso_to_abduct_fr_j": 0.0,
"torso_to_abduct_hr_j": 0.0,
"torso_to_abduct_hl_j": 0.0,
"abduct_fl_to_thigh_fl_j": -0.8,
"abduct_fr_to_thigh_fr_j": -0.8,
"abduct_hr_to_thigh_hr_j": -0.8,
"abduct_hl_to_thigh_hl_j": -0.8,
"thigh_fl_to_knee_fl_j": 1.5,
"thigh_fr_to_knee_fr_j": 1.5,
"thigh_hr_to_knee_hr_j": 1.5,
"thigh_hl_to_knee_hl_j": 1.5,
},
The result looks like this:
After adjusting the default joint angles, it now walks forward pic.twitter.com/dqNUTkJYQc
— タマル (@tamarn5G) January 3, 2025
Conclusion
Now that gait generation for the mini_cheetah works in Genesis, I would next like to run the learned policy on the robot in Gazebo.
I also want to take this opportunity to study reinforcement learning properly.