Fix for test cases #531

Merged 6 commits on Oct 30, 2024

README.md (1 addition, 1 deletion)
@@ -27,7 +27,7 @@ Data-driven algorithms, such as reinforcement learning and imitation learning, p
* a modular design that offers great flexibility in designing new robot simulation environments;
* a high-quality implementation of robot controllers and off-the-shelf learning algorithms to lower the barriers to entry.

- This framework was originally developed in late 2017 by researchers in [Stanford Vision and Learning Lab](http://svl.stanford.edu) (SVL) as an internal tool for robot learning research. Now, it is actively maintained and used for robotics research projects in SVL, the [UT Robot Perception and Learning Lab](http://rpl.cs.utexas.edu) (RPL) and NVIDIA [Genearlist Embodied Agent Research Group](https://research.nvidia.com/labs/gear/) (GEAR). We welcome community contributions to this project. For details, please check out our [contributing guidelines](CONTRIBUTING.md).
+ This framework was originally developed in late 2017 by researchers in [Stanford Vision and Learning Lab](http://svl.stanford.edu) (SVL) as an internal tool for robot learning research. Now, it is actively maintained and used for robotics research projects in SVL, the [UT Robot Perception and Learning Lab](http://rpl.cs.utexas.edu) (RPL) and NVIDIA [Generalist Embodied Agent Research Group](https://research.nvidia.com/labs/gear/) (GEAR). We welcome community contributions to this project. For details, please check out our [contributing guidelines](CONTRIBUTING.md).

**Robosuite** offers a modular design of APIs for building new environments, robot embodiments, and robot controllers with procedural generation. We highlight these primary features below:

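The modular API described in the README excerpt above is the one exercised by the updated tests in this PR. As a point of reference, here is a minimal sketch of building an environment with a composite controller config, based on the calls those tests use (the "Lift" task, the Sawyer robot, and the suite.make keyword arguments are taken from the test files; treat the exact values as illustrative):

```python
import robosuite as suite
from robosuite.controllers.composite.composite_controller_factory import load_composite_controller_config

# controller=None falls back to the robot's default composite controller,
# mirroring the updated tests in this PR
controller_config = load_composite_controller_config(controller=None, robot="Sawyer")

env = suite.make(
    "Lift",
    robots="Sawyer",
    has_renderer=False,
    use_camera_obs=False,
    horizon=10000,
    control_freq=20,
    controller_configs=controller_config,
)
env.reset()
```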
robosuite/controllers/parts/controller.py (2 additions, 2 deletions)
@@ -258,8 +258,8 @@ def update_initial_joints(self, initial_joints):
self.update(force=True)

if self.ref_name is not None:
- self.initial_ee_pos = self.ee_pos
- self.initial_ee_ori_mat = self.ee_ori_mat
+ self.initial_ref_pos = self.ref_pos
+ self.initial_ref_ori_mat = self.ref_ori_mat

def clip_torques(self, torques):
"""
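The rename above (initial_ee_* to initial_ref_*) matches the reference-frame attributes the part controller already exposes (ref_pos, ref_ori_mat). A hedged sketch of how the updated tests below re-seed a part controller after setting joint positions directly; the "right" part name and the joint values come from those tests, and env is assumed to be a Sawyer environment like the one in the earlier sketch:

```python
# Hardcoded Sawyer joint configuration used by the tests in this PR
init_qpos = [-0.5538, -0.8208, 0.4155, 1.8409, -0.4955, 0.6482, 1.9628]
env.robots[0].set_robot_joint_positions(init_qpos)

# Re-seed the right-arm part controller; after this change it stores
# initial_ref_pos / initial_ref_ori_mat instead of initial_ee_pos / initial_ee_ori_mat
right_controller = env.robots[0].composite_controller.part_controllers["right"]
right_controller.update_initial_joints(init_qpos)
right_controller.reset_goal()
```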
@@ -160,8 +160,8 @@ def update_initial_joints(self, initial_joints):
"""
self.initial_joint = np.array(initial_joints)
self.update(force=True)
- self.initial_ee_pos = self.ee_pos
- self.initial_ee_ori_mat = self.ee_ori_mat
+ self.initial_ref_pos = self.ref_pos
+ self.initial_ref_ori_mat = self.ref_ori_mat

def clip_torques(self, torques):
"""
robosuite/models/grippers/jaco_three_finger_gripper.py (1 addition, 1 deletion)
@@ -76,7 +76,7 @@ def speed(self):

@property
def dof(self):
- return 3
+ return 1


class JacoThreeFingerDexterousGripper(JacoThreeFingerGripperBase):
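With the change above, the Jaco three-finger gripper reports a single control DoF, i.e. one action dimension drives all three fingers together, rather than one dimension per finger. A small sketch of checking this through the gripper registry imported by test_all_grippers.py below; the mapping key is assumed to match the gripper class name:

```python
from robosuite.models.grippers import GRIPPER_MAPPING

# Key name assumed to mirror the class defined in jaco_three_finger_gripper.py
gripper = GRIPPER_MAPPING["JacoThreeFingerGripper"]()
print(gripper.dof)  # expected to print 1 after this change
```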
tests/test_controllers/test_linear_interpolator.py (20 additions, 20 deletions)
@@ -28,6 +28,7 @@
import numpy as np

import robosuite as suite
+ from robosuite.controllers.composite.composite_controller_factory import load_composite_controller_config
import robosuite.utils.transform_utils as T

# Define the threshold locations, delta values, and ratio #
@@ -70,7 +71,7 @@ def step(env, action, current_torques):
env.sim.forward()
env._pre_action(action, policy_step)
last_torques = current_torques
- current_torques = env.robots[0].torques
+ current_torques = env.robots[0].composite_controller.part_controllers["right"].torques
summed_abs_delta_torques += np.abs(current_torques - last_torques)
env.sim.step()
policy_step = False
@@ -83,7 +84,7 @@ def step(env, action, current_torques):
# Running the actual test #
def test_linear_interpolator():

for controller_name in ["IK_POSE", "OSC_POSE"]:
for controller_name in [None, "BASIC"]: # None corresponds to robot's default controller

for traj in ["pos", "ori"]:

@@ -95,16 +96,11 @@ def test_linear_interpolator():
# Define numpy seed so we guarantee consistent starting pos / ori for each trajectory
np.random.seed(3)

- # Define controller path to load
- controller_path = os.path.join(
- os.path.dirname(__file__),
- "../../robosuite",
- "controllers/config/{}.json".format(controller_name.lower()),
+ # load a composite controller
+ controller_config = load_composite_controller_config(
+ controller=controller_name,
+ robot="Sawyer",
)
- with open(controller_path) as f:
- controller_config = json.load(f)
- controller_config["interpolation"] = interpolator
- controller_config["ramp_ratio"] = 1.0

# Now, create a test env for testing the controller on
env = suite.make(
@@ -124,8 +120,8 @@ def test_linear_interpolator():
# Hardcode the starting position for sawyer
init_qpos = [-0.5538, -0.8208, 0.4155, 1.8409, -0.4955, 0.6482, 1.9628]
env.robots[0].set_robot_joint_positions(init_qpos)
- env.robots[0].controller.update_initial_joints(init_qpos)
- env.robots[0].controller.reset_goal()
+ env.robots[0].composite_controller.part_controllers["right"].update_initial_joints(init_qpos)
+ env.robots[0].composite_controller.part_controllers["right"].reset_goal()

# Notify user a new trajectory is beginning
print(
@@ -140,19 +136,21 @@ def test_linear_interpolator():

# Keep track of state of robot eef (pos, ori (euler)) and torques
current_torques = np.zeros(7)
- initial_state = [env.robots[0]._hand_pos, T.mat2quat(env.robots[0]._hand_orn)]
+ initial_state = [env.robots[0]._hand_pos["right"], T.mat2quat(env.robots[0]._hand_orn["right"])]
dstate = [
- env.robots[0]._hand_pos - initial_state[0],
- T.mat2euler(T.quat2mat(T.quat_distance(T.mat2quat(env.robots[0]._hand_orn), initial_state[1]))),
+ env.robots[0]._hand_pos["right"] - initial_state[0],
+ T.mat2euler(
+ T.quat2mat(T.quat_distance(T.mat2quat(env.robots[0]._hand_orn["right"]), initial_state[1]))
+ ),
]

# Define the uniform trajectory action
if traj == "pos":
- pos_act = pos_action_ik if controller_name == "IK_POSE" else pos_action_osc
+ pos_act = pos_action_osc
rot_act = np.zeros(3)
else:
pos_act = np.zeros(3)
- rot_act = rot_action_ik if controller_name == "IK_POSE" else rot_action_osc
+ rot_act = rot_action_osc

# Compose the action
action = np.concatenate([pos_act, rot_act, [0]])
@@ -171,8 +169,10 @@ def test_linear_interpolator():
summed_abs_delta_torques[j] += summed_torques
timesteps[j] += 1
dstate = [
- env.robots[0]._hand_pos - initial_state[0],
- T.mat2euler(T.quat2mat(T.quat_distance(T.mat2quat(env.robots[0]._hand_orn), initial_state[1]))),
+ env.robots[0]._hand_pos["right"] - initial_state[0],
+ T.mat2euler(
+ T.quat2mat(T.quat_distance(T.mat2quat(env.robots[0]._hand_orn["right"]), initial_state[1]))
+ ),
]

# When finished, print out the timestep results
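The interpolator test now goes entirely through the composite-controller API: configs come from load_composite_controller_config, per-arm controllers live under part_controllers, and the end-effector accessors are keyed by part name. A minimal sketch of that access pattern, assuming env was built with a Sawyer as in the earlier sketch:

```python
from robosuite.controllers.composite.composite_controller_factory import load_composite_controller_config

# None resolves to the robot's default composite controller; "BASIC" loads the generic one
controller_config = load_composite_controller_config(controller="BASIC", robot="Sawyer")

# Per-arm state is reached through the part name ("right" for a single-arm Sawyer)
right = env.robots[0].composite_controller.part_controllers["right"]
torques = right.torques                     # joint torques now live on the part controller
eef_pos = env.robots[0]._hand_pos["right"]  # end-effector accessors are dicts keyed by part name
eef_ori = env.robots[0]._hand_orn["right"]  # rotation matrix for the right hand
```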
tests/test_controllers/test_variable_impedance.py (15 additions, 15 deletions)
@@ -24,12 +24,11 @@
"""

import argparse
- import json
- import os

import numpy as np

import robosuite as suite
+ from robosuite.controllers.composite.composite_controller_factory import load_composite_controller_config

# Define the rate of change when sweeping through kp / damping values
num_timesteps_per_change = 10
@@ -56,20 +55,21 @@ def test_variable_impedance():
# Define numpy seed so we guarantee consistent starting pos / ori for each trajectory
np.random.seed(3)

- # Define controller path to load
- controller_path = os.path.join(
- os.path.dirname(__file__), "../../robosuite", "controllers/config/{}.json".format(controller_name.lower())
- )

- # Load the controller
- with open(controller_path) as f:
- controller_config = json.load(f)
+ composite_controller_config = load_composite_controller_config(controller=None, robot="Sawyer")
+ controller_config = composite_controller_config["body_parts"]["right"]
+ controller_config["type"] = controller_name
# Manually edit impedance settings
controller_config["impedance_mode"] = "variable"
controller_config["kp_limits"] = [0, 300]
controller_config["damping_limits"] = [0, 10]

if controller_name == "OSC_POSITION":
controller_config["output_min"] = [0.05, 0.05, 0.05]
controller_config["output_max"] = [-0.05, -0.05, -0.05]
elif controller_name == "JOINT_POSITION":
controller_config["output_min"] = -1
controller_config["output_max"] = 1

# Now, create a test env for testing the controller on
env = suite.make(
"Lift",
@@ -79,7 +79,7 @@
use_camera_obs=False,
horizon=10000,
control_freq=20,
- controller_configs=controller_config,
+ controller_configs=composite_controller_config,
)

# Setup printing options for numbers
@@ -116,7 +116,7 @@
# Hardcode the starting position for sawyer
init_qpos = [-0.5538, -0.8208, 0.4155, 1.8409, -0.4955, 0.6482, 1.9628]
env.robots[0].set_robot_joint_positions(init_qpos)
- env.robots[0].controller.update_initial_joints(init_qpos)
+ env.robots[0].composite_controller.part_controllers["right"].update_initial_joints(init_qpos)

# Notify user a new test is beginning
print("\nTesting controller {} while sweeping {}...".format(controller_name, gain))
@@ -126,7 +126,7 @@
env.viewer.set_camera(camera_id=0)

# Keep track of relative changes in robot eef position
- last_pos = env.robots[0]._hand_pos
+ last_pos = env.robots[0]._hand_pos["right"]

# Initialize gains
if gain == "kp":
@@ -155,7 +155,7 @@
env.render()

# Update the current change in state
- cur_pos = env.robots[0]._hand_pos
+ cur_pos = env.robots[0]._hand_pos["right"]

# If we're at the end of the increase, switch direction of traj and gain changes
if i == int(num_timesteps_per_change / percent_increase):
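Rather than editing a standalone JSON controller file, the variable-impedance test now starts from a composite controller config and edits the right arm's entry under body_parts. A hedged sketch of that pattern; the "OSC_POSITION" type, the limit values, and the dictionary keys are taken from the diff above:

```python
from robosuite.controllers.composite.composite_controller_factory import load_composite_controller_config

# Start from the robot's default composite controller and pull out the right arm's part config
composite_controller_config = load_composite_controller_config(controller=None, robot="Sawyer")
controller_config = composite_controller_config["body_parts"]["right"]

# Switch the right arm to a variable-impedance controller ("OSC_POSITION" is one of the
# controller types the test sweeps over)
controller_config["type"] = "OSC_POSITION"
controller_config["impedance_mode"] = "variable"
controller_config["kp_limits"] = [0, 300]
controller_config["damping_limits"] = [0, 10]

# Note: the full composite config, not just the per-arm dict, is what the test passes to
# suite.make(..., controller_configs=composite_controller_config)
```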
tests/test_grippers/test_all_grippers.py (1 addition, 0 deletions)
@@ -6,6 +6,7 @@

Obviously, if an environment crashes during runtime, that is considered a failure as well.
"""

from robosuite.models.grippers import GRIPPER_MAPPING


tests/test_grippers/test_panda_gripper.py (1 addition, 0 deletions)
@@ -1,6 +1,7 @@
"""
Tests panda gripper on grabbing task
"""

from robosuite.models.grippers import GripperTester, PandaGripper


tests/test_grippers/test_rethink_gripper.py (1 addition, 0 deletions)
@@ -1,6 +1,7 @@
"""
Tests two finger gripper and left two finger gripper on grabbing task
"""

from robosuite.models.grippers import GripperTester, RethinkGripper

