tensor instead of array

Cem Gökmen 2024-06-26 14:00:52 -07:00
parent f93aa51d33
commit f4a31e4f9b
120 changed files with 893 additions and 891 deletions
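The mechanical pattern throughout is np.array call sites becoming torch equivalents. One pitfall worth noting up front (a minimal sketch, not part of the diff): th.Tensor is the legacy constructor, always yields float32, and accepts no dtype keyword, so the dtype-carrying call sites below use the th.tensor factory instead.

import numpy as np
import torch as th

data = np.array([0.1, 0.2, 0.3])  # illustrative stand-in

a = th.Tensor(data)                          # always float32
b = th.tensor(data)                          # inherits float64 from numpy
c = th.tensor([3, 0, 1, 2], dtype=th.int64)  # dtype is valid on th.tensor only
# th.Tensor([3, 0, 1, 2], dtype=th.int64)    # TypeError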


@ -142,12 +142,12 @@ class PlanningContext(object):
if m.TIAGO_TORSO_FIXED:
assert self.arm == "left", "Fixed torso mode only supports left arm!"
joint_control_idx = self.robot.arm_control_idx["left"]
joint_pos = np.array(self.robot.get_joint_positions()[joint_control_idx])
joint_pos = th.Tensor(self.robot.get_joint_positions()[joint_control_idx])
else:
joint_combined_idx = np.concatenate(
[self.robot.trunk_control_idx, self.robot.arm_control_idx[fk_descriptor]]
)
joint_pos = np.array(self.robot.get_joint_positions()[joint_combined_idx])
joint_pos = th.Tensor(self.robot.get_joint_positions()[joint_combined_idx])
link_poses = self.fk_solver.get_link_poses(joint_pos, arm_links)
# Set position of robot copy root prim
@ -171,9 +171,9 @@ class PlanningContext(object):
self._set_prim_pose(copy_mesh, mesh_copy_pose)
def _set_prim_pose(self, prim, pose):
translation = lazy.pxr.Gf.Vec3d(*np.array(pose[0], dtype=float))
translation = lazy.pxr.Gf.Vec3d(*th.tensor(pose[0], dtype=th.float64).tolist())
prim.GetAttribute("xformOp:translate").Set(translation)
orientation = np.array(pose[1], dtype=float)[[3, 0, 1, 2]]
orientation = th.tensor(pose[1], dtype=th.float64)[[3, 0, 1, 2]].tolist()
prim.GetAttribute("xformOp:orient").Set(lazy.pxr.Gf.Quatd(*orientation))
def _construct_disabled_collision_pairs(self):
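The Gf constructors above expect plain Python scalars; a minimal sketch of the tensor-to-USD conversion, assuming the (x, y, z, w) quaternion convention used in the surrounding code (pose_to_gf is an illustrative helper, and Gf is reached via lazy.pxr in the codebase):

import torch as th

def pose_to_gf(pos, quat, Gf):
    # Unpack to Python floats so the USD bindings accept the values
    translation = Gf.Vec3d(*th.tensor(pos, dtype=th.float64).tolist())
    # Reorder (x, y, z, w) -> (w, x, y, z), the order Gf.Quatd expects
    orientation = Gf.Quatd(*th.tensor(quat, dtype=th.float64)[[3, 0, 1, 2]].tolist())
    return translation, orientation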
@ -357,9 +357,9 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
lazy.omni.usd.commands.CreatePrimCommand("Xform", rc["copy_path"]).do()
copy_robot = lazy.omni.isaac.core.utils.prims.get_prim_at_path(rc["copy_path"])
reset_pose = robot_copy.reset_pose[robot_type]
translation = lazy.pxr.Gf.Vec3d(*np.array(reset_pose[0], dtype=float))
translation = lazy.pxr.Gf.Vec3d(*th.tensor(reset_pose[0], dtype=th.float64).tolist())
copy_robot.GetAttribute("xformOp:translate").Set(translation)
orientation = np.array(reset_pose[1], dtype=float)[[3, 0, 1, 2]]
orientation = th.tensor(reset_pose[1], dtype=th.float64)[[3, 0, 1, 2]].tolist()
copy_robot.GetAttribute("xformOp:orient").Set(lazy.pxr.Gf.Quatd(*orientation))
robot_to_copy = None
@ -385,7 +385,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
relative_pose = T.relative_pose_transform(
*mesh.get_position_orientation(), *link.get_position_orientation()
)
relative_pose = (relative_pose[0], np.array([0, 0, 0, 1]))
relative_pose = (relative_pose[0], th.Tensor([0, 0, 0, 1]))
if link_name not in copy_robot_meshes.keys():
copy_robot_meshes[link_name] = {mesh_name: copy_mesh}
copy_robot_meshes_relative_poses[link_name] = {mesh_name: relative_pose}
@ -462,7 +462,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
attempts (int): Number of attempts to make before raising an error
Yields:
np.array or None: Action array for one step for the robot to execute the primitive or None if primitive completed
th.Tensor or None: Action array for one step for the robot to execute the primitive or None if primitive completed
Raises:
ActionPrimitiveError: If primitive fails to execute
@ -617,7 +617,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
speed (float): base speed
Returns:
np.array or None: Action array for one step for the robot to move base or None if it's at the target pose
th.Tensor or None: Action array for one step for the robot to move base or None if it's at the target pose
"""
for _ in range(steps):
action = self._empty_action()
@ -634,7 +634,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
speed (float): eef speed
Returns:
np.array or None: Action array for one step for the robot to move hand or None if it's at the target pose
th.Tensor or None: Action array for one step for the robot to move hand or None if it's at the target pose
"""
for _ in range(steps):
action = self._empty_action()
@ -651,7 +651,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
speed (float): eef speed
Returns:
np.array or None: Action array for one step for the robot to move hand or None if it's at the target pose
th.Tensor or None: Action array for one step for the robot to move hand or None if it's at the target pose
"""
# TODO: Combine these movement functions.
for _ in range(steps):
@ -668,7 +668,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
StatefulObject: Object for robot to grasp
Returns:
np.array or None: Action array for one step for the robot to grasp or None if grasp completed
th.Tensor or None: Action array for one step for the robot to grasp or None if grasp completed
"""
# Update the tracking to track the object.
self._tracking_object = obj
@ -746,7 +746,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
obj (StatefulObject): Object for robot to place the object in its hand on
Returns:
np.array or None: Action array for one step for the robot to place or None if place completed
th.Tensor or None: Action array for one step for the robot to place or None if place completed
"""
yield from self._place_with_predicate(obj, object_states.OnTop)
@ -758,7 +758,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
obj (StatefulObject): Object for robot to place the object in its hand on
Returns:
np.array or None: Action array for one step for the robot to place or None if place completed
th.Tensor or None: Action array for one step for the robot to place or None if place completed
"""
yield from self._place_with_predicate(obj, object_states.Inside)
@ -801,7 +801,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
predicate (object_states.OnTop or object_states.Inside): Determines whether to place on top or inside
Returns:
np.array or None: Action array for one step for the robot to place or None if place completed
th.Tensor or None: Action array for one step for the robot to place or None if place completed
"""
# Update the tracking to track the object.
self._tracking_object = obj
@ -846,8 +846,8 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
Returns:
2-tuple
- np.array or None: Joint positions to reach target pose or None if impossible to reach target pose
- np.array: Indices for joints in the robot
- th.Tensor or None: Joint positions to reach target pose or None if impossible to reach target pose
- th.Tensor: Indices for joints in the robot
"""
relative_target_pose = self._get_pose_in_robot_frame(target_pose)
joint_pos = self._ik_solver_cartesian_to_joint_space(relative_target_pose)
@ -917,8 +917,8 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
Returns:
2-tuple
- np.array or None: Joint positions to reach target pose or None if impossible to reach the target pose
- np.array: Indices for joints in the robot
- th.Tensor or None: Joint positions to reach target pose or None if impossible to reach the target pose
- th.Tensor: Indices for joints in the robot
"""
ik_solver = IKSolver(
robot_description_path=self._manipulation_descriptor_path,
@ -943,7 +943,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
target_pose (Iterable of array): Position and orientation arrays in an iterable for pose
Returns:
np.array or None: Action array for one step for the robot to move hand or None if it's at the target pose
th.Tensor or None: Action array for one step for the robot to move hand or None if it's at the target pose
"""
yield from self._settle_robot()
controller_config = self.robot._controller_config["arm_" + self.arm]
@ -959,10 +959,10 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
Yields action for the robot to move arm to reach the specified joint positions using the planner
Args:
joint_pos (np.array): Joint positions for the arm
joint_pos (th.Tensor): Joint positions for the arm
Returns:
np.array or None: Action array for one step for the robot to move arm or None if it's at the joint positions
th.Tensor or None: Action array for one step for the robot to move arm or None if it's at the joint positions
"""
with PlanningContext(self.env, self.robot, self.robot_copy, "original") as context:
plan = plan_arm_motion(
@ -990,10 +990,10 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
Yields action for the robot to move arm to reach the specified eef positions using the planner
Args:
eef_pose (np.array): End Effector pose for the arm
eef_pose (th.Tensor): End Effector pose for the arm
Returns:
np.array or None: Action array for one step for the robot to move arm or None if it's at the joint positions
th.Tensor or None: Action array for one step for the robot to move arm or None if it's at the joint positions
"""
eef_pos = eef_pose[0]
eef_ori = T.quat2axisangle(eef_pose[1])
@ -1035,7 +1035,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
Returns:
Array of arrays: Planned path with additional waypoints
"""
plan = np.array(plan)
plan = th.Tensor(plan)
interpolated_plan = []
for i in range(len(plan) - 1):
max_diff = max(plan[i + 1] - plan[i])
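The loop body is cut off above; a self-contained sketch of this densification, assuming a maximum per-joint step of 0.05 rad between consecutive waypoints (the constant is illustrative):

import torch as th

def interpolate_plan(plan, max_step=0.05):
    # plan: list of joint-position waypoints (lists of floats)
    plan = th.tensor(plan)
    out = []
    for i in range(len(plan) - 1):
        gap = plan[i + 1] - plan[i]
        # Number of pieces grows with the largest per-joint gap
        n = max(int(th.ceil(gap.abs().max() / max_step).item()), 1)
        for t in th.linspace(0.0, 1.0, n + 1)[:-1]:
            out.append(plan[i] + gap * t)
    out.append(plan[-1])
    return th.stack(out)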
@ -1049,12 +1049,12 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
Yields action for the robot to move its arm to reach the specified joint positions by directly actuating with no planner
Args:
joint_pos (np.array): Array of joint positions for the arm
joint_pos (th.Tensor): Array of joint positions for the arm
stop_on_contact (boolean): Determines whether to stop moving once an object is hit
ignore_failure (boolean): Determines whether to throw error for not reaching final joint positions
Returns:
np.array or None: Action array for one step for the robot to move arm or None if it's at the joint positions
th.Tensor or None: Action array for one step for the robot to move arm or None if it's at the joint positions
"""
controller_name = f"arm_{self.arm}"
use_delta = self.robot._controllers[controller_name].use_delta_commands
@ -1064,7 +1064,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
for _ in range(m.MAX_STEPS_FOR_HAND_MOVE_JOINT):
current_joint_pos = self.robot.get_joint_positions()[self._manipulation_control_idx]
diff_joint_pos = np.array(joint_pos) - np.array(current_joint_pos)
diff_joint_pos = th.Tensor(joint_pos) - th.Tensor(current_joint_pos)
if np.max(np.abs(diff_joint_pos)) < m.JOINT_POS_DIFF_THRESHOLD:
return
if stop_on_contact and detect_robot_collision_in_sim(self.robot, ignore_obj_in_hand=False):
@ -1182,7 +1182,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
ignore_failure (boolean): Determines whether to throw error for not reaching final joint positions
Returns:
np.array or None: Action array for one step for the robot to move arm or None if it's at the target pose
th.Tensor or None: Action array for one step for the robot to move arm or None if it's at the target pose
"""
# To make sure that this happens in a roughly linear fashion, we will divide the trajectory
# into 1cm-long pieces
@ -1192,7 +1192,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
pos_waypoints = np.linspace(start_pos, target_pose[0], num_poses)
# Also interpolate the rotations
combined_rotation = Rotation.from_quat(np.array([start_orn, target_pose[1]]))
combined_rotation = Rotation.from_quat(th.Tensor([start_orn, target_pose[1]]))
slerp = Slerp([0, 1], combined_rotation)
orn_waypoints = slerp(np.linspace(0, 1, num_poses))
quat_waypoints = [x.as_quat() for x in orn_waypoints]
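The interpolation pattern above in isolation, runnable as-is (the 1 cm piece length follows the comment in this hunk; start/target values are illustrative):

import numpy as np
from scipy.spatial.transform import Rotation, Slerp

start_pos, start_orn = np.zeros(3), np.array([0.0, 0.0, 0.0, 1.0])
target_pos, target_orn = np.array([0.05, 0.0, 0.0]), Rotation.from_euler("z", 0.3).as_quat()

# Divide the straight-line path into ~1cm-long pieces
num_poses = int(np.ceil(np.linalg.norm(target_pos - start_pos) / 0.01)) + 1
pos_waypoints = np.linspace(start_pos, target_pos, num_poses)
# Spherical-linearly interpolate the orientations over the same fractions
slerp = Slerp([0, 1], Rotation.from_quat([start_orn, target_orn]))
quat_waypoints = [x.as_quat() for x in slerp(np.linspace(0, 1, num_poses))]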
@ -1221,7 +1221,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
# Also decide if we can stop early.
current_pos, current_orn = self.robot.eef_links[self.arm].get_position_orientation()
pos_diff = np.linalg.norm(np.array(current_pos) - np.array(target_pose[0]))
pos_diff = np.linalg.norm(th.Tensor(current_pos) - th.Tensor(target_pose[0]))
orn_diff = (Rotation.from_quat(current_orn) * Rotation.from_quat(target_pose[1]).inv()).magnitude()
if pos_diff < 0.005 and orn_diff < np.deg2rad(0.1):
return
@ -1266,7 +1266,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
# Also decide if we can stop early.
current_pos, current_orn = self.robot.eef_links[self.arm].get_position_orientation()
pos_diff = np.linalg.norm(np.array(current_pos) - np.array(target_pose[0]))
pos_diff = np.linalg.norm(th.Tensor(current_pos) - th.Tensor(target_pose[0]))
orn_diff = (Rotation.from_quat(current_orn) * Rotation.from_quat(target_pose[1]).inv()).magnitude()
if pos_diff < 0.001 and orn_diff < np.deg2rad(0.1):
return
@ -1285,7 +1285,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
Yields action for the robot to grasp
Returns:
np.array or None: Action array for one step for the robot to grasp or None if it's done grasping
th.Tensor or None: Action array for one step for the robot to grasp or None if it's done grasping
"""
for _ in range(m.MAX_STEPS_FOR_GRASP_OR_RELEASE):
joint_position = self.robot.get_joint_positions()[self.robot.gripper_control_idx[self.arm]]
@ -1304,7 +1304,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
Yields action for the robot to release its grasp
Returns:
np.array or None: Action array for one step for the robot to release or None if it's done releasing
th.Tensor or None: Action array for one step for the robot to release or None if it's done releasing
"""
for _ in range(m.MAX_STEPS_FOR_GRASP_OR_RELEASE):
joint_position = self.robot.get_joint_positions()[self.robot.gripper_control_idx[self.arm]]
@ -1410,7 +1410,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
Get a no-op action that allows us to run simulation without changing robot configuration.
Returns:
np.array or None: Action array for one step for the robot to do nothing
th.Tensor or None: Action array for one step for the robot to do nothing
"""
action = np.zeros(self.robot.action_dim)
for name, controller in self.robot._controllers.items():
@ -1439,7 +1439,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
Yields action to move the hand to the position optimal for executing subsequent action primitives
Returns:
np.array or None: Action array for one step for the robot to reset its hand or None if it is done resetting
th.Tensor or None: Action array for one step for the robot to reset its hand or None if it is done resetting
"""
controller_config = self.robot._controller_config["arm_" + self.arm]
if controller_config["name"] == "InverseKinematicsController":
@ -1462,16 +1462,16 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
def _get_reset_eef_pose(self):
# TODO: Add support for Fetch
if self.robot_model == "Tiago":
return np.array([0.28493954, 0.37450749, 1.1512334]), np.array(
return th.Tensor([0.28493954, 0.37450749, 1.1512334]), th.Tensor(
[-0.21533823, 0.05361032, -0.08631776, 0.97123871]
)
else:
return np.array([0.48688125, -0.12507881, 0.97888719]), np.array(
return th.Tensor([0.48688125, -0.12507881, 0.97888719]), th.Tensor(
[0.61324748, 0.61305553, -0.35266518, 0.35173529]
)
def _get_reset_joint_pos(self):
reset_pose_fetch = np.array(
reset_pose_fetch = th.Tensor(
[
0.0,
0.0, # wheels
@ -1490,7 +1490,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
]
)
reset_pose_tiago = np.array(
reset_pose_tiago = th.Tensor(
[
-1.78029833e-04,
3.20231302e-05,
@ -1531,7 +1531,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
pose_2d (Iterable): (x, y, yaw) 2d pose
Returns:
np.array or None: Action array for one step for the robot to navigate or None if it is done navigating
th.Tensor or None: Action array for one step for the robot to navigate or None if it is done navigating
"""
with PlanningContext(self.env, self.robot, self.robot_copy, "simplified") as context:
plan = plan_base_motion(
@ -1587,7 +1587,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
pose_on_obj (Iterable): (pos, quat) Pose
Returns:
np.array or None: Action array for one step for the robot to navigate or None if it is done navigating
th.Tensor or None: Action array for one step for the robot to navigate or None if it is done navigating
"""
if pose_on_obj is not None:
if self._target_in_reach_of_robot(pose_on_obj):
@ -1607,7 +1607,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
pose_on_obj (Iterable): (pos, quat) pose
Returns:
np.array or None: Action array for one step for the robot to navigate in range or None if it is done navigating
th.Tensor or None: Action array for one step for the robot to navigate in range or None if it is done navigating
"""
pose = self._sample_pose_near_object(obj, pose_on_obj=pose_on_obj, **kwargs)
yield from self._navigate_to_pose(pose)
@ -1621,7 +1621,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
low_precision (bool): Determines whether to navigate to the pose within a large range (low precision) or small range (high precision)
Returns:
np.array or None: Action array for one step for the robot to navigate or None if it is done navigating
th.Tensor or None: Action array for one step for the robot to navigate or None if it is done navigating
"""
dist_threshold = m.LOW_PRECISION_DIST_THRESHOLD if low_precision else m.DEFAULT_DIST_THRESHOLD
angle_threshold = m.LOW_PRECISION_ANGLE_THRESHOLD if low_precision else m.DEFAULT_ANGLE_THRESHOLD
@ -1670,7 +1670,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
angle_threshold (float): The angle difference between the robot's current and end pose that determines when the robot is done rotating
Returns:
np.array or None: Action array for one step for the robot to rotate or None if it is done rotating
th.Tensor or None: Action array for one step for the robot to rotate or None if it is done rotating
"""
body_target_pose = self._get_pose_in_robot_frame(end_pose)
diff_yaw = T.quat2euler(body_target_pose[1])[2]
@ -1717,12 +1717,12 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
for _ in range(m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_NEAR_OBJECT):
if pose_on_obj is None:
pos_on_obj = self._sample_position_on_aabb_side(obj)
pose_on_obj = [pos_on_obj, np.array([0, 0, 0, 1])]
pose_on_obj = [pos_on_obj, th.Tensor([0, 0, 0, 1])]
distance = np.random.uniform(0.0, 5.0)
yaw = np.random.uniform(-np.pi, np.pi)
avg_arm_workspace_range = np.mean(self.robot.arm_workspace_range[self.arm])
pose_2d = np.array(
pose_2d = th.Tensor(
[
pose_on_obj[0][0] + distance * np.cos(yaw),
pose_on_obj[0][1] + distance * np.sin(yaw),
@ -1824,7 +1824,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
sampling_results = sample_cuboid_for_predicate(pred_map[predicate], target_obj, bb_extents)
if sampling_results[0][0] is None:
continue
sampled_bb_center = sampling_results[0][0] + np.array([0, 0, m.PREDICATE_SAMPLING_Z_OFFSET])
sampled_bb_center = sampling_results[0][0] + th.Tensor([0, 0, m.PREDICATE_SAMPLING_Z_OFFSET])
sampled_bb_orn = sampling_results[0][2]
# Get the object pose by subtracting the offset
@ -1834,7 +1834,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
# Check that the pose is near one of the poses in the near_poses list if provided.
if near_poses:
sampled_pos = np.array([sampled_obj_pose[0]])
sampled_pos = th.Tensor([sampled_obj_pose[0]])
if not np.any(np.linalg.norm(near_poses - sampled_pos, axis=1) < near_poses_threshold):
continue
@ -1885,7 +1885,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
- 3-array: (x,y,z) Position in the world frame
- 4-array: (x,y,z,w) Quaternion orientation in the world frame
"""
pos = np.array([pose_2d[0], pose_2d[1], m.DEFAULT_BODY_OFFSET_FROM_FLOOR])
pos = th.Tensor([pose_2d[0], pose_2d[1], m.DEFAULT_BODY_OFFSET_FROM_FLOOR])
orn = T.euler2quat([0, 0, pose_2d[2]])
return pos, orn
@ -1938,7 +1938,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
Yields a no op action for a few steps to allow the robot and physics to settle
Returns:
np.array or None: Action array for one step for the robot to do nothing
th.Tensor or None: Action array for one step for the robot to do nothing
"""
for _ in range(30):
empty_action = self._empty_action()


@ -69,7 +69,7 @@ class SymbolicSemanticActionPrimitives(StarterSemanticActionPrimitives):
attempts (int): Number of attempts to make before raising an error
Returns:
np.array or None: Action array for one step for the robot to execute the primitive or None if primitive completed
th.Tensor or None: Action array for one step for the robot to execute the primitive or None if primitive completed
Raises:
ActionPrimitiveError: If primitive fails to execute
@ -149,7 +149,7 @@ class SymbolicSemanticActionPrimitives(StarterSemanticActionPrimitives):
DatasetObject: Object for robot to grasp
Returns:
np.array or None: Action array for one step for the robot to grasp or None if grasp completed
th.Tensor or None: Action array for one step for the robot to grasp or None if grasp completed
"""
# Don't do anything if the object is already grasped.
obj_in_hand = self._get_obj_in_hand()
@ -242,7 +242,7 @@ class SymbolicSemanticActionPrimitives(StarterSemanticActionPrimitives):
predicate (object_states.OnTop or object_states.Inside): Determines whether to place on top or inside
Returns:
np.array or None: Action array for one step for the robot to place or None if place completed
th.Tensor or None: Action array for one step for the robot to place or None if place completed
"""
obj_in_hand = self._get_obj_in_hand()
if obj_in_hand is None:
@ -553,7 +553,7 @@ class SymbolicSemanticActionPrimitives(StarterSemanticActionPrimitives):
)
# Get the position of the heat source on the thing we're placing near
heating_element_positions = np.array(
heating_element_positions = th.Tensor(
[link.get_position() for link in heat_source_obj.states[object_states.HeatSourceOrSink].links.values()]
)
heating_distance_threshold = heat_source_obj.states[object_states.HeatSourceOrSink].distance_threshold
@ -599,7 +599,7 @@ class SymbolicSemanticActionPrimitives(StarterSemanticActionPrimitives):
pose_2d (Iterable): (x, y, yaw) 2d pose
Returns:
np.array or None: Action array for one step for the robot to navigate or None if it is done navigating
th.Tensor or None: Action array for one step for the robot to navigate or None if it is done navigating
"""
robot_pose = self._get_robot_pose_from_2d_pose(pose_2d)
self.robot.set_position_orientation(*robot_pose)


@ -106,12 +106,12 @@ class BaseController(Serializable, Registerable, Recreatable):
continue
self._control_limits[ControlType.get_type(motor_type)] = [
np.array(control_limits[motor_type][0]),
np.array(control_limits[motor_type][1]),
th.Tensor(control_limits[motor_type][0]),
th.Tensor(control_limits[motor_type][1]),
]
assert "has_limit" in control_limits, "Expected has_limit specified in control_limits, but does not exist."
self._dof_has_limits = control_limits["has_limit"]
self._dof_idx = np.array(dof_idx, dtype=int)
self._dof_idx = th.tensor(dof_idx, dtype=th.int64)
# Generate goal information
self._goal_shapes = self._get_goal_shapes()
@ -132,8 +132,8 @@ class BaseController(Serializable, Registerable, Recreatable):
)
command_output_limits = (
(
np.array(self._control_limits[self.control_type][0])[self.dof_idx],
np.array(self._control_limits[self.control_type][1])[self.dof_idx],
th.Tensor(self._control_limits[self.control_type][0])[self.dof_idx],
th.Tensor(self._control_limits[self.control_type][1])[self.dof_idx],
)
if type(command_input_limits) == str and command_input_limits == "default"
else command_output_limits
@ -167,8 +167,8 @@ class BaseController(Serializable, Registerable, Recreatable):
Returns:
Array[float]: Processed command vector
"""
# Make sure command is a np.array
command = np.array([command]) if type(command) in {int, float} else np.array(command)
# Make sure command is a th.Tensor
command = th.Tensor([command]) if type(command) in {int, float} else th.Tensor(command)
# We only clip and / or scale if self.command_input_limits exists
if self._command_input_limits is not None:
# Clip
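The clip / scale body is truncated above; a sketch of the conventional behavior, assuming commands are clipped to the input limits and then linearly mapped onto the output limits (the function and argument names here are illustrative, not the controller's actual attributes):

import torch as th

def preprocess(command, input_limits, output_limits):
    command = th.tensor([command]) if isinstance(command, (int, float)) else th.tensor(command)
    in_low, in_high = (th.tensor(x) for x in input_limits)
    out_low, out_high = (th.tensor(x) for x in output_limits)
    command = th.clip(command, in_low, in_high)  # clip to input limits
    # Linearly rescale from the input range onto the output range
    return out_low + (command - in_low) * (out_high - out_low) / (in_high - in_low)

preprocess(0.5, (-1.0, 1.0), (-0.2, 0.2))  # tensor([0.1000])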
@ -208,7 +208,7 @@ class BaseController(Serializable, Registerable, Recreatable):
), f"Commands must be dimension {self.command_dim}, got dim {len(command)} instead."
# Preprocess and run internal command
self._goal = self._update_goal(command=self._preprocess_command(np.array(command)), control_dict=control_dict)
self._goal = self._update_goal(command=self._preprocess_command(th.Tensor(command)), control_dict=control_dict)
def _update_goal(self, command, control_dict):
"""
@ -314,7 +314,7 @@ class BaseController(Serializable, Registerable, Recreatable):
self._goal = (
None
if state["goal"] is None
else {name: np.array(goal_state) for name, goal_state in state["goal"].items()}
else {name: th.Tensor(goal_state) for name, goal_state in state["goal"].items()}
)
def serialize(self, state):
@ -369,15 +369,15 @@ class BaseController(Serializable, Registerable, Recreatable):
dim (int): Size of array to broadcast input to
Returns:
np.array: Array filled with values specified in @nums
th.Tensor: Array filled with values specified in @nums
"""
# First run sanity check to make sure no strings are being inputted
if isinstance(nums, str):
raise TypeError("Error: Only numeric inputs are supported for this function, nums2array!")
# Check if input is an Iterable, if so, we simply convert the input to np.array and return
# Check if input is an Iterable, if so, we simply convert the input to th.Tensor and return
# Else, input is a single value, so we map to a tensor of correct size and return
return np.array(nums) if isinstance(nums, Iterable) else np.ones(dim) * nums
return th.tensor(nums) if isinstance(nums, Iterable) else th.ones(dim) * nums
@property
def state_size(self):
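The broadcast contract of nums2array, illustrated (this assumes the scalar branch also returns a tensor, matching the th.Tensor return type documented above):

import torch as th

assert th.equal(nums2array(nums=[1.0, 2.0], dim=4), th.tensor([1.0, 2.0]))  # iterable: converted as-is
assert th.equal(nums2array(nums=0.5, dim=3), th.tensor([0.5, 0.5, 0.5]))    # scalar: broadcast to dim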
@ -465,7 +465,7 @@ class BaseController(Serializable, Registerable, Recreatable):
Returns:
Array[int]: DOF indices corresponding to the specific DOFs being controlled by this robot
"""
return np.array(self._dof_idx)
return self._dof_idx.clone()
@classproperty
def _do_not_register_classes(cls):


@ -107,7 +107,7 @@ class DifferentialDriveController(LocomotionController):
right_wheel_joint_vel = (lin_vel + ang_vel * self._wheel_axle_halflength) / self._wheel_radius
# Return desired velocities
return np.array([left_wheel_joint_vel, right_wheel_joint_vel])
return th.Tensor([left_wheel_joint_vel, right_wheel_joint_vel])
def compute_no_op_goal(self, control_dict):
# This is zero-vector, since we want zero linear / angular velocity
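The wheel-speed mapping above is standard differential-drive inverse kinematics; in isolation (parameter names mirror the attributes used above):

import torch as th

def wheel_joint_velocities(lin_vel, ang_vel, wheel_radius, axle_halflength):
    # Each wheel's linear speed is the body speed plus/minus the rotational
    # contribution at the axle half-length; dividing by the wheel radius
    # converts linear wheel speed to joint (angular) velocity.
    left = (lin_vel - ang_vel * axle_halflength) / wheel_radius
    right = (lin_vel + ang_vel * axle_halflength) / wheel_radius
    return th.tensor([left, right])

wheel_joint_velocities(0.0, 1.0, 0.1, 0.2)  # pure rotation -> tensor([-2., 2.])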


@ -242,8 +242,8 @@ class InverseKinematicsController(JointController, ManipulationController):
def _update_goal(self, command, control_dict):
# Grab important info from control dict
pos_relative = np.array(control_dict[f"{self.task_name}_pos_relative"])
quat_relative = np.array(control_dict[f"{self.task_name}_quat_relative"])
pos_relative = th.Tensor(control_dict[f"{self.task_name}_pos_relative"])
quat_relative = th.Tensor(control_dict[f"{self.task_name}_quat_relative"])
# Convert position command to absolute values if needed
if self.mode == "absolute_pose":
@ -306,8 +306,8 @@ class InverseKinematicsController(JointController, ManipulationController):
Array[float]: outputted (non-clipped!) velocity control signal to deploy
"""
# Grab important info from control dict
pos_relative = np.array(control_dict[f"{self.task_name}_pos_relative"])
quat_relative = np.array(control_dict[f"{self.task_name}_quat_relative"])
pos_relative = th.Tensor(control_dict[f"{self.task_name}_pos_relative"])
quat_relative = th.Tensor(control_dict[f"{self.task_name}_quat_relative"])
target_pos = goal_dict["target_pos"]
target_quat = goal_dict["target_quat"]
@ -359,8 +359,8 @@ class InverseKinematicsController(JointController, ManipulationController):
def compute_no_op_goal(self, control_dict):
# No-op is maintaining current pose
return dict(
target_pos=np.array(control_dict[f"{self.task_name}_pos_relative"]),
target_quat=np.array(control_dict[f"{self.task_name}_quat_relative"]),
target_pos=th.Tensor(control_dict[f"{self.task_name}_pos_relative"]),
target_quat=th.Tensor(control_dict[f"{self.task_name}_quat_relative"]),
)
def _get_goal_shapes(self):


@ -95,8 +95,8 @@ class MultiFingerGripperController(GripperController):
self._inverted = inverted
self._mode = mode
self._limit_tolerance = limit_tolerance
self._open_qpos = open_qpos if open_qpos is None else np.array(open_qpos)
self._closed_qpos = closed_qpos if closed_qpos is None else np.array(closed_qpos)
self._open_qpos = open_qpos if open_qpos is None else th.Tensor(open_qpos)
self._closed_qpos = closed_qpos if closed_qpos is None else th.Tensor(closed_qpos)
# Create other args to be filled in at runtime
self._is_grasping = IsGraspingState.FALSE
@ -125,9 +125,9 @@ class MultiFingerGripperController(GripperController):
# We extend this method to make sure command is always 2D
if self._mode != "independent":
command = (
np.array([command] * self.command_dim)
th.Tensor([command] * self.command_dim)
if type(command) in {int, float}
else np.array([command[0]] * self.command_dim)
else th.Tensor([command[0]] * self.command_dim)
)
# Flip the command if the direction is inverted.
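What the 2D normalization above does for the non-independent modes, sketched with command_dim = 2: a scalar is duplicated across both fingers, and only the first entry of a multi-element command is kept.

import torch as th

command_dim = 2
for cmd in (1.0, [1.0, -1.0]):
    out = (
        th.Tensor([cmd] * command_dim)
        if isinstance(cmd, (int, float))
        else th.Tensor([cmd[0]] * command_dim)
    )
    print(out)  # tensor([1., 1.]) in both cases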


@ -55,7 +55,7 @@ class NullJointController(JointController):
applied
"""
# Store values
self._default_command = np.zeros(len(dof_idx)) if default_command is None else np.array(default_command)
self._default_command = th.zeros(len(dof_idx)) if default_command is None else th.Tensor(default_command)
# Run super init
super().__init__(
@ -77,7 +77,7 @@ class NullJointController(JointController):
def _preprocess_command(self, command):
# Override super and force the processed command to be internal stored default value
return np.array(self._default_command)
return th.Tensor(self._default_command)
def update_default_goal(self, target):
"""
@ -91,4 +91,4 @@ class NullJointController(JointController):
len(target) == self.control_dim
), f"Default control must be length: {self.control_dim}, got length: {len(target)}"
self._default_command = np.array(target)
self._default_command = th.Tensor(target)


@ -133,9 +133,9 @@ class OperationalSpaceController(ManipulationController):
self.damping_ratio = damping_ratio
self.kp_null = nums2array(nums=kp_null, dim=control_dim, dtype=np.float32) if kp_null is not None else None
self.kd_null = 2 * np.sqrt(self.kp_null) if kp_null is not None else None # critically damped
self.kp_limits = np.array(kp_limits, dtype=np.float32)
self.damping_ratio_limits = np.array(damping_ratio_limits, dtype=np.float32)
self.kp_null_limits = np.array(kp_null_limits, dtype=np.float32)
self.kp_limits = th.tensor(kp_limits, dtype=th.float32)
self.damping_ratio_limits = th.tensor(damping_ratio_limits, dtype=th.float32)
self.kp_null_limits = th.tensor(kp_null_limits, dtype=th.float32)
# Store settings for whether we're learning gains or not
self.variable_kp = self.kp is None
@ -294,8 +294,8 @@ class OperationalSpaceController(ManipulationController):
frame to control, computed in its local frame (e.g.: robot base frame)
"""
# Grab important info from control dict
pos_relative = np.array(control_dict[f"{self.task_name}_pos_relative"])
quat_relative = np.array(control_dict[f"{self.task_name}_quat_relative"])
pos_relative = th.Tensor(control_dict[f"{self.task_name}_pos_relative"])
quat_relative = th.Tensor(control_dict[f"{self.task_name}_quat_relative"])
# Convert position command to absolute values if needed
if self.mode == "absolute_pose":
@ -416,8 +416,8 @@ class OperationalSpaceController(ManipulationController):
def compute_no_op_goal(self, control_dict):
# No-op is maintaining current pose
target_pos = np.array(control_dict[f"{self.task_name}_pos_relative"])
target_quat = np.array(control_dict[f"{self.task_name}_quat_relative"])
target_pos = th.Tensor(control_dict[f"{self.task_name}_pos_relative"])
target_quat = th.Tensor(control_dict[f"{self.task_name}_quat_relative"])
# Convert quat into eef ori mat
return dict(


@ -578,8 +578,8 @@ class Environment(gym.Env, GymObservable, Recreatable):
following OpenAI Gym's convention
Args:
action (gym.spaces.Dict or dict or np.array): robot actions. If a dict is specified, each entry should
map robot name to corresponding action. If a np.array, it should be the flattened, concatenated set
action (gym.spaces.Dict or dict or th.Tensor): robot actions. If a dict is specified, each entry should
map robot name to corresponding action. If a th.Tensor, it should be the flattened, concatenated set
of actions
Returns:


@ -46,7 +46,7 @@ class EnvironmentWrapper(Wrapper, Registerable):
By default, run the normal environment step() function
Args:
action (np.array): action to take in environment
action (th.Tensor): action to take in environment
Returns:
4-tuple:


@ -111,8 +111,8 @@ def main(random_selection=False, headless=False, short_exec=False):
# Set viewer camera pose
og.sim.viewer_camera.set_position_orientation(
position=np.array([-1.689292, -2.11718198, 0.93332228]),
orientation=np.array([0.57687967, -0.22995655, -0.29022759, 0.72807814]),
position=th.Tensor([-1.689292, -2.11718198, 0.93332228]),
orientation=th.Tensor([0.57687967, -0.22995655, -0.29022759, 0.72807814]),
)
for _ in range(10):
@ -121,7 +121,7 @@ def main(random_selection=False, headless=False, short_exec=False):
shelf_baseboard = env.scene.object_registry("name", "shelf_baseboard")
shelf_baseboard.set_position_orientation([0, -0.979, 0.26], [0, 0, 0, 1])
shelf_baseboard.keep_still()
shelf_baseboard.set_linear_velocity(np.array([-0.2, 0, 0]))
shelf_baseboard.set_linear_velocity(th.Tensor([-0.2, 0, 0]))
shelf_side_left = env.scene.object_registry("name", "shelf_side_left")
shelf_side_left.set_position_orientation([-0.4, 0.0, 0.2], [0, 0, 0, 1])


@ -79,17 +79,17 @@ def main(random_selection=False, headless=False, short_exec=False):
# Update the simulator's viewer camera's pose so it points towards the table
og.sim.viewer_camera.set_position_orientation(
position=np.array([0.544888, -0.412084, 1.11569]),
orientation=np.array([0.54757518, 0.27792802, 0.35721896, 0.70378409]),
position=th.Tensor([0.544888, -0.412084, 1.11569]),
orientation=th.Tensor([0.54757518, 0.27792802, 0.35721896, 0.70378409]),
)
# Let apple settle
for _ in range(50):
env.step(np.array([]))
env.step(th.Tensor([]))
knife.keep_still()
knife.set_position_orientation(
position=apple.get_position() + np.array([-0.15, 0.0, 0.2]),
position=apple.get_position() + th.Tensor([-0.15, 0.0, 0.2]),
orientation=T.euler2quat([-np.pi / 2, 0, 0]),
)
@ -97,7 +97,7 @@ def main(random_selection=False, headless=False, short_exec=False):
# Step simulation for a bit so that apple is diced
for i in range(1000):
env.step(np.array([]))
env.step(th.Tensor([]))
input("Apple has been diced! Press [ENTER] to terminate the demo.")


@ -67,8 +67,8 @@ def main(random_selection=False, headless=False, short_exec=False):
# Set viewer camera
og.sim.viewer_camera.set_position_orientation(
position=np.array([0.46382895, -2.66703958, 1.22616824]),
orientation=np.array([0.58779174, -0.00231237, -0.00318273, 0.80900271]),
position=th.Tensor([0.46382895, -2.66703958, 1.22616824]),
orientation=th.Tensor([0.58779174, -0.00231237, -0.00318273, 0.80900271]),
)
def print_state():
@ -139,11 +139,11 @@ def main(random_selection=False, headless=False, short_exec=False):
increments = 25
for ctrl_pts in np.concatenate([np.linspace(start, mid, increments), np.linspace(mid, end, increments)]):
obj.root_link.set_particle_positions(ctrl_pts, idxs=indices)
env.step(np.array([]))
env.step(th.Tensor([]))
print_state()
while True:
env.step(np.array([]))
env.step(th.Tensor([]))
print_state()
# Shut down env at the end


@ -38,8 +38,8 @@ def main():
# Set camera to appropriate viewing pose
og.sim.viewer_camera.set_position_orientation(
position=np.array([-0.0792399, -1.30104, 1.51981]),
orientation=np.array([0.54897692, 0.00110359, 0.00168013, 0.83583509]),
position=th.Tensor([-0.0792399, -1.30104, 1.51981]),
orientation=th.Tensor([0.54897692, 0.00110359, 0.00168013, 0.83583509]),
)
# Make sure necessary object states are included with the stove
@ -48,7 +48,7 @@ def main():
# Take a few steps so that visibility propagates
for _ in range(5):
env.step(np.array([]))
env.step(th.Tensor([]))
# Heat source is off.
print("Heat source is OFF.")
@ -62,33 +62,33 @@ def main():
assert stove.states[object_states.ToggledOn].get_value()
# Need to take a step to update the state.
env.step(np.array([]))
env.step(th.Tensor([]))
# Heat source is on
heat_source_state = stove.states[object_states.HeatSourceOrSink].get_value()
assert heat_source_state
for _ in range(500):
env.step(np.array([]))
env.step(th.Tensor([]))
# Toggle off stove, notify user
input("Heat source will now turn OFF: Press ENTER to continue.")
stove.states[object_states.ToggledOn].set_value(False)
assert not stove.states[object_states.ToggledOn].get_value()
for _ in range(200):
env.step(np.array([]))
env.step(th.Tensor([]))
# Move stove, notify user
input("Heat source is now moving: Press ENTER to continue.")
stove.set_position(np.array([0, 1.0, 0.61]))
stove.set_position(th.Tensor([0, 1.0, 0.61]))
for i in range(100):
env.step(np.array([]))
env.step(th.Tensor([]))
# Toggle on stove again, notify user
input("Heat source will now turn ON: Press ENTER to continue.")
stove.states[object_states.ToggledOn].set_value(True)
assert stove.states[object_states.ToggledOn].get_value()
for i in range(500):
env.step(np.array([]))
env.step(th.Tensor([]))
# Shutdown environment at end
env.close()


@ -30,7 +30,7 @@ def main():
name=f"bowl{i}",
category="bowl",
model="ajzltc",
bounding_box=np.array([0.329, 0.293, 0.168]) * scale,
bounding_box=th.Tensor([0.329, 0.293, 0.168]) * scale,
abilities={"heatable": {}},
position=[x, 0, 0.2],
)
@ -49,8 +49,8 @@ def main():
# Set camera to appropriate viewing pose
og.sim.viewer_camera.set_position_orientation(
position=np.array([0.182103, -2.07295, 0.14017]),
orientation=np.array([0.77787037, 0.00267566, 0.00216149, 0.62841535]),
position=th.Tensor([0.182103, -2.07295, 0.14017]),
orientation=th.Tensor([0.77787037, 0.00267566, 0.00216149, 0.62841535]),
)
# Dim the skybox so we can see the bowls' steam effectively
@ -76,7 +76,7 @@ def main():
# Heated.
for obj in objs:
obj.states[object_states.Temperature].set_value(50)
env.step(np.array([]))
env.step(th.Tensor([]))
report_states(objs)
# Take a look at the steam effect.
@ -84,7 +84,7 @@ def main():
print("==== Objects are now heated... ====")
print()
for _ in range(2000):
env.step(np.array([]))
env.step(th.Tensor([]))
# Also print temperatures
temps = [f"{obj.states[object_states.Temperature].get_value():>7.2f}" for obj in objs]
print(f"obj temps:", *temps, end="\r")


@ -56,8 +56,8 @@ def main():
# Set camera to appropriate viewing pose
og.sim.viewer_camera.set_position_orientation(
position=np.array([1.7789, -1.68822, 1.13551]),
orientation=np.array([0.57065614, 0.20331904, 0.267029, 0.74947212]),
position=th.Tensor([1.7789, -1.68822, 1.13551]),
orientation=th.Tensor([0.57065614, 0.20331904, 0.267029, 0.74947212]),
)
# Grab reference to object of interest
@ -72,7 +72,7 @@ def main():
def report_states():
# Make sure states are propagated before printing
for i in range(5):
env.step(np.array([]))
env.step(th.Tensor([]))
print("=" * 20)
print("temperature:", obj.states[object_states.Temperature].get_value())


@ -74,29 +74,29 @@ def main(random_selection=False, headless=False, short_exec=False):
# Set camera to appropriate viewing pose
og.sim.viewer_camera.set_position_orientation(
position=np.array([-0.42246569, -0.34745704, 1.56810353]),
orientation=np.array([0.50083786, -0.10407796, -0.17482619, 0.84128772]),
position=th.Tensor([-0.42246569, -0.34745704, 1.56810353]),
orientation=th.Tensor([0.50083786, -0.10407796, -0.17482619, 0.84128772]),
)
# Let objects settle
for _ in range(10):
env.step(np.array([]))
env.step(th.Tensor([]))
# Turn on the stove
stove.states[object_states.ToggledOn].set_value(True)
# The first apple will be affected by the stove
apples[0].set_position(stove.states[object_states.HeatSourceOrSink].link.get_position() + np.array([0.11, 0, 0.1]))
apples[0].set_position(stove.states[object_states.HeatSourceOrSink].link.get_position() + th.Tensor([0.11, 0, 0.1]))
# The second apple will NOT be affected by the stove, but will be affected by the first apple once it's on fire.
apples[1].set_position(stove.states[object_states.HeatSourceOrSink].link.get_position() + np.array([0.32, 0, 0.1]))
apples[1].set_position(stove.states[object_states.HeatSourceOrSink].link.get_position() + th.Tensor([0.32, 0, 0.1]))
steps = 0
max_steps = -1 if not short_exec else 1000
# Main recording loop
while steps != max_steps:
env.step(np.array([]))
env.step(th.Tensor([]))
temps = [f"{apple.states[object_states.Temperature].get_value():>20.2f}" for apple in apples]
print(f"{'Apple temperature:':<20}", *temps, end="\r")
steps += 1


@ -56,8 +56,8 @@ def main(random_selection=False, headless=False, short_exec=False):
# Set camera pose
og.sim.viewer_camera.set_position_orientation(
position=np.array([0.88215526, -1.40086216, 2.00311063]),
orientation=np.array([0.42013364, 0.12342107, 0.25339685, 0.86258043]),
position=th.Tensor([0.88215526, -1.40086216, 2.00311063]),
orientation=th.Tensor([0.42013364, 0.12342107, 0.25339685, 0.86258043]),
)
max_steps = 100 if short_exec else -1
@ -67,7 +67,7 @@ def main(random_selection=False, headless=False, short_exec=False):
while steps != max_steps:
print(f"Overlaid {carpet.states[Overlaid].get_value(breakfast_table)} ", end="\r")
env.step(np.array([]))
env.step(th.Tensor([]))
# Shut down env at the end
env.close()


@ -72,7 +72,7 @@ def main(random_selection=False, headless=False, short_exec=False):
# Either Cone or Cylinder; shape of the projection where particles can be applied / removed
"type": "Cone",
# Size of the cone
"extents": np.array([0.1875, 0.1875, 0.375]),
"extents": th.Tensor([0.1875, 0.1875, 0.375]),
},
}
@ -129,8 +129,8 @@ def main(random_selection=False, headless=False, short_exec=False):
# Set the viewer camera appropriately
og.sim.viewer_camera.set_position_orientation(
position=np.array([-1.61340969, -1.79803028, 2.53167412]),
orientation=np.array([0.46291845, -0.12381886, -0.22679218, 0.84790371]),
position=th.Tensor([-1.61340969, -1.79803028, 2.53167412]),
orientation=th.Tensor([0.46291845, -0.12381886, -0.22679218, 0.84790371]),
)
# If we're using a projection volume, we manually add in the required metalink required in order to use the volume
@ -163,12 +163,12 @@ def main(random_selection=False, headless=False, short_exec=False):
modifier._post_load()
env.scene.object_registry.add(modifier)
og.sim.post_import_object(modifier)
modifier.set_position(np.array([0, 0, 5.0]))
modifier.set_position(th.Tensor([0, 0, 5.0]))
# Play the simulator and take some environment steps to let the objects settle
og.sim.play()
for _ in range(25):
env.step(np.array([]))
env.step(th.Tensor([]))
# If we're removing particles, set the table's covered state to be True
if modifier_type == "particleRemover":
@ -176,7 +176,7 @@ def main(random_selection=False, headless=False, short_exec=False):
# Take a few steps to let particles settle
for _ in range(25):
env.step(np.array([]))
env.step(th.Tensor([]))
# Enable camera teleoperation for convenience
og.sim.enable_viewer_camera_teleoperation()
@ -193,21 +193,21 @@ def main(random_selection=False, headless=False, short_exec=False):
z = 1.22
modifier.keep_still()
modifier.set_position_orientation(
position=np.array([0, 0.3, z]),
orientation=np.array([0, 0, 0, 1.0]),
position=th.Tensor([0, 0.3, z]),
orientation=th.Tensor([0, 0, 0, 1.0]),
)
# Move object in square around table
deltas = [
[130, np.array([-0.01, 0, 0])],
[60, np.array([0, -0.01, 0])],
[130, np.array([0.01, 0, 0])],
[60, np.array([0, 0.01, 0])],
[130, th.Tensor([-0.01, 0, 0])],
[60, th.Tensor([0, -0.01, 0])],
[130, th.Tensor([0.01, 0, 0])],
[60, th.Tensor([0, 0.01, 0])],
]
for t, delta in deltas:
for i in range(t):
modifier.set_position(modifier.get_position() + delta)
env.step(np.array([]))
env.step(th.Tensor([]))
# Always shut down environment at the end
env.close()


@ -69,19 +69,19 @@ def main(random_selection=False, headless=False, short_exec=False):
# Set camera to ideal angle for viewing objects
og.sim.viewer_camera.set_position_orientation(
position=np.array([0.37860532, -0.65396566, 1.4067066]),
orientation=np.array([0.49909498, 0.15201752, 0.24857062, 0.81609284]),
position=th.Tensor([0.37860532, -0.65396566, 1.4067066]),
orientation=th.Tensor([0.49909498, 0.15201752, 0.24857062, 0.81609284]),
)
# Take a few steps to let the objects settle, and then turn on the sink
for _ in range(10):
env.step(np.array([])) # Empty action since no robots are in the scene
env.step(th.Tensor([])) # Empty action since no robots are in the scene
sink = env.scene.object_registry("name", "sink")
assert sink.states[object_states.ToggledOn].set_value(True)
# Take a step, and save the state
env.step(np.array([]))
env.step(th.Tensor([]))
initial_state = og.sim.dump_state()
# Main simulation loop.
@ -95,7 +95,7 @@ def main(random_selection=False, headless=False, short_exec=False):
steps = 0
while steps != max_steps:
steps += 1
env.step(np.array([]))
env.step(th.Tensor([]))
og.log.info("Max steps reached; resetting.")
# Reset to the initial state


@ -49,7 +49,7 @@ def main(random_selection=False, headless=False, short_exec=False):
name=f"plate{i}",
category="plate",
model="iawoof",
bounding_box=np.array([0.20, 0.20, 0.05]),
bounding_box=th.Tensor([0.20, 0.20, 0.05]),
)
for i in range(2)
]
@ -70,7 +70,7 @@ def main(random_selection=False, headless=False, short_exec=False):
name=f"shelf",
category="shelf",
model="pkgbcp",
bounding_box=np.array([1.0, 0.4, 2.0]),
bounding_box=th.Tensor([1.0, 0.4, 2.0]),
)
box_cfgs = [
@ -79,7 +79,7 @@ def main(random_selection=False, headless=False, short_exec=False):
name=f"box{i}",
category="box_of_crackers",
model="cmdigf",
bounding_box=np.array([0.2, 0.05, 0.3]),
bounding_box=th.Tensor([0.2, 0.05, 0.3]),
)
for i in range(5)
]
@ -111,7 +111,7 @@ def main(random_selection=False, headless=False, short_exec=False):
max_steps = 100 if short_exec else -1
step = 0
while step != max_steps:
env.step(np.array([]))
env.step(th.Tensor([]))
step += 1
# Always close environment at the end
@ -127,10 +127,10 @@ def sample_microwave_plates_apples(env):
# Place the cabinet at a pre-determined location on the floor
og.log.info("Placing cabinet on the floor...")
cabinet.set_orientation([0, 0, 0, 1.0])
env.step(np.array([]))
env.step(th.Tensor([]))
offset = cabinet.get_position()[2] - cabinet.aabb_center[2]
cabinet.set_position(np.array([1.0, 0, cabinet.aabb_extent[2] / 2]) + offset)
env.step(np.array([]))
cabinet.set_position(th.Tensor([1.0, 0, cabinet.aabb_extent[2] / 2]) + offset)
env.step(th.Tensor([]))
# Set microwave on top of the cabinet, open it, and step 100 times
og.log.info("Placing microwave OnTop of the cabinet...")
@ -138,7 +138,7 @@ def sample_microwave_plates_apples(env):
assert microwave.states[object_states.Open].set_value(True)
og.log.info("Microwave placed.")
for _ in range(50):
env.step(np.array([]))
env.step(th.Tensor([]))
og.log.info("Placing plates")
n_apples_per_plate = int(len(apples) / len(plates))
@ -153,7 +153,7 @@ def sample_microwave_plates_apples(env):
og.log.info(f"Plate {i} placed.")
for _ in range(50):
env.step(np.array([]))
env.step(th.Tensor([]))
og.log.info(f"Placing {n_apples_per_plate} apples OnTop of the plate...")
for j in range(n_apples_per_plate):
@ -162,7 +162,7 @@ def sample_microwave_plates_apples(env):
assert apple.states[object_states.OnTop].set_value(plate, True)
og.log.info(f"Apple {apple_idx} placed.")
for _ in range(50):
env.step(np.array([]))
env.step(th.Tensor([]))
def sample_boxes_on_shelf(env):
@ -171,14 +171,14 @@ def sample_boxes_on_shelf(env):
# Place the shelf at a pre-determined location on the floor
og.log.info("Placing shelf on the floor...")
shelf.set_orientation([0, 0, 0, 1.0])
env.step(np.array([]))
env.step(th.Tensor([]))
offset = shelf.get_position()[2] - shelf.aabb_center[2]
shelf.set_position(np.array([-1.0, 0, shelf.aabb_extent[2] / 2]) + offset)
env.step(np.array([])) # One step is needed for the object to be fully initialized
shelf.set_position(th.Tensor([-1.0, 0, shelf.aabb_extent[2] / 2]) + offset)
env.step(th.Tensor([])) # One step is needed for the object to be fully initialized
og.log.info("Shelf placed.")
for _ in range(50):
env.step(np.array([]))
env.step(th.Tensor([]))
og.log.info("Placing boxes...")
for i, box in enumerate(boxes):
@ -186,7 +186,7 @@ def sample_boxes_on_shelf(env):
og.log.info(f"Box {i} placed.")
for _ in range(50):
env.step(np.array([]))
env.step(th.Tensor([]))
if __name__ == "__main__":


@ -77,17 +77,17 @@ def main(random_selection=False, headless=False, short_exec=False):
# Update the simulator's viewer camera's pose so it points towards the table
og.sim.viewer_camera.set_position_orientation(
position=np.array([0.544888, -0.412084, 1.11569]),
orientation=np.array([0.54757518, 0.27792802, 0.35721896, 0.70378409]),
position=th.Tensor([0.544888, -0.412084, 1.11569]),
orientation=th.Tensor([0.54757518, 0.27792802, 0.35721896, 0.70378409]),
)
# Let apple settle
for _ in range(50):
env.step(np.array([]))
env.step(th.Tensor([]))
knife.keep_still()
knife.set_position_orientation(
position=apple.get_position() + np.array([-0.15, 0.0, 0.2]),
position=apple.get_position() + th.Tensor([-0.15, 0.0, 0.2]),
orientation=T.euler2quat([-np.pi / 2, 0, 0]),
)
@ -95,7 +95,7 @@ def main(random_selection=False, headless=False, short_exec=False):
# Step simulation for a bit so that apple is sliced
for i in range(1000):
env.step(np.array([]))
env.step(th.Tensor([]))
input("Apple has been sliced! Press [ENTER] to terminate the demo.")


@ -132,13 +132,13 @@ def main(random_selection=False, headless=False, short_exec=False):
# Set camera to appropriate viewing pose
og.sim.viewer_camera.set_position_orientation(
position=np.array([0.46938863, -3.97887141, 1.64106008]),
orientation=np.array([0.63311689, 0.00127259, 0.00155577, 0.77405359]),
position=th.Tensor([0.46938863, -3.97887141, 1.64106008]),
orientation=th.Tensor([0.63311689, 0.00127259, 0.00155577, 0.77405359]),
)
# Let objects settle
for _ in range(25):
env.step(np.array([]))
env.step(th.Tensor([]))
# Turn on all scene objects
stove.states[object_states.ToggledOn].set_value(True)
@ -149,7 +149,7 @@ def main(random_selection=False, headless=False, short_exec=False):
for apple in apples:
apple.states[object_states.Temperature].set_value(-50)
apples[0].states[object_states.Inside].set_value(oven, True)
apples[1].set_position(stove.states[object_states.HeatSourceOrSink].link.get_position() + np.array([0, 0, 0.1]))
apples[1].set_position(stove.states[object_states.HeatSourceOrSink].link.get_position() + th.Tensor([0, 0, 0.1]))
apples[2].states[object_states.OnTop].set_value(tray, True)
apples[3].states[object_states.Inside].set_value(fridge, True)
apples[4].states[object_states.Inside].set_value(microwave, True)
@ -162,7 +162,7 @@ def main(random_selection=False, headless=False, short_exec=False):
print()
print(f"{'Apple location:':<20}", *locations)
while steps != max_steps:
env.step(np.array([]))
env.step(th.Tensor([]))
temps = [f"{apple.states[object_states.Temperature].get_value():>20.2f}" for apple in apples]
print(f"{'Apple temperature:':<20}", *temps, end="\r")
steps += 1


@ -52,8 +52,8 @@ def main(random_selection=False, headless=False, short_exec=False):
# Set camera to appropriate viewing pose
cam = og.sim.viewer_camera
cam.set_position_orientation(
position=np.array([-4.62785, -0.418575, 0.933943]),
orientation=np.array([0.52196595, -0.4231939, -0.46640436, 0.5752612]),
position=th.Tensor([-4.62785, -0.418575, 0.933943]),
orientation=th.Tensor([0.52196595, -0.4231939, -0.46640436, 0.5752612]),
)
# Add bounding boxes to camera sensor
@ -63,7 +63,7 @@ def main(random_selection=False, headless=False, short_exec=False):
# Take a few steps to let objects settle
for i in range(100):
env.step(np.array([]))
env.step(th.Tensor([]))
# Grab observations from viewer camera and write them to disk
obs, _ = cam.get_obs()


@ -29,7 +29,7 @@ def main(random_selection=False, headless=False, short_exec=False):
highlighted = False
max_steps = -1 if not short_exec else 1000
while i != max_steps:
env.step(np.array([]))
env.step(th.Tensor([]))
if i % 50 == 0:
highlighted = not highlighted
og.log.info(f"Toggling window highlight to: {highlighted}")


@ -61,13 +61,13 @@ def main(random_selection=False, headless=False, short_exec=False):
# Place the object so it rests on the floor
obj = env.scene.object_registry("name", "obj")
center_offset = obj.get_position() - obj.aabb_center + np.array([0, 0, obj.aabb_extent[2] / 2.0])
center_offset = obj.get_position() - obj.aabb_center + th.Tensor([0, 0, obj.aabb_extent[2] / 2.0])
obj.set_position(center_offset)
# Step through the environment
max_steps = 100 if short_exec else 10000
for i in range(max_steps):
env.step(np.array([]))
env.step(th.Tensor([]))
# Always close the environment at the end
env.close()


@ -94,8 +94,8 @@ def main(random_selection=False, headless=False, short_exec=False):
# Set camera to appropriate viewing pose
og.sim.viewer_camera.set_position_orientation(
position=np.array([-0.00913503, -1.95750906, 1.36407314]),
orientation=np.array([0.6350064, 0.0, 0.0, 0.77250687]),
position=th.Tensor([-0.00913503, -1.95750906, 1.36407314]),
orientation=th.Tensor([0.6350064, 0.0, 0.0, 0.77250687]),
)
# Grab the object references
@ -107,10 +107,10 @@ def main(random_selection=False, headless=False, short_exec=False):
og.sim.stop()
obj.scale = (np.ones(3) / extents).min()
og.sim.play()
env.step(np.array([]))
env.step(th.Tensor([]))
# Move the object so that its center is at [0, 0, 1]
center_offset = obj.get_position() - obj.aabb_center + np.array([0, 0, 1.0])
center_offset = obj.get_position() - obj.aabb_center + th.Tensor([0, 0, 1.0])
obj.set_position(center_offset)
# Allow the user to easily move the camera around
@ -122,7 +122,7 @@ def main(random_selection=False, headless=False, short_exec=False):
max_steps = 100 if short_exec else 10000
for i in range(max_steps):
z_angle = 2 * np.pi * (i % steps_per_rotate) / steps_per_rotate
quat = T.euler2quat(np.array([0, 0, z_angle]))
quat = T.euler2quat(th.Tensor([0, 0, z_angle]))
pos = T.quat2mat(quat) @ center_offset
if obj.n_dof > 0:
frac = (i % steps_per_joint) / steps_per_joint
@ -130,7 +130,7 @@ def main(random_selection=False, headless=False, short_exec=False):
obj.set_joint_positions(positions=j_frac * np.ones(obj.n_dof), normalized=True, drive=False)
obj.keep_still()
obj.set_position_orientation(position=pos, orientation=quat)
env.step(np.array([]))
env.step(th.Tensor([]))
# Shut down at the end
og.shutdown()


@ -44,13 +44,13 @@ def main(random_selection=False, headless=False, short_exec=False):
# Set camera to appropriate viewing pose
cam = og.sim.viewer_camera
cam.set_position_orientation(
position=np.array([-4.62785, -0.418575, 0.933943]),
orientation=np.array([0.52196595, -0.4231939, -0.46640436, 0.5752612]),
position=th.Tensor([-4.62785, -0.418575, 0.933943]),
orientation=th.Tensor([0.52196595, -0.4231939, -0.46640436, 0.5752612]),
)
def steps(n):
for _ in range(n):
env.step(np.array([]))
env.step(th.Tensor([]))
# Take a few steps to let objects settle
steps(25)


@ -59,14 +59,14 @@ def main(random_selection=False, headless=False, short_exec=False):
# Update the viewer camera's pose so that it points towards the robot
og.sim.viewer_camera.set_position_orientation(
position=np.array([4.32248, -5.74338, 6.85436]),
orientation=np.array([0.39592, 0.13485, 0.29286, 0.85982]),
position=th.Tensor([4.32248, -5.74338, 6.85436]),
orientation=th.Tensor([0.39592, 0.13485, 0.29286, 0.85982]),
)
robot = env.robots[0]
# Set robot base at the origin
robot.set_position_orientation(np.array([0, 0, 0]), np.array([0, 0, 0, 1]))
robot.set_position_orientation(th.Tensor([0, 0, 0]), th.Tensor([0, 0, 0, 1]))
# At least one simulation step while the simulator is playing must occur for the robot (or in general, any object)
# to be fully initialized after it is imported into the simulator
og.sim.play()
@ -179,12 +179,12 @@ def main(random_selection=False, headless=False, short_exec=False):
def input_to_xyz_delta_command(inp, delta=0.01):
mapping = {
lazy.carb.input.KeyboardInput.W: np.array([delta, 0, 0]),
lazy.carb.input.KeyboardInput.S: np.array([-delta, 0, 0]),
lazy.carb.input.KeyboardInput.DOWN: np.array([0, 0, -delta]),
lazy.carb.input.KeyboardInput.UP: np.array([0, 0, delta]),
lazy.carb.input.KeyboardInput.A: np.array([0, delta, 0]),
lazy.carb.input.KeyboardInput.D: np.array([0, -delta, 0]),
lazy.carb.input.KeyboardInput.W: th.Tensor([delta, 0, 0]),
lazy.carb.input.KeyboardInput.S: th.Tensor([-delta, 0, 0]),
lazy.carb.input.KeyboardInput.DOWN: th.Tensor([0, 0, -delta]),
lazy.carb.input.KeyboardInput.UP: th.Tensor([0, 0, delta]),
lazy.carb.input.KeyboardInput.A: th.Tensor([0, delta, 0]),
lazy.carb.input.KeyboardInput.D: th.Tensor([0, -delta, 0]),
}
return mapping.get(inp)

View File

@ -44,8 +44,8 @@ def main(random_selection=False, headless=False, short_exec=False):
if not headless:
# Set viewer in front facing robot
og.sim.viewer_camera.set_position_orientation(
position=np.array([2.69918369, -3.63686664, 4.57894564]),
orientation=np.array([0.39592411, 0.1348514, 0.29286304, 0.85982]),
position=th.Tensor([2.69918369, -3.63686664, 4.57894564]),
orientation=th.Tensor([0.39592411, 0.1348514, 0.29286304, 0.85982]),
)
og.sim.enable_viewer_camera_teleoperation()

View File

@ -92,8 +92,8 @@ def main(random_selection=False, headless=False, short_exec=False):
# Update the simulator's viewer camera's pose so it points towards the robot
og.sim.viewer_camera.set_position_orientation(
position=np.array([-2.39951, 2.26469, 2.66227]),
orientation=np.array([-0.23898481, 0.48475231, 0.75464013, -0.37204802]),
position=th.Tensor([-2.39951, 2.26469, 2.66227]),
orientation=th.Tensor([-0.23898481, 0.48475231, 0.75464013, -0.37204802]),
)
# Create teleop controller

View File

@ -125,8 +125,8 @@ def main(random_selection=False, headless=False, short_exec=False, quickstart=Fa
# Update the simulator's viewer camera's pose so it points towards the robot
og.sim.viewer_camera.set_position_orientation(
position=np.array([1.46949, -3.97358, 2.21529]),
orientation=np.array([0.56829048, 0.09569975, 0.13571846, 0.80589577]),
position=th.Tensor([1.46949, -3.97358, 2.21529]),
orientation=th.Tensor([0.56829048, 0.09569975, 0.13571846, 0.80589577]),
)
# Reset environment and robot

View File

@ -82,7 +82,7 @@ def main(random_selection=False, headless=False, short_exec=False):
KeyboardEventHandler.add_keyboard_callback(
key=lazy.carb.input.KeyboardInput.J,
callback_fn=lambda: cam_mover.record_trajectory_from_waypoints(
waypoints=np.array(waypoints),
waypoints=th.Tensor(waypoints),
per_step_distance=0.02,
fps=30,
steps_per_frame=1,

View File

@ -26,7 +26,7 @@ def main(random_selection=False, headless=False, short_exec=False):
trav_map_erosion = 2
trav_map = Image.open(os.path.join(get_og_scene_path(scene_model), "layout", "floor_trav_0.png"))
trav_map = np.array(trav_map.resize((trav_map_size, trav_map_size)))
trav_map = np.asarray(trav_map.resize((trav_map_size, trav_map_size)))
trav_map = th.tensor(cv2.erode(trav_map, np.ones((trav_map_erosion, trav_map_erosion))))
if not headless:
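
PIL images and OpenCV routines operate on NumPy arrays rather than tensors, so map-loading code like the hunk above has to round-trip through NumPy and convert to a tensor at the end. A minimal sketch of the pattern (the file name is hypothetical; assumes Pillow, OpenCV, NumPy, and PyTorch are available):

    import cv2
    import numpy as np
    import torch as th
    from PIL import Image

    img = Image.open("floor_trav_0.png")        # hypothetical traversability map
    arr = np.asarray(img.resize((200, 200)))    # PIL -> NumPy
    arr = cv2.erode(arr, np.ones((2, 2)))       # OpenCV requires NumPy input
    trav_map = th.tensor(arr)                   # NumPy -> tensor once image ops are done
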

View File

@ -36,8 +36,8 @@ def main(random_selection=False, headless=False, short_exec=False):
# Set the camera to a good angle
def set_camera_pose():
og.sim.viewer_camera.set_position_orientation(
position=np.array([-0.229375, -3.40576, 7.26143]),
orientation=np.array([0.27619733, -0.00230233, -0.00801152, 0.9610648]),
position=th.Tensor([-0.229375, -3.40576, 7.26143]),
orientation=th.Tensor([0.27619733, -0.00230233, -0.00801152, 0.9610648]),
)
set_camera_pose()

View File

@ -55,4 +55,4 @@ class BaseMap:
xy: 2D location in world reference frame (metric)
:return: 2D location in map reference frame (image)
"""
return np.flip((np.array(xy) / self.map_resolution + self.map_size / 2.0)).astype(int)
return np.flip((th.Tensor(xy) / self.map_resolution + self.map_size / 2.0)).astype(int)
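
A worked example of the transform above (a minimal sketch; the map_resolution and map_size values are made up): dividing by the resolution converts meters to pixels, adding map_size / 2 re-centers the origin, and np.flip swaps (x, y) into image (row, col) order.

    import numpy as np
    import torch as th

    map_resolution, map_size = 0.1, 200
    xy = th.tensor([1.5, -2.0])                  # world location in meters
    px = np.flip((xy / map_resolution + map_size / 2.0)).astype(int)
    print(px)                                    # [ 80 115], i.e. (row, col)
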

View File

@ -62,8 +62,8 @@ class SegmentationMap(BaseMap):
assert height == width, "room seg map is not a square"
assert img_ins.size == img_sem.size, "semantic and instance seg maps have different sizes"
map_size = int(height * self.map_default_resolution / self.map_resolution)
img_ins = np.array(img_ins.resize((map_size, map_size), Image.NEAREST))
img_sem = np.array(img_sem.resize((map_size, map_size), Image.NEAREST))
img_ins = th.Tensor(img_ins.resize((map_size, map_size), Image.NEAREST))
img_sem = th.Tensor(img_sem.resize((map_size, map_size), Image.NEAREST))
room_categories = os.path.join(gm.DATASET_PATH, "metadata", "room_categories.txt")
with open(room_categories, "r") as fp:
@ -122,14 +122,14 @@ class SegmentationMap(BaseMap):
return None, None
sem_id = self.room_sem_name_to_sem_id[room_type]
valid_idx = np.array(np.where(self.room_sem_map == sem_id))
valid_idx = th.Tensor(np.where(self.room_sem_map == sem_id))
random_point_map = valid_idx[:, np.random.randint(valid_idx.shape[1])]
x, y = self.map_to_world(random_point_map)
# assume only 1 floor
floor = 0
z = self.floor_heights[floor]
return floor, np.array([x, y, z])
return floor, th.Tensor([x, y, z])
def get_random_point_by_room_instance(self, room_instance):
"""
@ -148,14 +148,14 @@ class SegmentationMap(BaseMap):
return None, None
ins_id = self.room_ins_name_to_ins_id[room_instance]
valid_idx = np.array(np.where(self.room_ins_map == ins_id))
valid_idx = th.Tensor(np.where(self.room_ins_map == ins_id))
random_point_map = valid_idx[:, np.random.randint(valid_idx.shape[1])]
x, y = self.map_to_world(random_point_map)
# assume only 1 floor
floor = 0
z = self.floor_heights[floor]
return floor, np.array([x, y, z])
return floor, th.Tensor([x, y, z])
def get_room_type_by_point(self, xy):
"""

View File

@ -76,9 +76,9 @@ class TraversableMap(BaseMap):
for floor in range(len(self.floor_heights)):
if self.trav_map_with_objects:
# TODO: Shouldn't this be generated dynamically?
trav_map = np.array(Image.open(os.path.join(maps_path, "floor_trav_{}.png".format(floor))))
trav_map = th.Tensor(Image.open(os.path.join(maps_path, "floor_trav_{}.png".format(floor))))
else:
trav_map = np.array(Image.open(os.path.join(maps_path, "floor_trav_no_obj_{}.png".format(floor))))
trav_map = th.Tensor(Image.open(os.path.join(maps_path, "floor_trav_no_obj_{}.png".format(floor))))
# If we do not initialize the original size of the traversability map, we obtain it from the image
# Then, we compute the final map size as the factor of scaling (default_resolution/resolution) times the
@ -156,10 +156,10 @@ class TraversableMap(BaseMap):
else:
trav_space = np.where(trav_map == 255)
idx = np.random.randint(0, high=trav_space[0].shape[0])
xy_map = np.array([trav_space[0][idx], trav_space[1][idx]])
xy_map = th.Tensor([trav_space[0][idx], trav_space[1][idx]])
x, y = self.map_to_world(xy_map)
z = self.floor_heights[floor]
return floor, np.array([x, y, z])
return floor, th.Tensor([x, y, z])
def get_shortest_path(self, floor, source_world, target_world, entire_path=False, robot=None):
"""

View File

@ -111,7 +111,7 @@ def compute_adjacencies(obj, axes, max_distance, use_aabb_center=True):
# Check for self-hit -- if so, record the position and terminate early
should_continue = True
if hit.rigid_body in obj_link_paths:
ray_starts[idx] = np.array(hit.position)
ray_starts[idx] = th.Tensor(hit.position)
should_continue = False
return should_continue
@ -165,7 +165,7 @@ class VerticalAdjacency(AbsoluteObjectState):
def _get_value(self):
# Call the adjacency computation with th Z axis.
bodies_by_axis = compute_adjacencies(
self.obj, np.array([[0, 0, 1]]), m.MAX_DISTANCE_VERTICAL, use_aabb_center=False
self.obj, th.Tensor([[0, 0, 1]]), m.MAX_DISTANCE_VERTICAL, use_aabb_center=False
)
# Return the adjacencies from the only axis we passed in.

View File

@ -307,7 +307,7 @@ class AttachedTo(
if joint_type == JointType.JOINT_FIXED:
# FixedJoint: the parent link, the child link and the joint frame all align.
parent_local_quat = np.array([0.0, 0.0, 0.0, 1.0])
parent_local_quat = th.Tensor([0.0, 0.0, 0.0, 1.0])
else:
# SphericalJoint: the same except that the rotation of the parent link doesn't align with the joint frame.
# The child link and the joint frame still align.
@ -338,7 +338,7 @@ class AttachedTo(
joint_frame_in_parent_frame_pos=np.zeros(3),
joint_frame_in_parent_frame_quat=parent_local_quat,
joint_frame_in_child_frame_pos=np.zeros(3),
joint_frame_in_child_frame_quat=np.array([0.0, 0.0, 0.0, 1.0]),
joint_frame_in_child_frame_quat=th.Tensor([0.0, 0.0, 0.0, 1.0]),
**kwargs,
)
@ -436,7 +436,7 @@ class AttachedTo(
log.warning(f"parent reference is not updated after attachment")
def serialize(self, state):
return np.array([state["attached_obj_uuid"]], dtype=float)
return th.Tensor([state["attached_obj_uuid"]], dtype=float)
def deserialize(self, state):
return dict(attached_obj_uuid=int(state[0])), 1
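
serialize() flattens the state dict into a float vector and deserialize() reads it back positionally, returning how many entries it consumed. A minimal sketch of the round trip (the uuid value is made up):

    import torch as th

    state = {"attached_obj_uuid": 42}
    flat = th.tensor([state["attached_obj_uuid"]], dtype=th.float64)  # serialize
    restored, consumed = dict(attached_obj_uuid=int(flat[0])), 1      # deserialize
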

View File

@ -19,8 +19,8 @@ m.VISUAL_PARTICLE_OFFSET = 0.01 # Offset to visual particles' poses when checki
"""
ContainedParticlesData contains the following fields:
n_in_volume (int): number of particles in the container volume
positions (np.array): (N, 3) array representing the raw global particle positions
in_volume (np.array): (N,) boolean array representing whether each particle is inside the container volume or not
positions (th.Tensor): (N, 3) array representing the raw global particle positions
in_volume (th.Tensor): (N,) boolean array representing whether each particle is inside the container volume or not
"""
ContainedParticlesData = namedtuple("ContainedParticlesData", ("n_in_volume", "positions", "in_volume"))
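
Since ContainedParticlesData is a plain namedtuple, constructing and unpacking it is straightforward; a sketch with illustrative values only:

    import torch as th
    from collections import namedtuple

    ContainedParticlesData = namedtuple("ContainedParticlesData", ("n_in_volume", "positions", "in_volume"))

    positions = th.zeros((4, 3))                       # four particle positions
    in_volume = th.tensor([True, False, True, False])  # two of them inside the volume
    data = ContainedParticlesData(int(in_volume.sum()), positions, in_volume)
    assert data.n_in_volume == 2
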
@ -49,16 +49,16 @@ class ContainedParticles(RelativeObjectState, LinkBasedStateMixin):
Returns:
ContainedParticlesData: namedtuple with the following keys:
- n_in_volume (int): Number of @system's particles inside this object's container volume
- positions (np.array): (N, 3) Particle positions of all @system's particles
- in_volume (np.array): (N,) boolean array, True if the corresponding particle is inside this
- positions (th.Tensor): (N, 3) Particle positions of all @system's particles
- in_volume (th.Tensor): (N,) boolean array, True if the corresponding particle is inside this
object's container volume, else False
"""
# Value is false by default
n_particles_in_volume, raw_positions, checked_positions, particles_in_volume = (
0,
np.array([]),
np.array([]),
np.array([]),
th.Tensor([]),
th.Tensor([]),
th.Tensor([]),
)
# Only run additional computations if there are any particles

View File

@ -60,7 +60,7 @@ class FoldedLevel(AbsoluteObjectState, ClothStateMixin):
normals = cloth.compute_face_normals(face_ids=cloth.keyface_idx)
# projection onto the z-axis
proj = np.abs(np.dot(normals, np.array([0.0, 0.0, 1.0])))
proj = np.abs(np.dot(normals, th.Tensor([0.0, 0.0, 1.0])))
percentage = np.mean(proj > np.cos(m.NORMAL_Z_ANGLE_DIFF))
return percentage

View File

@ -225,7 +225,7 @@ class HeatSourceOrSink(AbsoluteObjectState, LinkBasedStateMixin, UpdateStateMixi
og.sim.psqi.overlap_box(
halfExtent=half_extent,
pos=aabb_center,
rot=np.array([0, 0, 0, 1.0]),
rot=th.Tensor([0, 0, 0, 1.0]),
reportFn=overlap_callback,
)

View File

@ -25,7 +25,7 @@ class MaxTemperature(TensorizedValueState):
super().global_initialize()
# Initialize other global variables
cls.TEMPERATURE_IDXS = np.array([], dtype=int)
cls.TEMPERATURE_IDXS = th.tensor([], dtype=th.int64)
# Add global callback to Temperature state so that temperature idxs will be updated
def _update_temperature_idxs(obj):

View File

@ -31,7 +31,7 @@ class NextTo(KinematicsMixin, RelativeObjectState, BooleanStateMixin):
glb = max(objA_lower[dim], objB_lower[dim])
lub = min(objA_upper[dim], objB_upper[dim])
distance_vec.append(max(0, glb - lub))
distance = np.linalg.norm(np.array(distance_vec))
distance = np.linalg.norm(th.Tensor(distance_vec))
objA_dims = objA_upper - objA_lower
objB_dims = objB_upper - objB_lower
avg_aabb_length = np.mean(objA_dims + objB_dims)

View File

@ -62,7 +62,7 @@ class Overlaid(KinematicsMixin, RelativeObjectState, BooleanStateMixin):
# Compute the base aligned bounding box of the rigid object.
bbox_center, bbox_orn, bbox_extent, _ = other.get_base_aligned_bbox(xy_aligned=True)
vertices_local = np.array(list(itertools.product((1, -1), repeat=3))) * (bbox_extent / 2)
vertices_local = th.Tensor(list(itertools.product((1, -1), repeat=3))) * (bbox_extent / 2)
vertices = trimesh.transformations.transform_points(vertices_local, T.pose2mat((bbox_center, bbox_orn)))
rigid_hull = ConvexHull(vertices[:, :2])

View File

@ -374,7 +374,7 @@ class ParticleModifier(IntrinsicObjectState, LinkBasedStateMixin, UpdateStateMix
if self._projection_mesh_params is None:
self._projection_mesh_params = {
"type": mesh_type,
"extents": np.array(pre_existing_mesh.GetAttribute("xformOp:scale").Get()),
"extents": th.Tensor(pre_existing_mesh.GetAttribute("xformOp:scale").Get()),
}
# Otherwise, make sure we don't have a mismatch between the pre-existing shape type and the
# desired type since we can't delete the original mesh
@ -403,7 +403,7 @@ class ParticleModifier(IntrinsicObjectState, LinkBasedStateMixin, UpdateStateMix
), f"Projection mesh should have shape-based attribute {shape_attr} == {default_val}! Got: {val}"
# Set the scale based on projection mesh params
self.projection_mesh.scale = np.array(self._projection_mesh_params["extents"])
self.projection_mesh.scale = th.Tensor(self._projection_mesh_params["extents"])
# Make sure the object updates its meshes, and assert that there's only a single visual mesh
self.link.update_meshes()
@ -420,7 +420,7 @@ class ParticleModifier(IntrinsicObjectState, LinkBasedStateMixin, UpdateStateMix
)
self.projection_mesh.set_local_pose(
position=np.array([0, 0, -z_offset]),
position=th.Tensor([0, 0, -z_offset]),
orientation=T.euler2quat([0, 0, 0]),
)
@ -457,7 +457,7 @@ class ParticleModifier(IntrinsicObjectState, LinkBasedStateMixin, UpdateStateMix
og.sim.psqi.overlap_box(
halfExtent=(aabb[1] - aabb[0]) / 2.0 + m.PARTICLE_MODIFIER_ADJACENCY_AREA_MARGIN,
pos=(aabb[1] + aabb[0]) / 2.0,
rot=np.array([0, 0, 0, 1.0]),
rot=th.Tensor([0, 0, 0, 1.0]),
reportFn=overlap_callback,
)
return valid_hit
@ -514,8 +514,8 @@ class ParticleModifier(IntrinsicObjectState, LinkBasedStateMixin, UpdateStateMix
cond = (
lambda obj: (
np.dot(
T.quat2mat(obj.states[self.__class__].link.get_orientation()) @ np.array([0, 0, 1]),
np.array([0, 0, 1]),
T.quat2mat(obj.states[self.__class__].link.get_orientation()) @ th.Tensor([0, 0, 1]),
th.Tensor([0, 0, 1]),
)
> 0
)
@ -751,7 +751,7 @@ class ParticleModifier(IntrinsicObjectState, LinkBasedStateMixin, UpdateStateMix
self._current_step = state["current_step"]
def serialize(self, state):
return np.array([state["current_step"]], dtype=float)
return th.Tensor([state["current_step"]], dtype=float)
def deserialize(self, state):
current_step = int(state[0])
@ -898,7 +898,7 @@ class ParticleRemover(ParticleModifier):
inbound_idxs = (
self._check_in_mesh(system.get_particles_position_orientation()[0]).nonzero()[0]
if self.obj.prim_type == PrimType.CLOTH or self.method == ParticleModifyMethod.PROJECTION
else np.array(list(self.obj.states[ContactParticles].get_value(system, self.link)))
else th.Tensor(list(self.obj.states[ContactParticles].get_value(system, self.link)))
)
modification_limit = self.physical_particle_modification_limit
@ -1093,7 +1093,7 @@ class ParticleApplier(ParticleModifier):
# metalink, and (b) zero relative orientation between the metalink and the projection mesh
local_pos, local_quat = self.projection_mesh.get_local_pose()
assert np.all(
np.isclose(local_pos + np.array([0, 0, height / 2.0]), 0.0)
np.isclose(local_pos + th.Tensor([0, 0, height / 2.0]), 0.0)
), "Projection mesh tip should align with metalink position!"
assert np.all(
np.isclose(T.quat2euler(local_quat), 0.0)
@ -1147,7 +1147,7 @@ class ParticleApplier(ParticleModifier):
# We now pre-compute local particle positions that are within the projection mesh used to infer spawn pos
# We sample over the entire object AABB, assuming most will be filtered out
sampling_distance = 2 * system.particle_radius
extent = np.array(self._projection_mesh_params["extents"])
extent = th.Tensor(self._projection_mesh_params["extents"])
h = extent[2]
low, high = self.obj.aabb
n_particles_per_axis = ((high - low) / sampling_distance).astype(int)
@ -1303,9 +1303,9 @@ class ParticleApplier(ParticleModifier):
# Generate particles for this group
system.generate_group_particles(
group=group,
positions=np.array(particle_info["positions"]),
orientations=np.array(particle_info["orientations"]),
scales=np.array(particles_info[group]["scales"]),
positions=th.Tensor(particle_info["positions"]),
orientations=th.Tensor(particle_info["orientations"]),
scales=th.Tensor(particles_info[group]["scales"]),
link_prim_paths=particle_info["link_prim_paths"],
)
# Update our particle count
@ -1322,11 +1322,11 @@ class ParticleApplier(ParticleModifier):
velocities = (
None
if self._initial_speed == 0
else -self._initial_speed * np.array([hit[1] for hit in hits[:n_particles]])
else -self._initial_speed * th.Tensor([hit[1] for hit in hits[:n_particles]])
)
system.generate_particles(
scene=self.obj.scene,
positions=np.array([hit[0] for hit in hits[:n_particles]]),
positions=th.Tensor([hit[0] for hit in hits[:n_particles]]),
velocities=velocities,
)
# Update our particle count
@ -1390,7 +1390,7 @@ class ParticleApplier(ParticleModifier):
n_samples = self._get_max_particles_limit_per_step(system=system)
r, h = self._projection_mesh_params["extents"][0] / 2, self._projection_mesh_params["extents"][2]
sampled_r_theta = np.random.rand(n_samples, 2)
sampled_r_theta = sampled_r_theta * np.array([r, np.pi * 2]).reshape(1, 2)
sampled_r_theta = sampled_r_theta * th.Tensor([r, np.pi * 2]).reshape(1, 2)
# Get start, end points in local link frame, start points to end points along the -z direction
end_points = np.stack(
[
@ -1407,7 +1407,7 @@ class ParticleApplier(ParticleModifier):
elif projection_type == "Cylinder":
# All start points are the parallel point for their corresponding end point
# i.e.: (x, y, 0)
start_points = end_points + np.array([0, 0, h]).reshape(1, 3)
start_points = end_points + th.Tensor([0, 0, h]).reshape(1, 3)
else:
# Other types not supported
raise ValueError(f"Unsupported projection mesh type: {projection_type}!")

View File

@ -15,7 +15,7 @@ class Pose(AbsoluteObjectState):
def _get_value(self):
pos = self.obj.get_position()
orn = self.obj.get_orientation()
return np.array(pos), np.array(orn)
return th.Tensor(pos), th.Tensor(orn)
def _has_changed(self, get_value_args, value, info):
# Only changed if the squared distance between old position and current position has

View File

@ -32,7 +32,7 @@ class IsGrasping(RelativeObjectState, BooleanStateMixin, RobotStateMixin):
# robot_pos = robot.get_position()
# object_pos = self.obj.get_position()
# return np.linalg.norm(object_pos - np.array(robot_pos)) < _IN_REACH_DISTANCE_THRESHOLD
# return np.linalg.norm(object_pos - th.Tensor(robot_pos)) < _IN_REACH_DISTANCE_THRESHOLD
# class InFOVOfRobot(AbsoluteObjectState, BooleanStateMixin):

View File

@ -69,7 +69,7 @@ class ModifiedParticles(RelativeObjectState):
}
def serialize(self, state):
state_flat = np.array([state["n_systems"]], dtype=float)
state_flat = th.Tensor([state["n_systems"]], dtype=float)
if state["n_systems"] > 0:
system_names = tuple(state.keys())[1:]
state_flat = np.concatenate(
@ -183,7 +183,7 @@ class Saturated(RelativeObjectState, BooleanStateMixin):
# We want diffuse_tint to sum to 2.5 to result in the final RGB to sum to 1.5 on average
# This is because an average RGB color sum to 1.5 (i.e. [0.5, 0.5, 0.5])
# (0.5 [original avg RGB per channel] + 0.1 [albedo_add]) * 2.5 = 1.5
diffuse_tint = np.array([0.5, 0.5, 0.5]) + avg_color / np.sum(avg_color)
diffuse_tint = th.Tensor([0.5, 0.5, 0.5]) + avg_color / np.sum(avg_color)
diffuse_tint = diffuse_tint.tolist()
return albedo_add, diffuse_tint
@ -227,7 +227,7 @@ class Saturated(RelativeObjectState, BooleanStateMixin):
self._limits[k] = v
def serialize(self, state):
state_flat = np.array([state["n_systems"], state["default_limit"]], dtype=float)
state_flat = th.Tensor([state["n_systems"], state["default_limit"]], dtype=float)
if state["n_systems"] > 0:
system_names = tuple(state.keys())[2:]
state_flat = np.concatenate(

View File

@ -39,8 +39,8 @@ class SlicerActive(TensorizedValueState, BooleanStateMixin):
# Initialize other global variables
cls.STEPS_TO_WAIT = max(1, int(np.ceil(m.REACTIVATION_DELAY / og.sim.get_rendering_dt())))
cls.DELAY_COUNTER = np.array([], dtype=int)
cls.PREVIOUSLY_TOUCHING = np.array([], dtype=bool)
cls.DELAY_COUNTER = th.tensor([], dtype=th.int64)
cls.PREVIOUSLY_TOUCHING = th.tensor([], dtype=th.bool)
cls.SLICER_LINK_PATHS = []
@classmethod

View File

@ -28,7 +28,7 @@ class TensorizedValueState(AbsoluteObjectState, GlobalUpdateStateMixin):
super().global_initialize()
# Initialize the global variables
cls.VALUES = np.array([], dtype=cls.value_type).reshape(0, *cls.value_shape)
cls.VALUES = th.tensor([], dtype=cls.value_type).reshape(0, *cls.value_shape)
cls.OBJ_IDXS = dict()
cls.CALLBACKS_ON_REMOVE = dict()
@ -50,10 +50,10 @@ class TensorizedValueState(AbsoluteObjectState, GlobalUpdateStateMixin):
Updates all internally tracked @values for this object state. Should be implemented by subclass.
Args:
values (np.array): Tensorized value array
values (th.Tensor): Tensorized value array
Returns:
np.array: Updated tensorized value array
th.Tensor: Updated tensorized value array
"""
raise NotImplementedError
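
The VALUES buffer above holds one row per tracked object, with OBJ_IDXS mapping each object to its row so per-object reads and writes become tensor indexing. A standalone sketch of that layout (the names mirror the class attributes, but this is not the class implementation):

    import torch as th

    VALUES = th.zeros((0, 1))       # empty (N, *value_shape) buffer
    OBJ_IDXS = {}

    def track(name):
        global VALUES
        OBJ_IDXS[name] = VALUES.shape[0]
        VALUES = th.cat([VALUES, th.zeros((1, 1))], dim=0)  # append a row

    track("stove")
    VALUES[OBJ_IDXS["stove"]] = 350.0   # tensorized per-object update
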
@ -180,7 +180,7 @@ class TensorizedValueState(AbsoluteObjectState, GlobalUpdateStateMixin):
val = (
state[self.value_name]
if isinstance(state[self.value_name], np.ndarray)
else np.array([state[self.value_name]])
else th.Tensor([state[self.value_name]])
)
return val.flatten().astype(float)

View File

@ -88,7 +88,7 @@ class ToggledOn(AbsoluteObjectState, BooleanStateMixin, LinkBasedStateMixin, Upd
self.value = new_value
# Choose which color to apply to the toggle marker
self.visual_marker.color = np.array([0, 1.0, 0]) if self.value else np.array([1.0, 0, 0])
self.visual_marker.color = th.Tensor([0, 1.0, 0]) if self.value else th.Tensor([1.0, 0, 0])
return True
@ -114,7 +114,7 @@ class ToggledOn(AbsoluteObjectState, BooleanStateMixin, LinkBasedStateMixin, Upd
else:
# Infer radius from mesh if not specified as an input
lazy.omni.isaac.core.utils.bounds.recompute_extents(prim=pre_existing_mesh)
self.scale = np.array(pre_existing_mesh.GetAttribute("xformOp:scale").Get())
self.scale = th.Tensor(pre_existing_mesh.GetAttribute("xformOp:scale").Get())
# Create the visual geom instance referencing the generated mesh prim
relative_prim_path = absolute_prim_path_to_scene_relative(self.obj.scene, mesh_prim_path)
@ -190,7 +190,7 @@ class ToggledOn(AbsoluteObjectState, BooleanStateMixin, LinkBasedStateMixin, Upd
self.robot_can_toggle_steps = state["hand_in_marker_steps"]
def serialize(self, state):
return np.array([state["value"], state["hand_in_marker_steps"]], dtype=float)
return th.Tensor([state["value"], state["hand_in_marker_steps"]], dtype=float)
def deserialize(self, state):
return dict(value=bool(state[0]), hand_in_marker_steps=int(state[1])), 2

View File

@ -79,7 +79,7 @@ class ControllableObject(BaseObject):
# Store inputs
self._control_freq = control_freq
self._controller_config = controller_config
self._reset_joint_pos = None if reset_joint_pos is None else np.array(reset_joint_pos)
self._reset_joint_pos = None if reset_joint_pos is None else th.Tensor(reset_joint_pos)
# Make sure action type is valid, and also save
assert_valid_key(key=action_type, valid_keys={"discrete", "continuous"}, name="action type")
@ -301,8 +301,8 @@ class ControllableObject(BaseObject):
low, high = [], []
for controller in self._controllers.values():
limits = controller.command_input_limits
low.append(np.array([-np.inf] * controller.command_dim) if limits is None else limits[0])
high.append(np.array([np.inf] * controller.command_dim) if limits is None else limits[1])
low.append(th.Tensor([-np.inf] * controller.command_dim) if limits is None else limits[0])
high.append(th.Tensor([np.inf] * controller.command_dim) if limits is None else limits[1])
return gym.spaces.Box(shape=(self.action_dim,), low=np.concatenate(low), high=np.concatenate(high), dtype=float)
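
Each controller contributes its command limits to the composite action space, with unbounded controllers contributing +/-inf; the per-controller bounds are then concatenated into one gym Box. A sketch with two made-up controllers (one bounded 2-dim, one unbounded 3-dim):

    import gym
    import numpy as np
    import torch as th

    low = [th.tensor([-0.5, -0.5]), th.tensor([-np.inf] * 3)]
    high = [th.tensor([0.5, 0.5]), th.tensor([np.inf] * 3)]
    space = gym.spaces.Box(shape=(5,), low=np.concatenate(low), high=np.concatenate(high), dtype=float)
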
@ -320,7 +320,7 @@ class ControllableObject(BaseObject):
# If we're using discrete action space, we grab the specific action and use that to convert to control
if self._action_type == "discrete":
action = np.array(self.discrete_action_list[action])
action = th.Tensor(self.discrete_action_list[action])
# Check if the input action's length matches the action dimension
assert len(action) == self.action_dim, "Action must be dimension {}, got dim {} instead.".format(
@ -376,7 +376,7 @@ class ControllableObject(BaseObject):
# Compose controls
u_vec = np.zeros(self.n_dof)
# By default, the control type is None and the control value is 0 (np.zeros) - i.e. no control applied
u_type_vec = np.array([ControlType.NONE] * self.n_dof)
u_type_vec = th.Tensor([ControlType.NONE] * self.n_dof)
for group, ctrl in control.items():
idx = self._controllers[group].dof_idx
u_vec[idx] = ctrl["value"]
@ -507,15 +507,15 @@ class ControllableObject(BaseObject):
# set the targets for joints
if using_pos:
ControllableObjectViewAPI.set_joint_position_targets(
self.articulation_root_path, positions=np.array(pos_vec), indices=np.array(pos_idxs)
self.articulation_root_path, positions=th.tensor(pos_vec), indices=th.tensor(pos_idxs)
)
if using_vel:
ControllableObjectViewAPI.set_joint_velocity_targets(
self.articulation_root_path, velocities=np.array(vel_vec), indices=np.array(vel_idxs)
self.articulation_root_path, velocities=th.tensor(vel_vec), indices=th.tensor(vel_idxs)
)
if using_eff:
ControllableObjectViewAPI.set_joint_efforts(
self.articulation_root_path, efforts=np.array(eff_vec), indices=np.array(eff_idxs)
self.articulation_root_path, efforts=th.tensor(eff_vec), indices=th.tensor(eff_idxs)
)
def get_control_dict(self):

View File

@ -171,17 +171,17 @@ class DatasetObject(USDObject):
raise ValueError("No orientation probabilities set")
if len(self.orientations) == 0:
# Set default value
chosen_orientation = np.array([0, 0, 0, 1.0])
chosen_orientation = th.Tensor([0, 0, 0, 1.0])
else:
probabilities = [o["prob"] for o in self.orientations.values()]
probabilities = np.array(probabilities) / np.sum(probabilities)
chosen_orientation = np.array(
probabilities = th.Tensor(probabilities) / np.sum(probabilities)
chosen_orientation = th.Tensor(
np.random.choice(list(self.orientations.values()), p=probabilities)["rotation"]
)
# Randomize yaw from -pi to pi
rot_num = np.random.uniform(-1, 1)
rot_matrix = np.array(
rot_matrix = th.Tensor(
[
[math.cos(math.pi * rot_num), -math.sin(math.pi * rot_num), 0.0],
[math.sin(math.pi * rot_num), math.cos(math.pi * rot_num), 0.0],
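
The truncated block above builds the standard rotation about the z-axis: for yaw angle theta = pi * rot_num, the matrix is [[cos, -sin, 0], [sin, cos, 0], [0, 0, 1]]. A compact sketch with a fixed example angle:

    import math
    import torch as th

    theta = math.pi * 0.25   # example yaw of 45 degrees
    rot_matrix = th.tensor(
        [
            [math.cos(theta), -math.sin(theta), 0.0],
            [math.sin(theta), math.cos(theta), 0.0],
            [0.0, 0.0, 1.0],
        ]
    )
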
@ -228,10 +228,10 @@ class DatasetObject(USDObject):
scale = np.ones(3)
valid_idxes = self.native_bbox > 1e-4
scale[valid_idxes] = (
np.array(self._load_config["bounding_box"])[valid_idxes] / self.native_bbox[valid_idxes]
th.Tensor(self._load_config["bounding_box"])[valid_idxes] / self.native_bbox[valid_idxes]
)
else:
scale = np.ones(3) if self._load_config["scale"] is None else np.array(self._load_config["scale"])
scale = np.ones(3) if self._load_config["scale"] is None else th.Tensor(self._load_config["scale"])
# Assert that the scale does not have too small dimensions
assert np.all(scale > 1e-4), f"Scale of {self.name} is too small: {scale}"
@ -362,7 +362,7 @@ class DatasetObject(USDObject):
assert (
"ig:nativeBB" in self.property_names
), f"This dataset object '{self.name}' is expected to have native_bbox specified, but found none!"
return np.array(self.get_attribute(attr="ig:nativeBB"))
return th.Tensor(self.get_attribute(attr="ig:nativeBB"))
@property
def base_link_offset(self):
@ -372,7 +372,7 @@ class DatasetObject(USDObject):
Returns:
3-array: (x,y,z) base link offset if it exists
"""
return np.array(self.get_attribute(attr="ig:offsetBaseLink"))
return th.Tensor(self.get_attribute(attr="ig:offsetBaseLink"))
@property
def metadata(self):
@ -449,7 +449,7 @@ class DatasetObject(USDObject):
# Invert the child link relationship, and multiply the two rotations together to get the final rotation
local_ori = T.quat_multiply(quaternion1=T.quat_inverse(quat1), quaternion0=quat0)
jnt_frame_rot = T.quat2mat(local_ori)
scale_in_child_lf = np.absolute(jnt_frame_rot.T @ np.array(scale_in_parent_lf))
scale_in_child_lf = np.absolute(jnt_frame_rot.T @ th.Tensor(scale_in_parent_lf))
scales[child_name] = scale_in_child_lf
return scales

View File

@ -93,7 +93,7 @@ class BaseObject(EntityPrim, Registerable, metaclass=ABCMeta):
# Create load config from inputs
load_config = dict() if load_config is None else load_config
load_config["scale"] = np.array(scale) if isinstance(scale, Iterable) else scale
load_config["scale"] = th.Tensor(scale) if isinstance(scale, Iterable) else scale
load_config["visible"] = visible
load_config["visual_only"] = visual_only
load_config["kinematic_only"] = kinematic_only
@ -141,7 +141,7 @@ class BaseObject(EntityPrim, Registerable, metaclass=ABCMeta):
# merely set this to be a static collider, i.e.: kinematic-only
# The custom scaling / fixed joints requirement is needed because omniverse complains about scaling that
# occurs with respect to fixed joints, as omni will "snap" bodies together otherwise
scale = np.ones(3) if self._load_config["scale"] is None else np.array(self._load_config["scale"])
scale = np.ones(3) if self._load_config["scale"] is None else th.Tensor(self._load_config["scale"])
if (
self.n_joints == 0
and (np.all(np.isclose(scale, 1.0, atol=1e-3)) or self.n_fixed_joints == 0)

View File

@ -85,7 +85,7 @@ class PrimitiveObject(StatefulObject):
"""
# Compose load config and add rgba values
load_config = dict() if load_config is None else load_config
load_config["color"] = np.array(rgba[:3])
load_config["color"] = th.Tensor(rgba[:3])
load_config["opacity"] = rgba[3]
load_config["radius"] = radius
load_config["height"] = height
@ -211,17 +211,17 @@ class PrimitiveObject(StatefulObject):
"""
assert_valid_key(key=self._primitive_type, valid_keys=VALID_RADIUS_OBJECTS, name="primitive object with radius")
# Update the extents variable
original_extent = np.array(self._extents)
original_extent = th.Tensor(self._extents)
self._extents = (
np.ones(3) * radius * 2.0
if self._primitive_type == "Sphere"
else np.array([radius * 2.0, radius * 2.0, self._extents[2]])
else th.Tensor([radius * 2.0, radius * 2.0, self._extents[2]])
)
attr_pairs = []
for geom in self._vis_geom, self._col_geom:
if geom is not None:
for attr in (geom.GetPointsAttr(), geom.GetNormalsAttr()):
vals = np.array(attr.Get()).astype(np.float64)
vals = th.tensor(np.asarray(attr.Get()), dtype=th.float64)
attr_pairs.append([attr, vals])
geom.GetExtentAttr().Set(
lazy.pxr.Vt.Vec3fArray(
@ -266,7 +266,7 @@ class PrimitiveObject(StatefulObject):
"""
assert_valid_key(key=self._primitive_type, valid_keys=VALID_HEIGHT_OBJECTS, name="primitive object with height")
# Update the extents variable
original_extent = np.array(self._extents)
original_extent = th.Tensor(self._extents)
self._extents[2] = height
# Calculate the correct scaling factor and scale the points and normals appropriately
@ -274,7 +274,7 @@ class PrimitiveObject(StatefulObject):
for geom in self._vis_geom, self._col_geom:
if geom is not None:
for attr in (geom.GetPointsAttr(), geom.GetNormalsAttr()):
vals = np.array(attr.Get()).astype(np.float64)
vals = th.tensor(np.asarray(attr.Get()), dtype=th.float64)
# Scale the z axis by the scaling factor
vals[:, 2] = vals[:, 2] * scaling_factor
attr.Set(lazy.pxr.Vt.Vec3fArray([lazy.pxr.Gf.Vec3f(*v.tolist()) for v in vals]))
@ -310,7 +310,7 @@ class PrimitiveObject(StatefulObject):
assert_valid_key(key=self._primitive_type, valid_keys=VALID_SIZE_OBJECTS, name="primitive object with size")
# Update the extents variable
original_extent = np.array(self._extents)
original_extent = th.Tensor(self._extents)
self._extents = np.ones(3) * size
# Calculate the correct scaling factor and scale the points and normals appropriately
@ -319,7 +319,7 @@ class PrimitiveObject(StatefulObject):
if geom is not None:
for attr in (geom.GetPointsAttr(), geom.GetNormalsAttr()):
# Scale all three axes by the scaling factor
vals = np.array(attr.Get()).astype(np.float64) * scaling_factor
vals = th.tensor(np.asarray(attr.Get()), dtype=th.float64) * scaling_factor
attr.Set(lazy.pxr.Vt.Vec3fArray([lazy.pxr.Gf.Vec3f(*v.tolist()) for v in vals]))
geom.GetExtentAttr().Set(
lazy.pxr.Vt.Vec3fArray(
@ -353,7 +353,7 @@ class PrimitiveObject(StatefulObject):
def _load_state(self, state):
super()._load_state(state=state)
# self._extents = np.array(state["extents"])
# self._extents = th.Tensor(state["extents"])
if self._primitive_type in VALID_RADIUS_OBJECTS:
self.radius = state["radius"]
if self._primitive_type in VALID_HEIGHT_OBJECTS:
@ -376,6 +376,6 @@ class PrimitiveObject(StatefulObject):
return np.concatenate(
[
state_flat,
np.array([state["radius"], state["height"], state["size"]]),
th.Tensor([state["radius"], state["height"], state["size"]]),
]
).astype(float)

View File

@ -401,7 +401,7 @@ class StatefulObject(BaseObject):
colormap.CreateAttribute("rgbaPoints", lazy.pxr.Sdf.ValueTypeNames.Float4Array, False).Set(rgbaPoints)
elif emitter_type == EmitterType.STEAM:
emitter.CreateAttribute("halfSize", lazy.pxr.Sdf.ValueTypeNames.Float3, False).Set(
tuple(bbox_extent_local * np.array(m.STEAM_EMITTER_SIZE_RATIO) / 2.0)
tuple(bbox_extent_local * th.Tensor(m.STEAM_EMITTER_SIZE_RATIO) / 2.0)
)
simulate.CreateAttribute("densityCellSize", lazy.pxr.Sdf.ValueTypeNames.Float, False).Set(
bbox_extent_local[2] * m.STEAM_EMITTER_DENSITY_CELL_RATIO
@ -560,7 +560,7 @@ class StatefulObject(BaseObject):
]
)
if len(state["non_kin"]) > 0
else np.array([])
else th.Tensor([])
)
# Combine these two arrays

View File

@ -130,7 +130,7 @@ class ClothPrim(GeomPrim):
self._prim.GetAttribute("primvars:isVolume").Set(False)
# Store the default position of the points in the local frame
self._default_positions = np.array(self.get_attribute(attr="points"))
self._default_positions = th.Tensor(self.get_attribute(attr="points"))
@property
def visual_aabb(self):
@ -172,7 +172,7 @@ class ClothPrim(GeomPrim):
idxs (n-array or None): If set, will only calculate the requested indexed particle state
Returns:
np.array: (N, 3) numpy array, where each of the N particles' positions are expressed in (x,y,z)
th.Tensor: (N, 3) tensor, where each of the N particles' positions are expressed in (x,y,z)
cartesian coordinates relative to the world frame
"""
t, r = self.get_position_orientation()
@ -180,7 +180,7 @@ class ClothPrim(GeomPrim):
s = self.scale
# Don't copy to save compute, since we won't be returning a reference to the underlying object anyways
p_local = np.array(self.get_attribute(attr="points"), copy=False)
p_local = th.as_tensor(np.asarray(self.get_attribute(attr="points")))
p_local = p_local[idxs] if idxs is not None else p_local
p_world = (r @ (p_local * s).T).T + t
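
compute_particle_positions applies the prim's world transform to the stored local points: scale first, then rotate, then translate, i.e. p_world = R (p_local * s) + t applied row-wise. The same math in a standalone sketch with made-up transform values:

    import torch as th

    r = th.eye(3, dtype=th.float64)                      # world rotation matrix
    t = th.tensor([0.0, 0.0, 1.0], dtype=th.float64)     # world translation
    s = th.tensor([1.0, 1.0, 2.0], dtype=th.float64)     # per-axis scale
    p_local = th.rand(5, 3, dtype=th.float64)            # five local-frame particles

    p_world = (r @ (p_local * s).T).T + t                # matches the line above
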
@ -207,7 +207,7 @@ class ClothPrim(GeomPrim):
# Fill the idxs if requested
if idxs is not None:
p_local_old = np.array(self.get_attribute(attr="points"))
p_local_old = th.Tensor(self.get_attribute(attr="points"))
p_local_old[idxs] = p_local
p_local = p_local_old
@ -235,10 +235,10 @@ class ClothPrim(GeomPrim):
Grabs particle indexes defining each of the faces for this cloth prim
Returns:
np.array: (N, 3) numpy array, where each of the N faces are defined by the 3 particle indices
th.Tensor: (N, 3) tensor, where each of the N faces is defined by the 3 particle indices
corresponding to that face's vertices
"""
return np.array(self.get_attribute("faceVertexIndices")).reshape(-1, 3)
return th.Tensor(self.get_attribute("faceVertexIndices")).reshape(-1, 3)
@property
def keyfaces(self):
@ -247,7 +247,7 @@ class ClothPrim(GeomPrim):
Total number of keyfaces is m.N_CLOTH_KEYFACES
Returns:
np.array: (N, 3) numpy array, where each of the N keyfaces are defined by the 3 particle indices
th.Tensor: (N, 3) tensor, where each of the N keyfaces is defined by the 3 particle indices
corresponding to that face's vertices
"""
return self.faces[self._keyface_idx]
@ -259,7 +259,7 @@ class ClothPrim(GeomPrim):
Total number of keypoints is m.N_CLOTH_KEYPOINTS
Returns:
np.array: (N, 3) numpy array, where each of the N keypoint particles' positions are expressed in (x,y,z)
th.Tensor: (N, 3) tensor, where each of the N keypoint particles' positions are expressed in (x,y,z)
cartesian coordinates relative to the world frame
"""
return self.compute_particle_positions(idxs=self._keypoint_idx)
@ -270,7 +270,7 @@ class ClothPrim(GeomPrim):
Grabs the individual particle that was pre-computed to be the closest to the centroid of this cloth prim.
Returns:
np.array: centroid particle's (x,y,z) cartesian coordinates relative to the world frame
th.Tensor: centroid particle's (x,y,z) cartesian coordinates relative to the world frame
"""
return self.compute_particle_positions(idxs=[self._centroid_idx])[0]
@ -280,11 +280,11 @@ class ClothPrim(GeomPrim):
Grabs individual particle velocities for this cloth prim
Returns:
np.array: (N, 3) numpy array, where each of the N particles' velocities are expressed in (x,y,z)
th.Tensor: (N, 3) tensor, where each of the N particles' velocities are expressed in (x,y,z)
cartesian coordinates with respect to the world frame.
"""
# the velocities attribute is w.r.t the world frame already
return np.array(self.get_attribute(attr="velocities"))
return th.Tensor(self.get_attribute(attr="velocities"))
@particle_velocities.setter
def particle_velocities(self, vel):
@ -292,7 +292,7 @@ class ClothPrim(GeomPrim):
Set the particle velocities of this cloth
Args:
np.array: (N, 3) numpy array, where each of the N particles' velocities are expressed in (x,y,z)
th.Tensor: (N, 3) tensor, where each of the N particles' velocities are expressed in (x,y,z)
cartesian coordinates with respect to the world frame
"""
assert (
@ -311,7 +311,7 @@ class ClothPrim(GeomPrim):
If None, all faces will be used
Returns:
np.array: (N, 3) numpy array, where each of the N faces' normals are expressed in (x,y,z)
th.Tensor: (N, 3) tensor, where each of the N faces' normals are expressed in (x,y,z)
cartesian coordinates with respect to the world frame.
"""
faces = self.faces if face_ids is None else self.faces[face_ids]
@ -326,7 +326,7 @@ class ClothPrim(GeomPrim):
positions (n-array): (N, 3, 3) array specifying the per-face particle positions
Returns:
np.array: (N, 3) numpy array, where each of the N faces' normals are expressed in (x,y,z)
th.Tensor: (N, 3) tensor, where each of the N faces' normals are expressed in (x,y,z)
cartesian coordinates with respect to the world frame.
"""
# Shape [F, 3]
@ -419,7 +419,7 @@ class ClothPrim(GeomPrim):
Returns:
th.Tensor: current average linear velocity of the particles of the cloth prim. Shape (3,).
"""
return np.array(self._prim.GetAttribute("velocities").Get()).mean(axis=0)
return th.Tensor(self._prim.GetAttribute("velocities").Get()).mean(axis=0)
def get_angular_velocity(self):
"""
@ -555,13 +555,13 @@ class ClothPrim(GeomPrim):
# Make sure the loaded state is an array type, as it could have been accidentally cast into a list during
# JSON-serialization
self.particle_velocities = (
np.array(state["particle_velocities"])
th.Tensor(state["particle_velocities"])
if not isinstance(state["particle_velocities"], np.ndarray)
else state["particle_velocities"]
)
self.set_particle_positions(
positions=(
np.array(state["particle_positions"])
th.Tensor(state["particle_positions"])
if not isinstance(state["particle_positions"], np.ndarray)
else state["particle_positions"]
)

View File

@ -1042,7 +1042,7 @@ class EntityPrim(XFormPrim):
n-array: minimum values for this robot's joints. If joint does not have a range, returns -1000
for that joint
"""
return np.array([joint.lower_limit for joint in self._joints.values()])
return th.Tensor([joint.lower_limit for joint in self._joints.values()])
# TODO: These are cached, but they are not updated when the joint limit is changed
@cached_property
@ -1052,7 +1052,7 @@ class EntityPrim(XFormPrim):
n-array: maximum values for this robot's joints. If joint does not have a range, returns 1000
for that joint
"""
return np.array([joint.upper_limit for joint in self._joints.values()])
return th.Tensor([joint.upper_limit for joint in self._joints.values()])
@property
def joint_range(self):
@ -1068,7 +1068,7 @@ class EntityPrim(XFormPrim):
Returns:
n-array: maximum velocities for this robot's joints
"""
return np.array([joint.max_velocity for joint in self._joints.values()])
return th.Tensor([joint.max_velocity for joint in self._joints.values()])
@property
def max_joint_efforts(self):
@ -1076,7 +1076,7 @@ class EntityPrim(XFormPrim):
Returns:
n-array: maximum efforts for this robot's joints
"""
return np.array([joint.max_effort for joint in self._joints.values()])
return th.Tensor([joint.max_effort for joint in self._joints.values()])
@property
def joint_position_limits(self):
@ -1123,7 +1123,7 @@ class EntityPrim(XFormPrim):
Returns:
n-array: n-DOF length array specifying whether joint has a limit or not
"""
return np.array([j.has_limit for j in self._joints.values()])
return th.Tensor([j.has_limit for j in self._joints.values()])
@property
def disabled_collision_link_names(self):

View File

@ -78,7 +78,7 @@ class GeomPrim(XFormPrim):
return self.material.diffuse_color_constant
else:
color = self.get_attribute("primvars:displayColor")
return None if color is None else np.array(color)[0]
return None if color is None else th.Tensor(color)[0]
@color.setter
def color(self, rgb):
@ -91,7 +91,7 @@ class GeomPrim(XFormPrim):
if self.has_material():
self.material.diffuse_color_constant = rgb
else:
self.set_attribute("primvars:displayColor", np.array(rgb))
self.set_attribute("primvars:displayColor", th.Tensor(rgb))
@property
def opacity(self):
@ -103,7 +103,7 @@ class GeomPrim(XFormPrim):
return self.material.opacity_constant
else:
opacity = self.get_attribute("primvars:displayOpacity")
return None if opacity is None else np.array(opacity)[0]
return None if opacity is None else th.Tensor(opacity)[0]
@opacity.setter
def opacity(self, opacity):
@ -116,7 +116,7 @@ class GeomPrim(XFormPrim):
if self.has_material():
self.material.opacity_constant = opacity
else:
self.set_attribute("primvars:displayOpacity", np.array([opacity]))
self.set_attribute("primvars:displayOpacity", th.Tensor([opacity]))
@property
def points(self):
@ -129,10 +129,10 @@ class GeomPrim(XFormPrim):
mesh_type = mesh.GetPrimTypeInfo().GetTypeName()
if mesh_type == "Mesh":
# If the geom is a mesh we can directly return its points.
return np.array(self.prim.GetAttribute("points").Get())
return th.Tensor(self.prim.GetAttribute("points").Get())
else:
# Return the vertices of the trimesh
return np.array(mesh_prim_shape_to_trimesh_mesh(mesh).vertices)
return th.Tensor(mesh_prim_shape_to_trimesh_mesh(mesh).vertices)
@property
def points_in_parent_frame(self):

View File

@ -350,7 +350,7 @@ class JointPrim(BasePrim):
"""
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
self._articulation_view.set_max_velocities(np.array([[vel]]), joint_indices=self.dof_indices)
self._articulation_view.set_max_velocities(th.Tensor([[vel]]), joint_indices=self.dof_indices)
@property
def max_effort(self):
@ -376,7 +376,7 @@ class JointPrim(BasePrim):
"""
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
self._articulation_view.set_max_efforts(np.array([[effort]]), joint_indices=self.dof_indices)
self._articulation_view.set_max_efforts(th.Tensor([[effort]]), joint_indices=self.dof_indices)
@property
def stiffness(self):
@ -401,7 +401,7 @@ class JointPrim(BasePrim):
"""
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
self._articulation_view.set_gains(kps=np.array([[stiffness]]), joint_indices=self.dof_indices)
self._articulation_view.set_gains(kps=th.Tensor([[stiffness]]), joint_indices=self.dof_indices)
@property
def damping(self):
@ -426,7 +426,7 @@ class JointPrim(BasePrim):
"""
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
self._articulation_view.set_gains(kds=np.array([[damping]]), joint_indices=self.dof_indices)
self._articulation_view.set_gains(kds=th.Tensor([[damping]]), joint_indices=self.dof_indices)
@property
def friction(self):
@ -452,7 +452,7 @@ class JointPrim(BasePrim):
"""
self.set_attribute("physxJoint:jointFriction", friction)
if og.sim.is_playing():
self._articulation_view.set_friction_coefficients(np.array([[friction]]), joint_indices=self.dof_indices)
self._articulation_view.set_friction_coefficients(th.Tensor([[friction]]), joint_indices=self.dof_indices)
@property
def lower_limit(self):
@ -486,7 +486,7 @@ class JointPrim(BasePrim):
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
self._articulation_view.set_joint_limits(
np.array([[lower_limit, self.upper_limit]]), joint_indices=self.dof_indices
th.Tensor([[lower_limit, self.upper_limit]]), joint_indices=self.dof_indices
)
@property
@ -520,7 +520,7 @@ class JointPrim(BasePrim):
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
self._articulation_view.set_joint_limits(
np.array([[self.lower_limit, upper_limit]]), joint_indices=self.dof_indices
th.Tensor([[self.lower_limit, upper_limit]]), joint_indices=self.dof_indices
)
@property
@ -759,7 +759,7 @@ class JointPrim(BasePrim):
), "Trying to set joint position target, but control type is not position!"
# Standardize input
pos = np.array([pos]) if self._n_dof == 1 and not isinstance(pos, Iterable) else np.array(pos)
pos = th.Tensor([pos]) if self._n_dof == 1 and not isinstance(pos, Iterable) else th.Tensor(pos)
# Potentially de-normalize if the input is normalized
if normalized:
@ -795,7 +795,7 @@ class JointPrim(BasePrim):
), f"Trying to set joint velocity target for joint {self.name}, but control type is not velocity!"
# Standardize input
vel = np.array([vel]) if self._n_dof == 1 and not isinstance(vel, Iterable) else np.array(vel)
vel = th.Tensor([vel]) if self._n_dof == 1 and not isinstance(vel, Iterable) else th.Tensor(vel)
# Potentially de-normalize if the input is normalized
if normalized:
@ -823,7 +823,7 @@ class JointPrim(BasePrim):
assert self.articulated, "Can only set effort for articulated joints!"
# Standardize input
effort = np.array([effort]) if self._n_dof == 1 and not isinstance(effort, Iterable) else np.array(effort)
effort = th.Tensor([effort]) if self._n_dof == 1 and not isinstance(effort, Iterable) else th.Tensor(effort)
# Potentially de-normalize if the input is normalized
if normalized:
@ -842,8 +842,8 @@ class JointPrim(BasePrim):
self.set_effort(np.zeros(self.n_dof))
def _dump_state(self):
pos, vel, effort = self.get_state() if self.articulated else (np.array([]), np.array([]), np.array([]))
target_pos, target_vel = self.get_target() if self.articulated else (np.array([]), np.array([]))
pos, vel, effort = self.get_state() if self.articulated else (th.Tensor([]), th.Tensor([]), th.Tensor([]))
target_pos, target_vel = self.get_target() if self.articulated else (th.Tensor([]), th.Tensor([]))
return dict(
pos=pos,
vel=vel,

View File

@ -272,7 +272,7 @@ class MaterialPrim(BasePrim):
Returns:
3-array: this material's applied (R,G,B) color
"""
return np.array(self.get_input(inp="diffuse_color_constant"))
return th.Tensor(self.get_input(inp="diffuse_color_constant"))
@diffuse_color_constant.setter
def diffuse_color_constant(self, color):
@ -280,7 +280,7 @@ class MaterialPrim(BasePrim):
Args:
color (3-array): this material's applied (R,G,B) color
"""
self.set_input(inp="diffuse_color_constant", val=lazy.pxr.Gf.Vec3f(*np.array(color, dtype=float)))
self.set_input(inp="diffuse_color_constant", val=lazy.pxr.Gf.Vec3f(*th.Tensor(color, dtype=float)))
@property
def diffuse_texture(self):
@ -352,7 +352,7 @@ class MaterialPrim(BasePrim):
Returns:
3-array: this material's applied (R,G,B) diffuse_tint
"""
return np.array(self.get_input(inp="diffuse_tint"))
return th.Tensor(self.get_input(inp="diffuse_tint"))
@diffuse_tint.setter
def diffuse_tint(self, color):
@ -360,7 +360,7 @@ class MaterialPrim(BasePrim):
Args:
color (3-array): this material's applied (R,G,B) diffuse_tint
"""
self.set_input(inp="diffuse_tint", val=lazy.pxr.Gf.Vec3f(*np.array(color, dtype=float)))
self.set_input(inp="diffuse_tint", val=lazy.pxr.Gf.Vec3f(*th.Tensor(color, dtype=float)))
@property
def reflection_roughness_constant(self):
@ -568,7 +568,7 @@ class MaterialPrim(BasePrim):
Returns:
3-array: this material's applied (R,G,B) emissive_color
"""
return np.array(self.get_input(inp="emissive_color"))
return th.Tensor(self.get_input(inp="emissive_color"))
@emissive_color.setter
def emissive_color(self, color):
@ -576,7 +576,7 @@ class MaterialPrim(BasePrim):
Args:
color (3-array): this material's applied emissive_color
"""
self.set_input(inp="emissive_color", val=lazy.pxr.Gf.Vec3f(*np.array(color, dtype=float)))
self.set_input(inp="emissive_color", val=lazy.pxr.Gf.Vec3f(*th.Tensor(color, dtype=float)))
@property
def emissive_color_texture(self):
@ -882,7 +882,7 @@ class MaterialPrim(BasePrim):
Returns:
2-array: this material's applied texture_translate
"""
return np.array(self.get_input(inp="texture_translate"))
return th.Tensor(self.get_input(inp="texture_translate"))
@texture_translate.setter
def texture_translate(self, translate):
@ -890,7 +890,7 @@ class MaterialPrim(BasePrim):
Args:
translate (2-array): this material's applied (x,y) texture_translate
"""
self.set_input(inp="texture_translate", val=lazy.pxr.Gf.Vec2f(*np.array(translate, dtype=float)))
self.set_input(inp="texture_translate", val=lazy.pxr.Gf.Vec2f(*th.Tensor(translate, dtype=float)))
@property
def texture_rotate(self):
@ -914,7 +914,7 @@ class MaterialPrim(BasePrim):
Returns:
2-array: this material's applied texture_scale
"""
return np.array(self.get_input(inp="texture_scale"))
return th.Tensor(self.get_input(inp="texture_scale"))
@texture_scale.setter
def texture_scale(self, scale):
@ -922,7 +922,7 @@ class MaterialPrim(BasePrim):
Args:
scale (2-array): this material's applied (x,y) texture_scale
"""
self.set_input(inp="texture_scale", val=lazy.pxr.Gf.Vec2f(*np.array(scale, dtype=float)))
self.set_input(inp="texture_scale", val=lazy.pxr.Gf.Vec2f(*th.Tensor(scale, dtype=float)))
@property
def detail_texture_translate(self):
@ -930,7 +930,7 @@ class MaterialPrim(BasePrim):
Returns:
2-array: this material's applied detail_texture_translate
"""
return np.array(self.get_input(inp="detail_texture_translate"))
return th.Tensor(self.get_input(inp="detail_texture_translate"))
@detail_texture_translate.setter
def detail_texture_translate(self, translate):
@ -938,7 +938,7 @@ class MaterialPrim(BasePrim):
Args:
translate (2-array): this material's applied detail_texture_translate
"""
self.set_input(inp="detail_texture_translate", val=lazy.pxr.Gf.Vec2f(*np.array(translate, dtype=float)))
self.set_input(inp="detail_texture_translate", val=lazy.pxr.Gf.Vec2f(*th.Tensor(translate, dtype=float)))
@property
def detail_texture_rotate(self):
@ -962,7 +962,7 @@ class MaterialPrim(BasePrim):
Returns:
2-array: this material's applied detail_texture_scale
"""
return np.array(self.get_input(inp="detail_texture_scale"))
return th.Tensor(self.get_input(inp="detail_texture_scale"))
@detail_texture_scale.setter
def detail_texture_scale(self, scale):
@ -970,7 +970,7 @@ class MaterialPrim(BasePrim):
Args:
scale (2-array): this material's applied detail_texture_scale
"""
self.set_input(inp="detail_texture_scale", val=lazy.pxr.Gf.Vec2f(*np.array(scale, dtype=float)))
self.set_input(inp="detail_texture_scale", val=lazy.pxr.Gf.Vec2f(*th.Tensor(scale, dtype=float)))
@property
def exclude_from_white_mode(self):
@ -1042,7 +1042,7 @@ class MaterialPrim(BasePrim):
Returns:
3-array: this material's diffuse_reflection_color in (R,G,B)
"""
return np.array(self.get_input(inp="diffuse_reflection_color"))
return th.Tensor(self.get_input(inp="diffuse_reflection_color"))
@diffuse_reflection_color.setter
def diffuse_reflection_color(self, color):
@ -1050,7 +1050,7 @@ class MaterialPrim(BasePrim):
Args:
color (3-array): this material's diffuse_reflection_color in (R,G,B)
"""
self.set_input(inp="diffuse_reflection_color", val=lazy.pxr.Gf.Vec3f(*np.array(color, dtype=float)))
self.set_input(inp="diffuse_reflection_color", val=lazy.pxr.Gf.Vec3f(*th.Tensor(color, dtype=float)))
@property
def specular_reflection_color(self):
@ -1058,7 +1058,7 @@ class MaterialPrim(BasePrim):
Returns:
3-array: this material's specular_reflection_color in (R,G,B)
"""
return np.array(self.get_input(inp="specular_reflection_color"))
return th.Tensor(self.get_input(inp="specular_reflection_color"))
@specular_reflection_color.setter
def specular_reflection_color(self, color):
@ -1066,7 +1066,7 @@ class MaterialPrim(BasePrim):
Args:
color (3-array): this material's specular_reflection_color in (R,G,B)
"""
self.set_input(inp="specular_reflection_color", val=lazy.pxr.Gf.Vec3f(*np.array(color, dtype=float)))
self.set_input(inp="specular_reflection_color", val=lazy.pxr.Gf.Vec3f(*th.Tensor(color, dtype=float)))
@property
def specular_transmission_color(self):
@ -1074,7 +1074,7 @@ class MaterialPrim(BasePrim):
Returns:
3-array: this material's specular_transmission_color in (R,G,B)
"""
return np.array(self.get_input(inp="specular_transmission_color"))
return th.Tensor(self.get_input(inp="specular_transmission_color"))
@specular_transmission_color.setter
def specular_transmission_color(self, color):
@ -1082,7 +1082,7 @@ class MaterialPrim(BasePrim):
Args:
color (3-array): this material's specular_transmission_color in (R,G,B)
"""
self.set_input(inp="specular_transmission_color", val=lazy.pxr.Gf.Vec3f(*np.array(color, dtype=float)))
self.set_input(inp="specular_transmission_color", val=lazy.pxr.Gf.Vec3f(*th.Tensor(color, dtype=float)))
@property
def specular_transmission_scattering_color(self):
@ -1090,7 +1090,7 @@ class MaterialPrim(BasePrim):
Returns:
3-array: this material's specular_transmission_scattering_color in (R,G,B)
"""
return np.array(self.get_input(inp="specular_transmission_scattering_color"))
return th.Tensor(self.get_input(inp="specular_transmission_scattering_color"))
@specular_transmission_scattering_color.setter
def specular_transmission_scattering_color(self, color):
@ -1099,7 +1099,7 @@ class MaterialPrim(BasePrim):
color (3-array): this material's specular_transmission_scattering_color in (R,G,B)
"""
self.set_input(
inp="specular_transmission_scattering_color", val=lazy.pxr.Gf.Vec3f(*np.array(color, dtype=float))
inp="specular_transmission_scattering_color", val=lazy.pxr.Gf.Vec3f(*th.Tensor(color, dtype=float))
)
@property
@ -1144,7 +1144,7 @@ class MaterialPrim(BasePrim):
f"Tried to query glass_color shader input, "
f"but material at {self.prim_path} is not an OmniGlass material!"
)
return np.array(self.get_input(inp="glass_color"))
return th.Tensor(self.get_input(inp="glass_color"))
@glass_color.setter
def glass_color(self, color):
@ -1155,4 +1155,4 @@ class MaterialPrim(BasePrim):
assert self.is_glass, (
f"Tried to set glass_color shader input, " f"but material at {self.prim_path} is not an OmniGlass material!"
)
self.set_input(inp="glass_color", val=lazy.pxr.Gf.Vec3f(*np.array(color, dtype=float)))
self.set_input(inp="glass_color", val=lazy.pxr.Gf.Vec3f(*th.Tensor(color, dtype=float)))

View File

@ -226,7 +226,7 @@ class RigidPrim(XFormPrim):
# If we have any collision meshes, we aggregate their center of mass and volume values to set the center of mass
# for this link
if len(coms) > 0:
com = (np.array(coms) * np.array(vols).reshape(-1, 1)).sum(axis=0) / np.sum(vols)
com = (th.Tensor(coms) * th.Tensor(vols).reshape(-1, 1)).sum(axis=0) / np.sum(vols)
self.set_attribute("physics:centerOfMass", lazy.pxr.Gf.Vec3f(*com))
def enable_collisions(self):
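
The center of mass above is the volume-weighted average of the per-mesh centers, com = sum_i(v_i * c_i) / sum_i(v_i). A small numeric sketch with made-up meshes:

    import torch as th

    coms = th.tensor([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])  # per-mesh centers of mass
    vols = th.tensor([1.0, 3.0])                          # per-mesh volumes
    com = (coms * vols.reshape(-1, 1)).sum(axis=0) / vols.sum()
    print(com)                                            # tensor([0.7500, 0.0000, 0.0000])
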
@ -811,8 +811,8 @@ class RigidPrim(XFormPrim):
super()._load_state(state=state)
# Set velocities if not kinematic
self.set_linear_velocity(np.array(state["lin_vel"]))
self.set_angular_velocity(np.array(state["ang_vel"]))
self.set_linear_velocity(th.Tensor(state["lin_vel"]))
self.set_angular_velocity(th.Tensor(state["ang_vel"]))
def serialize(self, state):
# Run super first

View File

@ -66,7 +66,7 @@ class XFormPrim(BasePrim):
# Cache the original scale from the USD so that when EntityPrim sets the scale for each link (Rigid/ClothPrim),
# the new scale is with respect to the original scale. XFormPrim's scale always matches the scale in the USD.
self.original_scale = np.array(self.get_attribute("xformOp:scale"))
self.original_scale = th.Tensor(self.get_attribute("xformOp:scale"))
# Grab the attached material if it exists
if self.has_material():
@ -181,8 +181,8 @@ class XFormPrim(BasePrim):
"""
current_position, current_orientation = self.get_position_orientation()
position = current_position if position is None else np.array(position, dtype=float)
orientation = current_orientation if orientation is None else np.array(orientation, dtype=float)
position = current_position if position is None else th.tensor(position, dtype=th.float64)
orientation = current_orientation if orientation is None else th.tensor(orientation, dtype=th.float64)
assert np.isclose(
np.linalg.norm(orientation), 1, atol=1e-3
), f"{self.prim_path} desired orientation {orientation} is not a unit quaternion."
@@ -287,14 +287,14 @@ class XFormPrim(BasePrim):
"""
properties = self.prim.GetPropertyNames()
if position is not None:
position = lazy.pxr.Gf.Vec3d(*np.array(position, dtype=float))
position = lazy.pxr.Gf.Vec3d(*th.Tensor(position, dtype=float))
if "xformOp:translate" not in properties:
lazy.carb.log_error(
"Translate property needs to be set for {} before setting its position".format(self.name)
)
self.set_attribute("xformOp:translate", position)
if orientation is not None:
orientation = np.array(orientation, dtype=float)[[3, 0, 1, 2]]
orientation = th.Tensor(orientation, dtype=float)[[3, 0, 1, 2]]
if "xformOp:orient" not in properties:
lazy.carb.log_error(
"Orient property needs to be set for {} before setting its orientation".format(self.name)
@@ -329,7 +329,7 @@ class XFormPrim(BasePrim):
prim_tf = lazy.pxr.UsdGeom.Xformable(self._prim).ComputeLocalToWorldTransform(lazy.pxr.Usd.TimeCode.Default())
transform = lazy.pxr.Gf.Transform()
transform.SetMatrix(prim_tf)
return np.array(transform.GetScale())
return th.Tensor(transform.GetScale())
@property
def scaled_transform(self):
@@ -351,7 +351,7 @@ class XFormPrim(BasePrim):
"""
scale = self.get_attribute("xformOp:scale")
assert scale is not None, "Attribute 'xformOp:scale' is None for prim {}".format(self.name)
return np.array(scale)
return th.Tensor(scale)
@scale.setter
def scale(self, scale):
@@ -362,7 +362,7 @@ class XFormPrim(BasePrim):
scale (float or np.ndarray): scale to be applied to the prim's dimensions. shape is (3, ).
Defaults to None, which means left unchanged.
"""
scale = np.array(scale, dtype=float) if isinstance(scale, Iterable) else np.ones(3) * scale
scale = th.Tensor(scale, dtype=float) if isinstance(scale, Iterable) else np.ones(3) * scale
assert np.all(scale > 0), f"Scale {scale} must consist of positive numbers."
scale = lazy.pxr.Gf.Vec3d(*scale)
properties = self.prim.GetPropertyNames()
@@ -424,7 +424,7 @@ class XFormPrim(BasePrim):
return dict(pos=pos, ori=ori)
def _load_state(self, state):
pos, orn = np.array(state["pos"]), np.array(state["ori"])
pos, orn = th.Tensor(state["pos"]), th.Tensor(state["ori"])
if self.scene is not None:
pos, orn = T.pose_transform(*self.scene.prim.get_position_orientation(), pos, orn)
self.set_position_orientation(pos, orn)

View File

@@ -402,7 +402,7 @@ class BehaviorRobot(ManipulationRobot, LocomotionRobot, ActiveCameraRobot):
@property
def assisted_grasp_start_points(self):
side_coefficients = {"left": np.array([1, -1, 1]), "right": np.array([1, 1, 1])}
side_coefficients = {"left": th.Tensor([1, -1, 1]), "right": th.Tensor([1, 1, 1])}
return {
arm: [
GraspingPoint(link_name=f"{arm}_{m.PALM_LINK_NAME}", position=m.PALM_BASE_POS),
@@ -417,7 +417,7 @@ class BehaviorRobot(ManipulationRobot, LocomotionRobot, ActiveCameraRobot):
@property
def assisted_grasp_end_points(self):
side_coefficients = {"left": np.array([1, -1, 1]), "right": np.array([1, 1, 1])}
side_coefficients = {"left": th.Tensor([1, -1, 1]), "right": th.Tensor([1, 1, 1])}
return {
arm: [
GraspingPoint(link_name=f"{arm}_{finger}", position=m.FINGER_TIP_POS * side_coefficients[arm])
@@ -457,8 +457,8 @@ class BehaviorRobot(ManipulationRobot, LocomotionRobot, ActiveCameraRobot):
# Update body action space
if teleop_action.is_valid["head"]:
head_pos, head_orn = teleop_action.head[:3], T.euler2quat(teleop_action.head[3:6])
des_body_pos = head_pos - np.array([0, 0, m.BODY_HEIGHT_OFFSET])
des_body_rpy = np.array([0, 0, R.from_quat(head_orn).as_euler("XYZ")[2]])
des_body_pos = head_pos - th.Tensor([0, 0, m.BODY_HEIGHT_OFFSET])
des_body_rpy = th.Tensor([0, 0, R.from_quat(head_orn).as_euler("XYZ")[2]])
des_body_orn = T.euler2quat(des_body_rpy)
else:
des_body_pos, des_body_orn = self.get_position_orientation()
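A sketch of the body-pose computation above with hypothetical inputs (scipy still performs the quaternion-to-Euler step; only the container type changed in this commit):

import torch as th
from scipy.spatial.transform import Rotation as R

head_pos = th.tensor([1.0, 0.5, 1.6])
head_orn = [0.0, 0.0, 0.3827, 0.9239]   # ~45 deg about z, (x, y, z, w)
body_height_offset = 0.45               # stand-in for m.BODY_HEIGHT_OFFSET

des_body_pos = head_pos - th.tensor([0.0, 0.0, body_height_offset])
yaw = R.from_quat(head_orn).as_euler("XYZ")[2]  # keep only the yaw component
des_body_rpy = th.tensor([0.0, 0.0, yaw])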

View File

@@ -164,7 +164,7 @@ class Fetch(ManipulationRobot, TwoWheelRobot, ActiveCameraRobot):
@property
def tucked_default_joint_pos(self):
return np.array(
return th.Tensor(
[
0.0,
0.0, # wheels
@@ -188,28 +188,28 @@ class Fetch(ManipulationRobot, TwoWheelRobot, ActiveCameraRobot):
pos = np.zeros(self.n_joints)
pos[self.base_control_idx] = 0.0
pos[self.trunk_control_idx] = 0.02 + self.default_trunk_offset
pos[self.camera_control_idx] = np.array([0.0, 0.45])
pos[self.gripper_control_idx[self.default_arm]] = np.array([0.05, 0.05]) # open gripper
pos[self.camera_control_idx] = th.Tensor([0.0, 0.45])
pos[self.gripper_control_idx[self.default_arm]] = th.Tensor([0.05, 0.05]) # open gripper
# Choose arm based on setting
if self.default_arm_pose == "vertical":
pos[self.arm_control_idx[self.default_arm]] = np.array(
pos[self.arm_control_idx[self.default_arm]] = th.Tensor(
[-0.94121, -0.64134, 1.55186, 1.65672, -0.93218, 1.53416, 2.14474]
)
elif self.default_arm_pose == "diagonal15":
pos[self.arm_control_idx[self.default_arm]] = np.array(
pos[self.arm_control_idx[self.default_arm]] = th.Tensor(
[-0.95587, -0.34778, 1.46388, 1.47821, -0.93813, 1.4587, 1.9939]
)
elif self.default_arm_pose == "diagonal30":
pos[self.arm_control_idx[self.default_arm]] = np.array(
pos[self.arm_control_idx[self.default_arm]] = th.Tensor(
[-1.06595, -0.22184, 1.53448, 1.46076, -0.84995, 1.36904, 1.90996]
)
elif self.default_arm_pose == "diagonal45":
pos[self.arm_control_idx[self.default_arm]] = np.array(
pos[self.arm_control_idx[self.default_arm]] = th.Tensor(
[-1.11479, -0.0685, 1.5696, 1.37304, -0.74273, 1.3983, 1.79618]
)
elif self.default_arm_pose == "horizontal":
pos[self.arm_control_idx[self.default_arm]] = np.array(
pos[self.arm_control_idx[self.default_arm]] = th.Tensor(
[-1.43016, 0.20965, 1.86816, 1.77576, -0.27289, 1.31715, 2.01226]
)
else:
@@ -370,7 +370,7 @@ class Fetch(ManipulationRobot, TwoWheelRobot, ActiveCameraRobot):
Returns:
n-array: Indices in low-level control vector corresponding to [Left, Right] wheel joints.
"""
return np.array([0, 1])
return th.Tensor([0, 1])
@property
def trunk_control_idx(self):
@@ -378,7 +378,7 @@ class Fetch(ManipulationRobot, TwoWheelRobot, ActiveCameraRobot):
Returns:
n-array: Indices in low-level control vector corresponding to trunk joint.
"""
return np.array([2])
return th.Tensor([2])
@property
def camera_control_idx(self):
@@ -386,15 +386,15 @@ class Fetch(ManipulationRobot, TwoWheelRobot, ActiveCameraRobot):
Returns:
n-array: Indices in low-level control vector corresponding to [tilt, pan] camera joints.
"""
return np.array([3, 5])
return th.Tensor([3, 5])
@property
def arm_control_idx(self):
return {self.default_arm: np.array([4, 6, 7, 8, 9, 10, 11])}
return {self.default_arm: th.Tensor([4, 6, 7, 8, 9, 10, 11])}
@property
def gripper_control_idx(self):
return {self.default_arm: np.array([12, 13])}
return {self.default_arm: th.Tensor([12, 13])}
@property
def disabled_collision_pairs(self):
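One caveat for these control-index tensors, shown as a standalone check: th.Tensor([0, 1]) produces float32, and float tensors cannot be used for fancy indexing, so an index vector needs an integer dtype.

import torch as th

joint_pos = th.tensor([0.10, 0.20, 0.30, 0.40])

idx = th.tensor([0, 1], dtype=th.long)  # th.Tensor([0, 1]) would be float32
print(joint_pos[idx])                   # tensor([0.1000, 0.2000])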

View File

@@ -98,8 +98,8 @@ class FrankaPanda(ManipulationRobot):
self._eef_link_names = "panda_hand"
self._finger_link_names = ["panda_leftfinger", "panda_rightfinger"]
self._finger_joint_names = ["panda_finger_joint1", "panda_finger_joint2"]
self._default_robot_model_joint_pos = np.array([0.00, -1.3, 0.00, -2.87, 0.00, 2.00, 0.75, 0.00, 0.00])
self._teleop_rotation_offset = np.array([-1, 0, 0, 0])
self._default_robot_model_joint_pos = th.Tensor([0.00, -1.3, 0.00, -2.87, 0.00, 2.00, 0.75, 0.00, 0.00])
self._teleop_rotation_offset = th.Tensor([-1, 0, 0, 0])
self._ag_start_points = [
GraspingPoint(link_name="panda_rightfinger", position=[0.0, 0.001, 0.045]),
]
@@ -116,7 +116,7 @@ class FrankaPanda(ManipulationRobot):
self._default_robot_model_joint_pos = np.concatenate(
([0.86, -0.27, -0.68, -1.52, -0.18, 1.29, 1.72], np.zeros(16))
)
self._teleop_rotation_offset = np.array([0, 0.7071, 0, 0.7071])
self._teleop_rotation_offset = th.Tensor([0, 0.7071, 0, 0.7071])
self._ag_start_points = [
GraspingPoint(link_name=f"base_link", position=[0.015, 0, -0.03]),
GraspingPoint(link_name=f"base_link", position=[0.015, 0, -0.08]),
@@ -141,7 +141,7 @@ class FrankaPanda(ManipulationRobot):
self._default_robot_model_joint_pos = np.concatenate(
([0.86, -0.27, -0.68, -1.52, -0.18, 1.29, 1.72], np.zeros(16))
)
self._teleop_rotation_offset = np.array([-0.7071, 0.7071, 0, 0])
self._teleop_rotation_offset = th.Tensor([-0.7071, 0.7071, 0, 0])
self._ag_start_points = [
GraspingPoint(link_name=f"palm_center", position=[0, -0.025, 0.035]),
GraspingPoint(link_name=f"palm_center", position=[0, 0.03, 0.035]),
@@ -163,7 +163,7 @@ class FrankaPanda(ManipulationRobot):
self._default_robot_model_joint_pos = np.concatenate(
([0.86, -0.27, -0.68, -1.52, -0.18, 1.29, 1.72], np.zeros(12))
)
self._teleop_rotation_offset = np.array([0, 0, 0.707, 0.707])
self._teleop_rotation_offset = th.Tensor([0, 0, 0.707, 0.707])
# TODO: add ag support for inspire hand
self._ag_start_points = [
# GraspingPoint(link_name=f"base_link", position=[0, -0.025, 0.035]),
@@ -258,7 +258,7 @@ class FrankaPanda(ManipulationRobot):
@property
def gripper_control_idx(self):
return {
self.default_arm: np.array(
self.default_arm: th.Tensor(
[list(self.joints.keys()).index(name) for name in self.finger_joint_names[self.default_arm]]
)
}

View File

@@ -31,7 +31,7 @@ class Freight(TwoWheelRobot):
Returns:
n-array: Indices in low-level control vector corresponding to [Left, Right] wheel joints.
"""
return np.array([0, 1])
return th.Tensor([0, 1])
@property
def _default_joint_pos(self):

View File

@@ -25,7 +25,7 @@ class Husky(LocomotionRobot):
@property
def base_control_idx(self):
return np.array([0, 1, 2, 3])
return th.Tensor([0, 1, 2, 3])
@property
def _default_joint_pos(self):

View File

@@ -30,7 +30,7 @@ class Locobot(TwoWheelRobot):
Returns:
n-array: Indices in low-level control vector corresponding to [Left, Right] wheel joints.
"""
return np.array([1, 0])
return th.Tensor([1, 0])
@property
def _default_joint_pos(self):

View File

@@ -124,7 +124,7 @@ class LocomotionRobot(BaseRobot):
Args:
delta (3-array): (x,y,z) cartesian delta base position
"""
new_pos = np.array(delta) + self.get_position()
new_pos = th.Tensor(delta) + self.get_position()
self.set_position(position=new_pos)
def move_forward(self, delta=0.05):
@@ -134,7 +134,7 @@ class LocomotionRobot(BaseRobot):
Args:
delta (float): delta base position forward
"""
self.move_by(quat2mat(self.get_orientation()).dot(np.array([delta, 0, 0])))
self.move_by(quat2mat(self.get_orientation()).dot(th.Tensor([delta, 0, 0])))
def move_backward(self, delta=0.05):
"""
@@ -143,7 +143,7 @@ class LocomotionRobot(BaseRobot):
Args:
delta (float): delta base position backward
"""
self.move_by(quat2mat(self.get_orientation()).dot(np.array([-delta, 0, 0])))
self.move_by(quat2mat(self.get_orientation()).dot(th.Tensor([-delta, 0, 0])))
def move_left(self, delta=0.05):
"""
@@ -152,7 +152,7 @@ class LocomotionRobot(BaseRobot):
Args:
delta (float): delta base position left
"""
self.move_by(quat2mat(self.get_orientation()).dot(np.array([0, -delta, 0])))
self.move_by(quat2mat(self.get_orientation()).dot(th.Tensor([0, -delta, 0])))
def move_right(self, delta=0.05):
"""
@@ -161,7 +161,7 @@ class LocomotionRobot(BaseRobot):
Args:
delta (float): delta base position right
"""
self.move_by(quat2mat(self.get_orientation()).dot(np.array([0, delta, 0])))
self.move_by(quat2mat(self.get_orientation()).dot(th.Tensor([0, delta, 0])))
def turn_left(self, delta=0.03):
"""

View File

@@ -441,7 +441,7 @@ class ManipulationRobot(BaseRobot):
self.articulation_root_path, self.eef_link_names[arm]
)
)
dic["grasp_{}".format(arm)] = np.array([self.is_grasping(arm)])
dic["grasp_{}".format(arm)] = th.Tensor([self.is_grasping(arm)])
dic["gripper_{}_qpos".format(arm)] = joint_positions[self.gripper_control_idx[arm]]
dic["gripper_{}_qvel".format(arm)] = joint_velocities[self.gripper_control_idx[arm]]
@@ -837,7 +837,7 @@ class ManipulationRobot(BaseRobot):
if candidate_obj is None or link_name not in candidate_obj.links:
continue
candidate_link = candidate_obj.links[link_name]
dist = np.linalg.norm(np.array(candidate_link.get_position()) - np.array(gripper_center_pos))
dist = np.linalg.norm(th.Tensor(candidate_link.get_position()) - th.Tensor(gripper_center_pos))
candidate_data.append((prim_path, dist))
if not candidate_data:
@@ -1005,8 +1005,8 @@ class ManipulationRobot(BaseRobot):
"control_limits": self.control_limits,
"dof_idx": self.arm_control_idx[arm],
"command_output_limits": (
np.array([-0.2, -0.2, -0.2, -0.5, -0.5, -0.5]),
np.array([0.2, 0.2, 0.2, 0.5, 0.5, 0.5]),
th.Tensor([-0.2, -0.2, -0.2, -0.5, -0.5, -0.5]),
th.Tensor([0.2, 0.2, 0.2, 0.5, 0.5, 0.5]),
),
"mode": "pose_delta_ori",
"smoothing_filter_size": 2,
@@ -1031,8 +1031,8 @@ class ManipulationRobot(BaseRobot):
"control_limits": self.control_limits,
"dof_idx": self.arm_control_idx[arm],
"command_output_limits": (
np.array([-0.2, -0.2, -0.2, -0.5, -0.5, -0.5]),
np.array([0.2, 0.2, 0.2, 0.5, 0.5, 0.5]),
th.Tensor([-0.2, -0.2, -0.2, -0.5, -0.5, -0.5]),
th.Tensor([0.2, 0.2, 0.2, 0.5, 0.5, 0.5]),
),
"mode": "pose_delta_ori",
"workspace_pose_limiter": None,
@@ -1188,7 +1188,7 @@ class ManipulationRobot(BaseRobot):
Default is "default" which corresponds to the first entry in self.arm_names
ag_data (None or 2-tuple): if specified, assisted-grasp object, link tuple (i.e. :(BaseObject, RigidPrim)).
Otherwise, does a no-op
contact_pos (None or np.array): if specified, contact position to use for grasp.
contact_pos (None or th.Tensor): if specified, contact position to use for grasp.
"""
arm = self.default_arm if arm == "default" else arm
@@ -1205,7 +1205,7 @@ class ManipulationRobot(BaseRobot):
force_data, _ = self._find_gripper_contacts(arm=arm, return_contact_positions=True)
for c_link_prim_path, c_contact_pos in force_data:
if c_link_prim_path == ag_link.prim_path:
contact_pos = np.array(c_contact_pos)
contact_pos = th.Tensor(c_contact_pos)
break
assert contact_pos is not None
@@ -1213,7 +1213,7 @@ class ManipulationRobot(BaseRobot):
# Need to find distance between robot and contact point in robot link's local frame and
# ag link and contact point in ag link's local frame
joint_frame_pos = contact_pos
joint_frame_orn = np.array([0, 0, 0, 1.0])
joint_frame_orn = th.Tensor([0, 0, 0, 1.0])
eef_link_pos, eef_link_orn = self.eef_links[arm].get_position_orientation()
parent_frame_pos, parent_frame_orn = T.relative_pose_transform(
joint_frame_pos, joint_frame_orn, eef_link_pos, eef_link_orn
@@ -1523,7 +1523,7 @@ class ManipulationRobot(BaseRobot):
Rotational offset that will be applied for teleoperation
such that [0, 0, 0, 1] as action will keep the robot eef pointing at +x axis
"""
return {arm: np.array([0, 0, 0, 1]) for arm in self.arm_names}
return {arm: th.Tensor([0, 0, 0, 1]) for arm in self.arm_names}
def teleop_data_to_action(self, teleop_action) -> np.ndarray:
"""

View File

@@ -200,10 +200,10 @@ class Tiago(ManipulationRobot, LocomotionRobot, ActiveCameraRobot):
# Keep the current joint positions for the base joints
pos[self.base_idx] = self.get_joint_positions()[self.base_idx]
pos[self.trunk_control_idx] = 0
pos[self.camera_control_idx] = np.array([0.0, 0.0])
pos[self.camera_control_idx] = th.Tensor([0.0, 0.0])
for arm in self.arm_names:
pos[self.gripper_control_idx[arm]] = np.array([0.045, 0.045]) # open gripper
pos[self.arm_control_idx[arm]] = np.array([-1.10, 1.47, 2.71, 1.71, -1.57, 1.39, 0])
pos[self.gripper_control_idx[arm]] = th.Tensor([0.045, 0.045]) # open gripper
pos[self.arm_control_idx[arm]] = th.Tensor([-1.10, 1.47, 2.71, 1.71, -1.57, 1.39, 0])
return pos
@property
@@ -212,28 +212,28 @@ class Tiago(ManipulationRobot, LocomotionRobot, ActiveCameraRobot):
# Keep the current joint positions for the base joints
pos[self.base_idx] = self.get_joint_positions()[self.base_idx]
pos[self.trunk_control_idx] = 0.02 + self.default_trunk_offset
pos[self.camera_control_idx] = np.array([0.0, -0.45])
pos[self.camera_control_idx] = th.Tensor([0.0, -0.45])
# Choose arm joint pos based on setting
for arm in self.arm_names:
pos[self.gripper_control_idx[arm]] = np.array([0.045, 0.045]) # open gripper
pos[self.gripper_control_idx[arm]] = th.Tensor([0.045, 0.045]) # open gripper
if self.default_arm_pose == "vertical":
pos[self.arm_control_idx[arm]] = np.array(
pos[self.arm_control_idx[arm]] = th.Tensor(
[0.85846, -0.14852, 1.81008, 1.63368, 0.13764, -1.32488, -0.68415]
)
elif self.default_arm_pose == "diagonal15":
pos[self.arm_control_idx[arm]] = np.array(
pos[self.arm_control_idx[arm]] = th.Tensor(
[0.90522, -0.42811, 2.23505, 1.64627, 0.76867, -0.79464, 2.05251]
)
elif self.default_arm_pose == "diagonal30":
pos[self.arm_control_idx[arm]] = np.array(
pos[self.arm_control_idx[arm]] = th.Tensor(
[0.71883, -0.02787, 1.86002, 1.52897, 0.52204, -0.99741, 2.03113]
)
elif self.default_arm_pose == "diagonal45":
pos[self.arm_control_idx[arm]] = np.array(
pos[self.arm_control_idx[arm]] = th.Tensor(
[0.66058, -0.14251, 1.77547, 1.43345, 0.65988, -1.02741, 1.81302]
)
elif self.default_arm_pose == "horizontal":
pos[self.arm_control_idx[arm]] = np.array(
pos[self.arm_control_idx[arm]] = th.Tensor(
[0.61511, 0.49229, 1.46306, 1.24919, 1.08282, -1.28865, 1.50910]
)
else:
@@ -316,7 +316,7 @@ class Tiago(ManipulationRobot, LocomotionRobot, ActiveCameraRobot):
lin_vel_global, _ = T.pose_transform([0, 0, 0], cur_orn, u_vec[self.base_idx[:3]], [0, 0, 0, 1])
ang_vel_global, _ = T.pose_transform([0, 0, 0], cur_orn, u_vec[self.base_idx[3:]], [0, 0, 0, 1])
u_vec[self.base_control_idx] = np.array([lin_vel_global[0], lin_vel_global[1], ang_vel_global[2]])
u_vec[self.base_control_idx] = th.Tensor([lin_vel_global[0], lin_vel_global[1], ang_vel_global[2]])
return u_vec, u_type_vec
def _get_proprioception_dict(self):
@@ -350,7 +350,7 @@ class Tiago(ManipulationRobot, LocomotionRobot, ActiveCameraRobot):
fcns = super().get_control_dict()
native_fcn = fcns.get_fcn("eef_right_pos_relative")
fcns["eef_right_pos_relative"] = lambda: (
native_fcn() + np.array([0, 0, -self.get_joint_positions()[self.trunk_control_idx[0]]])
native_fcn() + th.Tensor([0, 0, -self.get_joint_positions()[self.trunk_control_idx[0]]])
)
return fcns
@@ -458,7 +458,7 @@ class Tiago(ManipulationRobot, LocomotionRobot, ActiveCameraRobot):
n-array: Indices in low-level control vector corresponding to the three controllable 1DoF base joints
"""
joints = list(self.joints.keys())
return np.array([joints.index(f"base_footprint_{component}_joint") for component in ["x", "y", "rz"]])
return th.Tensor([joints.index(f"base_footprint_{component}_joint") for component in ["x", "y", "rz"]])
@property
def base_idx(self):
@@ -467,7 +467,7 @@ class Tiago(ManipulationRobot, LocomotionRobot, ActiveCameraRobot):
n-array: Indices in low-level control vector corresponding to the six 1DoF base joints
"""
joints = list(self.joints.keys())
return np.array(
return th.Tensor(
[joints.index(f"base_footprint_{component}_joint") for component in ["x", "y", "z", "rx", "ry", "rz"]]
)
@@ -477,7 +477,7 @@ class Tiago(ManipulationRobot, LocomotionRobot, ActiveCameraRobot):
Returns:
n-array: Indices in low-level control vector corresponding to trunk joint.
"""
return np.array([6])
return th.Tensor([6])
@property
def camera_control_idx(self):
@@ -485,19 +485,19 @@ class Tiago(ManipulationRobot, LocomotionRobot, ActiveCameraRobot):
Returns:
n-array: Indices in low-level control vector corresponding to [tilt, pan] camera joints.
"""
return np.array([9, 12])
return th.Tensor([9, 12])
@property
def arm_control_idx(self):
return {
"left": np.array([7, 10, 13, 15, 17, 19, 21]),
"right": np.array([8, 11, 14, 16, 18, 20, 22]),
"combined": np.array([7, 8, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]),
"left": th.Tensor([7, 10, 13, 15, 17, 19, 21]),
"right": th.Tensor([8, 11, 14, 16, 18, 20, 22]),
"combined": th.Tensor([7, 8, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]),
}
@property
def gripper_control_idx(self):
return {"left": np.array([23, 24]), "right": np.array([25, 26])}
return {"left": th.Tensor([23, 24]), "right": th.Tensor([25, 26])}
@property
def finger_lengths(self):
@@ -707,7 +707,7 @@ class Tiago(ManipulationRobot, LocomotionRobot, ActiveCameraRobot):
position = current_position
if orientation is None:
orientation = current_orientation
position, orientation = np.array(position), np.array(orientation)
position, orientation = th.Tensor(position), th.Tensor(orientation)
assert np.isclose(
np.linalg.norm(orientation), 1, atol=1e-3
), f"{self.name} desired orientation {orientation} is not a unit quaternion."

View File

@@ -27,7 +27,7 @@ class Turtlebot(TwoWheelRobot):
Returns:
n-array: Indices in low-level control vector corresponding to [Left, Right] wheel joints.
"""
return np.array([0, 1])
return th.Tensor([0, 1])
@property
def _default_joint_pos(self):

View File

@@ -78,8 +78,8 @@ class TwoWheelRobot(LocomotionRobot):
ang_vel = (r_vel - l_vel) / self.wheel_axle_length
# Add info
dic["dd_base_lin_vel"] = np.array([lin_vel])
dic["dd_base_ang_vel"] = np.array([ang_vel])
dic["dd_base_lin_vel"] = th.Tensor([lin_vel])
dic["dd_base_ang_vel"] = th.Tensor([ang_vel])
return dic
@@ -165,5 +165,5 @@ class TwoWheelRobot(LocomotionRobot):
assert isinstance(
self._controllers["base"], DifferentialDriveController
), "Only DifferentialDriveController is supported!"
action[self.base_action_idx] = np.array([teleop_action.base[0], teleop_action.base[2]]) * 0.3
action[self.base_action_idx] = th.Tensor([teleop_action.base[0], teleop_action.base[2]]) * 0.3
return action

View File

@@ -138,7 +138,7 @@ class VX300S(ManipulationRobot):
@property
def _default_joint_pos(self):
return np.array([0.0, -0.849879, 0.258767, 0.0, 1.2831712, 0.0, 0.057, 0.057])
return th.Tensor([0.0, -0.849879, 0.258767, 0.0, 1.2831712, 0.0, 0.057, 0.057])
@property
def finger_lengths(self):

View File

@@ -149,8 +149,8 @@ class SceneGraphBuilder(object):
# Update the position of everything that's already in the scene by using our relative position to last frame.
old_desired_to_new_desired = world_to_desired_frame @ self._last_desired_frame_to_world
nodes = list(self._G.nodes)
poses = np.array([self._G.nodes[obj]["pose"] for obj in nodes])
bbox_poses = np.array([self._G.nodes[obj]["bbox_pose"] for obj in nodes])
poses = th.Tensor([self._G.nodes[obj]["pose"] for obj in nodes])
bbox_poses = th.Tensor([self._G.nodes[obj]["bbox_pose"] for obj in nodes])
updated_poses = old_desired_to_new_desired @ poses
updated_bbox_poses = old_desired_to_new_desired @ bbox_poses
for i, obj in enumerate(nodes):

View File

@@ -120,7 +120,7 @@ class StaticTraversableScene(TraversableScene):
height_adjustment = height - self.floor_heights[floor]
else:
height_adjustment = self.floor_heights[floor] - self._scene_prim.get_position()[2] + additional_elevation
self._scene_prim.set_position(np.array([0, 0, height_adjustment]))
self._scene_prim.set_position(th.Tensor([0, 0, height_adjustment]))
def get_floor_height(self, floor=0):
"""

View File

@@ -172,7 +172,7 @@ class ScanSensor(BaseSensor):
)
# Convert into 3D unit vectors for each angle
unit_vector_laser = np.array([[np.cos(ang), np.sin(ang), 0.0] for ang in angles])
unit_vector_laser = th.Tensor([[np.cos(ang), np.sin(ang), 0.0] for ang in angles])
# Scale unit vectors by corresponding laser scan distances
assert ((scan >= 0.0) & (scan <= 1.0)).all(), "scan out of valid range [0, 1]"
@@ -186,7 +186,7 @@ class ScanSensor(BaseSensor):
base_pos, base_ori = self.occupancy_grid_local_link.get_position_orientation()
scan_local = quat2mat(base_ori).T.dot((scan_world - base_pos).T).T
scan_local = scan_local[:, :2]
scan_local = np.concatenate([np.array([[0, 0]]), scan_local, np.array([[0, 0]])], axis=0)
scan_local = np.concatenate([th.Tensor([[0, 0]]), scan_local, th.Tensor([[0, 0]])], axis=0)
# flip y axis
scan_local[:, 1] *= -1
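The per-angle unit-vector construction above can also be vectorized rather than built from a Python list comprehension; a sketch over hypothetical angles:

import math
import torch as th

angles = th.linspace(-math.pi / 2, math.pi / 2, steps=5)

unit_vector_laser = th.stack(
    [th.cos(angles), th.sin(angles), th.zeros_like(angles)], dim=-1
)  # (N, 3)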

View File

@@ -25,10 +25,10 @@ class BaseSensorNoise(Registerable, metaclass=ABCMeta):
identical call to self.corrupt(...)
Args:
obs (np.array): observation numpy array of values of arbitrary dimension normalized to range [0.0, 1.0]
obs (th.Tensor): observation tensor of values of arbitrary dimension normalized to range [0.0, 1.0]
Returns:
np.array: Corrupted observation numpy array if self.enabled is True, otherwise this is a pass-through
th.Tensor: Corrupted observation tensor if self.enabled is True, otherwise this is a pass-through
"""
return self.corrupt(obs=obs)
@@ -37,10 +37,10 @@ class BaseSensorNoise(Registerable, metaclass=ABCMeta):
If this noise is enabled, corrupts observation @obs by adding sensor noise to sensor reading.
Args:
obs (np.array): observation numpy array of values of arbitrary dimension normalized to range [0.0, 1.0]
obs (th.Tensor): observation tensor of values of arbitrary dimension normalized to range [0.0, 1.0]
Returns:
np.array: Corrupted observation numpy array if self.enabled is True, otherwise this is a pass-through
th.Tensor: Corrupted observation tensor if self.enabled is True, otherwise this is a pass-through
"""
# Run sanity check to make sure obs is in acceptable range
assert len(obs[(obs < 0.0) | (obs > 1.0)]) == 0, "sensor reading has to be between [0.0, 1.0]"
@@ -53,10 +53,10 @@ class BaseSensorNoise(Registerable, metaclass=ABCMeta):
Corrupts observation @obs by adding sensor noise to sensor reading
Args:
obs (np.array): observation numpy array of values of arbitrary dimension normalized to range [0.0, 1.0]
obs (th.Tensor): observation tensor of values of arbitrary dimension normalized to range [0.0, 1.0]
Returns:
np.array: Corrupted observation numpy array
th.Tensor: Corrupted observation tensor
"""
raise NotImplementedError()
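A hypothetical _corrupt implementation in the spirit of this interface: add Gaussian noise, then clamp back into the documented [0.0, 1.0] range (the function name and std value are illustrative, not from the commit):

import torch as th

def corrupt_gaussian(obs: th.Tensor, std: float = 0.01) -> th.Tensor:
    noisy = obs + th.randn_like(obs) * std
    return th.clamp(noisy, 0.0, 1.0)  # keep readings inside [0, 1]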

View File

@@ -652,7 +652,7 @@ class VisionSensor(BaseSensor):
Returns:
2-tuple: [min, max] value of the sensor's clipping range, in meters
"""
return np.array(self.get_attribute("clippingRange"))
return th.Tensor(self.get_attribute("clippingRange"))
@clipping_range.setter
def clipping_range(self, limits):
@@ -723,7 +723,7 @@ class VisionSensor(BaseSensor):
cx = width / 2
cy = height / 2
intrinsic_matrix = np.array([[fx, 0.0, cx], [0.0, fy, cy], [0.0, 0.0, 1.0]])
intrinsic_matrix = th.Tensor([[fx, 0.0, cx], [0.0, fy, cy], [0.0, 0.0, 1.0]])
return intrinsic_matrix
@property
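How the intrinsic matrix above is typically populated, sketched for a hypothetical 640x480 sensor with a 60-degree horizontal FOV (square pixels assumed):

import math
import torch as th

width, height = 640, 480
fx = fy = (width / 2) / math.tan(math.radians(60.0) / 2)  # focal length, pixels
cx, cy = width / 2, height / 2                            # principal point

intrinsic_matrix = th.tensor([[fx, 0.0, cx], [0.0, fy, cy], [0.0, 0.0, 1.0]])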

View File

@@ -364,8 +364,8 @@ def launch_simulator(*args, **kwargs):
if gm.RENDER_VIEWER_CAMERA:
self._set_viewer_camera()
self.viewer_camera.set_position_orientation(
position=np.array(m.DEFAULT_VIEWER_CAMERA_POS),
orientation=np.array(m.DEFAULT_VIEWER_CAMERA_QUAT),
position=th.Tensor(m.DEFAULT_VIEWER_CAMERA_POS),
orientation=th.Tensor(m.DEFAULT_VIEWER_CAMERA_QUAT),
)
self.viewer_width = viewer_width
self.viewer_height = viewer_height
@@ -531,7 +531,7 @@ def launch_simulator(*args, **kwargs):
name="ground_plane",
z_position=0,
size=None,
color=None if floor_plane_color is None else np.array(floor_plane_color),
color=None if floor_plane_color is None else th.Tensor(floor_plane_color),
visible=floor_plane_visible,
# TODO: update with new PhysicsMaterial API
# static_friction=static_friction,

View File

@@ -157,9 +157,9 @@ class MacroParticleSystem(BaseSystem):
def _dump_state(self):
state = super()._dump_state()
state["scales"] = (
np.array([particle.scale for particle in self.particles.values()])
th.Tensor([particle.scale for particle in self.particles.values()])
if self.particles is not None
else np.array([])
else th.Tensor([])
)
state["particle_counter"] = self._particle_counter
return state
@@ -339,7 +339,7 @@ class MacroParticleSystem(BaseSystem):
@property
def color(self):
return np.array(self._color)
return th.Tensor(self._color)
class MacroVisualParticleSystem(MacroParticleSystem, VisualParticleSystem):
@@ -397,7 +397,7 @@ class MacroVisualParticleSystem(MacroParticleSystem, VisualParticleSystem):
self._particles_local_mat = None
# Maps group name to array of face_ids where particles are located if the group object is a cloth type
# Maps group name to np.array of face IDs (int) that particles are attached to
# Maps group name to th.Tensor of face IDs (int) that particles are attached to
self._cloth_face_ids = None
# Default behavior for this class -- whether to clip generated particles halfway into objects when sampling
@@ -447,7 +447,7 @@ class MacroVisualParticleSystem(MacroParticleSystem, VisualParticleSystem):
orientations = T.axisangle2quat(T.vecs2axisangle(z_up, normals))
if not self._CLIP_INTO_OBJECTS and z_extent > 0:
z_offsets = (
np.array([z_extent * particle.scale[2] for particle in self._group_particles[group].values()])
th.Tensor([z_extent * particle.scale[2] for particle in self._group_particles[group].values()])
/ 2.0
)
# Shift the particles halfway up
@@ -658,9 +658,9 @@ class MacroVisualParticleSystem(MacroParticleSystem, VisualParticleSystem):
if success:
self.generate_group_particles(
group=group,
positions=np.array(positions),
orientations=np.array(orientations),
scales=np.array(scales),
positions=th.Tensor(positions),
orientations=th.Tensor(orientations),
scales=th.Tensor(scales),
link_prim_paths=link_prim_paths,
)
# If we're a cloth, store the face_id as well
@@ -685,7 +685,7 @@ class MacroVisualParticleSystem(MacroParticleSystem, VisualParticleSystem):
"""
n_particles = len(particles) if particles else 0
if n_particles == 0:
return (np.array([]).reshape(0, 3), np.array([]).reshape(0, 4))
return (th.Tensor([]).reshape(0, 3), th.Tensor([]).reshape(0, 4))
if local:
poses = np.zeros((n_particles, 4, 4))
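The (0, 3) and (0, 4) empty returns above keep downstream code shape-safe; th.empty builds such tensors directly, and concatenation still works (a sketch):

import torch as th

pos, ori = th.empty(0, 3), th.empty(0, 4)
print(pos.shape, ori.shape)                # torch.Size([0, 3]) torch.Size([0, 4])
print(th.cat([pos, th.ones(2, 3)]).shape)  # torch.Size([2, 3])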
@@ -775,7 +775,7 @@ class MacroVisualParticleSystem(MacroParticleSystem, VisualParticleSystem):
pos, ori = self._compute_batch_particles_position_orientation(particles=particles, local=local)
positions = pos if positions is None else positions
orientations = ori if orientations is None else orientations
lens = np.array([len(particles), len(positions), len(orientations)])
lens = th.Tensor([len(particles), len(positions), len(orientations)])
assert lens.min() == lens.max(), "Got mismatched particles, positions, and orientations!"
particle_local_poses_batch = np.zeros((n_particles, 4, 4))
@@ -888,7 +888,7 @@ class MacroVisualParticleSystem(MacroParticleSystem, VisualParticleSystem):
ignore_scale (bool): Whether to ignore the parent_link scale when computing the local transform
Returns:
np.array: (4, 4) homogeneous transform matrix
th.Tensor: (4, 4) homogeneous transform matrix
"""
particle = self.particles[name]
parent_obj = self._particles_info[name]["obj"]
@@ -997,7 +997,7 @@ class MacroVisualParticleSystem(MacroParticleSystem, VisualParticleSystem):
# Also store the cloth face IDs as a vector
if is_cloth:
self._cloth_face_ids[self.get_group_name(obj)] = np.array(
self._cloth_face_ids[self.get_group_name(obj)] = th.Tensor(
[self._particles_info[particle_name]["face_id"] for particle_name in self._group_particles[name]]
)
@@ -1053,7 +1053,7 @@ class MacroVisualParticleSystem(MacroParticleSystem, VisualParticleSystem):
particle_idns = []
particle_attached_references = []
indices_to_remove = np.array([], dtype=int)
indices_to_remove = th.Tensor([], dtype=int)
for info in state["groups"].values():
obj = self._scene.object_registry("uuid", info["particle_attached_obj_uuid"])
# obj will be None if an object with an attachment group is removed between dump_state() and load_state()
@@ -1062,7 +1062,7 @@ class MacroVisualParticleSystem(MacroParticleSystem, VisualParticleSystem):
particle_idns.append(info["particle_idns"])
particle_attached_references.append(info["particle_attached_references"])
else:
indices_to_remove = np.append(indices_to_remove, np.array(info["particle_indices"], dtype=int))
indices_to_remove = np.append(indices_to_remove, th.Tensor(info["particle_indices"], dtype=int))
self._sync_particle_groups(
group_objects=group_objects,
particle_idns=particle_idns,
@@ -1231,7 +1231,7 @@ class MacroPhysicalParticleSystem(MacroParticleSystem, PhysicalParticleSystem):
# Compute particle radius
vertices = (
np.array(self.particle_object.get_attribute("points"))
th.Tensor(self.particle_object.get_attribute("points"))
* self.particle_object.scale
* self.max_scale.reshape(1, 3)
)
@@ -1291,7 +1291,7 @@ class MacroPhysicalParticleSystem(MacroParticleSystem, PhysicalParticleSystem):
pos, ori = tfs[:, :3], tfs[:, 3:]
pos = pos + T.quat2mat(ori) @ self._particle_offset
else:
pos, ori = np.array([]).reshape(0, 3), np.array([]).reshape(0, 4)
pos, ori = th.Tensor([]).reshape(0, 3), th.Tensor([]).reshape(0, 4)
return pos, ori
def get_particles_local_pose(self):
@@ -1332,7 +1332,7 @@ class MacroPhysicalParticleSystem(MacroParticleSystem, PhysicalParticleSystem):
orientation = ori if orientation is None else orientation
position = pos if position is None else (position - T.quat2mat(orientation) @ self._particle_offset)
self.particles_view.set_transforms(
np.concatenate([position, orientation]).reshape(1, -1), indices=np.array([idx])
np.concatenate([position, orientation]).reshape(1, -1), indices=th.Tensor([idx])
)
def set_particle_local_pose(self, idx, position=None, orientation=None):
@@ -1351,7 +1351,7 @@ class MacroPhysicalParticleSystem(MacroParticleSystem, PhysicalParticleSystem):
vels = self.particles_view.get_velocities()
lin_vel, ang_vel = vels[:, :3], vels[:, 3:]
else:
lin_vel, ang_vel = np.array([]).reshape(0, 3), np.array([]).reshape(0, 3)
lin_vel, ang_vel = th.Tensor([]).reshape(0, 3), th.Tensor([]).reshape(0, 3)
return lin_vel, ang_vel
def get_particle_velocities(self, idx):
@@ -1389,7 +1389,7 @@ class MacroPhysicalParticleSystem(MacroParticleSystem, PhysicalParticleSystem):
l_vel, a_vel = self.get_particles_velocities()
lin_vel = l_vel if lin_vel is None else lin_vel
ang_vel = a_vel if ang_vel is None else ang_vel
self.particles_view.set_velocities(np.concatenate([lin_vel, ang_vel]).reshape(1, -1), indices=np.array([idx]))
self.particles_view.set_velocities(np.concatenate([lin_vel, ang_vel]).reshape(1, -1), indices=th.Tensor([idx]))
@property
def particle_radius(self):
@@ -1422,14 +1422,14 @@ class MacroPhysicalParticleSystem(MacroParticleSystem, PhysicalParticleSystem):
Generates new particles
Args:
positions (np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions
orientations (None or np.array): (n_particles, 4) shaped array specifying per-particle (x,y,z,w) quaternion
positions (th.Tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions
orientations (None or th.Tensor): (n_particles, 4) shaped array specifying per-particle (x,y,z,w) quaternion
orientations. If not specified, all will be sampled randomly
velocities (None or np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) velocities.
velocities (None or th.Tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) velocities.
If not specified, all will be set to 0
angular_velocities (None or np.array): (n_particles, 3) shaped array specifying per-particle (ax,ay,az)
angular_velocities (None or th.Tensor): (n_particles, 3) shaped array specifying per-particle (ax,ay,az)
angular velocities. If not specified, all will be set to 0
scales (None or np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) scales.
scales (None or th.Tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) scales.
If not specified, will be uniformly randomly sampled from (self.min_scale, self.max_scale)
**kwargs (dict): Any additional keyword-specific arguments required by subclass implementation
"""

View File

@@ -126,12 +126,12 @@ class PhysxParticleInstancer(BasePrim):
"""
Adds particles to this particle instancer.
positions (np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions.
velocities (None or np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) velocities.
positions (th.Tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions.
velocities (None or th.Tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) velocities.
If not specified, all will be set to 0
orientations (None or np.array): (n_particles, 4) shaped array specifying per-particle (x,y,z,w) quaternion
orientations (None or th.Tensor): (n_particles, 4) shaped array specifying per-particle (x,y,z,w) quaternion
orientations. If not specified, all will be set to canonical orientation (0, 0, 0, 1)
scales (None or np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) scales.
scales (None or th.Tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) scales.
If not specified, will be scale [1, 1, 1] by default
prototype_indices (None or list of int): If specified, should specify which prototype should be used for
each particle. If None, will use all 0s (i.e.: the first prototype created)
@@ -156,7 +156,7 @@ class PhysxParticleInstancer(BasePrim):
Remove particles from this instancer, specified by their indices @idxs in the data array
Args:
idxs (list or np.array of int): IDs corresponding to the indices of specific particles to remove from this
idxs (list or th.Tensor of int): IDs corresponding to the indices of specific particles to remove from this
instancer
"""
if len(idxs) > 0:
@@ -206,10 +206,10 @@ class PhysxParticleInstancer(BasePrim):
def particle_positions(self):
"""
Returns:
np.array: (N, 3) numpy array, where each of the N particles' positions are expressed in (x,y,z)
th.Tensor: (N, 3) tensor, where each of the N particles' positions are expressed in (x,y,z)
cartesian coordinates relative to this instancer's parent prim
"""
return np.array(self.get_attribute(attr="positions"))
return th.Tensor(self.get_attribute(attr="positions"))
@particle_positions.setter
def particle_positions(self, pos):
@@ -217,7 +217,7 @@ class PhysxParticleInstancer(BasePrim):
Set the particle positions for this instancer
Args:
np.array: (N, 3) numpy array, where each of the N particles' desired positions are expressed in (x,y,z)
th.Tensor: (N, 3) tensor, where each of the N particles' desired positions are expressed in (x,y,z)
cartesian coordinates relative to this instancer's parent prim
"""
self.set_attribute(attr="positions", val=lazy.pxr.Vt.Vec3fArray.FromNumpy(pos.astype(float)))
@@ -226,10 +226,10 @@ class PhysxParticleInstancer(BasePrim):
def particle_orientations(self):
"""
Returns:
np.array: (N, 4) numpy array, where each of the N particles' orientations are expressed in (x,y,z,w)
th.Tensor: (N, 4) tensor, where each of the N particles' orientations are expressed in (x,y,z,w)
quaternion coordinates relative to this instancer's parent prim
"""
return np.array(self.get_attribute(attr="orientations"))
return th.Tensor(self.get_attribute(attr="orientations"))
@particle_orientations.setter
def particle_orientations(self, quat):
@@ -237,7 +237,7 @@ class PhysxParticleInstancer(BasePrim):
Set the particle orientations for this instancer
Args:
np.array: (N, 4) numpy array, where each of the N particles' desired orientations are expressed in (x,y,z,w)
th.Tensor: (N, 4) tensor, where each of the N particles' desired orientations are expressed in (x,y,z,w)
quaternion coordinates relative to this instancer's parent prim
"""
assert (
@@ -253,10 +253,10 @@ class PhysxParticleInstancer(BasePrim):
def particle_velocities(self):
"""
Returns:
np.array: (N, 3) numpy array, where each of the N particles' velocities are expressed in (x,y,z)
th.Tensor: (N, 3) tensor, where each of the N particles' velocities are expressed in (x,y,z)
cartesian coordinates relative to this instancer's parent prim
"""
return np.array(self.get_attribute(attr="velocities"))
return th.Tensor(self.get_attribute(attr="velocities"))
@particle_velocities.setter
def particle_velocities(self, vel):
@@ -264,7 +264,7 @@ class PhysxParticleInstancer(BasePrim):
Set the particle velocities for this instancer
Args:
np.array: (N, 3) numpy array, where each of the N particles' desired velocities are expressed in (x,y,z)
th.Tensor: (N, 3) tensor, where each of the N particles' desired velocities are expressed in (x,y,z)
cartesian coordinates relative to this instancer's parent prim
"""
assert (
@@ -276,10 +276,10 @@ class PhysxParticleInstancer(BasePrim):
def particle_scales(self):
"""
Returns:
np.array: (N, 3) numpy array, where each of the N particles' scales are expressed in (x,y,z)
th.Tensor: (N, 3) tensor, where each of the N particles' scales are expressed in (x,y,z)
cartesian coordinates relative to this instancer's parent prim
"""
return np.array(self.get_attribute(attr="scales"))
return th.Tensor(self.get_attribute(attr="scales"))
@particle_scales.setter
def particle_scales(self, scales):
@@ -287,7 +287,7 @@ class PhysxParticleInstancer(BasePrim):
Set the particle scales for this instancer
Args:
np.array: (N, 3) numpy array, where each of the N particles' desired scales are expressed in (x,y,z)
th.Tensor: (N, 3) tensor, where each of the N particles' desired scales are expressed in (x,y,z)
cartesian coordinates relative to this instancer's parent prim
"""
assert (
@@ -299,10 +299,10 @@ class PhysxParticleInstancer(BasePrim):
def particle_prototype_ids(self):
"""
Returns:
np.array: (N,) numpy array, where each of the N particles' prototype_id (i.e.: which prototype is being used
th.Tensor: (N,) tensor, where each of the N particles' prototype_id (i.e.: which prototype is being used
for that particle)
"""
return np.array(self.get_attribute(attr="protoIndices"))
return th.Tensor(self.get_attribute(attr="protoIndices"))
@particle_prototype_ids.setter
def particle_prototype_ids(self, prototype_ids):
@@ -310,7 +310,7 @@ class PhysxParticleInstancer(BasePrim):
Set the particle prototype_ids for this instancer
Args:
np.array: (N,) numpy array, where each of the N particles' desired prototype_id
th.Tensor: (N,) tensor, where each of the N particles' desired prototype_id
(i.e.: which prototype is being used for that particle)
"""
assert (
@@ -338,9 +338,9 @@ class PhysxParticleInstancer(BasePrim):
idn=self._idn,
particle_group=self.particle_group,
n_particles=self.n_particles,
particle_positions=np.array(local_positions),
particle_positions=th.Tensor(local_positions),
particle_velocities=self.particle_velocities,
particle_orientations=np.array(local_orientations),
particle_orientations=th.Tensor(local_orientations),
particle_scales=self.particle_scales,
particle_prototype_ids=self.particle_prototype_ids,
)
@@ -356,16 +356,16 @@ class PhysxParticleInstancer(BasePrim):
f"instancer when loading state! Should be: {self.particle_group}, got: {state['particle_group']}."
)
local_positions = np.array(state["particle_positions"])
local_orientations = np.array(state["particle_orientations"])
local_positions = th.Tensor(state["particle_positions"])
local_orientations = th.Tensor(state["particle_orientations"])
global_positions, global_orientations = zip(
*[
T.pose_transform(*self.scene.prim.get_position_orientation(), local_pos, local_ori)
for local_pos, local_ori in zip(local_positions, local_orientations)
]
)
setattr(self, "particle_positions", np.array(global_positions))
setattr(self, "particle_orientations", np.array(global_orientations))
setattr(self, "particle_positions", th.Tensor(global_positions))
setattr(self, "particle_orientations", th.Tensor(global_orientations))
# Set values appropriately
keys = (
@@ -376,7 +376,7 @@ class PhysxParticleInstancer(BasePrim):
for key in keys:
# Make sure the loaded state is an array type, as it could have been accidentally cast into a list during
# JSON-serialization
val = np.array(state[key]) if not isinstance(state[key], np.ndarray) else state[key]
val = th.Tensor(state[key]) if not isinstance(state[key], np.ndarray) else state[key]
setattr(self, key, val)
def serialize(self, state):
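JSON serialization round-trips tensors as nested lists, which is what the guard above handles; th.as_tensor normalizes both cases and passes an existing tensor through without copying (a sketch):

import torch as th

state = {"particle_scales": [[1.0, 1.0, 1.0], [0.5, 0.5, 0.5]]}  # as read back from JSON

val = th.as_tensor(state["particle_scales"])  # list -> tensor; tensor -> no-op
print(val.shape)  # torch.Size([2, 3])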
@@ -449,7 +449,7 @@ class MicroParticleSystem(BaseSystem):
# Color of the generated material. Default is white [1.0, 1.0, 1.0]
# (NOTE: external queries should call self.color)
self._color = np.array([1.0, 1.0, 1.0])
self._color = th.Tensor([1.0, 1.0, 1.0])
def initialize(self, scene):
# Run super first
@@ -491,7 +491,7 @@ class MicroParticleSystem(BaseSystem):
self.system_prim = None
self._material = None
self._color = np.array([1.0, 1.0, 1.0])
self._color = th.Tensor([1.0, 1.0, 1.0])
@property
def particle_radius(self):
@@ -858,7 +858,7 @@ class MicroPhysicalParticleSystem(MicroParticleSystem, PhysicalParticleSystem):
Removes pre-existing particles from instancer @instancer_idn
Args:
idxs (np.array): (n_particles,) shaped array specifying IDs of particles to delete
idxs (th.Tensor): (n_particles,) shaped array specifying IDs of particles to delete
instancer_idn (None or int): Unique identification number of the particle instancer to delete the particles
from. If None, this system will delete particles from the default particle instancer
"""
@@ -889,7 +889,7 @@ class MicroPhysicalParticleSystem(MicroParticleSystem, PhysicalParticleSystem):
of a newly generated instancer.
Args:
positions (np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions
positions (th.Tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions
instancer_idn (None or int): Unique identification number of the particle instancer to assign the generated
particles to. This is used to deterministically reproduce individual particle instancer states
dynamically, even if we delete / add additional ones at runtime during simulation. If there is no
@@ -898,11 +898,11 @@ class MicroPhysicalParticleSystem(MicroParticleSystem, PhysicalParticleSystem):
particle_group (int): ID for this particle set. Particles from different groups will automatically collide
with each other. Particles in the same group will have collision behavior dictated by
@self.self_collision
velocities (None or np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) velocities.
velocities (None or th.Tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) velocities.
If not specified, all will be set to 0
orientations (None or np.array): (n_particles, 4) shaped array specifying per-particle (x,y,z,w) quaternion
orientations (None or th.Tensor): (n_particles, 4) shaped array specifying per-particle (x,y,z,w) quaternion
orientations. If not specified, all will be set to canonical orientation (0, 0, 0, 1)
scales (None or np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) scales.
scales (None or th.Tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) scales.
If not specified, will be uniformly randomly sampled from (self.min_scale, self.max_scale)
prototype_indices (None or list of int): If specified, should specify which prototype should be used for
each particle. If None, will randomly sample from all available prototypes
@@ -922,7 +922,7 @@ class MicroPhysicalParticleSystem(MicroParticleSystem, PhysicalParticleSystem):
prototype_indices = (
np.ones(n_particles, dtype=int) * prototype_indices
if isinstance(prototype_indices, int)
else np.array(prototype_indices, dtype=int)
else th.Tensor(prototype_indices, dtype=int)
)
else:
prototype_indices = np.random.choice(np.arange(len(self.particle_prototypes)), size=(n_particles,))
@@ -979,13 +979,13 @@ class MicroPhysicalParticleSystem(MicroParticleSystem, PhysicalParticleSystem):
particle_group (int): ID for this particle set. Particles from different groups will automatically collide
with each other. Particles in the same group will have collision behavior dictated by
@self.self_collision
positions (None or np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions.
positions (None or th.Tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions.
If not specified, will be set to the origin by default
velocities (None or np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) velocities.
velocities (None or th.Tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) velocities.
If not specified, all will be set to 0
orientations (None or np.array): (n_particles, 4) shaped array specifying per-particle (x,y,z,w) quaternion
orientations (None or th.Tensor): (n_particles, 4) shaped array specifying per-particle (x,y,z,w) quaternion
orientations. If not specified, all will be set to canonical orientation (0, 0, 0, 1)
scales (None or np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) scales.
scales (None or th.Tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) scales.
If not specified, will be uniformly randomly sampled from (self.min_scale, self.max_scale)
prototype_indices (None or list of int): If specified, should specify which prototype should be used for
each particle. If None, will use all 0s (i.e.: the first prototype created)
@@ -1414,7 +1414,7 @@ class FluidSystem(MicroPhysicalParticleSystem):
total_weight = base_color_weight + transmission_weight
if total_weight == 0.0:
# If the fluid doesn't have any color, we add a "blue" tint by default
color = np.array([0.0, 0.0, 1.0])
color = th.Tensor([0.0, 0.0, 1.0])
else:
base_color_weight /= total_weight
transmission_weight /= total_weight
@@ -1484,7 +1484,7 @@ class FluidSystem(MicroPhysicalParticleSystem):
def customize_particle_material_factory(attr, value):
def func(mat):
setattr(mat, attr, np.array(value))
setattr(mat, attr, th.Tensor(value))
return func
@@ -1596,7 +1596,7 @@ class GranularSystem(MicroPhysicalParticleSystem):
# Store the contact offset based on a minimum sphere
# Threshold the lower-bound to avoid super small particles
vertices = np.array(prototype.get_attribute("points")) * prototype.scale
vertices = th.Tensor(prototype.get_attribute("points")) * prototype.scale
_, particle_contact_offset = trimesh.nsphere.minimum_nsphere(trimesh.Trimesh(vertices=vertices))
if particle_contact_offset < m.MIN_PARTICLE_CONTACT_OFFSET:
prototype.scale *= m.MIN_PARTICLE_CONTACT_OFFSET / particle_contact_offset
@@ -1662,7 +1662,7 @@ class Cloth(MicroParticleSystem):
tm = mesh_prim_to_trimesh_mesh(
mesh_prim=mesh_prim, include_normals=True, include_texcoord=True, world_frame=False
)
texcoord = np.array(mesh_prim.GetAttribute("primvars:st").Get()) if has_uv_mapping else None
texcoord = th.Tensor(mesh_prim.GetAttribute("primvars:st").Get()) if has_uv_mapping else None
else:
# We will remesh in pymeshlab, but it doesn't allow programmatic construction of a mesh with texcoords so
# we convert our mesh into a trimesh mesh, then export it to a temp file, then load it into pymeshlab
@@ -1730,7 +1730,7 @@ class Cloth(MicroParticleSystem):
new_faces = cm.face_matrix()
new_vertices = cm.vertex_matrix()
new_normals = cm.vertex_normal_matrix()
texcoord = np.array(cm.wedge_tex_coord_matrix()) if has_uv_mapping else None
texcoord = th.Tensor(cm.wedge_tex_coord_matrix()) if has_uv_mapping else None
tm = trimesh.Trimesh(
vertices=new_vertices,
faces=new_faces,
@@ -1740,7 +1740,7 @@ class Cloth(MicroParticleSystem):
tm.apply_transform(np.linalg.inv(scaled_world_transform))
# Update the mesh prim
face_vertex_counts = np.array([len(face) for face in tm.faces], dtype=int)
face_vertex_counts = th.Tensor([len(face) for face in tm.faces], dtype=int)
mesh_prim.GetAttribute("faceVertexCounts").Set(face_vertex_counts)
mesh_prim.GetAttribute("points").Set(lazy.pxr.Vt.Vec3fArray.FromNumpy(tm.vertices))
mesh_prim.GetAttribute("faceVertexIndices").Set(tm.faces.flatten())
@@ -1798,7 +1798,7 @@ class Cloth(MicroParticleSystem):
def serialize(self, state):
# Nothing by default
return np.array([], dtype=float)
return th.Tensor([], dtype=float)
def deserialize(self, state):
# Nothing by default

View File

@@ -160,7 +160,7 @@ class BaseSystem(Serializable):
Removes pre-existing particles
Args:
idxs (np.array): (n_particles,) shaped array specifying IDs of particles to delete
idxs (th.Tensor): (n_particles,) shaped array specifying IDs of particles to delete
**kwargs (dict): Any additional keyword-specific arguments required by subclass implementation
"""
raise NotImplementedError()
@@ -178,10 +178,10 @@ class BaseSystem(Serializable):
Args:
scene (Scene): Scene object to generate particles in
positions (np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions
orientations (None or np.array): (n_particles, 4) shaped array specifying per-particle (x,y,z,w) quaternion
positions (th.Tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions
orientations (None or th.Tensor): (n_particles, 4) shaped array specifying per-particle (x,y,z,w) quaternion
orientations. If not specified, all will be set to canonical orientation (0, 0, 0, 1)
scales (None or np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) scales.
scales (None or th.Tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) scales.
If not specified, will be uniformly randomly sampled from (self.min_scale, self.max_scale)
**kwargs (dict): Any additional keyword-specific arguments required by subclass implementation
"""
@@ -595,8 +595,8 @@ class VisualParticleSystem(BaseSystem):
# Convert these into scaling factors for the x and y axes for our particle object
particle_bbox = self.particle_object.aabb_extent
minimum = np.array([bbox_lower_limit / particle_bbox[0], bbox_lower_limit / particle_bbox[1], 1.0])
maximum = np.array([bbox_upper_limit / particle_bbox[0], bbox_upper_limit / particle_bbox[1], 1.0])
minimum = th.Tensor([bbox_lower_limit / particle_bbox[0], bbox_lower_limit / particle_bbox[1], 1.0])
maximum = th.Tensor([bbox_upper_limit / particle_bbox[0], bbox_upper_limit / particle_bbox[1], 1.0])
return minimum, maximum
@@ -659,10 +659,10 @@ class VisualParticleSystem(BaseSystem):
Args:
group (str): Object on which to sample particle locations
positions (np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions
orientations (None or np.array): (n_particles, 4) shaped array specifying per-particle (x,y,z,w) quaternion
positions (th.Tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions
orientations (None or th.Tensor): (n_particles, 4) shaped array specifying per-particle (x,y,z,w) quaternion
orientations. If not specified, all will be set to canonical orientation (0, 0, 0, 1)
scales (None or np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) scaling in its
scales (None or th.Tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) scaling in its
local frame. If not specified, all we randomly sampled based on @self.min_scale and @self.max_scale
link_prim_paths (None or list of str): Determines which link each generated particle will
be attached to. If not specified, all will be attached to the group object's prim, NOT a link
@@ -810,7 +810,7 @@ class PhysicalParticleSystem(BaseSystem):
object physically interacts with its non-uniform geometry.
Args:
positions (np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions
positions (th.Tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions
Returns:
n-array: (n_particles,) boolean array, True if in contact, otherwise False
@@ -938,13 +938,13 @@ class PhysicalParticleSystem(BaseSystem):
# assume the particles are extremely small - sample cuboids of size 0 for better performance
cuboid_dimensions=np.zeros(3),
# raycast start inside the aabb in x-y plane and outside the aabb in the z-axis
aabb_offset=np.array([-radius, -radius, radius]),
aabb_offset=th.Tensor([-radius, -radius, radius]),
# bottom padding should be the same as the particle radius
cuboid_bottom_padding=radius,
# undo_cuboid_bottom_padding should be False - the sampled positions are above the surface by its radius
undo_cuboid_bottom_padding=False,
)
particle_positions = np.array([result[0] for result in results if result[0] is not None])
particle_positions = th.Tensor([result[0] for result in results if result[0] is not None])
# Also potentially sub-sample if we're past our limit
if max_samples is not None and len(particle_positions) > max_samples:
particle_positions = particle_positions[
@@ -1091,7 +1091,7 @@ def create_system_from_metadata(system_name):
def generate_customize_particle_material_fcn(mat_kwargs):
def customize_mat(mat):
for attr, val in mat_kwargs.items():
setattr(mat, attr, np.array(val) if isinstance(val, list) else val)
setattr(mat, attr, th.Tensor(val) if isinstance(val, list) else val)
return customize_mat

View File

@@ -107,7 +107,7 @@ class BehaviorTask(BaseTask):
self.future_obj_instances = None # set of str
# Info for demonstration collection
self.instruction_order = None # np.array of int
self.instruction_order = None # th.Tensor of int
self.currently_viewed_index = None # int
self.currently_viewed_instruction = None # tuple of str
self.activity_natural_language_goal_conditions = None # str
@@ -382,9 +382,9 @@ class BehaviorTask(BaseTask):
# Batch rpy calculations for much better efficiency
objs_exist = {obj: obj.exists for obj in self.object_scope.values() if not obj.is_system}
objs_rpy = T.quat2euler(
np.array(
th.Tensor(
[
obj.states[Pose].get_value()[1] if obj_exist else np.array([0, 0, 0, 1.0])
obj.states[Pose].get_value()[1] if obj_exist else th.Tensor([0, 0, 0, 1.0])
for obj, obj_exist in objs_exist.items()
]
)
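The batching above swaps a per-object Euler conversion for one vectorized call. For intuition, yaw can be read from a batch of (x, y, z, w) quaternions in closed form (a sketch, independent of T.quat2euler):

import torch as th

quats = th.tensor([[0.0, 0.0, 0.0, 1.0],
                   [0.0, 0.0, 0.7071, 0.7071]])  # (N, 4), (x, y, z, w)

x, y, z, w = quats.unbind(dim=-1)
yaw = th.atan2(2 * (w * z + x * y), 1 - 2 * (y * y + z * z))
print(yaw)  # tensor([0.0000, 1.5708])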
@@ -402,14 +402,14 @@ class BehaviorTask(BaseTask):
# TODO: May need to update checking here to USDObject? Or even baseobject?
# TODO: How to handle systems as part of obs?
if obj_exist:
low_dim_obs[f"{obj.bddl_inst}_real"] = np.array([1.0])
low_dim_obs[f"{obj.bddl_inst}_real"] = th.Tensor([1.0])
low_dim_obs[f"{obj.bddl_inst}_pos"] = obj.states[Pose].get_value()[0]
low_dim_obs[f"{obj.bddl_inst}_ori_cos"] = obj_rpy_cos
low_dim_obs[f"{obj.bddl_inst}_ori_sin"] = obj_rpy_sin
if obj.name != agent.name:
for arm in agent.arm_names:
grasping_object = agent.is_grasping(arm=arm, candidate_obj=obj.wrapped_obj)
low_dim_obs[f"{obj.bddl_inst}_in_gripper_{arm}"] = np.array([float(grasping_object)])
low_dim_obs[f"{obj.bddl_inst}_in_gripper_{arm}"] = th.Tensor([float(grasping_object)])
else:
low_dim_obs[f"{obj.bddl_inst}_real"] = np.zeros(1)
low_dim_obs[f"{obj.bddl_inst}_pos"] = np.zeros(3)


@ -89,8 +89,8 @@ class GraspTask(BaseTask):
joint_control_idx = np.concatenate([robot.trunk_control_idx, robot.arm_control_idx[robot.default_arm]])
robot_pose = random.choice(self._reset_poses)
robot.set_joint_positions(robot_pose["joint_pos"], joint_control_idx)
robot_pos = np.array(robot_pose["base_pos"])
robot_orn = np.array(robot_pose["base_ori"])
robot_pos = th.Tensor(robot_pose["base_pos"])
robot_orn = th.Tensor(robot_pose["base_ori"])
# Move it to the appropriate scene. TODO: The scene should provide a function for this.
robot_pos, robot_orn = T.pose_transform(*robot.scene.prim.get_position_orientation(), robot_pos, robot_orn)
robot.set_position_orientation(robot_pos, robot_orn)
@ -106,11 +106,11 @@ class GraspTask(BaseTask):
# For Tiago
if "combined" in robot.robot_arm_descriptor_yamls:
joint_combined_idx = np.concatenate([robot.trunk_control_idx, robot.arm_control_idx["combined"]])
initial_joint_pos = np.array(robot.get_joint_positions()[joint_combined_idx])
initial_joint_pos = th.Tensor(robot.get_joint_positions()[joint_combined_idx])
control_idx_in_joint_pos = np.where(np.in1d(joint_combined_idx, joint_control_idx))[0]
# For Fetch
else:
initial_joint_pos = np.array(robot.get_joint_positions()[joint_control_idx])
initial_joint_pos = th.Tensor(robot.get_joint_positions()[joint_control_idx])
control_idx_in_joint_pos = np.arange(dim)
with PlanningContext(
@ -149,7 +149,7 @@ class GraspTask(BaseTask):
# Check if the robot has toppled
rotation = R.from_quat(robot.get_orientation())
robot_up = rotation.apply(np.array([0, 0, 1]))
robot_up = rotation.apply(th.Tensor([0, 0, 1]))
if robot_up[2] < 0.75:
raise ValueError("Robot has toppled over")
@ -202,7 +202,7 @@ class GraspTask(BaseTask):
def _get_random_joint_position(self, robot):
joint_positions = []
joint_control_idx = np.concatenate([robot.trunk_control_idx, robot.arm_control_idx[robot.default_arm]])
joints = np.array([joint for joint in robot.joints.values()])
joints = np.array([joint for joint in robot.joints.values()], dtype=object)  # joint objects cannot live in a tensor
arm_joints = joints[joint_control_idx]
for i, joint in enumerate(arm_joints):
val = random.uniform(joint.lower_limit, joint.upper_limit)
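A sketch of the per-joint uniform sampling this loop performs, gathered into one tensor (the limits here are illustrative):

    import random
    import torch as th

    lower_limits = [-1.0, -2.0, 0.0]   # per-joint lower bounds (illustrative)
    upper_limits = [1.0, 2.0, 3.14]

    # One uniform sample per joint, collected into a single tensor
    joint_positions = th.tensor(
        [random.uniform(lo, hi) for lo, hi in zip(lower_limits, upper_limits)]
    )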


@ -85,9 +85,9 @@ class PointNavigationTask(BaseTask):
# Store inputs
self._robot_idn = robot_idn
self._floor = floor
self._initial_pos = initial_pos if initial_pos is None else np.array(initial_pos)
self._initial_quat = initial_quat if initial_quat is None else np.array(initial_quat)
self._goal_pos = goal_pos if goal_pos is None else np.array(goal_pos)
self._initial_pos = initial_pos if initial_pos is None else th.Tensor(initial_pos)
self._initial_quat = initial_quat if initial_quat is None else th.Tensor(initial_quat)
self._goal_pos = goal_pos if goal_pos is None else th.Tensor(goal_pos)
self._goal_tolerance = goal_tolerance
self._goal_in_polar = goal_in_polar
self._path_range = path_range
@ -172,7 +172,7 @@ class PointNavigationTask(BaseTask):
radius=self._goal_tolerance,
height=self._goal_height,
visual_only=True,
rgba=np.array([1, 0, 0, 0.3]),
rgba=th.Tensor([1, 0, 0, 0.3]),
)
self._goal_pos_marker = PrimitiveObject(
relative_prim_path="/task_goal_pos_marker",
@ -181,7 +181,7 @@ class PointNavigationTask(BaseTask):
radius=self._goal_tolerance,
height=self._goal_height,
visual_only=True,
rgba=np.array([0, 0, 1, 0.3]),
rgba=th.Tensor([0, 0, 1, 0.3]),
)
# Load the objects into the simulator
@ -199,7 +199,7 @@ class PointNavigationTask(BaseTask):
radius=self._waypoint_width,
height=self._waypoint_height,
visual_only=True,
rgba=np.array([0, 1, 0, 0.3]),
rgba=th.Tensor([0, 1, 0, 0.3]),
)
env.scene.add_object(waypoint)
waypoints.append(waypoint)
@ -231,7 +231,7 @@ class PointNavigationTask(BaseTask):
# Possibly sample initial ori
initial_quat = (
T.euler2quat(np.array([0, 0, np.random.uniform(0, np.pi * 2)]))
T.euler2quat(th.Tensor([0, 0, np.random.uniform(0, np.pi * 2)]))
if self._randomize_initial_quat
else self._initial_quat
)
@ -379,14 +379,14 @@ class PointNavigationTask(BaseTask):
Returns:
3-array: (x,y,z) position in self._robot_idn agent's local frame
"""
delta_pos_global = np.array(pos) - env.robots[self._robot_idn].states[Pose].get_value()[0]
delta_pos_global = th.Tensor(pos) - env.robots[self._robot_idn].states[Pose].get_value()[0]
return T.quat2mat(env.robots[self._robot_idn].states[Pose].get_value()[1]).T @ delta_pos_global
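For reference, the frame change this helper performs, as a self-contained sketch (the toy quat2mat stands in for T.quat2mat; xyzw convention):

    import torch as th

    def quat2mat_xyzw(q: th.Tensor) -> th.Tensor:
        """Toy xyzw quaternion -> 3x3 rotation matrix, standing in for T.quat2mat."""
        x, y, z, w = q
        return th.stack([
            th.stack([1 - 2 * (y * y + z * z), 2 * (x * y - w * z), 2 * (x * z + w * y)]),
            th.stack([2 * (x * y + w * z), 1 - 2 * (x * x + z * z), 2 * (y * z - w * x)]),
            th.stack([2 * (x * z - w * y), 2 * (y * z + w * x), 1 - 2 * (x * x + y * y)]),
        ])

    robot_pos = th.tensor([1.0, 2.0, 0.0])
    robot_quat = th.tensor([0.0, 0.0, 0.0, 1.0])
    goal_pos = th.tensor([3.0, 2.0, 0.0])

    # R.T == R^-1 for rotation matrices, so this maps the global displacement
    # into the robot's local frame, exactly as the method above does.
    delta_local = quat2mat_xyzw(robot_quat).T @ (goal_pos - robot_pos)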
def _get_obs(self, env):
# Get relative position of goal with respect to the current agent position
xy_pos_to_goal = self._global_pos_to_robot_frame(env, self._goal_pos)[:2]
if self._goal_in_polar:
xy_pos_to_goal = np.array(T.cartesian_to_polar(*xy_pos_to_goal))
xy_pos_to_goal = th.Tensor(T.cartesian_to_polar(*xy_pos_to_goal))
# linear velocity and angular velocity
ori_t = T.quat2mat(env.robots[self._robot_idn].states[Pose].get_value()[1]).T
@ -457,10 +457,10 @@ class PointNavigationTask(BaseTask):
num_nodes = min(self._n_vis_waypoints, shortest_path.shape[0])
for i in range(num_nodes):
self._waypoint_markers[i].set_position(
position=np.array([shortest_path[i][0], shortest_path[i][1], floor_height])
position=th.Tensor([shortest_path[i][0], shortest_path[i][1], floor_height])
)
for i in range(num_nodes, self._n_vis_waypoints):
self._waypoint_markers[i].set_position(position=np.array([0.0, 0.0, 100.0]))
self._waypoint_markers[i].set_position(position=th.Tensor([0.0, 0.0, 100.0]))
def step(self, env, action):
# Run super method first


@ -288,7 +288,7 @@ class BaseTask(GymObservable, Registerable, metaclass=ABCMeta):
n-array: 1D tensor of flattened low-dim observations
"""
# By default, we simply concatenate all values in our obs dict
return np.concatenate([ob for ob in obs.values()]) if len(obs.values()) > 0 else np.array([])
return th.cat(list(obs.values())) if len(obs) > 0 else th.tensor([])
def get_obs(self, env, flatten_low_dim=True):
# Args: env (Environment): environment instance
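A sketch of the flattening rule above once the obs values are all tensors: th.cat replaces np.concatenate, and the empty-dict fallback must stay explicit because th.cat rejects an empty list.

    import torch as th

    def flatten_low_dim_obs(obs: dict) -> th.Tensor:
        # Assumes every value is a 1-D tensor; th.cat on an empty list raises,
        # hence the explicit empty-tensor fallback.
        return th.cat(list(obs.values())) if len(obs) > 0 else th.tensor([])

    flat = flatten_low_dim_obs({"pos": th.zeros(3), "real": th.ones(1)})  # shape (4,)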


@ -38,7 +38,7 @@ class Falling(FailureCondition):
# Terminate if the robot has toppled over
if self._topple:
rotation = R.from_quat(env.scene.robots[self._robot_idn].get_orientation())
robot_up = rotation.apply(np.array([0, 0, 1]))
robot_up = rotation.apply(th.Tensor([0, 0, 1]))
if robot_up[2] < self._tilt_tolerance:
return True
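The topple test reads more clearly with concrete numbers; a sketch with an illustrative 45-degree pose:

    import torch as th
    from scipy.spatial.transform import Rotation as R

    # 45-degree pitch about y, in xyzw convention (illustrative pose)
    quat = [0.0, 0.3827, 0.0, 0.9239]

    robot_up = R.from_quat(quat).apply(th.tensor([0.0, 0.0, 1.0]).numpy())
    # cos(45 deg) ~= 0.707 < 0.75, so this pose counts as toppled
    toppled = robot_up[2] < 0.75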


@ -792,7 +792,7 @@ class WasherDryerRule(BaseTransitionRule):
"""
# Compute all obj
objects = [obj for scene in og.sim.scenes for obj in scene.objects]
obj_positions = np.array([obj.aabb_center for obj in objects])
obj_positions = th.Tensor([obj.aabb_center for obj in objects])
return dict(obj_positions=obj_positions)
@classmethod
@ -816,7 +816,7 @@ class WasherDryerRule(BaseTransitionRule):
in_volume = container.states[ContainedParticles].check_in_volume(obj_positions)
objects = [obj for scene in og.sim.scenes for obj in scene.objects]
in_volume_objs = list(np.array(objects)[in_volume])
in_volume_objs = [obj for obj, in_vol in zip(objects, in_volume) if in_vol]  # tensors cannot hold Python objects
# Remove the container itself
if container in in_volume_objs:
in_volume_objs.remove(container)
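Since torch tensors only hold numeric data, the object selection above has to go through a comprehension (or an np.array(..., dtype=object)) rather than tensor fancy-indexing. A sketch:

    import torch as th

    objects = ["washer", "mug", "sock"]            # stand-ins for scene objects
    in_volume = th.tensor([False, True, True])     # mask from check_in_volume

    # torch tensors only store numeric data, so arbitrary Python objects are
    # selected with a comprehension instead of th.Tensor(objects)[in_volume]
    in_volume_objs = [o for o, keep in zip(objects, in_volume.tolist()) if keep]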
@ -967,8 +967,8 @@ class SlicingRule(BaseTransitionRule):
# List of dicts gets replaced by {'0':dict, '1':dict, ...}
# Get bounding box info
part_bb_pos = np.array(part["bb_pos"])
part_bb_orn = np.array(part["bb_orn"])
part_bb_pos = th.Tensor(part["bb_pos"])
part_bb_orn = th.Tensor(part["bb_orn"])
# Determine the relative scale to apply to the object part from the original object
# Note that proper (rotated) scaling can only be applied when the relative orientation of
@ -1304,7 +1304,7 @@ class RecipeRule(BaseTransitionRule):
category_to_valid_indices[obj_category].append(idx)
# Convert to tensor for faster indexing
category_to_valid_indices[obj_category] = np.array(category_to_valid_indices[obj_category], dtype=int)
category_to_valid_indices[obj_category] = th.tensor(category_to_valid_indices[obj_category], dtype=th.int64)
return category_to_valid_indices
@classmethod
@ -1615,7 +1615,7 @@ class RecipeRule(BaseTransitionRule):
dict: Keyword-mapped global rule information
"""
# Compute all relevant object AABB positions
obj_positions = np.array([obj.aabb_center for obj in cls._OBJECTS])
obj_positions = th.Tensor([obj.aabb_center for obj in cls._OBJECTS])
return dict(obj_positions=obj_positions)
@classmethod
@ -1639,7 +1639,7 @@ class RecipeRule(BaseTransitionRule):
# Compute in volume for all relevant object positions
# We check for either the object AABB being contained OR the object being on top of the container, in the
# case that the container is too flat for the volume to contain the object
in_volume = container.states[ContainedParticles].check_in_volume(obj_positions) | np.array(
in_volume = container.states[ContainedParticles].check_in_volume(obj_positions) | th.tensor(
[obj.states[OnTop].get_value(container) for obj in cls._OBJECTS], dtype=th.bool
)
@ -1681,7 +1681,7 @@ class RecipeRule(BaseTransitionRule):
i += 1
# Wrap relevant objects as numpy array so we can index into it efficiently
cls._OBJECTS = np.array(cls._OBJECTS)
cls._OBJECTS = np.array(cls._OBJECTS)  # scene objects are Python objects, which tensors cannot store
@classproperty
def candidate_filters(cls):
@ -1796,7 +1796,7 @@ class RecipeRule(BaseTransitionRule):
for system_name, particle_idxs in execution_info["relevant_systems"].items():
system = get_system(system_name)
volume += len(particle_idxs) * np.pi * (system.particle_radius**3) * 4 / 3
system.remove_particles(idxs=np.array(list(particle_idxs)))
system.remove_particles(idxs=th.tensor(list(particle_idxs), dtype=th.int64))
if not cls.is_multi_instance:
# Remove either all objects or only the ones specified in the input objects of the recipe
@ -1824,7 +1824,7 @@ class RecipeRule(BaseTransitionRule):
log.warning(
f"Failed to spawn object {obj.name} in container {container.name}! Directly placing on top instead."
)
pos = np.array(container.aabb_center) + np.array(
pos = th.Tensor(container.aabb_center) + th.Tensor(
[0, 0, container.aabb_extent[2] / 2.0 + obj.aabb_extent[2] / 2.0]
)
obj.set_bbox_center_position_orientation(position=pos)
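The fallback placement above rests one AABB on top of another; with concrete (illustrative) numbers:

    import torch as th

    container_center = th.tensor([0.0, 0.0, 0.5])   # illustrative AABB data
    container_extent = th.tensor([1.0, 1.0, 1.0])
    obj_extent = th.tensor([0.2, 0.2, 0.2])

    # Rest the object on the container: shift up by half of each AABB's height
    pos = container_center.clone()
    pos[2] += container_extent[2] / 2.0 + obj_extent[2] / 2.0   # z = 1.1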


@ -1284,7 +1284,7 @@ class BDDLSampler:
self._env.scene.add_object(simulator_obj)
# Set these objects to be far-away locations
simulator_obj.set_position(np.array([100.0, 100.0, -100.0]) + np.ones(3) * num_new_obj * 5.0)
simulator_obj.set_position(th.tensor([100.0, 100.0, -100.0]) + th.ones(3) * num_new_obj * 5.0)
self._sampled_objects.add(simulator_obj)
self._object_scope[obj_inst] = BDDLEntity(bddl_inst=obj_inst, entity=simulator_obj)


@ -108,12 +108,12 @@ class IKSolver:
None or n-array: Joint positions for reaching desired target_pos and target_quat, otherwise None if no
solution was found
"""
pos = np.array(target_pos, dtype=np.float64).reshape(3, 1)
rot = np.array(T.quat2mat(np.array([0, 0, 0, 1.0]) if target_quat is None else target_quat), dtype=np.float64)
pos = th.tensor(target_pos, dtype=th.float64).reshape(3, 1)
rot = th.tensor(T.quat2mat(th.tensor([0, 0, 0, 1.0]) if target_quat is None else target_quat), dtype=th.float64)
ik_target_pose = lazy.lula.Pose3(lazy.lula.Rotation3(rot), pos)
# Set the cspace seed and tolerance
initial_joint_pos = self.reset_joint_pos if initial_joint_pos is None else np.array(initial_joint_pos)
initial_joint_pos = self.reset_joint_pos if initial_joint_pos is None else th.Tensor(initial_joint_pos)
self.config.cspace_seeds = [initial_joint_pos]
self.config.position_tolerance = tolerance_pos
self.config.orientation_tolerance = 100.0 if target_quat is None else tolerance_quat
@ -125,7 +125,7 @@ class IKSolver:
# Compute target joint positions
ik_results = lazy.lula.compute_ik_ccd(self.kinematics, ik_target_pose, self.eef_name, self.config)
if ik_results.success:
return np.array(ik_results.cspace_position)
return th.Tensor(ik_results.cspace_position)
else:
return None
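The dtype handling here is exactly where the legacy constructor breaks; a sketch of the working spelling:

    import torch as th

    target_pos = [0.4, 0.0, 0.6]

    # The legacy constructor rejects a dtype keyword:
    #   th.Tensor(target_pos, dtype=th.float64)  ->  TypeError
    # The factory function is the working spelling:
    pos = th.tensor(target_pos, dtype=th.float64).reshape(3, 1)

    # Identity orientation in xyzw convention, as used when target_quat is None
    quat = th.tensor([0.0, 0.0, 0.0, 1.0], dtype=th.float64)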


@ -432,13 +432,13 @@ class ArticulationView(_ArticulationView):
>>> # set all the articulation joints.
>>> # Since there are 5 envs, the joint positions are repeated 5 times
>>> positions = np.tile(np.array([0.0, -1.0, 0.0, -2.2, 0.0, 2.4, 0.8, 0.04, 0.04]), (num_envs, 1))
>>> positions = th.tile(th.tensor([0.0, -1.0, 0.0, -2.2, 0.0, 2.4, 0.8, 0.04, 0.04]), (num_envs, 1))
>>> prims.set_joint_positions(positions)
>>>
>>> # set only the fingers in closed position: panda_finger_joint1 (7) and panda_finger_joint2 (8) to 0.0
>>> # for the first, middle and last of the 5 envs
>>> positions = np.tile(np.array([0.0, 0.0]), (3, 1))
>>> prims.set_joint_positions(positions, indices=np.array([0, 2, 4]), joint_indices=np.array([7, 8]))
>>> positions = th.tile(th.tensor([0.0, 0.0]), (3, 1))
>>> prims.set_joint_positions(positions, indices=th.tensor([0, 2, 4]), joint_indices=th.tensor([7, 8]))
"""
if not self._is_initialized:
carb.log_warn("ArticulationView needs to be initialized.")
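A runnable version of the docstring examples above, with th.tile in place of np.tile and integer index tensors (th.Tensor would produce float32 indices, which fail as indices):

    import torch as th

    num_envs = 5
    # th.tile mirrors np.tile for repeating one row of joint targets per env
    positions = th.tile(
        th.tensor([0.0, -1.0, 0.0, -2.2, 0.0, 2.4, 0.8, 0.04, 0.04]), (num_envs, 1)
    )

    # Index tensors must be integer dtype; th.tensor infers int64 here
    indices = th.tensor([0, 2, 4])
    joint_indices = th.tensor([7, 8])
    fingers = positions[indices][:, joint_indices]   # shape (3, 2)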
@ -498,13 +498,13 @@ class ArticulationView(_ArticulationView):
>>> # set the velocities for all the articulation joints to the indicated values.
>>> # Since there are 5 envs, the joint velocities are repeated 5 times
>>> velocities = np.tile(np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]), (num_envs, 1))
>>> velocities = th.tile(th.tensor([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]), (num_envs, 1))
>>> prims.set_joint_velocities(velocities)
>>>
>>> # set the fingers velocities: panda_finger_joint1 (7) and panda_finger_joint2 (8) to -0.1
>>> # for the first, middle and last of the 5 envs
>>> velocities = np.tile(np.array([-0.1, -0.1]), (3, 1))
>>> prims.set_joint_velocities(velocities, indices=np.array([0, 2, 4]), joint_indices=np.array([7, 8]))
>>> velocities = th.tile(th.tensor([-0.1, -0.1]), (3, 1))
>>> prims.set_joint_velocities(velocities, indices=th.tensor([0, 2, 4]), joint_indices=th.tensor([7, 8]))
"""
if not self._is_initialized:
carb.log_warn("ArticulationView needs to be initialized.")
@ -565,13 +565,13 @@ class ArticulationView(_ArticulationView):
>>> # set the efforts for all the articulation joints to the indicated values.
>>> # Since there are 5 envs, the joint efforts are repeated 5 times
>>> efforts = np.tile(np.array([10, 20, 30, 40, 50, 60, 70, 80, 90]), (num_envs, 1))
>>> efforts = th.tile(th.tensor([10, 20, 30, 40, 50, 60, 70, 80, 90]), (num_envs, 1))
>>> prims.set_joint_efforts(efforts)
>>>
>>> # set the fingers efforts: panda_finger_joint1 (7) and panda_finger_joint2 (8) to 10
>>> # for the first, middle and last of the 5 envs
>>> efforts = np.tile(np.array([10, 10]), (3, 1))
>>> prims.set_joint_efforts(efforts, indices=np.array([0, 2, 4]), joint_indices=np.array([7, 8]))
>>> efforts = th.tile(th.tensor([10, 10]), (3, 1))
>>> prims.set_joint_efforts(efforts, indices=th.tensor([0, 2, 4]), joint_indices=th.tensor([7, 8]))
"""
if not self._is_initialized:
carb.log_warn("ArticulationView needs to be initialized.")
@ -681,7 +681,7 @@ def colorize_bboxes(bboxes_2d_data, bboxes_2d_rgb, num_channels=3):
for bbox_2d in bboxes_2d_data:
semantic_id_list.append(bbox_2d["semanticId"])
bbox_2d_list.append(bbox_2d)
semantic_id_list_np = np.unique(np.array(semantic_id_list))
semantic_id_list_np = np.unique(th.Tensor(semantic_id_list))
color_list = random_colours(len(semantic_id_list_np.tolist()), True, num_channels)
for bbox_2d in bbox_2d_list:
index = np.where(semantic_id_list_np == bbox_2d["semanticId"])[0][0]
@ -697,5 +697,5 @@ def colorize_bboxes(bboxes_2d_data, bboxes_2d_rgb, num_channels=3):
rgb_img_draw.rectangle(
[(bbox_2d["x_min"], bbox_2d["y_min"]), (bbox_2d["x_max"], bbox_2d["y_max"])], outline=outline, width=2
)
bboxes_2d_rgb = np.array(rgb_img)
bboxes_2d_rgb = th.from_numpy(np.array(rgb_img))  # th.Tensor cannot ingest a PIL image directly
return bboxes_2d_rgb
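th.Tensor cannot ingest a PIL image, so the conversion has to round-trip through numpy (or torchvision's to_tensor); a sketch:

    import numpy as np
    import torch as th
    from PIL import Image

    rgb_img = Image.new("RGB", (4, 4))   # stand-in for the annotated image

    # Round-tripping through numpy keeps the HWC uint8 layout intact
    bboxes_2d_rgb = th.from_numpy(np.array(rgb_img))   # shape (4, 4, 3), uint8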


@ -256,11 +256,11 @@ def _generate_convex_hull_volume_checker_functions(convex_hull_mesh):
in_volume = lambda mesh, particle_positions: check_points_in_convex_hull_mesh(
mesh_face_centroids=face_centroids,
mesh_face_normals=face_normals,
pos=np.array(mesh.GetAttribute("xformOp:translate").Get()),
quat=np.array(
pos=th.Tensor(mesh.GetAttribute("xformOp:translate").Get()),
quat=th.Tensor(
[*(mesh.GetAttribute("xformOp:orient").Get().imaginary), mesh.GetAttribute("xformOp:orient").Get().real]
),
scale=np.array(mesh.GetAttribute("xformOp:scale").Get()),
scale=th.Tensor(mesh.GetAttribute("xformOp:scale").Get()),
particle_positions=particle_positions,
)
calc_volume = lambda mesh: trimesh_mesh.volume if trimesh_mesh.is_volume else trimesh_mesh.convex_hull.volume
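The quaternion plumbing in these checkers reorders USD's (real, imaginary) storage into the xyzw layout the geometry tests expect; a sketch with a stand-in Gf quaternion:

    import torch as th

    class _FakeQuat:
        """Stand-in for the Gf quaternion returned by GetAttribute("xformOp:orient")."""
        real = 1.0
        imaginary = (0.0, 0.0, 0.0)

    q = _FakeQuat()
    # USD stores (real, imaginary); the volume checkers expect xyzw, so the
    # imaginary part is unpacked first and the real part appended last.
    quat_xyzw = th.tensor([*q.imaginary, q.real])   # tensor([0., 0., 0., 1.])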
@ -319,53 +319,53 @@ def generate_points_in_volume_checker_function(obj, volume_link, use_visual_mesh
elif mesh_type == "Sphere":
fcn = lambda mesh, particle_positions: check_points_in_sphere(
size=mesh.GetAttribute("radius").Get(),
pos=np.array(mesh.GetAttribute("xformOp:translate").Get()),
quat=np.array(
pos=th.Tensor(mesh.GetAttribute("xformOp:translate").Get()),
quat=th.Tensor(
[
*(mesh.GetAttribute("xformOp:orient").Get().imaginary),
mesh.GetAttribute("xformOp:orient").Get().real,
]
),
scale=np.array(mesh.GetAttribute("xformOp:scale").Get()),
scale=th.Tensor(mesh.GetAttribute("xformOp:scale").Get()),
particle_positions=particle_positions,
)
elif mesh_type == "Cylinder":
fcn = lambda mesh, particle_positions: check_points_in_cylinder(
size=[mesh.GetAttribute("radius").Get(), mesh.GetAttribute("height").Get()],
pos=np.array(mesh.GetAttribute("xformOp:translate").Get()),
quat=np.array(
pos=th.Tensor(mesh.GetAttribute("xformOp:translate").Get()),
quat=th.Tensor(
[
*(mesh.GetAttribute("xformOp:orient").Get().imaginary),
mesh.GetAttribute("xformOp:orient").Get().real,
]
),
scale=np.array(mesh.GetAttribute("xformOp:scale").Get()),
scale=th.Tensor(mesh.GetAttribute("xformOp:scale").Get()),
particle_positions=particle_positions,
)
elif mesh_type == "Cone":
fcn = lambda mesh, particle_positions: check_points_in_cone(
size=[mesh.GetAttribute("radius").Get(), mesh.GetAttribute("height").Get()],
pos=np.array(mesh.GetAttribute("xformOp:translate").Get()),
quat=np.array(
pos=th.Tensor(mesh.GetAttribute("xformOp:translate").Get()),
quat=th.Tensor(
[
*(mesh.GetAttribute("xformOp:orient").Get().imaginary),
mesh.GetAttribute("xformOp:orient").Get().real,
]
),
scale=np.array(mesh.GetAttribute("xformOp:scale").Get()),
scale=th.Tensor(mesh.GetAttribute("xformOp:scale").Get()),
particle_positions=particle_positions,
)
elif mesh_type == "Cube":
fcn = lambda mesh, particle_positions: check_points_in_cube(
size=mesh.GetAttribute("size").Get(),
pos=np.array(mesh.GetAttribute("xformOp:translate").Get()),
quat=np.array(
pos=th.Tensor(mesh.GetAttribute("xformOp:translate").Get()),
quat=th.Tensor(
[
*(mesh.GetAttribute("xformOp:orient").Get().imaginary),
mesh.GetAttribute("xformOp:orient").Get().real,
]
),
scale=np.array(mesh.GetAttribute("xformOp:scale").Get()),
scale=th.Tensor(mesh.GetAttribute("xformOp:scale").Get()),
particle_positions=particle_positions,
)
else:


@ -33,7 +33,7 @@ def get_grasp_poses_for_object_sticky(target_obj):
visual=False
)
grasp_center_pos = bbox_center_in_world + np.array([0, 0, np.max(bbox_extent_in_base_frame) + 0.05])
grasp_center_pos = bbox_center_in_world + th.Tensor([0, 0, np.max(bbox_extent_in_base_frame) + 0.05])
towards_object_in_world_frame = bbox_center_in_world - grasp_center_pos
towards_object_in_world_frame /= np.linalg.norm(towards_object_in_world_frame)
@ -74,7 +74,7 @@ def get_grasp_poses_for_object_sticky_from_arbitrary_direction(target_obj):
grasp_center_pos = T.mat2pose(
T.pose2mat((bbox_center_in_world, bbox_quat_in_world)) # base frame to world frame
@ T.pose2mat((grasp_center_in_base_frame, [0, 0, 0, 1])) # grasp pose in base frame
)[0] + np.array([0, 0, 0.02])
)[0] + th.Tensor([0, 0, 0.02])
towards_object_in_world_frame = bbox_center_in_world - grasp_center_pos
towards_object_in_world_frame /= np.linalg.norm(towards_object_in_world_frame)
@ -87,7 +87,7 @@ def get_grasp_poses_for_object_sticky_from_arbitrary_direction(target_obj):
grasp_y /= np.linalg.norm(grasp_y)
grasp_z = np.cross(grasp_x, grasp_y)
grasp_z /= np.linalg.norm(grasp_z)
grasp_mat = np.array([grasp_x, grasp_y, grasp_z]).T
grasp_mat = th.Tensor([grasp_x, grasp_y, grasp_z]).T
grasp_quat = R.from_matrix(grasp_mat).as_quat()
grasp_pose = (grasp_center_pos, grasp_quat)
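The grasp orientation above comes from stacking orthonormal axes into a rotation matrix whose columns are the grasp frame expressed in the world frame; a sketch with illustrative axes:

    import torch as th
    from scipy.spatial.transform import Rotation as R

    grasp_x = th.tensor([0.0, 0.0, -1.0])   # illustrative orthonormal axes
    grasp_y = th.tensor([0.0, 1.0, 0.0])
    grasp_z = th.linalg.cross(grasp_x, grasp_y)

    # Stacking the axes as columns yields the grasp frame's rotation matrix
    grasp_mat = th.stack([grasp_x, grasp_y, grasp_z], dim=-1)
    grasp_quat = R.from_matrix(grasp_mat.numpy()).as_quat()   # xyzw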
@ -193,7 +193,7 @@ def grasp_position_for_open_on_prismatic_joint(robot, target_obj, relevant_joint
# Pick the closer of the two faces along the push axis as our favorite.
points_along_push_axis = (
np.array([canonical_push_axis, -canonical_push_axis]) * bbox_extent_in_link_frame[push_axis_idx] / 2
th.Tensor([canonical_push_axis, -canonical_push_axis]) * bbox_extent_in_link_frame[push_axis_idx] / 2
)
(
push_axis_closer_side_idx,
@ -300,7 +300,7 @@ def interpolate_waypoints(start_pose, end_pose, num_waypoints="default"):
pos_waypoints = np.linspace(start_pos, end_pose[0], num_waypoints)
# Also interpolate the rotations
combined_rotation = R.from_quat(np.array([start_orn, end_pose[1]]))
combined_rotation = R.from_quat(th.Tensor([start_orn, end_pose[1]]))
slerp = Slerp([0, 1], combined_rotation)
orn_waypoints = slerp(np.linspace(0, 1, num_waypoints))
quat_waypoints = [x.as_quat() for x in orn_waypoints]
@ -334,7 +334,7 @@ def grasp_position_for_open_on_revolute_joint(robot, target_obj, relevant_joint,
)
bbox_quat_in_world = link.get_orientation()
bbox_extent_in_link_frame = np.array(
bbox_extent_in_link_frame = th.Tensor(
target_obj.native_link_bboxes[link_name]["collision"]["axis_aligned"]["extent"]
)
bbox_wrt_origin = T.relative_pose_transform(
@ -347,7 +347,7 @@ def grasp_position_for_open_on_revolute_joint(robot, target_obj, relevant_joint,
)[[1, 2, 3, 0]]
joint_axis = R.from_quat(joint_orientation).apply([1, 0, 0])
joint_axis /= np.linalg.norm(joint_axis)
origin_towards_bbox = np.array(bbox_wrt_origin[0])
origin_towards_bbox = th.Tensor(bbox_wrt_origin[0])
open_direction = np.cross(joint_axis, origin_towards_bbox)
open_direction /= np.linalg.norm(open_direction)
lateral_axis = np.cross(open_direction, joint_axis)
@ -362,7 +362,7 @@ def grasp_position_for_open_on_revolute_joint(robot, target_obj, relevant_joint,
canonical_open_direction = np.eye(3)[open_axis_idx]
points_along_open_axis = (
np.array([canonical_open_direction, -canonical_open_direction]) * bbox_extent_in_link_frame[open_axis_idx] / 2
th.Tensor([canonical_open_direction, -canonical_open_direction]) * bbox_extent_in_link_frame[open_axis_idx] / 2
)
current_yaw = relevant_joint.get_state()[0][0]
closed_yaw = relevant_joint.lower_limit
@ -380,7 +380,7 @@ def grasp_position_for_open_on_revolute_joint(robot, target_obj, relevant_joint,
canonical_joint_axis = np.eye(3)[joint_axis_idx]
lateral_away_from_origin = np.eye(3)[lateral_axis_idx] * np.sign(origin_towards_bbox[lateral_axis_idx])
min_lateral_pos_wrt_surface_center = (
lateral_away_from_origin * -np.array(origin_wrt_bbox[0])
lateral_away_from_origin * -th.Tensor(origin_wrt_bbox[0])
- canonical_joint_axis * bbox_extent_in_link_frame[lateral_axis_idx] / 2
)
max_lateral_pos_wrt_surface_center = (
@ -442,7 +442,7 @@ def grasp_position_for_open_on_revolute_joint(robot, target_obj, relevant_joint,
)
# Decide whether a grasp is required. If approach direction and displacement are similar, no need to grasp.
movement_in_world_frame = np.array(targets[-1][0]) - np.array(offset_grasp_pose_in_world_frame[0])
movement_in_world_frame = th.Tensor(targets[-1][0]) - th.Tensor(offset_grasp_pose_in_world_frame[0])
grasp_required = np.dot(movement_in_world_frame, approach_direction_in_world_frame) < 0
return (
@ -472,7 +472,7 @@ def _get_orientation_facing_vector_with_random_yaw(vector):
side /= np.linalg.norm(side)
up = np.cross(forward, side)
# assert np.isclose(np.linalg.norm(up), 1, atol=1e-3)
rotmat = np.array([forward, side, up]).T
rotmat = th.Tensor([forward, side, up]).T
return R.from_matrix(rotmat).as_quat()
@ -515,14 +515,14 @@ def _get_closest_point_to_point_in_world_frame(
Returns:
tuple: The index of the closest vector, the closest vector in the arbitrary frame, and the closest vector in the world frame.
"""
vectors_in_world = np.array(
vectors_in_world = th.Tensor(
[
T.pose_transform(*arbitrary_frame_to_world_frame, vector, [0, 0, 0, 1])[0]
for vector in vectors_in_arbitrary_frame
]
)
vector_distances_to_point = np.linalg.norm(vectors_in_world - np.array(point_in_world)[None, :], axis=1)
vector_distances_to_point = np.linalg.norm(vectors_in_world - th.Tensor(point_in_world)[None, :], axis=1)
closer_option_idx = np.argmin(vector_distances_to_point)
vector_in_arbitrary_frame = vectors_in_arbitrary_frame[closer_option_idx]
vector_in_world_frame = vectors_in_world[closer_option_idx]


@ -62,8 +62,8 @@ def plan_base_motion(
if not self.si.isValid(s2):
return False
start = np.array([s1.getX(), s1.getY(), s1.getYaw()])
goal = np.array([s2.getX(), s2.getY(), s2.getYaw()])
start = th.Tensor([s1.getX(), s1.getY(), s1.getYaw()])
goal = th.Tensor([s2.getX(), s2.getY(), s2.getYaw()])
segment_theta = self.get_angle_between_poses(start, goal)
# Start rotation
@ -238,17 +238,17 @@ def plan_arm_motion(
if torso_fixed:
joint_control_idx = robot.arm_control_idx[robot.default_arm]
dim = len(joint_control_idx)
initial_joint_pos = np.array(robot.get_joint_positions()[joint_control_idx])
initial_joint_pos = th.Tensor(robot.get_joint_positions()[joint_control_idx])
control_idx_in_joint_pos = np.arange(dim)
else:
joint_control_idx = np.concatenate([robot.trunk_control_idx, robot.arm_control_idx[robot.default_arm]])
dim = len(joint_control_idx)
if "combined" in robot.robot_arm_descriptor_yamls:
joint_combined_idx = np.concatenate([robot.trunk_control_idx, robot.arm_control_idx["combined"]])
initial_joint_pos = np.array(robot.get_joint_positions()[joint_combined_idx])
initial_joint_pos = th.Tensor(robot.get_joint_positions()[joint_combined_idx])
control_idx_in_joint_pos = np.where(np.in1d(joint_combined_idx, joint_control_idx))[0]
else:
initial_joint_pos = np.array(robot.get_joint_positions()[joint_control_idx])
initial_joint_pos = th.Tensor(robot.get_joint_positions()[joint_control_idx])
control_idx_in_joint_pos = np.arange(dim)
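The np.where(np.in1d(...)) pattern used throughout these planners has a direct tensor analogue in th.isin; a sketch:

    import torch as th

    joint_combined_idx = th.tensor([0, 1, 4, 5, 6])   # trunk + combined arms
    joint_control_idx = th.tensor([4, 5, 6])          # the controlled subset

    # th.isin is the tensor analogue of np.in1d; nonzero().flatten() recovers
    # where the controlled joints sit inside the combined index list.
    control_idx_in_joint_pos = th.isin(joint_combined_idx, joint_control_idx).nonzero().flatten()
    # tensor([2, 3, 4])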
def state_valid_fn(q):
@ -261,7 +261,7 @@ def plan_arm_motion(
# set lower and upper bounds
bounds = ob.RealVectorBounds(dim)
joints = np.array([joint for joint in robot.joints.values()])
joints = np.array([joint for joint in robot.joints.values()], dtype=object)  # joint objects cannot live in a tensor
arm_joints = joints[joint_control_idx]
for i, joint in enumerate(arm_joints):
if end_conf[i] > joint.upper_limit:
@ -337,7 +337,7 @@ def plan_arm_motion_ik(
if torso_fixed:
joint_control_idx = robot.arm_control_idx[robot.default_arm]
dim = len(joint_control_idx)
initial_joint_pos = np.array(robot.get_joint_positions()[joint_control_idx])
initial_joint_pos = th.Tensor(robot.get_joint_positions()[joint_control_idx])
control_idx_in_joint_pos = np.arange(dim)
robot_description_path = robot.robot_arm_descriptor_yamls["left_fixed"]
else:
@ -345,10 +345,10 @@ def plan_arm_motion_ik(
dim = len(joint_control_idx)
if "combined" in robot.robot_arm_descriptor_yamls:
joint_combined_idx = np.concatenate([robot.trunk_control_idx, robot.arm_control_idx["combined"]])
initial_joint_pos = np.array(robot.get_joint_positions()[joint_combined_idx])
initial_joint_pos = th.Tensor(robot.get_joint_positions()[joint_combined_idx])
control_idx_in_joint_pos = np.where(np.in1d(joint_combined_idx, joint_control_idx))[0]
else:
initial_joint_pos = np.array(robot.get_joint_positions()[joint_control_idx])
initial_joint_pos = th.Tensor(robot.get_joint_positions()[joint_control_idx])
control_idx_in_joint_pos = np.arange(dim)
robot_description_path = robot.robot_arm_descriptor_yamls[robot.default_arm]
@ -448,10 +448,10 @@ def set_base_and_detect_collision(context, pose):
robot_copy = context.robot_copy
robot_copy_type = context.robot_copy_type
translation = lazy.pxr.Gf.Vec3d(*np.array(pose[0], dtype=float))
translation = lazy.pxr.Gf.Vec3d(*th.tensor(pose[0], dtype=th.float64).tolist())
robot_copy.prims[robot_copy_type].GetAttribute("xformOp:translate").Set(translation)
orientation = np.array(pose[1], dtype=float)[[3, 0, 1, 2]]
orientation = th.tensor(pose[1], dtype=th.float64)[[3, 0, 1, 2]].tolist()
robot_copy.prims[robot_copy_type].GetAttribute("xformOp:orient").Set(lazy.pxr.Gf.Quatd(*orientation))
return detect_robot_collision(context)
@ -480,9 +480,9 @@ def set_arm_and_detect_collision(context, joint_pos):
for mesh_name, mesh in robot_copy.meshes[robot_copy_type][link].items():
relative_pose = robot_copy.relative_poses[robot_copy_type][link][mesh_name]
mesh_pose = T.pose_transform(*pose, *relative_pose)
translation = lazy.pxr.Gf.Vec3d(*np.array(mesh_pose[0], dtype=float))
translation = lazy.pxr.Gf.Vec3d(*th.tensor(mesh_pose[0], dtype=th.float64).tolist())
mesh.GetAttribute("xformOp:translate").Set(translation)
orientation = np.array(mesh_pose[1], dtype=float)[[3, 0, 1, 2]]
orientation = th.tensor(mesh_pose[1], dtype=th.float64)[[3, 0, 1, 2]].tolist()
mesh.GetAttribute("xformOp:orient").Set(lazy.pxr.Gf.Quatd(*orientation))
return detect_robot_collision(context)
@ -628,7 +628,7 @@ def astar(search_map, start, goal, eight_connected=True):
path.insert(0, current)
current = came_from[current]
path.insert(0, start)
return np.array(path)
return th.Tensor(path)
for neighbor in get_neighbors(current):
# Skip neighbors that are not valid or have already been visited


@ -136,12 +136,12 @@ def sample_kinematics(
if hasattr(objA, "orientations") and objA.orientations is not None:
orientation = objA.sample_orientation()
else:
orientation = np.array([0, 0, 0, 1.0])
orientation = th.Tensor([0, 0, 0, 1.0])
# Orientation needs to be set for stable_z_on_aabb to work correctly
# Position needs to be set to be very far away because the object's
# original position might be blocking rays (use_ray_casting_method=True)
old_pos = np.array([100, 100, 10])
old_pos = th.Tensor([100, 100, 10])
objA.set_position_orientation(old_pos, orientation)
objA.keep_still()
# We also need to step physics to make sure the pose propagates downstream (e.g.: to Bounding Box computations)
@ -161,7 +161,7 @@ def sample_kinematics(
else:
aabb_lower, aabb_upper = objA.states[AABB].get_value()
parallel_bbox_center = (aabb_lower + aabb_upper) / 2.0
parallel_bbox_orn = np.array([0.0, 0.0, 0.0, 1.0])
parallel_bbox_orn = th.Tensor([0.0, 0.0, 0.0, 1.0])
parallel_bbox_extents = aabb_upper - aabb_lower
sampling_results = sample_cuboid_for_predicate(predicate, objB, parallel_bbox_extents)
@ -284,11 +284,11 @@ def sample_cloth_on_rigid(obj, other, max_trials=40, z_offset=0.05, randomize_xy
if randomize_xy:
# Sample a random position in the x-y plane within the other object's AABB
low = np.array([other_aabb_low[0], other_aabb_low[1], z_value])
high = np.array([other_aabb_high[0], other_aabb_high[1], z_value])
low = th.Tensor([other_aabb_low[0], other_aabb_low[1], z_value])
high = th.Tensor([other_aabb_high[0], other_aabb_high[1], z_value])
else:
# Always sample the center of the other object's AABB
low = np.array(
low = th.Tensor(
[(other_aabb_low[0] + other_aabb_high[0]) / 2.0, (other_aabb_low[1] + other_aabb_high[1]) / 2.0, z_value]
)
high = low
@ -297,7 +297,7 @@ def sample_cloth_on_rigid(obj, other, max_trials=40, z_offset=0.05, randomize_xy
# Sample a random position
pos = np.random.uniform(low, high)
# Sample a random orientation in the z-axis
orn = T.euler2quat(np.array([0.0, 0.0, np.random.uniform(0, np.pi * 2)]))
orn = T.euler2quat(th.Tensor([0.0, 0.0, np.random.uniform(0, np.pi * 2)]))
obj.set_position_orientation(pos, orn)
obj.root_link.reset()
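A tensor version of the uniform position sampling and random-yaw orientation used here (bounds illustrative):

    import math
    import torch as th

    low = th.tensor([0.0, 0.0, 0.5])    # AABB-derived bounds (illustrative)
    high = th.tensor([1.0, 2.0, 0.5])

    # Tensor analogue of np.random.uniform(low, high)
    pos = low + (high - low) * th.rand(3)

    # Random yaw about z as a (roll, pitch, yaw) input for T.euler2quat
    yaw = float(th.rand(()) * 2 * math.pi)
    orn_rpy = th.tensor([0.0, 0.0, yaw])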


@ -28,7 +28,7 @@ def sample_stable_orientations(obj, n_samples=10, drop_aabb_offset=0.1):
assert np.all(obj.scale == 1.0)
aabb_extent = obj.aabb_extent
radius = np.linalg.norm(aabb_extent) / 2.0
drop_pos = np.array([0, 0, radius + drop_aabb_offset])
drop_pos = th.Tensor([0, 0, radius + drop_aabb_offset])
center_offset = obj.get_position() - obj.aabb_center
drop_orientations = R.random(n_samples).as_quat()
stable_orientations = np.zeros_like(drop_orientations)
@ -57,7 +57,7 @@ def compute_bbox_offset(obj):
"""
og.sim.stop()
assert np.all(obj.scale == 1.0)
obj.set_position_orientation(np.zeros(3), np.array([0, 0, 0, 1.0]))
obj.set_position_orientation(np.zeros(3), th.Tensor([0, 0, 0, 1.0]))
return obj.aabb_center - obj.get_position()
@ -74,7 +74,7 @@ def compute_native_bbox_extent(obj):
"""
og.sim.stop()
assert np.all(obj.scale == 1.0)
obj.set_position_orientation(np.zeros(3), np.array([0, 0, 0, 1.0]))
obj.set_position_orientation(np.zeros(3), th.Tensor([0, 0, 0, 1.0]))
return obj.aabb_extent
@ -93,7 +93,7 @@ def compute_base_aligned_bboxes(obj):
min_pt = np.min(pts_in_link_frame, axis=0)
extent = max_pt - min_pt
center = (max_pt + min_pt) / 2.0
transform = T.pose2mat((center, np.array([0, 0, 0, 1.0])))
transform = T.pose2mat((center, th.Tensor([0, 0, 0, 1.0])))
print(pts_in_link_frame.shape)
link_bounding_boxes[link_name][mesh_type] = {
"extent": extent,
