Tensor -> tensor

This commit is contained in:
hang-yin 2024-07-02 10:32:03 -07:00
parent 921ff40fbb
commit ababc59fa9
122 changed files with 985 additions and 984 deletions

View File

@ -79,7 +79,7 @@ m.LOW_PRECISION_ANGLE_THRESHOLD = 0.2
m.TIAGO_TORSO_FIXED = False
m.JOINT_POS_DIFF_THRESHOLD = 0.01
m.JOINT_CONTROL_MIN_ACTION = 0.0
m.MAX_ALLOWED_JOINT_ERROR_FOR_LINEAR_MOTION = th.deg2rad(th.Tensor([45])).item()
m.MAX_ALLOWED_JOINT_ERROR_FOR_LINEAR_MOTION = th.deg2rad(th.tensor([45])).item()
log = create_module_logger(module_name=__name__)
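Aside on the rename itself: th.Tensor is the legacy constructor, which always yields the default float dtype (float32 unless overridden) and rejects a dtype keyword, while th.tensor is the recommended factory, which infers the dtype from the data and accepts an explicit dtype. A minimal sketch of the difference:

import torch as th

a = th.Tensor([1, 2, 3])                    # legacy constructor: default float dtype
print(a.dtype)                              # torch.float32
# th.Tensor([1, 2, 3], dtype=th.float64)   # would raise TypeError: no dtype keyword

b = th.tensor([1, 2, 3])                    # factory: dtype inferred from the data
c = th.tensor([1, 2, 3], dtype=th.float64)  # factory: explicit dtype accepted
print(b.dtype, c.dtype)                     # torch.int64 torch.float64

# Relevant to the macro above: th.tensor([45]) is int64, but th.deg2rad promotes
# integer inputs to the default float dtype, so .item() still returns pi/4.
print(th.deg2rad(th.tensor([45])).item())   # ~0.785398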
@ -142,10 +142,10 @@ class PlanningContext(object):
if m.TIAGO_TORSO_FIXED:
assert self.arm == "left", "Fixed torso mode only supports left arm!"
joint_control_idx = self.robot.arm_control_idx["left"]
joint_pos = th.Tensor(self.robot.get_joint_positions()[joint_control_idx])
joint_pos = th.tensor(self.robot.get_joint_positions()[joint_control_idx])
else:
joint_combined_idx = th.cat([self.robot.trunk_control_idx, self.robot.arm_control_idx[fk_descriptor]])
joint_pos = th.Tensor(self.robot.get_joint_positions()[joint_combined_idx])
joint_pos = th.tensor(self.robot.get_joint_positions()[joint_combined_idx])
link_poses = self.fk_solver.get_link_poses(joint_pos, arm_links)
# Set position of robot copy root prim
@ -169,9 +169,9 @@ class PlanningContext(object):
self._set_prim_pose(copy_mesh, mesh_copy_pose)
def _set_prim_pose(self, prim, pose):
translation = lazy.pxr.Gf.Vec3d(*th.Tensor(pose[0], dtype=float))
translation = lazy.pxr.Gf.Vec3d(*th.tensor(pose[0], dtype=th.float32))
prim.GetAttribute("xformOp:translate").Set(translation)
orientation = th.Tensor(pose[1], dtype=float)[[3, 0, 1, 2]]
orientation = th.tensor(pose[1], dtype=th.float32)[[3, 0, 1, 2]]
prim.GetAttribute("xformOp:orient").Set(lazy.pxr.Gf.Quatd(*orientation))
def _construct_disabled_collision_pairs(self):
@ -355,9 +355,9 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
lazy.omni.usd.commands.CreatePrimCommand("Xform", rc["copy_path"]).do()
copy_robot = lazy.omni.isaac.core.utils.prims.get_prim_at_path(rc["copy_path"])
reset_pose = robot_copy.reset_pose[robot_type]
translation = lazy.pxr.Gf.Vec3d(*th.Tensor(reset_pose[0], dtype=float))
translation = lazy.pxr.Gf.Vec3d(*th.tensor(reset_pose[0], dtype=th.float32))
copy_robot.GetAttribute("xformOp:translate").Set(translation)
orientation = th.Tensor(reset_pose[1], dtype=float)[[3, 0, 1, 2]]
orientation = th.tensor(reset_pose[1], dtype=th.float32)[[3, 0, 1, 2]]
copy_robot.GetAttribute("xformOp:orient").Set(lazy.pxr.Gf.Quatd(*orientation))
robot_to_copy = None
@ -383,7 +383,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
relative_pose = T.relative_pose_transform(
*mesh.get_position_orientation(), *link.get_position_orientation()
)
relative_pose = (relative_pose[0], th.Tensor([0, 0, 0, 1]))
relative_pose = (relative_pose[0], th.tensor([0, 0, 0, 1]))
if link_name not in copy_robot_meshes.keys():
copy_robot_meshes[link_name] = {mesh_name: copy_mesh}
copy_robot_meshes_relative_poses[link_name] = {mesh_name: relative_pose}
@ -460,7 +460,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
attempts (int): Number of attempts to make before raising an error
Yields:
th.Tensor or None: Action array for one step for the robot to execute the primitive or None if primitive completed
th.tensor or None: Action array for one step for the robot to execute the primitive or None if primitive completed
Raises:
ActionPrimitiveError: If primitive fails to execute
@ -615,7 +615,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
speed (float): base speed
Returns:
th.Tensor or None: Action array for one step for the robot to move base or None if it's at the target pose
th.tensor or None: Action array for one step for the robot to move base or None if it's at the target pose
"""
for _ in range(steps):
action = self._empty_action()
@ -632,7 +632,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
speed (float): eef speed
Returns:
th.Tensor or None: Action array for one step for the robot to move hand or None if it's at the target pose
th.tensor or None: Action array for one step for the robot to move hand or None if it's at the target pose
"""
for _ in range(steps):
action = self._empty_action()
@ -649,7 +649,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
speed (float): eef speed
Returns:
th.Tensor or None: Action array for one step for the robot to move hand or None if it's at the target pose
th.tensor or None: Action array for one step for the robot to move hand or None if it's at the target pose
"""
# TODO: Combine these movement functions.
for _ in range(steps):
@ -666,7 +666,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
StatefulObject: Object for robot to grasp
Returns:
th.Tensor or None: Action array for one step for the robot to grasp or None if grasp completed
th.tensor or None: Action array for one step for the robot to grasp or None if grasp completed
"""
# Update the tracking to track the object.
self._tracking_object = obj
@ -744,7 +744,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
obj (StatefulObject): Object for robot to place the object in its hand on
Returns:
th.Tensor or None: Action array for one step for the robot to place or None if place completed
th.tensor or None: Action array for one step for the robot to place or None if place completed
"""
yield from self._place_with_predicate(obj, object_states.OnTop)
@ -756,7 +756,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
obj (StatefulObject): Object for robot to place the object in its hand on
Returns:
th.Tensor or None: Action array for one step for the robot to place or None if place completed
th.tensor or None: Action array for one step for the robot to place or None if place completed
"""
yield from self._place_with_predicate(obj, object_states.Inside)
@ -799,7 +799,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
predicate (object_states.OnTop or object_states.Inside): Determines whether to place on top or inside
Returns:
th.Tensor or None: Action array for one step for the robot to place or None if place completed
th.tensor or None: Action array for one step for the robot to place or None if place completed
"""
# Update the tracking to track the object.
self._tracking_object = obj
@ -844,8 +844,8 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
Returns:
2-tuple
- th.Tensor or None: Joint positions to reach target pose or None if impossible to reach target pose
- th.Tensor: Indices for joints in the robot
- th.tensor or None: Joint positions to reach target pose or None if impossible to reach target pose
- th.tensor: Indices for joints in the robot
"""
relative_target_pose = self._get_pose_in_robot_frame(target_pose)
joint_pos = self._ik_solver_cartesian_to_joint_space(relative_target_pose)
@ -915,8 +915,8 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
Returns:
2-tuple
- th.Tensor or None: Joint positions to reach target pose or None if impossible to reach the target pose
- th.Tensor: Indices for joints in the robot
- th.tensor or None: Joint positions to reach target pose or None if impossible to reach the target pose
- th.tensor: Indices for joints in the robot
"""
ik_solver = IKSolver(
robot_description_path=self._manipulation_descriptor_path,
@ -941,7 +941,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
target_pose (Iterable of array): Position and orientation arrays in an iterable for pose
Returns:
th.Tensor or None: Action array for one step for the robot to move hand or None if it's at the target pose
th.tensor or None: Action array for one step for the robot to move hand or None if it's at the target pose
"""
yield from self._settle_robot()
controller_config = self.robot._controller_config["arm_" + self.arm]
@ -957,10 +957,10 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
Yields action for the robot to move arm to reach the specified joint positions using the planner
Args:
joint_pos (th.Tensor): Joint positions for the arm
joint_pos (th.tensor): Joint positions for the arm
Returns:
th.Tensor or None: Action array for one step for the robot to move arm or None if it's at the joint positions
th.tensor or None: Action array for one step for the robot to move arm or None if it's at the joint positions
"""
with PlanningContext(self.env, self.robot, self.robot_copy, "original") as context:
plan = plan_arm_motion(
@ -988,10 +988,10 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
Yields action for the robot to move arm to reach the specified eef positions using the planner
Args:
eef_pose (th.Tensor): End Effector pose for the arm
eef_pose (th.tensor): End Effector pose for the arm
Returns:
th.Tensor or None: Action array for one step for the robot to move arm or None if it's at the joint positions
th.tensor or None: Action array for one step for the robot to move arm or None if it's at the joint positions
"""
eef_pos = eef_pose[0]
eef_ori = T.quat2axisangle(eef_pose[1])
@ -1033,7 +1033,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
Returns:
Array of arrays: Planned path with additional waypoints
"""
plan = th.Tensor(plan)
plan = th.tensor(plan)
interpolated_plan = []
for i in range(len(plan) - 1):
max_diff = max(plan[i + 1] - plan[i])
@ -1047,12 +1047,12 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
Yields action for the robot to move its arm to reach the specified joint positions by directly actuating with no planner
Args:
joint_pos (th.Tensor): Array of joint positions for the arm
joint_pos (th.tensor): Array of joint positions for the arm
stop_on_contact (boolean): Determines whether to stop move once an object is hit
ignore_failure (boolean): Determines whether to throw error for not reaching final joint positions
Returns:
th.Tensor or None: Action array for one step for the robot to move arm or None if it's at the joint positions
th.tensor or None: Action array for one step for the robot to move arm or None if it's at the joint positions
"""
controller_name = f"arm_{self.arm}"
use_delta = self.robot._controllers[controller_name].use_delta_commands
@ -1062,7 +1062,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
for _ in range(m.MAX_STEPS_FOR_HAND_MOVE_JOINT):
current_joint_pos = self.robot.get_joint_positions()[self._manipulation_control_idx]
diff_joint_pos = th.Tensor(joint_pos) - th.Tensor(current_joint_pos)
diff_joint_pos = th.tensor(joint_pos) - th.tensor(current_joint_pos)
if th.max(th.abs(diff_joint_pos)) < m.JOINT_POS_DIFF_THRESHOLD:
return
if stop_on_contact and detect_robot_collision_in_sim(self.robot, ignore_obj_in_hand=False):
@ -1180,7 +1180,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
ignore_failure (boolean): Determines whether to throw error for not reaching final joint positions
Returns:
th.Tensor or None: Action array for one step for the robot to move arm or None if it's at the target pose
th.tensor or None: Action array for one step for the robot to move arm or None if it's at the target pose
"""
# To make sure that this happens in a roughly linear fashion, we will divide the trajectory
# into 1cm-long pieces
@ -1190,7 +1190,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
pos_waypoints = start_pos + (target_pose[0] - start_pos) * th.linspace(0, 1, num_poses).unsqueeze(-1)  # th.linspace only accepts scalar endpoints, so interpolate each axis manually
# Also interpolate the rotations
combined_rotation = Rotation.from_quat(th.Tensor([start_orn, target_pose[1]]))
combined_rotation = Rotation.from_quat(th.stack([start_orn, target_pose[1]]))  # th.stack, since th.tensor cannot build a tensor from a list of tensors
slerp = Slerp([0, 1], combined_rotation)
orn_waypoints = slerp(th.linspace(0, 1, num_poses))
quat_waypoints = [x.as_quat() for x in orn_waypoints]
@ -1219,9 +1219,9 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
# Also decide if we can stop early.
current_pos, current_orn = self.robot.eef_links[self.arm].get_position_orientation()
pos_diff = th.norm(th.Tensor(current_pos) - th.Tensor(target_pose[0]))
pos_diff = th.norm(th.tensor(current_pos) - th.tensor(target_pose[0]))
orn_diff = (Rotation.from_quat(current_orn) * Rotation.from_quat(target_pose[1]).inv()).magnitude()
if pos_diff < 0.005 and orn_diff < th.deg2rad(th.Tensor([0.1])).item():
if pos_diff < 0.005 and orn_diff < th.deg2rad(th.tensor([0.1])).item():
return
if stop_on_contact and detect_robot_collision_in_sim(self.robot, ignore_obj_in_hand=False):
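The early-stop test above pairs a Euclidean position error with a geodesic orientation error; a self-contained sketch of the same check, with made-up poses (quaternions in xyzw order, as scipy expects):

import torch as th
from scipy.spatial.transform import Rotation

current_pos = th.tensor([0.5, 0.0, 1.000])
target_pos = th.tensor([0.5, 0.0, 1.004])
pos_diff = th.norm(current_pos - target_pos)  # Euclidean distance, ~4 mm

current_orn = [0.0, 0.0, 0.0, 1.0]            # unit quaternions, xyzw
target_orn = [0.0, 0.0, 0.0008, 1.0]
orn_diff = (Rotation.from_quat(current_orn) * Rotation.from_quat(target_orn).inv()).magnitude()

print(pos_diff < 0.005 and orn_diff < th.deg2rad(th.tensor([0.1])).item())  # True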
@ -1264,9 +1264,9 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
# Also decide if we can stop early.
current_pos, current_orn = self.robot.eef_links[self.arm].get_position_orientation()
pos_diff = th.norm(th.Tensor(current_pos) - th.Tensor(target_pose[0]))
pos_diff = th.norm(th.tensor(current_pos) - th.tensor(target_pose[0]))
orn_diff = (Rotation.from_quat(current_orn) * Rotation.from_quat(target_pose[1]).inv()).magnitude()
if pos_diff < 0.001 and orn_diff < th.deg2rad(th.Tensor([0.1])).item():
if pos_diff < 0.001 and orn_diff < th.deg2rad(th.tensor([0.1])).item():
return
if stop_on_contact and detect_robot_collision_in_sim(self.robot, ignore_obj_in_hand=False):
@ -1283,7 +1283,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
Yields action for the robot to grasp
Returns:
th.Tensor or None: Action array for one step for the robot to grasp or None if it's done grasping
th.tensor or None: Action array for one step for the robot to grasp or None if it's done grasping
"""
for _ in range(m.MAX_STEPS_FOR_GRASP_OR_RELEASE):
joint_position = self.robot.get_joint_positions()[self.robot.gripper_control_idx[self.arm]]
@ -1302,7 +1302,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
Yields action for the robot to release its grasp
Returns:
th.Tensor or None: Action array for one step for the robot to release or None if it's done releasing
th.tensor or None: Action array for one step for the robot to release or None if it's done releasing
"""
for _ in range(m.MAX_STEPS_FOR_GRASP_OR_RELEASE):
joint_position = self.robot.get_joint_positions()[self.robot.gripper_control_idx[self.arm]]
@ -1408,7 +1408,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
Get a no-op action that allows us to run simulation without changing robot configuration.
Returns:
th.Tensor or None: Action array for one step for the robot to do nothing
th.tensor or None: Action array for one step for the robot to do nothing
"""
action = th.zeros(self.robot.action_dim)
for name, controller in self.robot._controllers.items():
@ -1437,7 +1437,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
Yields action to move the hand to the position optimal for executing subsequent action primitives
Returns:
th.Tensor or None: Action array for one step for the robot to reset its hand or None if it is done resetting
th.tensor or None: Action array for one step for the robot to reset its hand or None if it is done resetting
"""
controller_config = self.robot._controller_config["arm_" + self.arm]
if controller_config["name"] == "InverseKinematicsController":
@ -1460,16 +1460,16 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
def _get_reset_eef_pose(self):
# TODO: Add support for Fetch
if self.robot_model == "Tiago":
return th.Tensor([0.28493954, 0.37450749, 1.1512334]), th.Tensor(
return th.tensor([0.28493954, 0.37450749, 1.1512334]), th.tensor(
[-0.21533823, 0.05361032, -0.08631776, 0.97123871]
)
else:
return th.Tensor([0.48688125, -0.12507881, 0.97888719]), th.Tensor(
return th.tensor([0.48688125, -0.12507881, 0.97888719]), th.tensor(
[0.61324748, 0.61305553, -0.35266518, 0.35173529]
)
def _get_reset_joint_pos(self):
reset_pose_fetch = th.Tensor(
reset_pose_fetch = th.tensor(
[
0.0,
0.0, # wheels
@ -1488,7 +1488,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
]
)
reset_pose_tiago = th.Tensor(
reset_pose_tiago = th.tensor(
[
-1.78029833e-04,
3.20231302e-05,
@ -1529,7 +1529,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
pose_2d (Iterable): (x, y, yaw) 2d pose
Returns:
th.Tensor or None: Action array for one step for the robot to navigate or None if it is done navigating
th.tensor or None: Action array for one step for the robot to navigate or None if it is done navigating
"""
with PlanningContext(self.env, self.robot, self.robot_copy, "simplified") as context:
plan = plan_base_motion(
@ -1586,7 +1586,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
pose_on_obj (Iterable): (pos, quat) Pose
Returns:
th.Tensor or None: Action array for one step for the robot to navigate or None if it is done navigating
th.tensor or None: Action array for one step for the robot to navigate or None if it is done navigating
"""
if pose_on_obj is not None:
if self._target_in_reach_of_robot(pose_on_obj):
@ -1606,7 +1606,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
pose_on_obj (Iterable): (pos, quat) pose
Returns:
th.Tensor or None: Action array for one step for the robot to navigate in range or None if it is done navigating
th.tensor or None: Action array for one step for the robot to navigate in range or None if it is done navigating
"""
pose = self._sample_pose_near_object(obj, pose_on_obj=pose_on_obj, **kwargs)
yield from self._navigate_to_pose(pose)
@ -1620,7 +1620,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
low_precision (bool): Determines whether to navigate to the pose within a large range (low precision) or small range (high precision)
Returns:
th.Tensor or None: Action array for one step for the robot to navigate or None if it is done navigating
th.tensor or None: Action array for one step for the robot to navigate or None if it is done navigating
"""
dist_threshold = m.LOW_PRECISION_DIST_THRESHOLD if low_precision else m.DEFAULT_DIST_THRESHOLD
angle_threshold = m.LOW_PRECISION_ANGLE_THRESHOLD if low_precision else m.DEFAULT_ANGLE_THRESHOLD
@ -1669,7 +1669,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
angle_threshold (float): The angle difference between the robot's current and end pose that determines when the robot is done rotating
Returns:
th.Tensor or None: Action array for one step for the robot to rotate or None if it is done rotating
th.tensor or None: Action array for one step for the robot to rotate or None if it is done rotating
"""
body_target_pose = self._get_pose_in_robot_frame(end_pose)
diff_yaw = T.quat2euler(body_target_pose[1])[2]
@ -1716,14 +1716,14 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
for _ in range(m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_NEAR_OBJECT):
if pose_on_obj is None:
pos_on_obj = self._sample_position_on_aabb_side(obj)
pose_on_obj = [pos_on_obj, th.Tensor([0, 0, 0, 1])]
pose_on_obj = [pos_on_obj, th.tensor([0, 0, 0, 1])]
distance_lo, distance_hi = 0.0, 5.0
distance = (th.rand(1) * (distance_hi - distance_lo) + distance_lo).item()
yaw_lo, yaw_hi = -math.pi, math.pi
yaw = (th.rand(1) * (yaw_hi - yaw_lo) + yaw_lo).item()
avg_arm_workspace_range = th.mean(self.robot.arm_workspace_range[self.arm])
pose_2d = th.Tensor(
pose_2d = th.tensor(
[
pose_on_obj[0][0] + distance * math.cos(yaw),  # yaw is a Python float after .item(), so use math.cos / math.sin rather than th.cos
pose_on_obj[0][1] + distance * math.sin(yaw),
@ -1826,7 +1826,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
sampling_results = sample_cuboid_for_predicate(pred_map[predicate], target_obj, bb_extents)
if sampling_results[0][0] is None:
continue
sampled_bb_center = sampling_results[0][0] + th.Tensor([0, 0, m.PREDICATE_SAMPLING_Z_OFFSET])
sampled_bb_center = sampling_results[0][0] + th.tensor([0, 0, m.PREDICATE_SAMPLING_Z_OFFSET])
sampled_bb_orn = sampling_results[0][2]
# Get the object pose by subtracting the offset
@ -1836,7 +1836,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
# Check that the pose is near one of the poses in the near_poses list if provided.
if near_poses:
sampled_pos = th.Tensor([sampled_obj_pose[0]])
sampled_pos = th.tensor([sampled_obj_pose[0]])
if not th.any(th.norm(near_poses - sampled_pos, dim=1) < near_poses_threshold):
continue
@ -1887,7 +1887,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
- 3-array: (x,y,z) Position in the world frame
- 4-array: (x,y,z,w) Quaternion orientation in the world frame
"""
pos = th.Tensor([pose_2d[0], pose_2d[1], m.DEFAULT_BODY_OFFSET_FROM_FLOOR])
pos = th.tensor([pose_2d[0], pose_2d[1], m.DEFAULT_BODY_OFFSET_FROM_FLOOR])
orn = T.euler2quat([0, 0, pose_2d[2]])
return pos, orn
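A standalone sketch of the 2D-to-3D pose conversion above, substituting scipy for the codebase's T.euler2quat helper and an assumed value for m.DEFAULT_BODY_OFFSET_FROM_FLOOR:

import torch as th
from scipy.spatial.transform import Rotation

body_offset = 0.05         # assumed stand-in for m.DEFAULT_BODY_OFFSET_FROM_FLOOR
pose_2d = (1.0, 2.0, 0.3)  # (x, y, yaw)

pos = th.tensor([pose_2d[0], pose_2d[1], body_offset])
orn = th.tensor(Rotation.from_euler("z", pose_2d[2]).as_quat())  # (x, y, z, w) from yaw only
print(pos, orn)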
@ -1940,7 +1940,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
Yields a no op action for a few steps to allow the robot and physics to settle
Returns:
th.Tensor or None: Action array for one step for the robot to do nothing
th.tensor or None: Action array for one step for the robot to do nothing
"""
for _ in range(30):
empty_action = self._empty_action()

View File

@ -69,7 +69,7 @@ class SymbolicSemanticActionPrimitives(StarterSemanticActionPrimitives):
attempts (int): Number of attempts to make before raising an error
Returns:
th.Tensor or None: Action array for one step for the robot to execute the primitive or None if primitive completed
th.tensor or None: Action array for one step for the robot to execute the primitive or None if primitive completed
Raises:
ActionPrimitiveError: If primitive fails to execute
@ -149,7 +149,7 @@ class SymbolicSemanticActionPrimitives(StarterSemanticActionPrimitives):
DatasetObject: Object for robot to grasp
Returns:
th.Tensor or None: Action array for one step for the robot to grasp or None if grasp completed
th.tensor or None: Action array for one step for the robot to grasp or None if grasp completed
"""
# Don't do anything if the object is already grasped.
obj_in_hand = self._get_obj_in_hand()
@ -242,7 +242,7 @@ class SymbolicSemanticActionPrimitives(StarterSemanticActionPrimitives):
predicate (object_states.OnTop or object_states.Inside): Determines whether to place on top or inside
Returns:
th.Tensor or None: Action array for one step for the robot to place or None if place completed
th.tensor or None: Action array for one step for the robot to place or None if place completed
"""
obj_in_hand = self._get_obj_in_hand()
if obj_in_hand is None:
@ -553,7 +553,7 @@ class SymbolicSemanticActionPrimitives(StarterSemanticActionPrimitives):
)
# Get the position of the heat source on the thing we're placing near
heating_element_positions = th.Tensor(
heating_element_positions = th.stack(  # th.stack, assuming get_position() returns tensors; th.tensor rejects a list of tensors
[link.get_position() for link in heat_source_obj.states[object_states.HeatSourceOrSink].links.values()]
)
heating_distance_threshold = heat_source_obj.states[object_states.HeatSourceOrSink].distance_threshold
@ -599,7 +599,7 @@ class SymbolicSemanticActionPrimitives(StarterSemanticActionPrimitives):
pose_2d (Iterable): (x, y, yaw) 2d pose
Returns:
th.Tensor or None: Action array for one step for the robot to navigate or None if it is done navigating
th.tensor or None: Action array for one step for the robot to navigate or None if it is done navigating
"""
robot_pose = self._get_robot_pose_from_2d_pose(pose_2d)
self.robot.set_position_orientation(*robot_pose)

View File

@ -106,12 +106,12 @@ class BaseController(Serializable, Registerable, Recreatable):
continue
self._control_limits[ControlType.get_type(motor_type)] = [
th.Tensor(control_limits[motor_type][0]),
th.Tensor(control_limits[motor_type][1]),
th.tensor(control_limits[motor_type][0]),
th.tensor(control_limits[motor_type][1]),
]
assert "has_limit" in control_limits, "Expected has_limit specified in control_limits, but does not exist."
self._dof_has_limits = control_limits["has_limit"]
self._dof_idx = th.Tensor(dof_idx, dtype=int)
self._dof_idx = th.tensor(dof_idx, dtype=int)
# Generate goal information
self._goal_shapes = self._get_goal_shapes()
@ -132,8 +132,8 @@ class BaseController(Serializable, Registerable, Recreatable):
)
command_output_limits = (
(
th.Tensor(self._control_limits[self.control_type][0])[self.dof_idx],
th.Tensor(self._control_limits[self.control_type][1])[self.dof_idx],
th.tensor(self._control_limits[self.control_type][0])[self.dof_idx],
th.tensor(self._control_limits[self.control_type][1])[self.dof_idx],
)
if type(command_input_limits) == str and command_input_limits == "default"
else command_output_limits
@ -167,8 +167,8 @@ class BaseController(Serializable, Registerable, Recreatable):
Returns:
Array[float]: Processed command vector
"""
# Make sure command is a th.Tensor
command = th.Tensor([command]) if type(command) in {int, float} else th.Tensor(command)
# Make sure command is a th.tensor
command = th.tensor([command]) if type(command) in {int, float} else th.tensor(command)
# We only clip and / or scale if self.command_input_limits exists
if self._command_input_limits is not None:
# Clip
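The scalar-vs-iterable normalization above is easiest to see in isolation; a minimal sketch of what the updated line guarantees:

import torch as th

def to_command_tensor(command):
    # Bare numbers are wrapped in a list so the result is always a 1-D tensor
    return th.tensor([command]) if type(command) in {int, float} else th.tensor(command)

print(to_command_tensor(0.5))         # tensor([0.5000])
print(to_command_tensor([0.1, 0.2]))  # tensor([0.1000, 0.2000])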
@ -208,7 +208,7 @@ class BaseController(Serializable, Registerable, Recreatable):
), f"Commands must be dimension {self.command_dim}, got dim {len(command)} instead."
# Preprocess and run internal command
self._goal = self._update_goal(command=self._preprocess_command(th.Tensor(command)), control_dict=control_dict)
self._goal = self._update_goal(command=self._preprocess_command(th.tensor(command)), control_dict=control_dict)
def _update_goal(self, command, control_dict):
"""
@ -314,7 +314,7 @@ class BaseController(Serializable, Registerable, Recreatable):
self._goal = (
None
if state["goal"] is None
else {name: th.Tensor(goal_state) for name, goal_state in state["goal"].items()}
else {name: th.tensor(goal_state) for name, goal_state in state["goal"].items()}
)
def serialize(self, state):
@ -369,15 +369,15 @@ class BaseController(Serializable, Registerable, Recreatable):
dim (int): Size of array to broadcast input to
Returns:
th.Tensor: Array filled with values specified in @nums
th.tensor: Array filled with values specified in @nums
"""
# First run sanity check to make sure no strings are being inputted
if isinstance(nums, str):
raise TypeError("Error: Only numeric inputs are supported for this function, nums2array!")
# Check if input is an Iterable, if so, we simply convert the input to th.Tensor and return
# Check if input is an Iterable, if so, we simply convert the input to th.tensor and return
# Else, input is a single value, so we broadcast it to a torch tensor of the correct size and return
return th.Tensor(nums) if isinstance(nums, Iterable) else th.ones(dim) * nums
return th.tensor(nums) if isinstance(nums, Iterable) else th.ones(dim) * nums
@property
def state_size(self):
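A sketch of the nums2array contract documented above, written as a hypothetical standalone helper:

import torch as th
from collections.abc import Iterable

def nums2array(nums, dim):
    if isinstance(nums, str):
        raise TypeError("Error: Only numeric inputs are supported for this function, nums2array!")
    # Iterables convert directly; scalars broadcast to a length-dim vector
    return th.tensor(nums) if isinstance(nums, Iterable) else th.ones(dim) * nums

print(nums2array(0.5, dim=3))        # tensor([0.5000, 0.5000, 0.5000])
print(nums2array([1, 2, 3], dim=3))  # tensor([1, 2, 3])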
@ -465,7 +465,7 @@ class BaseController(Serializable, Registerable, Recreatable):
Returns:
Array[int]: DOF indices corresponding to the specific DOFs being controlled by this robot
"""
return th.Tensor(self._dof_idx)
return th.tensor(self._dof_idx)
@classproperty
def _do_not_register_classes(cls):

View File

@ -107,7 +107,7 @@ class DifferentialDriveController(LocomotionController):
right_wheel_joint_vel = (lin_vel + ang_vel * self._wheel_axle_halflength) / self._wheel_radius
# Return desired velocities
return th.Tensor([left_wheel_joint_vel, right_wheel_joint_vel])
return th.tensor([left_wheel_joint_vel, right_wheel_joint_vel])
def compute_no_op_goal(self, control_dict):
# This is zero-vector, since we want zero linear / angular velocity
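The return value above is the standard differential-drive inverse kinematics; a numeric sketch with assumed wheel geometry:

import torch as th

wheel_radius = 0.1           # assumed, meters
wheel_axle_halflength = 0.2  # assumed, meters

lin_vel, ang_vel = 0.5, 0.1  # commanded body-frame linear / angular velocity
left_wheel_joint_vel = (lin_vel - ang_vel * wheel_axle_halflength) / wheel_radius
right_wheel_joint_vel = (lin_vel + ang_vel * wheel_axle_halflength) / wheel_radius
print(th.tensor([left_wheel_joint_vel, right_wheel_joint_vel]))  # tensor([4.8000, 5.2000])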

View File

@ -244,8 +244,8 @@ class InverseKinematicsController(JointController, ManipulationController):
def _update_goal(self, command, control_dict):
# Grab important info from control dict
pos_relative = th.Tensor(control_dict[f"{self.task_name}_pos_relative"])
quat_relative = th.Tensor(control_dict[f"{self.task_name}_quat_relative"])
pos_relative = th.tensor(control_dict[f"{self.task_name}_pos_relative"])
quat_relative = th.tensor(control_dict[f"{self.task_name}_quat_relative"])
# Convert position command to absolute values if needed
if self.mode == "absolute_pose":
@ -306,8 +306,8 @@ class InverseKinematicsController(JointController, ManipulationController):
Array[float]: outputted (non-clipped!) velocity control signal to deploy
"""
# Grab important info from control dict
pos_relative = th.Tensor(control_dict[f"{self.task_name}_pos_relative"])
quat_relative = th.Tensor(control_dict[f"{self.task_name}_quat_relative"])
pos_relative = th.tensor(control_dict[f"{self.task_name}_pos_relative"])
quat_relative = th.tensor(control_dict[f"{self.task_name}_quat_relative"])
target_pos = goal_dict["target_pos"]
target_quat = goal_dict["target_quat"]
@ -359,8 +359,8 @@ class InverseKinematicsController(JointController, ManipulationController):
def compute_no_op_goal(self, control_dict):
# No-op is maintaining current pose
return dict(
target_pos=th.Tensor(control_dict[f"{self.task_name}_pos_relative"]),
target_quat=th.Tensor(control_dict[f"{self.task_name}_quat_relative"]),
target_pos=th.tensor(control_dict[f"{self.task_name}_pos_relative"]),
target_quat=th.tensor(control_dict[f"{self.task_name}_quat_relative"]),
)
def _get_goal_shapes(self):

View File

@ -95,8 +95,8 @@ class MultiFingerGripperController(GripperController):
self._inverted = inverted
self._mode = mode
self._limit_tolerance = limit_tolerance
self._open_qpos = open_qpos if open_qpos is None else th.Tensor(open_qpos)
self._closed_qpos = closed_qpos if closed_qpos is None else th.Tensor(closed_qpos)
self._open_qpos = open_qpos if open_qpos is None else th.tensor(open_qpos)
self._closed_qpos = closed_qpos if closed_qpos is None else th.tensor(closed_qpos)
# Create other args to be filled in at runtime
self._is_grasping = IsGraspingState.FALSE
@ -125,9 +125,9 @@ class MultiFingerGripperController(GripperController):
# We extend this method to make sure command is always 2D
if self._mode != "independent":
command = (
th.Tensor([command] * self.command_dim)
th.tensor([command] * self.command_dim)
if type(command) in {int, float}
else th.Tensor([command[0]] * self.command_dim)
else th.tensor([command[0]] * self.command_dim)
)
# Flip the command if the direction is inverted.
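A sketch of the non-independent gripper command broadcast above, assuming a two-finger gripper (command_dim = 2):

import torch as th

command_dim = 2  # assumed: two synchronized fingers

def broadcast_command(command):
    # One value drives every finger; for iterable input, only the first entry is used
    return (
        th.tensor([command] * command_dim)
        if type(command) in {int, float}
        else th.tensor([command[0]] * command_dim)
    )

print(broadcast_command(1.0))         # tensor([1., 1.])
print(broadcast_command([0.5, 0.9]))  # tensor([0.5000, 0.5000])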

View File

@ -55,7 +55,7 @@ class NullJointController(JointController):
applied
"""
# Store values
self._default_command = th.zeros(len(dof_idx)) if default_command is None else th.Tensor(default_command)
self._default_command = th.zeros(len(dof_idx)) if default_command is None else th.tensor(default_command)
# Run super init
super().__init__(
@ -77,7 +77,7 @@ class NullJointController(JointController):
def _preprocess_command(self, command):
# Override super and force the processed command to be internal stored default value
return th.Tensor(self._default_command)
return th.tensor(self._default_command)
def update_default_goal(self, target):
"""
@ -91,4 +91,4 @@ class NullJointController(JointController):
len(target) == self.control_dim
), f"Default control must be length: {self.control_dim}, got length: {len(target)}"
self._default_command = th.Tensor(target)
self._default_command = th.tensor(target)

View File

@ -134,9 +134,9 @@ class OperationalSpaceController(ManipulationController):
self.damping_ratio = damping_ratio
self.kp_null = nums2array(nums=kp_null, dim=control_dim, dtype=th.float32) if kp_null is not None else None
self.kd_null = 2 * th.sqrt(self.kp_null) if kp_null is not None else None # critically damped
self.kp_limits = th.Tensor(kp_limits, dtype=th.float32)
self.damping_ratio_limits = th.Tensor(damping_ratio_limits, dtype=th.float32)
self.kp_null_limits = th.Tensor(kp_null_limits, dtype=th.float32)
self.kp_limits = th.tensor(kp_limits, dtype=th.float32)
self.damping_ratio_limits = th.tensor(damping_ratio_limits, dtype=th.float32)
self.kp_null_limits = th.tensor(kp_null_limits, dtype=th.float32)
# Store settings for whether we're learning gains or not
self.variable_kp = self.kp is None
@ -295,8 +295,8 @@ class OperationalSpaceController(ManipulationController):
frame to control, computed in its local frame (e.g.: robot base frame)
"""
# Grab important info from control dict
pos_relative = th.Tensor(control_dict[f"{self.task_name}_pos_relative"])
quat_relative = th.Tensor(control_dict[f"{self.task_name}_quat_relative"])
pos_relative = th.tensor(control_dict[f"{self.task_name}_pos_relative"])
quat_relative = th.tensor(control_dict[f"{self.task_name}_quat_relative"])
# Convert position command to absolute values if needed
if self.mode == "absolute_pose":
@ -415,8 +415,8 @@ class OperationalSpaceController(ManipulationController):
def compute_no_op_goal(self, control_dict):
# No-op is maintaining current pose
target_pos = th.Tensor(control_dict[f"{self.task_name}_pos_relative"])
target_quat = th.Tensor(control_dict[f"{self.task_name}_quat_relative"])
target_pos = th.tensor(control_dict[f"{self.task_name}_pos_relative"])
target_quat = th.tensor(control_dict[f"{self.task_name}_quat_relative"])
# Convert quat into eef ori mat
return dict(

View File

@ -578,8 +578,8 @@ class Environment(gym.Env, GymObservable, Recreatable):
following OpenAI Gym's convention
Args:
action (gym.spaces.Dict or dict or th.Tensor): robot actions. If a dict is specified, each entry should
map robot name to corresponding action. If a th.Tensor, it should be the flattened, concatenated set
action (gym.spaces.Dict or dict or th.tensor): robot actions. If a dict is specified, each entry should
map robot name to corresponding action. If a th.tensor, it should be the flattened, concatenated set
of actions
Returns:
@ -654,7 +654,7 @@ class Environment(gym.Env, GymObservable, Recreatable):
exp_obs[key] = ("obs_space", key, value.dtype, value.shape)
real_obs = dict()
for key, value in recursively_generate_flat_dict(dic=check_obs).items():
if isinstance(value, th.Tensor):
if isinstance(value, th.Tensor):  # must stay th.Tensor: th.tensor is a factory function, not a type, so isinstance() would raise TypeError
real_obs[key] = ("obs", key, value.dtype, value.shape)
else:
real_obs[key] = ("obs", key, type(value), "()")

View File

@ -46,7 +46,7 @@ class EnvironmentWrapper(Wrapper, Registerable):
By default, run the normal environment step() function
Args:
action (th.Tensor): action to take in environment
action (th.tensor): action to take in environment
Returns:
4-tuple:

View File

@ -41,7 +41,7 @@ class SB3VectorEnvironment(DummyVecEnv):
# Keep track of our last reset time
self.last_reset_time = time.time()
def step_async(self, actions: th.Tensor) -> None:
def step_async(self, actions: th.Tensor) -> None:  # annotation keeps th.Tensor, the class; th.tensor is a function, not a type
# We go into this context in case the pre-step tries to call step / render
with og.sim.render_on_step(self.render_on_step):
global last_stepped_env, last_stepped_time

View File

@ -85,7 +85,7 @@ class CustomCombinedExtractor(BaseFeaturesExtractor):
# Update the features dim manually
self._features_dim = total_concat_size
def forward(self, observations) -> th.Tensor:
def forward(self, observations) -> th.Tensor:  # return annotation keeps th.Tensor, the class
encoded_tensor_list = []
self.step_index += 1

View File

@ -111,8 +111,8 @@ def main(random_selection=False, headless=False, short_exec=False):
# Set viewer camera pose
og.sim.viewer_camera.set_position_orientation(
position=th.Tensor([-1.689292, -2.11718198, 0.93332228]),
orientation=th.Tensor([0.57687967, -0.22995655, -0.29022759, 0.72807814]),
position=th.tensor([-1.689292, -2.11718198, 0.93332228]),
orientation=th.tensor([0.57687967, -0.22995655, -0.29022759, 0.72807814]),
)
for _ in range(10):
@ -121,7 +121,7 @@ def main(random_selection=False, headless=False, short_exec=False):
shelf_baseboard = env.scene.object_registry("name", "shelf_baseboard")
shelf_baseboard.set_position_orientation([0, -0.979, 0.26], [0, 0, 0, 1])
shelf_baseboard.keep_still()
shelf_baseboard.set_linear_velocity(th.Tensor([-0.2, 0, 0]))
shelf_baseboard.set_linear_velocity(th.tensor([-0.2, 0, 0]))
shelf_side_left = env.scene.object_registry("name", "shelf_side_left")
shelf_side_left.set_position_orientation([-0.4, 0.0, 0.2], [0, 0, 0, 1])

View File

@ -81,8 +81,8 @@ def main(random_selection=False, headless=False, short_exec=False):
# Update the simulator's viewer camera's pose so it points towards the table
og.sim.viewer_camera.set_position_orientation(
position=th.Tensor([0.544888, -0.412084, 1.11569]),
orientation=th.Tensor([0.54757518, 0.27792802, 0.35721896, 0.70378409]),
position=th.tensor([0.544888, -0.412084, 1.11569]),
orientation=th.tensor([0.54757518, 0.27792802, 0.35721896, 0.70378409]),
)
# Let apple settle
@ -91,7 +91,7 @@ def main(random_selection=False, headless=False, short_exec=False):
knife.keep_still()
knife.set_position_orientation(
position=apple.get_position() + th.Tensor([-0.15, 0.0, 0.2]),
position=apple.get_position() + th.tensor([-0.15, 0.0, 0.2]),
orientation=T.euler2quat([-math.pi / 2, 0, 0]),
)

View File

@ -67,8 +67,8 @@ def main(random_selection=False, headless=False, short_exec=False):
# Set viewer camera
og.sim.viewer_camera.set_position_orientation(
position=th.Tensor([0.46382895, -2.66703958, 1.22616824]),
orientation=th.Tensor([0.58779174, -0.00231237, -0.00318273, 0.80900271]),
position=th.tensor([0.46382895, -2.66703958, 1.22616824]),
orientation=th.tensor([0.58779174, -0.00231237, -0.00318273, 0.80900271]),
)
def print_state():

View File

@ -38,8 +38,8 @@ def main():
# Set camera to appropriate viewing pose
og.sim.viewer_camera.set_position_orientation(
position=th.Tensor([-0.0792399, -1.30104, 1.51981]),
orientation=th.Tensor([0.54897692, 0.00110359, 0.00168013, 0.83583509]),
position=th.tensor([-0.0792399, -1.30104, 1.51981]),
orientation=th.tensor([0.54897692, 0.00110359, 0.00168013, 0.83583509]),
)
# Make sure necessary object states are included with the stove
@ -79,7 +79,7 @@ def main():
# Move stove, notify user
input("Heat source is now moving: Press ENTER to continue.")
stove.set_position(th.Tensor([0, 1.0, 0.61]))
stove.set_position(th.tensor([0, 1.0, 0.61]))
for i in range(100):
env.step(th.empty(0))

View File

@ -30,7 +30,7 @@ def main():
name=f"bowl{i}",
category="bowl",
model="ajzltc",
bounding_box=th.Tensor([0.329, 0.293, 0.168]) * scale,
bounding_box=th.tensor([0.329, 0.293, 0.168]) * scale,
abilities={"heatable": {}},
position=[x, 0, 0.2],
)
@ -49,8 +49,8 @@ def main():
# Set camera to appropriate viewing pose
og.sim.viewer_camera.set_position_orientation(
position=th.Tensor([0.182103, -2.07295, 0.14017]),
orientation=th.Tensor([0.77787037, 0.00267566, 0.00216149, 0.62841535]),
position=th.tensor([0.182103, -2.07295, 0.14017]),
orientation=th.tensor([0.77787037, 0.00267566, 0.00216149, 0.62841535]),
)
# Dim the skybox so we can see the bowls' steam effectively

View File

@ -56,8 +56,8 @@ def main():
# Set camera to appropriate viewing pose
og.sim.viewer_camera.set_position_orientation(
position=th.Tensor([1.7789, -1.68822, 1.13551]),
orientation=th.Tensor([0.57065614, 0.20331904, 0.267029, 0.74947212]),
position=th.tensor([1.7789, -1.68822, 1.13551]),
orientation=th.tensor([0.57065614, 0.20331904, 0.267029, 0.74947212]),
)
# Grab reference to object of interest

View File

@ -74,8 +74,8 @@ def main(random_selection=False, headless=False, short_exec=False):
# Set camera to appropriate viewing pose
og.sim.viewer_camera.set_position_orientation(
position=th.Tensor([-0.42246569, -0.34745704, 1.56810353]),
orientation=th.Tensor([0.50083786, -0.10407796, -0.17482619, 0.84128772]),
position=th.tensor([-0.42246569, -0.34745704, 1.56810353]),
orientation=th.tensor([0.50083786, -0.10407796, -0.17482619, 0.84128772]),
)
# Let objects settle
@ -86,10 +86,10 @@ def main(random_selection=False, headless=False, short_exec=False):
stove.states[object_states.ToggledOn].set_value(True)
# The first apple will be affected by the stove
apples[0].set_position(stove.states[object_states.HeatSourceOrSink].link.get_position() + th.Tensor([0.11, 0, 0.1]))
apples[0].set_position(stove.states[object_states.HeatSourceOrSink].link.get_position() + th.tensor([0.11, 0, 0.1]))
# The second apple will NOT be affected by the stove, but will be affected by the first apple once it's on fire.
apples[1].set_position(stove.states[object_states.HeatSourceOrSink].link.get_position() + th.Tensor([0.32, 0, 0.1]))
apples[1].set_position(stove.states[object_states.HeatSourceOrSink].link.get_position() + th.tensor([0.32, 0, 0.1]))
steps = 0
max_steps = -1 if not short_exec else 1000

View File

@ -56,8 +56,8 @@ def main(random_selection=False, headless=False, short_exec=False):
# Set camera pose
og.sim.viewer_camera.set_position_orientation(
position=th.Tensor([0.88215526, -1.40086216, 2.00311063]),
orientation=th.Tensor([0.42013364, 0.12342107, 0.25339685, 0.86258043]),
position=th.tensor([0.88215526, -1.40086216, 2.00311063]),
orientation=th.tensor([0.42013364, 0.12342107, 0.25339685, 0.86258043]),
)
max_steps = 100 if short_exec else -1

View File

@ -72,7 +72,7 @@ def main(random_selection=False, headless=False, short_exec=False):
# Either Cone or Cylinder; shape of the projection where particles can be applied / removed
"type": "Cone",
# Size of the cone
"extents": th.Tensor([0.1875, 0.1875, 0.375]),
"extents": th.tensor([0.1875, 0.1875, 0.375]),
},
}
@ -129,8 +129,8 @@ def main(random_selection=False, headless=False, short_exec=False):
# Set the viewer camera appropriately
og.sim.viewer_camera.set_position_orientation(
position=th.Tensor([-1.61340969, -1.79803028, 2.53167412]),
orientation=th.Tensor([0.46291845, -0.12381886, -0.22679218, 0.84790371]),
position=th.tensor([-1.61340969, -1.79803028, 2.53167412]),
orientation=th.tensor([0.46291845, -0.12381886, -0.22679218, 0.84790371]),
)
# If we're using a projection volume, we manually add in the required metalink required in order to use the volume
@ -163,7 +163,7 @@ def main(random_selection=False, headless=False, short_exec=False):
modifier._post_load()
env.scene.object_registry.add(modifier)
og.sim.post_import_object(modifier)
modifier.set_position(th.Tensor([0, 0, 5.0]))
modifier.set_position(th.tensor([0, 0, 5.0]))
# Play the simulator and take some environment steps to let the objects settle
og.sim.play()
@ -193,16 +193,16 @@ def main(random_selection=False, headless=False, short_exec=False):
z = 1.22
modifier.keep_still()
modifier.set_position_orientation(
position=th.Tensor([0, 0.3, z]),
orientation=th.Tensor([0, 0, 0, 1.0]),
position=th.tensor([0, 0.3, z]),
orientation=th.tensor([0, 0, 0, 1.0]),
)
# Move object in square around table
deltas = [
[130, th.Tensor([-0.01, 0, 0])],
[60, th.Tensor([0, -0.01, 0])],
[130, th.Tensor([0.01, 0, 0])],
[60, th.Tensor([0, 0.01, 0])],
[130, th.tensor([-0.01, 0, 0])],
[60, th.tensor([0, -0.01, 0])],
[130, th.tensor([0.01, 0, 0])],
[60, th.tensor([0, 0.01, 0])],
]
for t, delta in deltas:
for i in range(t):

View File

@ -69,8 +69,8 @@ def main(random_selection=False, headless=False, short_exec=False):
# Set camera to ideal angle for viewing objects
og.sim.viewer_camera.set_position_orientation(
position=th.Tensor([0.37860532, -0.65396566, 1.4067066]),
orientation=th.Tensor([0.49909498, 0.15201752, 0.24857062, 0.81609284]),
position=th.tensor([0.37860532, -0.65396566, 1.4067066]),
orientation=th.tensor([0.49909498, 0.15201752, 0.24857062, 0.81609284]),
)
# Take a few steps to let the objects settle, and then turn on the sink

View File

@ -49,7 +49,7 @@ def main(random_selection=False, headless=False, short_exec=False):
name=f"plate{i}",
category="plate",
model="iawoof",
bounding_box=th.Tensor([0.20, 0.20, 0.05]),
bounding_box=th.tensor([0.20, 0.20, 0.05]),
)
for i in range(2)
]
@ -70,7 +70,7 @@ def main(random_selection=False, headless=False, short_exec=False):
name=f"shelf",
category="shelf",
model="pkgbcp",
bounding_box=th.Tensor([1.0, 0.4, 2.0]),
bounding_box=th.tensor([1.0, 0.4, 2.0]),
)
box_cfgs = [
@ -79,7 +79,7 @@ def main(random_selection=False, headless=False, short_exec=False):
name=f"box{i}",
category="box_of_crackers",
model="cmdigf",
bounding_box=th.Tensor([0.2, 0.05, 0.3]),
bounding_box=th.tensor([0.2, 0.05, 0.3]),
)
for i in range(5)
]
@ -129,7 +129,7 @@ def sample_microwave_plates_apples(env):
cabinet.set_orientation([0, 0, 0, 1.0])
env.step(th.empty(0))
offset = cabinet.get_position()[2] - cabinet.aabb_center[2]
cabinet.set_position(th.Tensor([1.0, 0, cabinet.aabb_extent[2] / 2]) + offset)
cabinet.set_position(th.tensor([1.0, 0, cabinet.aabb_extent[2] / 2]) + offset)
env.step(th.empty(0))
# Set microwave on top of the cabinet, open it, and step 100 times
@ -173,7 +173,7 @@ def sample_boxes_on_shelf(env):
shelf.set_orientation([0, 0, 0, 1.0])
env.step(th.empty(0))
offset = shelf.get_position()[2] - shelf.aabb_center[2]
shelf.set_position(th.Tensor([-1.0, 0, shelf.aabb_extent[2] / 2]) + offset)
shelf.set_position(th.tensor([-1.0, 0, shelf.aabb_extent[2] / 2]) + offset)
env.step(th.empty(0)) # One step is needed for the object to be fully initialized
og.log.info("Shelf placed.")

View File

@ -79,8 +79,8 @@ def main(random_selection=False, headless=False, short_exec=False):
# Update the simulator's viewer camera's pose so it points towards the table
og.sim.viewer_camera.set_position_orientation(
position=th.Tensor([0.544888, -0.412084, 1.11569]),
orientation=th.Tensor([0.54757518, 0.27792802, 0.35721896, 0.70378409]),
position=th.tensor([0.544888, -0.412084, 1.11569]),
orientation=th.tensor([0.54757518, 0.27792802, 0.35721896, 0.70378409]),
)
# Let apple settle
@ -89,7 +89,7 @@ def main(random_selection=False, headless=False, short_exec=False):
knife.keep_still()
knife.set_position_orientation(
position=apple.get_position() + th.Tensor([-0.15, 0.0, 0.2]),
position=apple.get_position() + th.tensor([-0.15, 0.0, 0.2]),
orientation=T.euler2quat([-math.pi / 2, 0, 0]),
)

View File

@ -132,8 +132,8 @@ def main(random_selection=False, headless=False, short_exec=False):
# Set camera to appropriate viewing pose
og.sim.viewer_camera.set_position_orientation(
position=th.Tensor([0.46938863, -3.97887141, 1.64106008]),
orientation=th.Tensor([0.63311689, 0.00127259, 0.00155577, 0.77405359]),
position=th.tensor([0.46938863, -3.97887141, 1.64106008]),
orientation=th.tensor([0.63311689, 0.00127259, 0.00155577, 0.77405359]),
)
# Let objects settle
@ -149,7 +149,7 @@ def main(random_selection=False, headless=False, short_exec=False):
for apple in apples:
apple.states[object_states.Temperature].set_value(-50)
apples[0].states[object_states.Inside].set_value(oven, True)
apples[1].set_position(stove.states[object_states.HeatSourceOrSink].link.get_position() + th.Tensor([0, 0, 0.1]))
apples[1].set_position(stove.states[object_states.HeatSourceOrSink].link.get_position() + th.tensor([0, 0, 0.1]))
apples[2].states[object_states.OnTop].set_value(tray, True)
apples[3].states[object_states.Inside].set_value(fridge, True)
apples[4].states[object_states.Inside].set_value(microwave, True)

View File

@ -52,8 +52,8 @@ def main(random_selection=False, headless=False, short_exec=False):
# Set camera to appropriate viewing pose
cam = og.sim.viewer_camera
cam.set_position_orientation(
position=th.Tensor([-4.62785, -0.418575, 0.933943]),
orientation=th.Tensor([0.52196595, -0.4231939, -0.46640436, 0.5752612]),
position=th.tensor([-4.62785, -0.418575, 0.933943]),
orientation=th.tensor([0.52196595, -0.4231939, -0.46640436, 0.5752612]),
)
# Add bounding boxes to camera sensor

View File

@ -61,7 +61,7 @@ def main(random_selection=False, headless=False, short_exec=False):
# Place the object so it rests on the floor
obj = env.scene.object_registry("name", "obj")
center_offset = obj.get_position() - obj.aabb_center + th.Tensor([0, 0, obj.aabb_extent[2] / 2.0])
center_offset = obj.get_position() - obj.aabb_center + th.tensor([0, 0, obj.aabb_extent[2] / 2.0])
obj.set_position(center_offset)
# Step through the environment

View File

@ -95,8 +95,8 @@ def main(random_selection=False, headless=False, short_exec=False):
# Set camera to appropriate viewing pose
og.sim.viewer_camera.set_position_orientation(
position=th.Tensor([-0.00913503, -1.95750906, 1.36407314]),
orientation=th.Tensor([0.6350064, 0.0, 0.0, 0.77250687]),
position=th.tensor([-0.00913503, -1.95750906, 1.36407314]),
orientation=th.tensor([0.6350064, 0.0, 0.0, 0.77250687]),
)
# Grab the object references
@ -111,7 +111,7 @@ def main(random_selection=False, headless=False, short_exec=False):
env.step(th.empty(0))
# Move the object so that its center is at [0, 0, 1]
center_offset = obj.get_position() - obj.aabb_center + th.Tensor([0, 0, 1.0])
center_offset = obj.get_position() - obj.aabb_center + th.tensor([0, 0, 1.0])
obj.set_position(center_offset)
# Allow the user to easily move the camera around
@ -123,7 +123,7 @@ def main(random_selection=False, headless=False, short_exec=False):
max_steps = 100 if short_exec else 10000
for i in range(max_steps):
z_angle = 2 * math.pi * (i % steps_per_rotate) / steps_per_rotate
quat = T.euler2quat(th.Tensor([0, 0, z_angle]))
quat = T.euler2quat(th.tensor([0, 0, z_angle]))
pos = T.quat2mat(quat) @ center_offset
if obj.n_dof > 0:
frac = (i % steps_per_joint) / steps_per_joint

View File

@ -44,8 +44,8 @@ def main(random_selection=False, headless=False, short_exec=False):
# Set camera to appropriate viewing pose
cam = og.sim.viewer_camera
cam.set_position_orientation(
position=th.Tensor([-4.62785, -0.418575, 0.933943]),
orientation=th.Tensor([0.52196595, -0.4231939, -0.46640436, 0.5752612]),
position=th.tensor([-4.62785, -0.418575, 0.933943]),
orientation=th.tensor([0.52196595, -0.4231939, -0.46640436, 0.5752612]),
)
def steps(n):

View File

@ -59,14 +59,14 @@ def main(random_selection=False, headless=False, short_exec=False):
# Update the viewer camera's pose so that it points towards the robot
og.sim.viewer_camera.set_position_orientation(
position=th.Tensor([4.32248, -5.74338, 6.85436]),
orientation=th.Tensor([0.39592, 0.13485, 0.29286, 0.85982]),
position=th.tensor([4.32248, -5.74338, 6.85436]),
orientation=th.tensor([0.39592, 0.13485, 0.29286, 0.85982]),
)
robot = env.robots[0]
# Set robot base at the origin
robot.set_position_orientation(th.Tensor([0, 0, 0]), th.Tensor([0, 0, 0, 1]))
robot.set_position_orientation(th.tensor([0, 0, 0]), th.tensor([0, 0, 0, 1]))
# At least one simulation step while the simulator is playing must occur for the robot (or in general, any object)
# to be fully initialized after it is imported into the simulator
og.sim.play()
@ -179,12 +179,12 @@ def main(random_selection=False, headless=False, short_exec=False):
def input_to_xyz_delta_command(inp, delta=0.01):
mapping = {
lazy.carb.input.KeyboardInput.W: th.Tensor([delta, 0, 0]),
lazy.carb.input.KeyboardInput.S: th.Tensor([-delta, 0, 0]),
lazy.carb.input.KeyboardInput.DOWN: th.Tensor([0, 0, -delta]),
lazy.carb.input.KeyboardInput.UP: th.Tensor([0, 0, delta]),
lazy.carb.input.KeyboardInput.A: th.Tensor([0, delta, 0]),
lazy.carb.input.KeyboardInput.D: th.Tensor([0, -delta, 0]),
lazy.carb.input.KeyboardInput.W: th.tensor([delta, 0, 0]),
lazy.carb.input.KeyboardInput.S: th.tensor([-delta, 0, 0]),
lazy.carb.input.KeyboardInput.DOWN: th.tensor([0, 0, -delta]),
lazy.carb.input.KeyboardInput.UP: th.tensor([0, 0, delta]),
lazy.carb.input.KeyboardInput.A: th.tensor([0, delta, 0]),
lazy.carb.input.KeyboardInput.D: th.tensor([0, -delta, 0]),
}
return mapping.get(inp)

View File

@ -44,8 +44,8 @@ def main(random_selection=False, headless=False, short_exec=False):
if not headless:
# Set viewer in front facing robot
og.sim.viewer_camera.set_position_orientation(
position=th.Tensor([2.69918369, -3.63686664, 4.57894564]),
orientation=th.Tensor([0.39592411, 0.1348514, 0.29286304, 0.85982]),
position=th.tensor([2.69918369, -3.63686664, 4.57894564]),
orientation=th.tensor([0.39592411, 0.1348514, 0.29286304, 0.85982]),
)
og.sim.enable_viewer_camera_teleoperation()

View File

@ -92,8 +92,8 @@ def main(random_selection=False, headless=False, short_exec=False):
# Update the simulator's viewer camera's pose so it points towards the robot
og.sim.viewer_camera.set_position_orientation(
position=th.Tensor([-2.39951, 2.26469, 2.66227]),
orientation=th.Tensor([-0.23898481, 0.48475231, 0.75464013, -0.37204802]),
position=th.tensor([-2.39951, 2.26469, 2.66227]),
orientation=th.tensor([-0.23898481, 0.48475231, 0.75464013, -0.37204802]),
)
# Create teleop controller

View File

@ -125,8 +125,8 @@ def main(random_selection=False, headless=False, short_exec=False, quickstart=Fa
# Update the simulator's viewer camera's pose so it points towards the robot
og.sim.viewer_camera.set_position_orientation(
position=th.Tensor([1.46949, -3.97358, 2.21529]),
orientation=th.Tensor([0.56829048, 0.09569975, 0.13571846, 0.80589577]),
position=th.tensor([1.46949, -3.97358, 2.21529]),
orientation=th.tensor([0.56829048, 0.09569975, 0.13571846, 0.80589577]),
)
# Reset environment and robot

View File

@ -82,7 +82,7 @@ def main(random_selection=False, headless=False, short_exec=False):
KeyboardEventHandler.add_keyboard_callback(
key=lazy.carb.input.KeyboardInput.J,
callback_fn=lambda: cam_mover.record_trajectory_from_waypoints(
waypoints=th.Tensor(waypoints),
waypoints=th.tensor(waypoints),
per_step_distance=0.02,
fps=30,
steps_per_frame=1,

View File

@ -26,7 +26,7 @@ def main(random_selection=False, headless=False, short_exec=False):
trav_map_erosion = 2
trav_map = Image.open(os.path.join(get_og_scene_path(scene_model), "layout", "floor_trav_0.png"))
trav_map = th.Tensor(trav_map.resize((trav_map_size, trav_map_size)))
trav_map = th.tensor(np.array(trav_map.resize((trav_map_size, trav_map_size))))  # PIL images must go through np.array first (assumes numpy imported as np)
trav_map = cv2.erode(trav_map.numpy(), np.ones((trav_map_erosion, trav_map_erosion), dtype=np.uint8))  # cv2 operates on numpy arrays, not torch tensors
if not headless:

View File

@ -36,8 +36,8 @@ def main(random_selection=False, headless=False, short_exec=False):
# Set the camera to a good angle
def set_camera_pose():
og.sim.viewer_camera.set_position_orientation(
position=th.Tensor([-0.229375, -3.40576, 7.26143]),
orientation=th.Tensor([0.27619733, -0.00230233, -0.00801152, 0.9610648]),
position=th.tensor([-0.229375, -3.40576, 7.26143]),
orientation=th.tensor([0.27619733, -0.00230233, -0.00801152, 0.9610648]),
)
set_camera_pose()

View File

@ -55,5 +55,5 @@ class BaseMap:
xy: 2D location in world reference frame (metric)
:return: 2D location in map reference frame (image)
"""
point_wrt_map = th.Tensor(xy) / self.map_resolution + self.map_size / 2.0
point_wrt_map = th.tensor(xy) / self.map_resolution + self.map_size / 2.0
return th.flip(point_wrt_map, dims=tuple(range(point_wrt_map.dim()))).int()
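world_to_map above scales metric (x, y) coordinates by the map resolution, recenters them on the image center, and flips the axis order from world (x, y) to image (row, col); a sketch with assumed map parameters:

import torch as th

map_resolution = 0.1  # assumed, meters per pixel
map_size = 200        # assumed, pixels

xy = th.tensor([1.0, -2.0])                              # metric world coordinates
point_wrt_map = xy / map_resolution + map_size / 2.0     # scale, then recenter
point_wrt_map = th.flip(point_wrt_map, dims=(0,)).int()  # (x, y) -> (row, col)
print(point_wrt_map)                                     # tensor([ 80, 110], dtype=torch.int32)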

View File

@ -63,8 +63,8 @@ class SegmentationMap(BaseMap):
assert height == width, "room seg map is not a square"
assert img_ins.size == img_sem.size, "semantic and instance seg maps have different sizes"
map_size = int(height * self.map_default_resolution / self.map_resolution)
img_ins = th.Tensor(img_ins.resize((map_size, map_size), Image.NEAREST))
img_sem = th.Tensor(img_sem.resize((map_size, map_size), Image.NEAREST))
img_ins = th.tensor(np.array(img_ins.resize((map_size, map_size), Image.NEAREST)))  # convert the PIL images via np.array; th.tensor cannot read PIL images directly (assumes numpy imported as np)
img_sem = th.tensor(np.array(img_sem.resize((map_size, map_size), Image.NEAREST)))
room_categories = os.path.join(gm.DATASET_PATH, "metadata", "room_categories.txt")
with open(room_categories, "r") as fp:
@ -123,14 +123,14 @@ class SegmentationMap(BaseMap):
return None, None
sem_id = self.room_sem_name_to_sem_id[room_type]
valid_idx = th.Tensor(th.where(self.room_sem_map == sem_id))
valid_idx = th.stack(th.where(self.room_sem_map == sem_id))  # th.where returns a tuple of tensors; stack them into a (2, N) index tensor
random_point_map = valid_idx[:, th.randint(valid_idx.shape[1], (1,)).item()]  # th.randint requires an explicit size argument
x, y = self.map_to_world(random_point_map)
# assume only 1 floor
floor = 0
z = self.floor_heights[floor]
return floor, th.Tensor([x, y, z])
return floor, th.tensor([x, y, z])
def get_random_point_by_room_instance(self, room_instance):
"""
@ -149,14 +149,14 @@ class SegmentationMap(BaseMap):
return None, None
ins_id = self.room_ins_name_to_ins_id[room_instance]
valid_idx = th.Tensor(th.where(self.room_ins_map == ins_id))
valid_idx = th.stack(th.where(self.room_ins_map == ins_id))  # th.where returns a tuple of tensors; stack them into a (2, N) index tensor
random_point_map = valid_idx[:, th.randint(valid_idx.shape[1], (1,)).item()]  # th.randint requires an explicit size argument
x, y = self.map_to_world(random_point_map)
# assume only 1 floor
floor = 0
z = self.floor_heights[floor]
return floor, th.Tensor([x, y, z])
return floor, th.tensor([x, y, z])
def get_room_type_by_point(self, xy):
"""

View File

@ -77,9 +77,9 @@ class TraversableMap(BaseMap):
for floor in range(len(self.floor_heights)):
if self.trav_map_with_objects:
# TODO: Shouldn't this be generated dynamically?
trav_map = th.Tensor(Image.open(os.path.join(maps_path, "floor_trav_{}.png".format(floor))))
trav_map = th.tensor(np.array(Image.open(os.path.join(maps_path, "floor_trav_{}.png".format(floor)))))  # PIL image -> np.array -> tensor (assumes numpy imported as np)
else:
trav_map = th.Tensor(Image.open(os.path.join(maps_path, "floor_trav_no_obj_{}.png".format(floor))))
trav_map = th.tensor(np.array(Image.open(os.path.join(maps_path, "floor_trav_no_obj_{}.png".format(floor)))))  # PIL image -> np.array -> tensor
# If we do not initialize the original size of the traversability map, we obtain it from the image
# Then, we compute the final map size as the factor of scaling (default_resolution/resolution) times the
@ -157,10 +157,10 @@ class TraversableMap(BaseMap):
else:
trav_space = th.where(trav_map == 255)
idx = th.randint(high=trav_space[0].shape[0], size=(1,)).item()  # th.randint requires an explicit size
xy_map = th.Tensor([trav_space[0][idx], trav_space[1][idx]])
xy_map = th.tensor([trav_space[0][idx], trav_space[1][idx]])
x, y = self.map_to_world(xy_map)
z = self.floor_heights[floor]
return floor, th.Tensor([x, y, z])
return floor, th.tensor([x, y, z])
def get_shortest_path(self, floor, source_world, target_world, entire_path=False, robot=None):
"""

View File

@ -112,7 +112,7 @@ def compute_adjacencies(obj, axes, max_distance, use_aabb_center=True):
# Check for self-hit -- if so, record the position and terminate early
should_continue = True
if hit.rigid_body in obj_link_paths:
ray_starts[idx] = th.Tensor(hit.position)
ray_starts[idx] = th.tensor(hit.position)
should_continue = False
return should_continue
@ -166,7 +166,7 @@ class VerticalAdjacency(AbsoluteObjectState):
def _get_value(self):
# Call the adjacency computation with the Z axis.
bodies_by_axis = compute_adjacencies(
self.obj, th.Tensor([[0, 0, 1]]), m.MAX_DISTANCE_VERTICAL, use_aabb_center=False
self.obj, th.tensor([[0, 0, 1]]), m.MAX_DISTANCE_VERTICAL, use_aabb_center=False
)
# Return the adjacencies from the only axis we passed in.

View File

@ -26,7 +26,7 @@ m = create_module_macros(module_path=__file__)
m.ATTACHMENT_LINK_PREFIX = "attachment"
m.DEFAULT_POSITION_THRESHOLD = 0.05 # 5cm
m.DEFAULT_ORIENTATION_THRESHOLD = th.deg2rad(th.Tensor([5.0])).item() # 5 degrees
m.DEFAULT_ORIENTATION_THRESHOLD = th.deg2rad(th.tensor([5.0])).item() # 5 degrees
m.DEFAULT_JOINT_TYPE = JointType.JOINT_FIXED
m.DEFAULT_BREAK_FORCE = 1000 # Newton
m.DEFAULT_BREAK_TORQUE = 1000 # Newton-Meter
@ -307,7 +307,7 @@ class AttachedTo(
if joint_type == JointType.JOINT_FIXED:
# FixedJoint: the parent link, the child link and the joint frame all align.
parent_local_quat = th.Tensor([0.0, 0.0, 0.0, 1.0])
parent_local_quat = th.tensor([0.0, 0.0, 0.0, 1.0])
else:
# SphericalJoint: the same except that the rotation of the parent link doesn't align with the joint frame.
# The child link and the joint frame still align.
@ -338,7 +338,7 @@ class AttachedTo(
joint_frame_in_parent_frame_pos=th.zeros(3),
joint_frame_in_parent_frame_quat=parent_local_quat,
joint_frame_in_child_frame_pos=th.zeros(3),
joint_frame_in_child_frame_quat=th.Tensor([0.0, 0.0, 0.0, 1.0]),
joint_frame_in_child_frame_quat=th.tensor([0.0, 0.0, 0.0, 1.0]),
**kwargs,
)
@ -436,7 +436,7 @@ class AttachedTo(
log.warning(f"parent reference is not updated after attachment")
def serialize(self, state):
return th.Tensor([state["attached_obj_uuid"]], dtype=float)
return th.tensor([state["attached_obj_uuid"]], dtype=float)
def deserialize(self, state):
return dict(attached_obj_uuid=int(state[0])), 1

View File

@ -19,8 +19,8 @@ m.VISUAL_PARTICLE_OFFSET = 0.01 # Offset to visual particles' poses when checki
"""
ContainedParticlesData contains the following fields:
n_in_volume (int): number of particles in the container volume
positions (th.Tensor): (N, 3) array representing the raw global particle positions
in_volume (th.Tensor): (N,) boolean array representing whether each particle is inside the container volume or not
positions (th.tensor): (N, 3) array representing the raw global particle positions
in_volume (th.tensor): (N,) boolean array representing whether each particle is inside the container volume or not
"""
ContainedParticlesData = namedtuple("ContainedParticlesData", ("n_in_volume", "positions", "in_volume"))
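An illustrative construction of this namedtuple with toy values (not from the codebase):

import torch as th
from collections import namedtuple

ContainedParticlesData = namedtuple("ContainedParticlesData", ("n_in_volume", "positions", "in_volume"))
positions = th.rand(4, 3)                          # toy global particle positions
in_volume = th.tensor([True, False, True, False])  # toy containment mask
data = ContainedParticlesData(int(in_volume.sum()), positions, in_volume)
inside = data.positions[data.in_volume]            # (n_in_volume, 3) positions inside the volume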
@ -49,8 +49,8 @@ class ContainedParticles(RelativeObjectState, LinkBasedStateMixin):
Returns:
ContainedParticlesData: namedtuple with the following keys:
- n_in_volume (int): Number of @system's particles inside this object's container volume
- positions (th.Tensor): (N, 3) Particle positions of all @system's particles
- in_volume (th.Tensor): (N,) boolean array, True if the corresponding particle is inside this
- positions (th.tensor): (N, 3) Particle positions of all @system's particles
- in_volume (th.tensor): (N,) boolean array, True if the corresponding particle is inside this
object's container volume, else False
"""
# Value is false by default

View File

@ -25,7 +25,7 @@ m.NORMAL_Z_PERCENTAGE = 0.5
m.DEBUG_CLOTH_PROJ_VIS = False
# Angle threshold for checking smoothness of the cloth; surface normals need to be close enough to the z-axis
m.NORMAL_Z_ANGLE_DIFF = th.deg2rad(th.Tensor([45.0])).item()
m.NORMAL_Z_ANGLE_DIFF = th.deg2rad(th.tensor([45.0])).item()
"""
FoldedLevelData contains the following fields:
@ -60,7 +60,7 @@ class FoldedLevel(AbsoluteObjectState, ClothStateMixin):
normals = cloth.compute_face_normals(face_ids=cloth.keyface_idx)
# projection onto the z-axis
proj = th.abs(th.dot(normals, th.Tensor([0.0, 0.0, 1.0])))
proj = th.abs(normals @ th.tensor([0.0, 0.0, 1.0]))
percentage = (proj > math.cos(m.NORMAL_Z_ANGLE_DIFF)).float().mean()
return percentage
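A sketch of the smoothness check above on toy unit-length normals, assuming the 45-degree threshold:

import math
import torch as th

normals = th.tensor([[0.0, 0.0, 1.0], [1.0, 0.0, 0.0]])            # toy unit face normals
proj = th.abs(normals @ th.tensor([0.0, 0.0, 1.0]))                # |cos(angle to z-axis)|
percentage = (proj > math.cos(math.radians(45.0))).float().mean()  # here: 0.5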

View File

@ -225,7 +225,7 @@ class HeatSourceOrSink(AbsoluteObjectState, LinkBasedStateMixin, UpdateStateMixi
og.sim.psqi.overlap_box(
halfExtent=half_extent,
pos=aabb_center,
rot=th.Tensor([0, 0, 0, 1.0]),
rot=th.tensor([0, 0, 0, 1.0]),
reportFn=overlap_callback,
)

View File

@ -10,7 +10,7 @@ class MaxTemperature(TensorizedValueState):
This state remembers the highest temperature reached by an object.
"""
# th.Tensor: Array of Temperature.VALUE indices that correspond to the internally tracked MaxTemperature objects
# th.tensor: Array of Temperature.VALUE indices that correspond to the internally tracked MaxTemperature objects
TEMPERATURE_IDXS = None
@classmethod

View File

@ -31,7 +31,7 @@ class NextTo(KinematicsMixin, RelativeObjectState, BooleanStateMixin):
glb = max(objA_lower[dim], objB_lower[dim])
lub = min(objA_upper[dim], objB_upper[dim])
distance_vec.append(max(0, glb - lub))
distance = th.norm(th.Tensor(distance_vec))
distance = th.norm(th.tensor(distance_vec))
objA_dims = objA_upper - objA_lower
objB_dims = objB_upper - objB_lower
avg_aabb_length = th.mean(objA_dims + objB_dims)

View File

@ -62,7 +62,7 @@ class Overlaid(KinematicsMixin, RelativeObjectState, BooleanStateMixin):
# Compute the base aligned bounding box of the rigid object.
bbox_center, bbox_orn, bbox_extent, _ = other.get_base_aligned_bbox(xy_aligned=True)
vertices_local = th.Tensor(list(itertools.product((1, -1), repeat=3))) * (bbox_extent / 2)
vertices_local = th.tensor(list(itertools.product((1, -1), repeat=3))) * (bbox_extent / 2)
vertices = trimesh.transformations.transform_points(vertices_local, T.pose2mat((bbox_center, bbox_orn)))
rigid_hull = ConvexHull(vertices[:, :2])

View File

@ -375,7 +375,7 @@ class ParticleModifier(IntrinsicObjectState, LinkBasedStateMixin, UpdateStateMix
if self._projection_mesh_params is None:
self._projection_mesh_params = {
"type": mesh_type,
"extents": th.Tensor(pre_existing_mesh.GetAttribute("xformOp:scale").Get()),
"extents": th.tensor(pre_existing_mesh.GetAttribute("xformOp:scale").Get()),
}
# Otherwise, make sure we don't have a mismatch between the pre-existing shape type and the
# desired type since we can't delete the original mesh
@ -404,7 +404,7 @@ class ParticleModifier(IntrinsicObjectState, LinkBasedStateMixin, UpdateStateMix
), f"Projection mesh should have shape-based attribute {shape_attr} == {default_val}! Got: {val}"
# Set the scale based on projection mesh params
self.projection_mesh.scale = th.Tensor(self._projection_mesh_params["extents"])
self.projection_mesh.scale = th.tensor(self._projection_mesh_params["extents"])
# Make sure the object updates its meshes, and assert that there's only a single visual mesh
self.link.update_meshes()
@ -421,7 +421,7 @@ class ParticleModifier(IntrinsicObjectState, LinkBasedStateMixin, UpdateStateMix
)
self.projection_mesh.set_local_pose(
position=th.Tensor([0, 0, -z_offset]),
position=th.tensor([0, 0, -z_offset]),
orientation=T.euler2quat([0, 0, 0]),
)
@ -458,7 +458,7 @@ class ParticleModifier(IntrinsicObjectState, LinkBasedStateMixin, UpdateStateMix
og.sim.psqi.overlap_box(
halfExtent=(aabb[1] - aabb[0]) / 2.0 + m.PARTICLE_MODIFIER_ADJACENCY_AREA_MARGIN,
pos=(aabb[1] + aabb[0]) / 2.0,
rot=th.Tensor([0, 0, 0, 1.0]),
rot=th.tensor([0, 0, 0, 1.0]),
reportFn=overlap_callback,
)
return valid_hit
@ -515,8 +515,8 @@ class ParticleModifier(IntrinsicObjectState, LinkBasedStateMixin, UpdateStateMix
cond = (
lambda obj: (
th.dot(
T.quat2mat(obj.states[self.__class__].link.get_orientation()) @ th.Tensor([0, 0, 1]),
th.Tensor([0, 0, 1]),
T.quat2mat(obj.states[self.__class__].link.get_orientation()) @ th.tensor([0.0, 0.0, 1.0]),
th.tensor([0.0, 0.0, 1.0]),
)
> 0
)
@ -752,7 +752,7 @@ class ParticleModifier(IntrinsicObjectState, LinkBasedStateMixin, UpdateStateMix
self._current_step = state["current_step"]
def serialize(self, state):
return th.Tensor([state["current_step"]], dtype=float)
return th.tensor([state["current_step"]], dtype=float)
def deserialize(self, state):
current_step = int(state[0])
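The serialize/deserialize pair round-trips the state dict through a flat float tensor; a toy check:

import torch as th

state = {"current_step": 7}
flat = th.tensor([state["current_step"]], dtype=float)   # serialize
restored = dict(current_step=int(flat[0]))               # deserialize
assert restored == state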
@ -899,7 +899,7 @@ class ParticleRemover(ParticleModifier):
inbound_idxs = (
self._check_in_mesh(system.get_particles_position_orientation()[0]).nonzero()[0]
if self.obj.prim_type == PrimType.CLOTH or self.method == ParticleModifyMethod.PROJECTION
else th.Tensor(list(self.obj.states[ContactParticles].get_value(system, self.link)))
else th.tensor(list(self.obj.states[ContactParticles].get_value(system, self.link)))
)
modification_limit = self.physical_particle_modification_limit
@ -1094,7 +1094,7 @@ class ParticleApplier(ParticleModifier):
# metalink, and (b) zero relative orientation between the metalink and the projection mesh
local_pos, local_quat = self.projection_mesh.get_local_pose()
assert th.all(
th.isclose(local_pos + th.Tensor([0, 0, height / 2.0]), th.zeros_like(local_pos))
th.isclose(local_pos + th.tensor([0, 0, height / 2.0]), th.zeros_like(local_pos))
), "Projection mesh tip should align with metalink position!"
assert th.all(
th.isclose(T.quat2euler(local_quat), th.zeros(3))
@ -1148,7 +1148,7 @@ class ParticleApplier(ParticleModifier):
# We now pre-compute local particle positions that are within the projection mesh used to infer spawn pos
# We sample over the entire object AABB, assuming most will be filtered out
sampling_distance = 2 * system.particle_radius
extent = th.Tensor(self._projection_mesh_params["extents"])
extent = th.tensor(self._projection_mesh_params["extents"])
h = extent[2]
low, high = self.obj.aabb
n_particles_per_axis = ((high - low) / sampling_distance).int()
@ -1302,9 +1302,9 @@ class ParticleApplier(ParticleModifier):
# Generate particles for this group
system.generate_group_particles(
group=group,
positions=th.Tensor(particle_info["positions"]),
orientations=th.Tensor(particle_info["orientations"]),
scales=th.Tensor(particles_info[group]["scales"]),
positions=th.tensor(particle_info["positions"]),
orientations=th.tensor(particle_info["orientations"]),
scales=th.tensor(particles_info[group]["scales"]),
link_prim_paths=particle_info["link_prim_paths"],
)
# Update our particle count
@ -1321,11 +1321,11 @@ class ParticleApplier(ParticleModifier):
velocities = (
None
if self._initial_speed == 0
else -self._initial_speed * th.Tensor([hit[1] for hit in hits[:n_particles]])
else -self._initial_speed * th.tensor([hit[1] for hit in hits[:n_particles]])
)
system.generate_particles(
scene=self.obj.scene,
positions=th.Tensor([hit[0] for hit in hits[:n_particles]]),
positions=th.tensor([hit[0] for hit in hits[:n_particles]]),
velocities=velocities,
)
# Update our particle count
@ -1389,7 +1389,7 @@ class ParticleApplier(ParticleModifier):
n_samples = self._get_max_particles_limit_per_step(system=system)
r, h = self._projection_mesh_params["extents"][0] / 2, self._projection_mesh_params["extents"][2]
sampled_r_theta = th.rand(n_samples, 2)
sampled_r_theta = sampled_r_theta * th.Tensor([r, math.pi * 2]).reshape(1, 2)
sampled_r_theta = sampled_r_theta * th.tensor([r, math.pi * 2]).reshape(1, 2)
# Get start, end points in local link frame, start points to end points along the -z direction
end_points = th.stack(
[
@ -1406,7 +1406,7 @@ class ParticleApplier(ParticleModifier):
elif projection_type == "Cylinder":
# All start points are the parallel point for their corresponding end point
# i.e.: (x, y, 0)
start_points = end_points + th.Tensor([0, 0, h]).reshape(1, 3)
start_points = end_points + th.tensor([0, 0, h]).reshape(1, 3)
else:
# Other types not supported
raise ValueError(f"Unsupported projection mesh type: {projection_type}!")

View File

@ -15,7 +15,7 @@ class Pose(AbsoluteObjectState):
def _get_value(self):
pos = self.obj.get_position()
orn = self.obj.get_orientation()
return th.Tensor(pos), th.Tensor(orn)
return th.tensor(pos), th.tensor(orn)
def _has_changed(self, get_value_args, value, info):
# Only changed if the squared distance between old position and current position has

View File

@ -32,7 +32,7 @@ class IsGrasping(RelativeObjectState, BooleanStateMixin, RobotStateMixin):
# robot_pos = robot.get_position()
# object_pos = self.obj.get_position()
# return th.norm(object_pos - th.Tensor(robot_pos)) < _IN_REACH_DISTANCE_THRESHOLD
# return th.norm(object_pos - th.tensor(robot_pos)) < _IN_REACH_DISTANCE_THRESHOLD
# class InFOVOfRobot(AbsoluteObjectState, BooleanStateMixin):

View File

@ -69,7 +69,7 @@ class ModifiedParticles(RelativeObjectState):
}
def serialize(self, state):
state_flat = th.Tensor([state["n_systems"]], dtype=float)
state_flat = th.tensor([state["n_systems"]], dtype=float)
if state["n_systems"] > 0:
system_names = tuple(state.keys())[1:]
state_flat = th.cat(
@ -183,7 +183,7 @@ class Saturated(RelativeObjectState, BooleanStateMixin):
# We want diffuse_tint to sum to 2.5 to result in the final RGB to sum to 1.5 on average
# This is because an average RGB color sum to 1.5 (i.e. [0.5, 0.5, 0.5])
# (0.5 [original avg RGB per channel] + 0.1 [albedo_add]) * 2.5 = 1.5
diffuse_tint = th.Tensor([0.5, 0.5, 0.5]) + avg_color / th.sum(avg_color)
diffuse_tint = th.tensor([0.5, 0.5, 0.5]) + avg_color / th.sum(avg_color)
diffuse_tint = diffuse_tint.tolist()
return albedo_add, diffuse_tint
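A numeric check of the tint comment above, using a toy average color:

import torch as th

avg_color = th.tensor([0.2, 0.3, 0.5])                                     # toy average RGB
diffuse_tint = th.tensor([0.5, 0.5, 0.5]) + avg_color / th.sum(avg_color)
assert th.isclose(diffuse_tint.sum(), th.tensor(2.5))                      # 1.5 + 1.0 = 2.5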
@ -227,7 +227,7 @@ class Saturated(RelativeObjectState, BooleanStateMixin):
self._limits[k] = v
def serialize(self, state):
state_flat = th.Tensor([state["n_systems"], state["default_limit"]], dtype=float)
state_flat = th.tensor([state["n_systems"], state["default_limit"]], dtype=float)
if state["n_systems"] > 0:
system_names = tuple(state.keys())[2:]
state_flat = th.cat(

View File

@ -19,10 +19,10 @@ class SlicerActive(TensorizedValueState, BooleanStateMixin):
# int: Keep track of how many steps each object is waiting for
STEPS_TO_WAIT = None
# th.Tensor: Keep track of the current delay for a given slicer
# th.tensor: Keep track of the current delay for a given slicer
DELAY_COUNTER = None
# th.Tensor: Keep track of whether we touched a sliceable in the previous timestep
# th.tensor: Keep track of whether we touched a sliceable in the previous timestep
PREVIOUSLY_TOUCHING = None
# list of list of str: Body prim paths belonging to each slicer obj

View File

@ -50,10 +50,10 @@ class TensorizedValueState(AbsoluteObjectState, GlobalUpdateStateMixin):
Updates all internally tracked @values for this object state. Should be implemented by subclass.
Args:
values (th.Tensor): Tensorized value array
values (th.tensor): Tensorized value array
Returns:
th.Tensor: Updated tensorized value array
th.tensor: Updated tensorized value array
"""
raise NotImplementedError
@ -179,8 +179,8 @@ class TensorizedValueState(AbsoluteObjectState, GlobalUpdateStateMixin):
# If the state value is not an iterable, wrap it in a tensor
val = (
state[self.value_name]
if isinstance(state[self.value_name], th.Tensor)
else th.Tensor([state[self.value_name]])
if isinstance(state[self.value_name], th.Tensor)
else th.tensor([state[self.value_name]])
)
return val.flatten().float()
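Note the rename applies only to construction; runtime type checks still need the class th.Tensor, since th.tensor is a factory function, not a type:

import torch as th

x = 3.5
val = x if isinstance(x, th.Tensor) else th.tensor([x])  # isinstance needs the class
# isinstance(x, th.tensor) would raise TypeError: th.tensor is a function, not a class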

View File

@ -88,7 +88,7 @@ class ToggledOn(AbsoluteObjectState, BooleanStateMixin, LinkBasedStateMixin, Upd
self.value = new_value
# Choose which color to apply to the toggle marker
self.visual_marker.color = th.Tensor([0, 1.0, 0]) if self.value else th.Tensor([1.0, 0, 0])
self.visual_marker.color = th.tensor([0, 1.0, 0]) if self.value else th.tensor([1.0, 0, 0])
return True
@ -114,7 +114,7 @@ class ToggledOn(AbsoluteObjectState, BooleanStateMixin, LinkBasedStateMixin, Upd
else:
# Infer radius from mesh if not specified as an input
lazy.omni.isaac.core.utils.bounds.recompute_extents(prim=pre_existing_mesh)
self.scale = th.Tensor(pre_existing_mesh.GetAttribute("xformOp:scale").Get())
self.scale = th.tensor(pre_existing_mesh.GetAttribute("xformOp:scale").Get())
# Create the visual geom instance referencing the generated mesh prim
relative_prim_path = absolute_prim_path_to_scene_relative(self.obj.scene, mesh_prim_path)
@ -190,7 +190,7 @@ class ToggledOn(AbsoluteObjectState, BooleanStateMixin, LinkBasedStateMixin, Upd
self.robot_can_toggle_steps = state["hand_in_marker_steps"]
def serialize(self, state):
return th.Tensor([state["value"], state["hand_in_marker_steps"]], dtype=float)
return th.tensor([state["value"], state["hand_in_marker_steps"]], dtype=float)
def deserialize(self, state):
return dict(value=bool(state[0]), hand_in_marker_steps=int(state[1])), 2

View File

@ -80,7 +80,7 @@ class ControllableObject(BaseObject):
# Store inputs
self._control_freq = control_freq
self._controller_config = controller_config
self._reset_joint_pos = None if reset_joint_pos is None else th.Tensor(reset_joint_pos)
self._reset_joint_pos = None if reset_joint_pos is None else th.tensor(reset_joint_pos)
# Make sure action type is valid, and also save
assert_valid_key(key=action_type, valid_keys={"discrete", "continuous"}, name="action type")
@ -302,8 +302,8 @@ class ControllableObject(BaseObject):
low, high = [], []
for controller in self._controllers.values():
limits = controller.command_input_limits
low.append(th.Tensor([-float("inf")] * controller.command_dim) if limits is None else limits[0])
high.append(th.Tensor([float("inf")] * controller.command_dim) if limits is None else limits[1])
low.append(th.tensor([-float("inf")] * controller.command_dim) if limits is None else limits[0])
high.append(th.tensor([float("inf")] * controller.command_dim) if limits is None else limits[1])
return gym.spaces.Box(shape=(self.action_dim,), low=th.cat(low), high=th.cat(high), dtype=float)
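A hedged sketch of composing a Box action space from per-controller limits (toy limits; gym expects numpy-compatible bounds, hence the conversion):

import gym
import torch as th

low = [th.tensor([-1.0, -1.0]), th.tensor([-0.5])]      # toy per-controller lower bounds
high = [th.tensor([1.0, 1.0]), th.tensor([0.5])]        # toy per-controller upper bounds
space = gym.spaces.Box(low=th.cat(low).numpy(), high=th.cat(high).numpy(), shape=(3,), dtype=float)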
@ -321,7 +321,7 @@ class ControllableObject(BaseObject):
# If we're using discrete action space, we grab the specific action and use that to convert to control
if self._action_type == "discrete":
action = th.Tensor(self.discrete_action_list[action])
action = th.tensor(self.discrete_action_list[action])
# Check if the input action's length matches the action dimension
assert len(action) == self.action_dim, "Action must be dimension {}, got dim {} instead.".format(
@ -377,7 +377,7 @@ class ControllableObject(BaseObject):
# Compose controls
u_vec = th.zeros(self.n_dof)
# By default, the control type is None and the control value is 0 (th.zeros) - i.e. no control applied
u_type_vec = th.Tensor([ControlType.NONE] * self.n_dof)
u_type_vec = th.tensor([ControlType.NONE] * self.n_dof)
for group, ctrl in control.items():
idx = self._controllers[group].dof_idx
u_vec[idx] = ctrl["value"]
@ -508,15 +508,15 @@ class ControllableObject(BaseObject):
# set the targets for joints
if using_pos:
ControllableObjectViewAPI.set_joint_position_targets(
self.articulation_root_path, positions=th.Tensor(pos_vec), indices=th.Tensor(pos_idxs)
self.articulation_root_path, positions=th.tensor(pos_vec), indices=th.tensor(pos_idxs)
)
if using_vel:
ControllableObjectViewAPI.set_joint_velocity_targets(
self.articulation_root_path, velocities=th.Tensor(vel_vec), indices=th.Tensor(vel_idxs)
self.articulation_root_path, velocities=th.tensor(vel_vec), indices=th.tensor(vel_idxs)
)
if using_eff:
ControllableObjectViewAPI.set_joint_efforts(
self.articulation_root_path, efforts=th.Tensor(eff_vec), indices=th.Tensor(eff_idxs)
self.articulation_root_path, efforts=th.tensor(eff_vec), indices=th.tensor(eff_idxs)
)
def get_control_dict(self):

View File

@ -172,18 +172,18 @@ class DatasetObject(USDObject):
raise ValueError("No orientation probabilities set")
if len(self.orientations) == 0:
# Set default value
chosen_orientation = th.Tensor([0, 0, 0, 1.0])
chosen_orientation = th.tensor([0, 0, 0, 1.0])
else:
probabilities = [o["prob"] for o in self.orientations.values()]
probabilities = th.Tensor(probabilities) / th.sum(probabilities)
chosen_orientation = th.Tensor(list(self.orientations.values()))[
probabilities = th.tensor(probabilities) / sum(probabilities)
chosen_orientation = list(self.orientations.values())[
th.multinomial(probabilities, 1).item()
]["rotation"]
# Randomize yaw from -pi to pi
rot_lo, rot_hi = -1, 1
rot_num = (th.rand(1) * (rot_hi - rot_lo) + rot_lo).item()
rot_matrix = th.Tensor(
rot_matrix = th.tensor(
[
[math.cos(math.pi * rot_num), -math.sin(math.pi * rot_num), 0.0],
[math.sin(math.pi * rot_num), math.cos(math.pi * rot_num), 0.0],
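A sketch of the weighted orientation draw above, with toy entries (the "prob"/"rotation" keys mirror the code's dict layout):

import torch as th

orientations = [{"prob": 1.0, "rotation": [0, 0, 0, 1.0]}, {"prob": 3.0, "rotation": [0, 0, 1.0, 0]}]
probs = th.tensor([o["prob"] for o in orientations])
probs = probs / probs.sum()                # normalize to a distribution
idx = th.multinomial(probs, 1).item()      # draw an index with probability probs[idx]
rotation = orientations[idx]["rotation"]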
@ -230,10 +230,10 @@ class DatasetObject(USDObject):
scale = th.ones(3)
valid_idxes = self.native_bbox > 1e-4
scale[valid_idxes] = (
th.Tensor(self._load_config["bounding_box"])[valid_idxes] / self.native_bbox[valid_idxes]
th.tensor(self._load_config["bounding_box"])[valid_idxes] / self.native_bbox[valid_idxes]
)
else:
scale = th.ones(3) if self._load_config["scale"] is None else th.Tensor(self._load_config["scale"])
scale = th.ones(3) if self._load_config["scale"] is None else th.tensor(self._load_config["scale"])
# Assert that the scale does not have too small dimensions
assert th.all(scale > 1e-4), f"Scale of {self.name} is too small: {scale}"
@ -364,7 +364,7 @@ class DatasetObject(USDObject):
assert (
"ig:nativeBB" in self.property_names
), f"This dataset object '{self.name}' is expected to have native_bbox specified, but found none!"
return th.Tensor(self.get_attribute(attr="ig:nativeBB"))
return th.tensor(self.get_attribute(attr="ig:nativeBB"))
@property
def base_link_offset(self):
@ -374,7 +374,7 @@ class DatasetObject(USDObject):
Returns:
3-array: (x,y,z) base link offset if it exists
"""
return th.Tensor(self.get_attribute(attr="ig:offsetBaseLink"))
return th.tensor(self.get_attribute(attr="ig:offsetBaseLink"))
@property
def metadata(self):

View File

@ -94,7 +94,7 @@ class BaseObject(EntityPrim, Registerable, metaclass=ABCMeta):
# Create load config from inputs
load_config = dict() if load_config is None else load_config
load_config["scale"] = th.Tensor(scale) if isinstance(scale, Iterable) else scale
load_config["scale"] = th.tensor(scale) if isinstance(scale, Iterable) else scale
load_config["visible"] = visible
load_config["visual_only"] = visual_only
load_config["kinematic_only"] = kinematic_only
@ -142,7 +142,7 @@ class BaseObject(EntityPrim, Registerable, metaclass=ABCMeta):
# merely set this to be a static collider, i.e.: kinematic-only
# The custom scaling / fixed joints requirement is needed because omniverse complains about scaling that
# occurs with respect to fixed joints, as omni will "snap" bodies together otherwise
scale = th.ones(3) if self._load_config["scale"] is None else th.Tensor(self._load_config["scale"])
scale = th.ones(3) if self._load_config["scale"] is None else th.tensor(self._load_config["scale"])
if (
self.n_joints == 0
and (th.allclose(scale, th.ones(3), atol=1e-3) or self.n_fixed_joints == 0)

View File

@ -85,7 +85,7 @@ class PrimitiveObject(StatefulObject):
"""
# Compose load config and add rgba values
load_config = dict() if load_config is None else load_config
load_config["color"] = th.Tensor(rgba[:3])
load_config["color"] = th.tensor(rgba[:3])
load_config["opacity"] = rgba[3]
load_config["radius"] = radius
load_config["height"] = height
@ -211,17 +211,17 @@ class PrimitiveObject(StatefulObject):
"""
assert_valid_key(key=self._primitive_type, valid_keys=VALID_RADIUS_OBJECTS, name="primitive object with radius")
# Update the extents variable
original_extent = th.Tensor(self._extents)
original_extent = th.tensor(self._extents)
self._extents = (
th.ones(3) * radius * 2.0
if self._primitive_type == "Sphere"
else th.Tensor([radius * 2.0, radius * 2.0, self._extents[2]])
else th.tensor([radius * 2.0, radius * 2.0, self._extents[2]])
)
attr_pairs = []
for geom in self._vis_geom, self._col_geom:
if geom is not None:
for attr in (geom.GetPointsAttr(), geom.GetNormalsAttr()):
vals = th.Tensor(attr.Get()).double()
vals = th.tensor(attr.Get()).double()
attr_pairs.append([attr, vals])
geom.GetExtentAttr().Set(
lazy.pxr.Vt.Vec3fArray(
@ -266,7 +266,7 @@ class PrimitiveObject(StatefulObject):
"""
assert_valid_key(key=self._primitive_type, valid_keys=VALID_HEIGHT_OBJECTS, name="primitive object with height")
# Update the extents variable
original_extent = th.Tensor(self._extents)
original_extent = th.tensor(self._extents)
self._extents[2] = height
# Calculate the correct scaling factor and scale the points and normals appropriately
@ -274,7 +274,7 @@ class PrimitiveObject(StatefulObject):
for geom in self._vis_geom, self._col_geom:
if geom is not None:
for attr in (geom.GetPointsAttr(), geom.GetNormalsAttr()):
vals = th.Tensor(attr.Get()).double()
vals = th.tensor(attr.Get()).double()
# Scale the z axis by the scaling factor
vals[:, 2] = vals[:, 2] * scaling_factor
attr.Set(lazy.pxr.Vt.Vec3fArray([lazy.pxr.Gf.Vec3f(*v) for v in vals]))
@ -310,7 +310,7 @@ class PrimitiveObject(StatefulObject):
assert_valid_key(key=self._primitive_type, valid_keys=VALID_SIZE_OBJECTS, name="primitive object with size")
# Update the extents variable
original_extent = th.Tensor(self._extents)
original_extent = th.tensor(self._extents)
self._extents = th.ones(3) * size
# Calculate the correct scaling factor and scale the points and normals appropriately
@ -319,7 +319,7 @@ class PrimitiveObject(StatefulObject):
if geom is not None:
for attr in (geom.GetPointsAttr(), geom.GetNormalsAttr()):
# Scale all three axes by the scaling factor
vals = th.Tensor(attr.Get()).double() * scaling_factor
vals = th.tensor(attr.Get()).double() * scaling_factor
attr.Set(lazy.pxr.Vt.Vec3fArray([lazy.pxr.Gf.Vec3f(*v) for v in vals]))
geom.GetExtentAttr().Set(
lazy.pxr.Vt.Vec3fArray(
@ -353,7 +353,7 @@ class PrimitiveObject(StatefulObject):
def _load_state(self, state):
super()._load_state(state=state)
# self._extents = th.Tensor(state["extents"])
# self._extents = th.tensor(state["extents"])
if self._primitive_type in VALID_RADIUS_OBJECTS:
self.radius = state["radius"]
if self._primitive_type in VALID_HEIGHT_OBJECTS:
@ -376,6 +376,6 @@ class PrimitiveObject(StatefulObject):
return th.cat(
[
state_flat,
th.Tensor([state["radius"], state["height"], state["size"]]),
th.tensor([state["radius"], state["height"], state["size"]]),
]
).float()

View File

@ -401,7 +401,7 @@ class StatefulObject(BaseObject):
colormap.CreateAttribute("rgbaPoints", lazy.pxr.Sdf.ValueTypeNames.Float4Array, False).Set(rgbaPoints)
elif emitter_type == EmitterType.STEAM:
emitter.CreateAttribute("halfSize", lazy.pxr.Sdf.ValueTypeNames.Float3, False).Set(
tuple(bbox_extent_local * th.Tensor(m.STEAM_EMITTER_SIZE_RATIO) / 2.0)
tuple(bbox_extent_local * th.tensor(m.STEAM_EMITTER_SIZE_RATIO) / 2.0)
)
simulate.CreateAttribute("densityCellSize", lazy.pxr.Sdf.ValueTypeNames.Float, False).Set(
bbox_extent_local[2] * m.STEAM_EMITTER_DENSITY_CELL_RATIO

View File

@ -130,7 +130,7 @@ class ClothPrim(GeomPrim):
self._prim.GetAttribute("primvars:isVolume").Set(False)
# Store the default position of the points in the local frame
self._default_positions = th.Tensor(self.get_attribute(attr="points"))
self._default_positions = th.tensor(self.get_attribute(attr="points"))
@property
def visual_aabb(self):
@ -172,7 +172,7 @@ class ClothPrim(GeomPrim):
idxs (n-array or None): If set, will only calculate the requested indexed particle state
Returns:
th.Tensor: (N, 3) numpy array, where each of the N particles' positions are expressed in (x,y,z)
th.tensor: (N, 3) tensor, where each of the N particles' positions is expressed in (x,y,z)
cartesian coordinates relative to the world frame
"""
t, r = self.get_position_orientation()
@ -180,7 +180,7 @@ class ClothPrim(GeomPrim):
s = self.scale
# Don't copy to save compute, since we won't be returning a reference to the underlying object anyways
p_local = th.Tensor(self.get_attribute(attr="points"), copy=False)
p_local = th.tensor(self.get_attribute(attr="points"), copy=False)
p_local = p_local[idxs] if idxs is not None else p_local
p_world = (r @ (p_local * s).T).T + t
@ -207,7 +207,7 @@ class ClothPrim(GeomPrim):
# Fill the idxs if requested
if idxs is not None:
p_local_old = th.Tensor(self.get_attribute(attr="points"))
p_local_old = th.tensor(self.get_attribute(attr="points"))
p_local_old[idxs] = p_local
p_local = p_local_old
@ -235,10 +235,10 @@ class ClothPrim(GeomPrim):
Grabs particle indexes defining each of the faces for this cloth prim
Returns:
th.Tensor: (N, 3) numpy array, where each of the N faces are defined by the 3 particle indices
th.tensor: (N, 3) tensor, where each of the N faces is defined by the 3 particle indices
corresponding to that face's vertices
"""
return th.Tensor(self.get_attribute("faceVertexIndices")).reshape(-1, 3)
return th.tensor(self.get_attribute("faceVertexIndices")).reshape(-1, 3)
@property
def keyfaces(self):
@ -247,7 +247,7 @@ class ClothPrim(GeomPrim):
Total number of keyfaces is m.N_CLOTH_KEYFACES
Returns:
th.Tensor: (N, 3) numpy array, where each of the N keyfaces are defined by the 3 particle indices
th.tensor: (N, 3) tensor, where each of the N keyfaces is defined by the 3 particle indices
corresponding to that face's vertices
"""
return self.faces[self._keyface_idx]
@ -259,7 +259,7 @@ class ClothPrim(GeomPrim):
Total number of keypoints is m.N_CLOTH_KEYPOINTS
Returns:
th.Tensor: (N, 3) numpy array, where each of the N keypoint particles' positions are expressed in (x,y,z)
th.tensor: (N, 3) tensor, where each of the N keypoint particles' positions is expressed in (x,y,z)
cartesian coordinates relative to the world frame
"""
return self.compute_particle_positions(idxs=self._keypoint_idx)
@ -270,7 +270,7 @@ class ClothPrim(GeomPrim):
Grabs the individual particle that was pre-computed to be the closest to the centroid of this cloth prim.
Returns:
th.Tensor: centroid particle's (x,y,z) cartesian coordinates relative to the world frame
th.tensor: centroid particle's (x,y,z) cartesian coordinates relative to the world frame
"""
return self.compute_particle_positions(idxs=[self._centroid_idx])[0]
@ -280,11 +280,11 @@ class ClothPrim(GeomPrim):
Grabs individual particle velocities for this cloth prim
Returns:
th.Tensor: (N, 3) numpy array, where each of the N particles' velocities are expressed in (x,y,z)
th.tensor: (N, 3) tensor, where each of the N particles' velocities is expressed in (x,y,z)
cartesian coordinates with respect to the world frame.
"""
# the velocities attribute is w.r.t the world frame already
return th.Tensor(self.get_attribute(attr="velocities"))
return th.tensor(self.get_attribute(attr="velocities"))
@particle_velocities.setter
def particle_velocities(self, vel):
@ -292,7 +292,7 @@ class ClothPrim(GeomPrim):
Set the particle velocities of this cloth
Args:
th.Tensor: (N, 3) numpy array, where each of the N particles' velocities are expressed in (x,y,z)
th.tensor: (N, 3) tensor, where each of the N particles' velocities is expressed in (x,y,z)
cartesian coordinates with respect to the world frame
"""
assert (
@ -311,7 +311,7 @@ class ClothPrim(GeomPrim):
If None, all faces will be used
Returns:
th.Tensor: (N, 3) numpy array, where each of the N faces' normals are expressed in (x,y,z)
th.tensor: (N, 3) tensor, where each of the N faces' normals is expressed in (x,y,z)
cartesian coordinates with respect to the world frame.
"""
faces = self.faces if face_ids is None else self.faces[face_ids]
@ -326,7 +326,7 @@ class ClothPrim(GeomPrim):
positions (n-array): (N, 3, 3) array specifying the per-face particle positions
Returns:
th.Tensor: (N, 3) numpy array, where each of the N faces' normals are expressed in (x,y,z)
th.tensor: (N, 3) tensor, where each of the N faces' normals is expressed in (x,y,z)
cartesian coordinates with respect to the world frame.
"""
# Shape [F, 3]
@ -417,14 +417,14 @@ class ClothPrim(GeomPrim):
def get_linear_velocity(self):
"""
Returns:
th.Tensor: current average linear velocity of the particles of the cloth prim. Shape (3,).
th.tensor: current average linear velocity of the particles of the cloth prim. Shape (3,).
"""
return th.Tensor(self._prim.GetAttribute("velocities").Get()).mean(dim=0)
return th.tensor(self._prim.GetAttribute("velocities").Get()).mean(dim=0)
def get_angular_velocity(self):
"""
Returns:
th.Tensor: zero vector as a placeholder because a cloth prim doesn't have an angular velocity. Shape (3,).
th.tensor: zero vector as a placeholder because a cloth prim doesn't have an angular velocity. Shape (3,).
"""
return th.zeros(3)
@ -433,7 +433,7 @@ class ClothPrim(GeomPrim):
Sets the linear velocity of all the particles of the cloth prim.
Args:
velocity (th.Tensor): linear velocity to set all the particles of the cloth prim to. Shape (3,).
velocity (th.tensor): linear velocity to set all the particles of the cloth prim to. Shape (3,).
"""
vel = self.particle_velocities
vel[:] = velocity
@ -444,7 +444,7 @@ class ClothPrim(GeomPrim):
Simply returns because a cloth prim doesn't have an angular velocity
Args:
velocity (th.Tensor): linear velocity to set all the particles of the cloth prim to. Shape (3,).
velocity (th.tensor): angular velocity for the cloth prim; ignored since cloth prims have no angular velocity. Shape (3,).
"""
return
@ -555,14 +555,14 @@ class ClothPrim(GeomPrim):
# Make sure the loaded state is a tensor; it could have been accidentally cast into a list during
# JSON-serialization
self.particle_velocities = (
th.Tensor(state["particle_velocities"])
if not isinstance(state["particle_velocities"], th.Tensor)
th.tensor(state["particle_velocities"])
if not isinstance(state["particle_velocities"], th.tensor)
else state["particle_velocities"]
)
self.set_particle_positions(
positions=(
th.Tensor(state["particle_positions"])
if not isinstance(state["particle_positions"], th.Tensor)
th.tensor(state["particle_positions"])
if not isinstance(state["particle_positions"], th.tensor)
else state["particle_positions"]
)
)

View File

@ -650,7 +650,7 @@ class EntityPrim(XFormPrim):
is actively running!
Args:
positions (th.Tensor): positions to set. This should be n-DOF length if all joints are being set,
positions (th.tensor): positions to set. This should be n-DOF length if all joints are being set,
or k-length (k < n) if specific indices are being set. In this case, the length of @positions must
be the same length as @indices!
indices (None or k-array): If specified, should be k (k < n) length array of specific DOF positions to set.
@ -681,7 +681,7 @@ class EntityPrim(XFormPrim):
is actively running!
Args:
velocities (th.Tensor): velocities to set. This should be n-DOF length if all joints are being set,
velocities (th.tensor): velocities to set. This should be n-DOF length if all joints are being set,
or k-length (k < n) if specific indices are being set. In this case, the length of @velocities must
be the same length as @indices!
indices (None or k-array): If specified, should be k (k < n) length array of specific DOF velocities to set.
@ -711,7 +711,7 @@ class EntityPrim(XFormPrim):
is actively running!
Args:
efforts (th.Tensor): efforts to set. This should be n-DOF length if all joints are being set,
efforts (th.tensor): efforts to set. This should be n-DOF length if all joints are being set,
or k-length (k < n) if specific indices are being set. In this case, the length of @efforts must
be the same length as @indices!
indices (None or k-array): If specified, should be k (k < n) length array of specific DOF efforts to set.
@ -914,12 +914,12 @@ class EntityPrim(XFormPrim):
# Possibly normalize values when returning
return self._normalize_efforts(efforts=joint_efforts) if normalized else joint_efforts
def set_linear_velocity(self, velocity: th.Tensor):
def set_linear_velocity(self, velocity: th.Tensor):
"""
Sets the linear velocity of the root prim in stage.
Args:
velocity (th.Tensor): linear velocity to set the rigid prim to, in the world frame. Shape (3,).
velocity (th.tensor): linear velocity to set the rigid prim to, in the world frame. Shape (3,).
"""
self.root_link.set_linear_velocity(velocity)
@ -928,7 +928,7 @@ class EntityPrim(XFormPrim):
Gets the linear velocity of the root prim in stage.
Returns:
velocity (th.Tensor): linear velocity to set the rigid prim to, in the world frame. Shape (3,).
velocity (th.tensor): current linear velocity of the root prim, in the world frame. Shape (3,).
"""
return self.root_link.get_linear_velocity()
@ -937,7 +937,7 @@ class EntityPrim(XFormPrim):
Sets the angular velocity of the root prim in stage.
Args:
velocity (th.Tensor): angular velocity to set the rigid prim to, in the world frame. Shape (3,).
velocity (th.tensor): angular velocity to set the rigid prim to, in the world frame. Shape (3,).
"""
self.root_link.set_angular_velocity(velocity)
@ -945,7 +945,7 @@ class EntityPrim(XFormPrim):
"""Gets the angular velocity of the root prim in stage.
Returns:
velocity (th.Tensor): angular velocity to set the rigid prim to, in the world frame. Shape (3,).
velocity (th.tensor): current angular velocity of the root prim, in the world frame. Shape (3,).
"""
return self.root_link.get_angular_velocity()
@ -1042,7 +1042,7 @@ class EntityPrim(XFormPrim):
n-array: minimum values for this robot's joints. If joint does not have a range, returns -1000
for that joint
"""
return th.Tensor([joint.lower_limit for joint in self._joints.values()])
return th.tensor([joint.lower_limit for joint in self._joints.values()])
# TODO: These are cached, but they are not updated when the joint limit is changed
@cached_property
@ -1052,7 +1052,7 @@ class EntityPrim(XFormPrim):
n-array: maximum values for this robot's joints. If joint does not have a range, returns 1000
for that joint
"""
return th.Tensor([joint.upper_limit for joint in self._joints.values()])
return th.tensor([joint.upper_limit for joint in self._joints.values()])
@property
def joint_range(self):
@ -1068,7 +1068,7 @@ class EntityPrim(XFormPrim):
Returns:
n-array: maximum velocities for this robot's joints
"""
return th.Tensor([joint.max_velocity for joint in self._joints.values()])
return th.tensor([joint.max_velocity for joint in self._joints.values()])
@property
def max_joint_efforts(self):
@ -1076,7 +1076,7 @@ class EntityPrim(XFormPrim):
Returns:
n-array: maximum efforts for this robot's joints
"""
return th.Tensor([joint.max_effort for joint in self._joints.values()])
return th.tensor([joint.max_effort for joint in self._joints.values()])
@property
def joint_position_limits(self):
@ -1123,7 +1123,7 @@ class EntityPrim(XFormPrim):
Returns:
n-array: n-DOF length array specifying whether joint has a limit or not
"""
return th.Tensor([j.has_limit for j in self._joints.values()])
return th.tensor([j.has_limit for j in self._joints.values()])
@property
def disabled_collision_link_names(self):

View File

@ -78,7 +78,7 @@ class GeomPrim(XFormPrim):
return self.material.diffuse_color_constant
else:
color = self.get_attribute("primvars:displayColor")
return None if color is None else th.Tensor(color)[0]
return None if color is None else th.tensor(color)[0]
@color.setter
def color(self, rgb):
@ -91,7 +91,7 @@ class GeomPrim(XFormPrim):
if self.has_material():
self.material.diffuse_color_constant = rgb
else:
self.set_attribute("primvars:displayColor", th.Tensor(rgb))
self.set_attribute("primvars:displayColor", th.tensor(rgb))
@property
def opacity(self):
@ -103,7 +103,7 @@ class GeomPrim(XFormPrim):
return self.material.opacity_constant
else:
opacity = self.get_attribute("primvars:displayOpacity")
return None if opacity is None else th.Tensor(opacity)[0]
return None if opacity is None else th.tensor(opacity)[0]
@opacity.setter
def opacity(self, opacity):
@ -116,23 +116,23 @@ class GeomPrim(XFormPrim):
if self.has_material():
self.material.opacity_constant = opacity
else:
self.set_attribute("primvars:displayOpacity", th.Tensor([opacity]))
self.set_attribute("primvars:displayOpacity", th.tensor([opacity]))
@property
def points(self):
"""
Returns:
th.Tensor: Local poses of all points
th.tensor: Local poses of all points
"""
# If the geom is a mesh we can directly return its points.
mesh = self.prim
mesh_type = mesh.GetPrimTypeInfo().GetTypeName()
if mesh_type == "Mesh":
# If the geom is a mesh we can directly return its points.
return th.Tensor(self.prim.GetAttribute("points").Get())
return th.tensor(self.prim.GetAttribute("points").Get())
else:
# Return the vertices of the trimesh
return th.Tensor(mesh_prim_shape_to_trimesh_mesh(mesh).vertices)
return th.tensor(mesh_prim_shape_to_trimesh_mesh(mesh).vertices)
@property
def points_in_parent_frame(self):
@ -185,7 +185,7 @@ class GeomPrim(XFormPrim):
def extent(self):
"""
Returns:
th.Tensor: The unscaled 3d extent of the mesh in its local frame.
th.tensor: The unscaled 3d extent of the mesh in its local frame.
"""
points = self.points
return th.max(points, dim=0).values - th.min(points, dim=0).values
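Note that th.max/th.min with a dim argument return (values, indices) namedtuples, so the reduced tensor is on .values; a toy extent computation:

import torch as th

points = th.tensor([[0.0, 1.0, 2.0], [3.0, -1.0, 0.5]])
extent = th.max(points, dim=0).values - th.min(points, dim=0).values   # tensor([3.0, 2.0, 1.5])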

View File

@ -350,7 +350,7 @@ class JointPrim(BasePrim):
"""
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
self._articulation_view.set_max_velocities(th.Tensor([[vel]]), joint_indices=self.dof_indices)
self._articulation_view.set_max_velocities(th.tensor([[vel]]), joint_indices=self.dof_indices)
@property
def max_effort(self):
@ -376,7 +376,7 @@ class JointPrim(BasePrim):
"""
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
self._articulation_view.set_max_efforts(th.Tensor([[effort]]), joint_indices=self.dof_indices)
self._articulation_view.set_max_efforts(th.tensor([[effort]]), joint_indices=self.dof_indices)
@property
def stiffness(self):
@ -401,7 +401,7 @@ class JointPrim(BasePrim):
"""
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
self._articulation_view.set_gains(kps=th.Tensor([[stiffness]]), joint_indices=self.dof_indices)
self._articulation_view.set_gains(kps=th.tensor([[stiffness]]), joint_indices=self.dof_indices)
@property
def damping(self):
@ -426,7 +426,7 @@ class JointPrim(BasePrim):
"""
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
self._articulation_view.set_gains(kds=th.Tensor([[damping]]), joint_indices=self.dof_indices)
self._articulation_view.set_gains(kds=th.tensor([[damping]]), joint_indices=self.dof_indices)
@property
def friction(self):
@ -452,7 +452,7 @@ class JointPrim(BasePrim):
"""
self.set_attribute("physxJoint:jointFriction", friction)
if og.sim.is_playing():
self._articulation_view.set_friction_coefficients(th.Tensor([[friction]]), joint_indices=self.dof_indices)
self._articulation_view.set_friction_coefficients(th.tensor([[friction]]), joint_indices=self.dof_indices)
@property
def lower_limit(self):
@ -486,7 +486,7 @@ class JointPrim(BasePrim):
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
self._articulation_view.set_joint_limits(
th.Tensor([[lower_limit, self.upper_limit]]), joint_indices=self.dof_indices
th.tensor([[lower_limit, self.upper_limit]]), joint_indices=self.dof_indices
)
@property
@ -520,7 +520,7 @@ class JointPrim(BasePrim):
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
self._articulation_view.set_joint_limits(
th.Tensor([[self.lower_limit, upper_limit]]), joint_indices=self.dof_indices
th.tensor([[self.lower_limit, upper_limit]]), joint_indices=self.dof_indices
)
@property
@ -759,7 +759,7 @@ class JointPrim(BasePrim):
), "Trying to set joint position target, but control type is not position!"
# Standardize input
pos = th.Tensor([pos]) if self._n_dof == 1 and not isinstance(pos, Iterable) else th.Tensor(pos)
pos = th.tensor([pos]) if self._n_dof == 1 and not isinstance(pos, Iterable) else th.tensor(pos)
# Potentially de-normalize if the input is normalized
if normalized:
@ -795,7 +795,7 @@ class JointPrim(BasePrim):
), f"Trying to set joint velocity target for joint {self.name}, but control type is not velocity!"
# Standardize input
vel = th.Tensor([vel]) if self._n_dof == 1 and not isinstance(vel, Iterable) else th.Tensor(vel)
vel = th.tensor([vel]) if self._n_dof == 1 and not isinstance(vel, Iterable) else th.tensor(vel)
# Potentially de-normalize if the input is normalized
if normalized:
@ -823,7 +823,7 @@ class JointPrim(BasePrim):
assert self.articulated, "Can only set effort for articulated joints!"
# Standardize input
effort = th.Tensor([effort]) if self._n_dof == 1 and not isinstance(effort, Iterable) else th.Tensor(effort)
effort = th.tensor([effort]) if self._n_dof == 1 and not isinstance(effort, Iterable) else th.tensor(effort)
# Potentially de-normalize if the input is normalized
if normalized:
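A sketch of the scalar-vs-iterable standardization pattern repeated across these setters (standardize is a hypothetical helper name):

from collections.abc import Iterable
import torch as th

def standardize(val, n_dof=1):
    # Wrap a scalar into a 1-D tensor; pass iterables through th.tensor
    return th.tensor([val]) if n_dof == 1 and not isinstance(val, Iterable) else th.tensor(val)

assert standardize(0.3).shape == (1,)
assert standardize([0.1, 0.2], n_dof=2).shape == (2,)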

View File

@ -272,7 +272,7 @@ class MaterialPrim(BasePrim):
Returns:
3-array: this material's applied (R,G,B) color
"""
return th.Tensor(self.get_input(inp="diffuse_color_constant"))
return th.tensor(self.get_input(inp="diffuse_color_constant"))
@diffuse_color_constant.setter
def diffuse_color_constant(self, color):
@ -280,7 +280,7 @@ class MaterialPrim(BasePrim):
Args:
color (3-array): this material's applied (R,G,B) color
"""
self.set_input(inp="diffuse_color_constant", val=lazy.pxr.Gf.Vec3f(*th.Tensor(color, dtype=float)))
self.set_input(inp="diffuse_color_constant", val=lazy.pxr.Gf.Vec3f(*th.tensor(color, dtype=float)))
@property
def diffuse_texture(self):
@ -352,7 +352,7 @@ class MaterialPrim(BasePrim):
Returns:
3-array: this material's applied (R,G,B) diffuse_tint
"""
return th.Tensor(self.get_input(inp="diffuse_tint"))
return th.tensor(self.get_input(inp="diffuse_tint"))
@diffuse_tint.setter
def diffuse_tint(self, color):
@ -360,7 +360,7 @@ class MaterialPrim(BasePrim):
Args:
color (3-array): this material's applied (R,G,B) diffuse_tint
"""
self.set_input(inp="diffuse_tint", val=lazy.pxr.Gf.Vec3f(*th.Tensor(color, dtype=float)))
self.set_input(inp="diffuse_tint", val=lazy.pxr.Gf.Vec3f(*th.tensor(color, dtype=float)))
@property
def reflection_roughness_constant(self):
@ -568,7 +568,7 @@ class MaterialPrim(BasePrim):
Returns:
3-array: this material's applied (R,G,B) emissive_color
"""
return th.Tensor(self.get_input(inp="emissive_color"))
return th.tensor(self.get_input(inp="emissive_color"))
@emissive_color.setter
def emissive_color(self, color):
@ -576,7 +576,7 @@ class MaterialPrim(BasePrim):
Args:
color (3-array): this material's applied emissive_color
"""
self.set_input(inp="emissive_color", val=lazy.pxr.Gf.Vec3f(*th.Tensor(color, dtype=float)))
self.set_input(inp="emissive_color", val=lazy.pxr.Gf.Vec3f(*th.tensor(color, dtype=float)))
@property
def emissive_color_texture(self):
@ -882,7 +882,7 @@ class MaterialPrim(BasePrim):
Returns:
2-array: this material's applied texture_translate
"""
return th.Tensor(self.get_input(inp="texture_translate"))
return th.tensor(self.get_input(inp="texture_translate"))
@texture_translate.setter
def texture_translate(self, translate):
@ -890,7 +890,7 @@ class MaterialPrim(BasePrim):
Args:
translate (2-array): this material's applied (x,y) texture_translate
"""
self.set_input(inp="texture_translate", val=lazy.pxr.Gf.Vec2f(*th.Tensor(translate, dtype=float)))
self.set_input(inp="texture_translate", val=lazy.pxr.Gf.Vec2f(*th.tensor(translate, dtype=float)))
@property
def texture_rotate(self):
@ -914,7 +914,7 @@ class MaterialPrim(BasePrim):
Returns:
2-array: this material's applied texture_scale
"""
return th.Tensor(self.get_input(inp="texture_scale"))
return th.tensor(self.get_input(inp="texture_scale"))
@texture_scale.setter
def texture_scale(self, scale):
@ -922,7 +922,7 @@ class MaterialPrim(BasePrim):
Args:
scale (2-array): this material's applied (x,y) texture_scale
"""
self.set_input(inp="texture_scale", val=lazy.pxr.Gf.Vec2f(*th.Tensor(scale, dtype=float)))
self.set_input(inp="texture_scale", val=lazy.pxr.Gf.Vec2f(*th.tensor(scale, dtype=float)))
@property
def detail_texture_translate(self):
@ -930,7 +930,7 @@ class MaterialPrim(BasePrim):
Returns:
2-array: this material's applied detail_texture_translate
"""
return th.Tensor(self.get_input(inp="detail_texture_translate"))
return th.tensor(self.get_input(inp="detail_texture_translate"))
@detail_texture_translate.setter
def detail_texture_translate(self, translate):
@ -938,7 +938,7 @@ class MaterialPrim(BasePrim):
Args:
translate (2-array): this material's applied detail_texture_translate
"""
self.set_input(inp="detail_texture_translate", val=lazy.pxr.Gf.Vec2f(*th.Tensor(translate, dtype=float)))
self.set_input(inp="detail_texture_translate", val=lazy.pxr.Gf.Vec2f(*th.tensor(translate, dtype=float)))
@property
def detail_texture_rotate(self):
@ -962,7 +962,7 @@ class MaterialPrim(BasePrim):
Returns:
2-array: this material's applied detail_texture_scale
"""
return th.Tensor(self.get_input(inp="detail_texture_scale"))
return th.tensor(self.get_input(inp="detail_texture_scale"))
@detail_texture_scale.setter
def detail_texture_scale(self, scale):
@ -970,7 +970,7 @@ class MaterialPrim(BasePrim):
Args:
scale (2-array): this material's applied detail_texture_scale
"""
self.set_input(inp="detail_texture_scale", val=lazy.pxr.Gf.Vec2f(*th.Tensor(scale, dtype=float)))
self.set_input(inp="detail_texture_scale", val=lazy.pxr.Gf.Vec2f(*th.tensor(scale, dtype=float)))
@property
def exclude_from_white_mode(self):
@ -1042,7 +1042,7 @@ class MaterialPrim(BasePrim):
Returns:
3-array: this material's diffuse_reflection_color in (R,G,B)
"""
return th.Tensor(self.get_input(inp="diffuse_reflection_color"))
return th.tensor(self.get_input(inp="diffuse_reflection_color"))
@diffuse_reflection_color.setter
def diffuse_reflection_color(self, color):
@ -1050,7 +1050,7 @@ class MaterialPrim(BasePrim):
Args:
color (3-array): this material's diffuse_reflection_color in (R,G,B)
"""
self.set_input(inp="diffuse_reflection_color", val=lazy.pxr.Gf.Vec3f(*th.Tensor(color, dtype=float)))
self.set_input(inp="diffuse_reflection_color", val=lazy.pxr.Gf.Vec3f(*th.tensor(color, dtype=float)))
@property
def specular_reflection_color(self):
@ -1058,7 +1058,7 @@ class MaterialPrim(BasePrim):
Returns:
3-array: this material's specular_reflection_color in (R,G,B)
"""
return th.Tensor(self.get_input(inp="specular_reflection_color"))
return th.tensor(self.get_input(inp="specular_reflection_color"))
@specular_reflection_color.setter
def specular_reflection_color(self, color):
@ -1066,7 +1066,7 @@ class MaterialPrim(BasePrim):
Args:
color (3-array): this material's specular_reflection_color in (R,G,B)
"""
self.set_input(inp="specular_reflection_color", val=lazy.pxr.Gf.Vec3f(*th.Tensor(color, dtype=float)))
self.set_input(inp="specular_reflection_color", val=lazy.pxr.Gf.Vec3f(*th.tensor(color, dtype=float)))
@property
def specular_transmission_color(self):
@ -1074,7 +1074,7 @@ class MaterialPrim(BasePrim):
Returns:
3-array: this material's specular_transmission_color in (R,G,B)
"""
return th.Tensor(self.get_input(inp="specular_transmission_color"))
return th.tensor(self.get_input(inp="specular_transmission_color"))
@specular_transmission_color.setter
def specular_transmission_color(self, color):
@ -1082,7 +1082,7 @@ class MaterialPrim(BasePrim):
Args:
color (3-array): this material's specular_transmission_color in (R,G,B)
"""
self.set_input(inp="specular_transmission_color", val=lazy.pxr.Gf.Vec3f(*th.Tensor(color, dtype=float)))
self.set_input(inp="specular_transmission_color", val=lazy.pxr.Gf.Vec3f(*th.tensor(color, dtype=float)))
@property
def specular_transmission_scattering_color(self):
@ -1090,7 +1090,7 @@ class MaterialPrim(BasePrim):
Returns:
3-array: this material's specular_transmission_scattering_color in (R,G,B)
"""
return th.Tensor(self.get_input(inp="specular_transmission_scattering_color"))
return th.tensor(self.get_input(inp="specular_transmission_scattering_color"))
@specular_transmission_scattering_color.setter
def specular_transmission_scattering_color(self, color):
@ -1099,7 +1099,7 @@ class MaterialPrim(BasePrim):
color (3-array): this material's specular_transmission_scattering_color in (R,G,B)
"""
self.set_input(
inp="specular_transmission_scattering_color", val=lazy.pxr.Gf.Vec3f(*th.Tensor(color, dtype=float))
inp="specular_transmission_scattering_color", val=lazy.pxr.Gf.Vec3f(*th.tensor(color, dtype=float))
)
@property
@ -1144,7 +1144,7 @@ class MaterialPrim(BasePrim):
f"Tried to query glass_color shader input, "
f"but material at {self.prim_path} is not an OmniGlass material!"
)
return th.Tensor(self.get_input(inp="glass_color"))
return th.tensor(self.get_input(inp="glass_color"))
@glass_color.setter
def glass_color(self, color):
@ -1155,4 +1155,4 @@ class MaterialPrim(BasePrim):
assert self.is_glass, (
f"Tried to set glass_color shader input, " f"but material at {self.prim_path} is not an OmniGlass material!"
)
self.set_input(inp="glass_color", val=lazy.pxr.Gf.Vec3f(*th.Tensor(color, dtype=float)))
self.set_input(inp="glass_color", val=lazy.pxr.Gf.Vec3f(*th.tensor(color, dtype=float)))

View File

@ -227,7 +227,7 @@ class RigidPrim(XFormPrim):
# If we have any collision meshes, we aggregate their center of mass and volume values to set the center of mass
# for this link
if len(coms) > 0:
com = (th.Tensor(coms) * th.Tensor(vols).reshape(-1, 1)).sum(dim=0) / th.sum(vols)
com = (th.tensor(coms) * th.tensor(vols).reshape(-1, 1)).sum(dim=0) / sum(vols)
self.set_attribute("physics:centerOfMass", lazy.pxr.Gf.Vec3f(*com))
def enable_collisions(self):
@ -280,14 +280,14 @@ class RigidPrim(XFormPrim):
Sets the linear velocity of the prim in stage.
Args:
velocity (th.Tensor): linear velocity to set the rigid prim to. Shape (3,).
velocity (th.tensor): linear velocity to set the rigid prim to. Shape (3,).
"""
self._rigid_prim_view.set_linear_velocities(velocity[None, :])
def get_linear_velocity(self):
"""
Returns:
th.Tensor: current linear velocity of the the rigid prim. Shape (3,).
th.tensor: current linear velocity of the rigid prim. Shape (3,).
"""
return self._rigid_prim_view.get_linear_velocities()[0]
@ -296,14 +296,14 @@ class RigidPrim(XFormPrim):
Sets the angular velocity of the prim in stage.
Args:
velocity (th.Tensor): angular velocity to set the rigid prim to. Shape (3,).
velocity (th.tensor): angular velocity to set the rigid prim to. Shape (3,).
"""
self._rigid_prim_view.set_angular_velocities(velocity[None, :])
def get_angular_velocity(self):
"""
Returns:
th.Tensor: current angular velocity of the the rigid prim. Shape (3,).
th.tensor: current angular velocity of the rigid prim. Shape (3,).
"""
return self._rigid_prim_view.get_angular_velocities()[0]
@ -630,7 +630,7 @@ class RigidPrim(XFormPrim):
def _compute_points_on_convex_hull(self, visual):
"""
Returns:
th.Tensor or None: points on the convex hull of all points from child geom prims
th.tensor or None: points on the convex hull of all points from child geom prims
"""
meshes = self._visual_meshes if visual else self._collision_meshes
points = []
@ -657,7 +657,7 @@ class RigidPrim(XFormPrim):
def visual_boundary_points_local(self):
"""
Returns:
th.Tensor: local coords of points on the convex hull of all points from child geom prims
th.tensor: local coords of points on the convex hull of all points from child geom prims
"""
return self._compute_points_on_convex_hull(visual=True)
@ -665,7 +665,7 @@ class RigidPrim(XFormPrim):
def visual_boundary_points_world(self):
"""
Returns:
th.Tensor: world coords of points on the convex hull of all points from child geom prims
th.tensor: world coords of points on the convex hull of all points from child geom prims
"""
local_points = self.visual_boundary_points_local
if local_points is None:
@ -676,7 +676,7 @@ class RigidPrim(XFormPrim):
def collision_boundary_points_local(self):
"""
Returns:
th.Tensor: local coords of points on the convex hull of all points from child geom prims
th.tensor: local coords of points on the convex hull of all points from child geom prims
"""
return self._compute_points_on_convex_hull(visual=False)
@ -684,7 +684,7 @@ class RigidPrim(XFormPrim):
def collision_boundary_points_world(self):
"""
Returns:
th.Tensor: world coords of points on the convex hull of all points from child geom prims
th.tensor: world coords of points on the convex hull of all points from child geom prims
"""
local_points = self.collision_boundary_points_local
if local_points is None:
@ -812,8 +812,8 @@ class RigidPrim(XFormPrim):
super()._load_state(state=state)
# Set velocities if not kinematic
self.set_linear_velocity(th.Tensor(state["lin_vel"]))
self.set_angular_velocity(th.Tensor(state["ang_vel"]))
self.set_linear_velocity(th.tensor(state["lin_vel"]))
self.set_angular_velocity(th.tensor(state["ang_vel"]))
def serialize(self, state):
# Run super first

View File

@ -3,7 +3,6 @@ from collections.abc import Iterable
import torch as th
import trimesh.transformations
from scipy.spatial.transform import Rotation as R
import omnigibson as og
import omnigibson.lazy as lazy
@ -67,7 +66,7 @@ class XFormPrim(BasePrim):
# Cache the original scale from the USD so that when EntityPrim sets the scale for each link (Rigid/ClothPrim),
# the new scale is with respect to the original scale. XFormPrim's scale always matches the scale in the USD.
self.original_scale = th.Tensor(self.get_attribute("xformOp:scale"))
self.original_scale = th.tensor(self.get_attribute("xformOp:scale"))
# Grab the attached material if it exists
if self.has_material():
@ -288,14 +287,14 @@ class XFormPrim(BasePrim):
"""
properties = self.prim.GetPropertyNames()
if position is not None:
position = lazy.pxr.Gf.Vec3d(*th.Tensor(position, dtype=float))
position = lazy.pxr.Gf.Vec3d(*th.tensor(position, dtype=float))
if "xformOp:translate" not in properties:
lazy.carb.log_error(
"Translate property needs to be set for {} before setting its position".format(self.name)
)
self.set_attribute("xformOp:translate", position)
if orientation is not None:
orientation = th.Tensor(orientation, dtype=float)[[3, 0, 1, 2]]
orientation = th.tensor(orientation, dtype=float)[[3, 0, 1, 2]]
if "xformOp:orient" not in properties:
lazy.carb.log_error(
"Orient property needs to be set for {} before setting its orientation".format(self.name)
@ -325,12 +324,12 @@ class XFormPrim(BasePrim):
Gets prim's scale with respect to the world's frame.
Returns:
th.Tensor: scale applied to the prim's dimensions in the world frame. shape is (3, ).
th.tensor: scale applied to the prim's dimensions in the world frame. shape is (3, ).
"""
prim_tf = lazy.pxr.UsdGeom.Xformable(self._prim).ComputeLocalToWorldTransform(lazy.pxr.Usd.TimeCode.Default())
transform = lazy.pxr.Gf.Transform()
transform.SetMatrix(prim_tf)
return th.Tensor(transform.GetScale())
return th.tensor(transform.GetScale())
@property
def scaled_transform(self):
@ -348,11 +347,11 @@ class XFormPrim(BasePrim):
Gets prim's scale with respect to the local frame (the parent's frame).
Returns:
th.Tensor: scale applied to the prim's dimensions in the local frame. shape is (3, ).
th.tensor: scale applied to the prim's dimensions in the local frame. shape is (3, ).
"""
scale = self.get_attribute("xformOp:scale")
assert scale is not None, "Attribute 'xformOp:scale' is None for prim {}".format(self.name)
return th.Tensor(scale)
return th.tensor(scale)
@scale.setter
def scale(self, scale):
@ -360,10 +359,10 @@ class XFormPrim(BasePrim):
Sets prim's scale with respect to the local frame (the prim's parent frame).
Args:
scale (float or th.Tensor): scale to be applied to the prim's dimensions. shape is (3, ).
scale (float or th.tensor): scale to be applied to the prim's dimensions. shape is (3, ).
Defaults to None, which means left unchanged.
"""
scale = th.Tensor(scale, dtype=float) if isinstance(scale, Iterable) else th.ones(3) * scale
scale = th.tensor(scale, dtype=float) if isinstance(scale, Iterable) else th.ones(3) * scale
assert th.all(scale > 0), f"Scale {scale} must consist of positive numbers."
scale = lazy.pxr.Gf.Vec3d(*scale)
properties = self.prim.GetPropertyNames()
@ -425,7 +424,7 @@ class XFormPrim(BasePrim):
return dict(pos=pos, ori=ori)
def _load_state(self, state):
pos, orn = th.Tensor(state["pos"]), th.Tensor(state["ori"])
pos, orn = th.tensor(state["pos"]), th.tensor(state["ori"])
if self.scene is not None:
pos, orn = T.pose_transform(*self.scene.prim.get_position_orientation(), pos, orn)
self.set_position_orientation(pos, orn)
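`T.pose_transform` above comes from OmniGibson's transform utilities and re-expresses the loaded local pose in the scene prim's frame. As a self-contained reference for what such a composition does, here is a sketch assuming the scalar-last quaternion convention used throughout this diff (not the library's actual implementation):

```python
import torch as th

def quat_rotate(q, v):
    """Rotate a (3,) vector by a unit quaternion in (x, y, z, w) order."""
    xyz, w = q[:3], q[3]
    t = 2.0 * th.linalg.cross(xyz, v)
    return v + w * t + th.linalg.cross(xyz, t)

def quat_mul(a, b):
    """Hamilton product a * b, both in (x, y, z, w) order."""
    ax, ay, az, aw = a.tolist()
    bx, by, bz, bw = b.tolist()
    return th.tensor([
        aw * bx + ax * bw + ay * bz - az * by,
        aw * by - ax * bz + ay * bw + az * bx,
        aw * bz + ax * by - ay * bx + az * bw,
        aw * bw - ax * bx - ay * by - az * bz,
    ])

def pose_transform(frame_pos, frame_quat, pos, quat):
    """Express a pose given in `frame`'s coordinates in the enclosing frame."""
    return quat_rotate(frame_quat, pos) + frame_pos, quat_mul(frame_quat, quat)

# Identity scene rotation, scene origin at (1, 0, 0):
world_pos, world_quat = pose_transform(
    th.tensor([1.0, 0.0, 0.0]), th.tensor([0.0, 0.0, 0.0, 1.0]),
    th.tensor([0.0, 1.0, 0.0]), th.tensor([0.0, 0.0, 0.0, 1.0]),
)
print(world_pos)  # tensor([1., 1., 0.])
```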


@ -163,7 +163,7 @@ class SettingItem:
elif self.setting_type == lazy.omni.kit.widget.settings.SettingType.COLOR3:
assert (
isinstance(value, (list, tuple, th.Tensor)) and len(value) == 3
), f"Setting {self.path} must be a list of 3 numbers within range [0,1]."
for v in value:
assert (
@ -181,7 +181,7 @@ class SettingItem:
elif self.setting_type == lazy.omni.kit.widget.settings.SettingType.DOUBLE3:
assert (
isinstance(value, (list, tuple, th.Tensor)) and len(value) == 3
), f"Setting {self.path} must be a list of 3 floats."
for v in value:
assert isinstance(v, (int, float)), f"Setting {self.path} must be a list of 3 floats."
@ -189,7 +189,7 @@ class SettingItem:
elif self.setting_type == lazy.omni.kit.widget.settings.SettingType.INT2:
assert (
isinstance(value, (list, tuple, th.Tensor)) and len(value) == 2
), f"Setting {self.path} must be a list of 2 ints."
for v in value:
assert isinstance(v, int), f"Setting {self.path} must be a list of 2 ints."
@ -197,7 +197,7 @@ class SettingItem:
elif self.setting_type == lazy.omni.kit.widget.settings.SettingType.DOUBLE2:
assert (
isinstance(value, (list, tuple, th.Tensor)) and len(value) == 2
), f"Setting {self.path} must be a list of 2 floats."
for v in value:
assert isinstance(v, (int, float)), f"Setting {self.path} must be a list of 2 floats."
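One caveat the checks above illustrate: the blanket rename must skip `isinstance` targets. `th.Tensor` is a class and therefore a valid argument for `isinstance`; `th.tensor` is a factory function, and passing it raises `TypeError` at runtime. The same reasoning applies to return annotations such as `-> th.Tensor`, which name the class. A minimal demonstration:

```python
import torch as th

value = th.tensor([0.2, 0.4, 0.6])
assert isinstance(value, (list, tuple, th.Tensor)) and len(value) == 3  # OK

try:
    isinstance(value, (list, tuple, th.tensor))  # function, not a type
except TypeError as e:
    print(e)  # isinstance() arg 2 must be a type ...
```

Note that the broken form is latent: a list or tuple value short-circuits to True before the bad tuple entry is reached, so the check only blows up once a tensor (or anything else) is passed.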


@ -403,7 +403,7 @@ class BehaviorRobot(ManipulationRobot, LocomotionRobot, ActiveCameraRobot):
@property
def assisted_grasp_start_points(self):
side_coefficients = {"left": th.Tensor([1, -1, 1]), "right": th.Tensor([1, 1, 1])}
side_coefficients = {"left": th.tensor([1, -1, 1]), "right": th.tensor([1, 1, 1])}
return {
arm: [
GraspingPoint(link_name=f"{arm}_{m.PALM_LINK_NAME}", position=m.PALM_BASE_POS),
@ -418,7 +418,7 @@ class BehaviorRobot(ManipulationRobot, LocomotionRobot, ActiveCameraRobot):
@property
def assisted_grasp_end_points(self):
side_coefficients = {"left": th.Tensor([1, -1, 1]), "right": th.Tensor([1, 1, 1])}
side_coefficients = {"left": th.tensor([1, -1, 1]), "right": th.tensor([1, 1, 1])}
return {
arm: [
GraspingPoint(link_name=f"{arm}_{finger}", position=m.FINGER_TIP_POS * side_coefficients[arm])
@ -438,7 +438,7 @@ class BehaviorRobot(ManipulationRobot, LocomotionRobot, ActiveCameraRobot):
[len(finger.contact_list()) > 0 for finger in self.finger_links[hand_name]]
)
def teleop_data_to_action(self, teleop_action) -> th.Tensor:
def teleop_data_to_action(self, teleop_action) -> th.tensor:
"""
Generates an action for the BehaviorRobot to perform based on teleop action data dict.
@ -458,8 +458,8 @@ class BehaviorRobot(ManipulationRobot, LocomotionRobot, ActiveCameraRobot):
# Update body action space
if teleop_action.is_valid["head"]:
head_pos, head_orn = teleop_action.head[:3], T.euler2quat(teleop_action.head[3:6])
des_body_pos = head_pos - th.Tensor([0, 0, m.BODY_HEIGHT_OFFSET])
des_body_rpy = th.Tensor([0, 0, R.from_quat(head_orn).as_euler("XYZ")[2]])
des_body_pos = head_pos - th.tensor([0, 0, m.BODY_HEIGHT_OFFSET])
des_body_rpy = th.tensor([0, 0, R.from_quat(head_orn).as_euler("XYZ")[2]])
des_body_orn = T.euler2quat(des_body_rpy)
else:
des_body_pos, des_body_orn = self.get_position_orientation()
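The desired body pose tracks only the head's yaw, so the body never pitches or rolls with the headset. A sketch of the yaw extraction for a hypothetical 45-degree head rotation (scipy's `from_quat` also expects scalar-last quaternions):

```python
import torch as th
from scipy.spatial.transform import Rotation as R

head_orn = [0.0, 0.0, 0.3826834, 0.9238795]     # (x, y, z, w): 45 deg about +z
yaw = R.from_quat(head_orn).as_euler("XYZ")[2]  # keep only the z Euler angle
des_body_rpy = th.tensor([0.0, 0.0, float(yaw)])
print(des_body_rpy)  # tensor([0.0000, 0.0000, 0.7854]): pi/4 yaw, zero roll/pitch
```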


@ -165,7 +165,7 @@ class Fetch(ManipulationRobot, TwoWheelRobot, ActiveCameraRobot):
@property
def tucked_default_joint_pos(self):
return th.Tensor(
return th.tensor(
[
0.0,
0.0, # wheels
@ -189,28 +189,28 @@ class Fetch(ManipulationRobot, TwoWheelRobot, ActiveCameraRobot):
pos = th.zeros(self.n_joints)
pos[self.base_control_idx] = 0.0
pos[self.trunk_control_idx] = 0.02 + self.default_trunk_offset
pos[self.camera_control_idx] = th.Tensor([0.0, 0.45])
pos[self.gripper_control_idx[self.default_arm]] = th.Tensor([0.05, 0.05]) # open gripper
pos[self.camera_control_idx] = th.tensor([0.0, 0.45])
pos[self.gripper_control_idx[self.default_arm]] = th.tensor([0.05, 0.05]) # open gripper
# Choose arm based on setting
if self.default_arm_pose == "vertical":
pos[self.arm_control_idx[self.default_arm]] = th.Tensor(
pos[self.arm_control_idx[self.default_arm]] = th.tensor(
[-0.94121, -0.64134, 1.55186, 1.65672, -0.93218, 1.53416, 2.14474]
)
elif self.default_arm_pose == "diagonal15":
pos[self.arm_control_idx[self.default_arm]] = th.Tensor(
pos[self.arm_control_idx[self.default_arm]] = th.tensor(
[-0.95587, -0.34778, 1.46388, 1.47821, -0.93813, 1.4587, 1.9939]
)
elif self.default_arm_pose == "diagonal30":
pos[self.arm_control_idx[self.default_arm]] = th.Tensor(
pos[self.arm_control_idx[self.default_arm]] = th.tensor(
[-1.06595, -0.22184, 1.53448, 1.46076, -0.84995, 1.36904, 1.90996]
)
elif self.default_arm_pose == "diagonal45":
pos[self.arm_control_idx[self.default_arm]] = th.Tensor(
pos[self.arm_control_idx[self.default_arm]] = th.tensor(
[-1.11479, -0.0685, 1.5696, 1.37304, -0.74273, 1.3983, 1.79618]
)
elif self.default_arm_pose == "horizontal":
pos[self.arm_control_idx[self.default_arm]] = th.Tensor(
pos[self.arm_control_idx[self.default_arm]] = th.tensor(
[-1.43016, 0.20965, 1.86816, 1.77576, -0.27289, 1.31715, 2.01226]
)
else:
@ -371,7 +371,7 @@ class Fetch(ManipulationRobot, TwoWheelRobot, ActiveCameraRobot):
Returns:
n-array: Indices in low-level control vector corresponding to [Left, Right] wheel joints.
"""
return th.Tensor([0, 1])
return th.tensor([0, 1])
@property
def trunk_control_idx(self):
@ -379,7 +379,7 @@ class Fetch(ManipulationRobot, TwoWheelRobot, ActiveCameraRobot):
Returns:
n-array: Indices in low-level control vector corresponding to trunk joint.
"""
return th.Tensor([2])
return th.tensor([2])
@property
def camera_control_idx(self):
@ -387,15 +387,15 @@ class Fetch(ManipulationRobot, TwoWheelRobot, ActiveCameraRobot):
Returns:
n-array: Indices in low-level control vector corresponding to [tilt, pan] camera joints.
"""
return th.Tensor([3, 5])
return th.tensor([3, 5])
@property
def arm_control_idx(self):
return {self.default_arm: th.Tensor([4, 6, 7, 8, 9, 10, 11])}
return {self.default_arm: th.tensor([4, 6, 7, 8, 9, 10, 11])}
@property
def gripper_control_idx(self):
return {self.default_arm: th.Tensor([12, 13])}
return {self.default_arm: th.tensor([12, 13])}
@property
def disabled_collision_pairs(self):
@ -493,7 +493,7 @@ class Fetch(ManipulationRobot, TwoWheelRobot, ActiveCameraRobot):
@property
def arm_workspace_range(self):
return {self.default_arm: [th.deg2rad(th.Tensor([-45])).item(), th.deg2rad(th.Tensor([45])).item()]}
return {self.default_arm: [th.deg2rad(th.tensor([-45])).item(), th.deg2rad(th.tensor([45])).item()]}
@property
def eef_usd_path(self):
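The `th.deg2rad(th.tensor([45])).item()` idiom used for `arm_workspace_range` appears because `deg2rad` is an elementwise tensor op: the scalar is boxed into a 1-element tensor and unboxed again with `.item()`. For reference:

```python
import math
import torch as th

a = th.deg2rad(th.tensor([45.0])).item()  # tensor round-trip, float32 precision
b = math.radians(45.0)                    # plain-Python equivalent
assert math.isclose(a, b, rel_tol=1e-6)   # both ~0.7853981
```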


@ -98,8 +98,8 @@ class FrankaPanda(ManipulationRobot):
self._eef_link_names = "panda_hand"
self._finger_link_names = ["panda_leftfinger", "panda_rightfinger"]
self._finger_joint_names = ["panda_finger_joint1", "panda_finger_joint2"]
self._default_robot_model_joint_pos = th.Tensor([0.00, -1.3, 0.00, -2.87, 0.00, 2.00, 0.75, 0.00, 0.00])
self._teleop_rotation_offset = th.Tensor([-1, 0, 0, 0])
self._default_robot_model_joint_pos = th.tensor([0.00, -1.3, 0.00, -2.87, 0.00, 2.00, 0.75, 0.00, 0.00])
self._teleop_rotation_offset = th.tensor([-1, 0, 0, 0])
self._ag_start_points = [
GraspingPoint(link_name="panda_rightfinger", position=[0.0, 0.001, 0.045]),
]
@ -114,7 +114,7 @@ class FrankaPanda(ManipulationRobot):
self._finger_joint_names = [f"joint_{i}_0" for i in [12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3]]
# position where the hand is parallel to the ground
self._default_robot_model_joint_pos = th.cat(([0.86, -0.27, -0.68, -1.52, -0.18, 1.29, 1.72], th.zeros(16)))
self._teleop_rotation_offset = th.Tensor([0, 0.7071, 0, 0.7071])
self._teleop_rotation_offset = th.tensor([0, 0.7071, 0, 0.7071])
self._ag_start_points = [
GraspingPoint(link_name=f"base_link", position=[0.015, 0, -0.03]),
GraspingPoint(link_name=f"base_link", position=[0.015, 0, -0.08]),
@ -137,7 +137,7 @@ class FrankaPanda(ManipulationRobot):
]
# position where the hand is parallel to the ground
self._default_robot_model_joint_pos = th.cat(([0.86, -0.27, -0.68, -1.52, -0.18, 1.29, 1.72], th.zeros(16)))
self._teleop_rotation_offset = th.Tensor([-0.7071, 0.7071, 0, 0])
self._teleop_rotation_offset = th.tensor([-0.7071, 0.7071, 0, 0])
self._ag_start_points = [
GraspingPoint(link_name=f"palm_center", position=[0, -0.025, 0.035]),
GraspingPoint(link_name=f"palm_center", position=[0, 0.03, 0.035]),
@ -157,7 +157,7 @@ class FrankaPanda(ManipulationRobot):
self._finger_joint_names = [f"joint{i}" for i in hand_part_names]
# position where the hand is parallel to the ground
self._default_robot_model_joint_pos = th.cat(([0.86, -0.27, -0.68, -1.52, -0.18, 1.29, 1.72], th.zeros(12)))
self._teleop_rotation_offset = th.Tensor([0, 0, 0.707, 0.707])
self._teleop_rotation_offset = th.tensor([0, 0, 0.707, 0.707])
# TODO: add ag support for inspire hand
self._ag_start_points = [
# GraspingPoint(link_name=f"base_link", position=[0, -0.025, 0.035]),
@ -252,7 +252,7 @@ class FrankaPanda(ManipulationRobot):
@property
def gripper_control_idx(self):
return {
self.default_arm: th.Tensor(
self.default_arm: th.tensor(
[list(self.joints.keys()).index(name) for name in self.finger_joint_names[self.default_arm]]
)
}


@ -31,7 +31,7 @@ class Freight(TwoWheelRobot):
Returns:
n-array: Indices in low-level control vector corresponding to [Left, Right] wheel joints.
"""
return th.Tensor([0, 1])
return th.tensor([0, 1])
@property
def _default_joint_pos(self):


@ -25,7 +25,7 @@ class Husky(LocomotionRobot):
@property
def base_control_idx(self):
return th.Tensor([0, 1, 2, 3])
return th.tensor([0, 1, 2, 3])
@property
def _default_joint_pos(self):


@ -30,7 +30,7 @@ class Locobot(TwoWheelRobot):
Returns:
n-array: Indices in low-level control vector corresponding to [Left, Right] wheel joints.
"""
return th.Tensor([1, 0])
return th.tensor([1, 0])
@property
def _default_joint_pos(self):


@ -124,7 +124,7 @@ class LocomotionRobot(BaseRobot):
Args:
delta (3-array): (x,y,z) cartesian delta base position
"""
new_pos = th.Tensor(delta) + self.get_position()
new_pos = th.tensor(delta) + self.get_position()
self.set_position(position=new_pos)
def move_forward(self, delta=0.05):
@ -134,7 +134,7 @@ class LocomotionRobot(BaseRobot):
Args:
delta (float): delta base position forward
"""
self.move_by(quat2mat(self.get_orientation()).dot(th.Tensor([delta, 0, 0])))
self.move_by(quat2mat(self.get_orientation()).dot(th.tensor([delta, 0, 0])))
def move_backward(self, delta=0.05):
"""
@ -143,7 +143,7 @@ class LocomotionRobot(BaseRobot):
Args:
delta (float): delta base position backward
"""
self.move_by(quat2mat(self.get_orientation()).dot(th.Tensor([-delta, 0, 0])))
self.move_by(quat2mat(self.get_orientation()).dot(th.tensor([-delta, 0, 0])))
def move_left(self, delta=0.05):
"""
@ -152,7 +152,7 @@ class LocomotionRobot(BaseRobot):
Args:
delta (float): delta base position left
"""
self.move_by(quat2mat(self.get_orientation()).dot(th.Tensor([0, -delta, 0])))
self.move_by(quat2mat(self.get_orientation()).dot(th.tensor([0, -delta, 0])))
def move_right(self, delta=0.05):
"""
@ -161,7 +161,7 @@ class LocomotionRobot(BaseRobot):
Args:
delta (float): delta base position right
"""
self.move_by(quat2mat(self.get_orientation()).dot(th.Tensor([0, delta, 0])))
self.move_by(quat2mat(self.get_orientation()).dot(th.tensor([0, delta, 0])))
def turn_left(self, delta=0.03):
"""


@ -442,7 +442,7 @@ class ManipulationRobot(BaseRobot):
self.articulation_root_path, self.eef_link_names[arm]
)
)
dic["grasp_{}".format(arm)] = th.Tensor([self.is_grasping(arm)])
dic["grasp_{}".format(arm)] = th.tensor([self.is_grasping(arm)])
dic["gripper_{}_qpos".format(arm)] = joint_positions[self.gripper_control_idx[arm]]
dic["gripper_{}_qvel".format(arm)] = joint_velocities[self.gripper_control_idx[arm]]
@ -838,7 +838,7 @@ class ManipulationRobot(BaseRobot):
if candidate_obj is None or link_name not in candidate_obj.links:
continue
candidate_link = candidate_obj.links[link_name]
dist = th.norm(th.Tensor(candidate_link.get_position()) - th.Tensor(gripper_center_pos))
dist = th.norm(th.tensor(candidate_link.get_position()) - th.tensor(gripper_center_pos))
candidate_data.append((prim_path, dist))
if not candidate_data:
@ -1006,8 +1006,8 @@ class ManipulationRobot(BaseRobot):
"control_limits": self.control_limits,
"dof_idx": self.arm_control_idx[arm],
"command_output_limits": (
th.Tensor([-0.2, -0.2, -0.2, -0.5, -0.5, -0.5]),
th.Tensor([0.2, 0.2, 0.2, 0.5, 0.5, 0.5]),
th.tensor([-0.2, -0.2, -0.2, -0.5, -0.5, -0.5]),
th.tensor([0.2, 0.2, 0.2, 0.5, 0.5, 0.5]),
),
"mode": "pose_delta_ori",
"smoothing_filter_size": 2,
@ -1032,8 +1032,8 @@ class ManipulationRobot(BaseRobot):
"control_limits": self.control_limits,
"dof_idx": self.arm_control_idx[arm],
"command_output_limits": (
th.Tensor([-0.2, -0.2, -0.2, -0.5, -0.5, -0.5]),
th.Tensor([0.2, 0.2, 0.2, 0.5, 0.5, 0.5]),
th.tensor([-0.2, -0.2, -0.2, -0.5, -0.5, -0.5]),
th.tensor([0.2, 0.2, 0.2, 0.5, 0.5, 0.5]),
),
"mode": "pose_delta_ori",
"workspace_pose_limiter": None,
@ -1189,7 +1189,7 @@ class ManipulationRobot(BaseRobot):
Default is "default" which corresponds to the first entry in self.arm_names
ag_data (None or 2-tuple): if specified, assisted-grasp object, link tuple (i.e. :(BaseObject, RigidPrim)).
Otherwise, does a no-op
contact_pos (None or th.Tensor): if specified, contact position to use for grasp.
contact_pos (None or th.tensor): if specified, contact position to use for grasp.
"""
arm = self.default_arm if arm == "default" else arm
@ -1206,7 +1206,7 @@ class ManipulationRobot(BaseRobot):
force_data, _ = self._find_gripper_contacts(arm=arm, return_contact_positions=True)
for c_link_prim_path, c_contact_pos in force_data:
if c_link_prim_path == ag_link.prim_path:
contact_pos = th.Tensor(c_contact_pos)
contact_pos = th.tensor(c_contact_pos)
break
assert contact_pos is not None
@ -1214,7 +1214,7 @@ class ManipulationRobot(BaseRobot):
# Need to find distance between robot and contact point in robot link's local frame and
# ag link and contact point in ag link's local frame
joint_frame_pos = contact_pos
joint_frame_orn = th.Tensor([0, 0, 0, 1.0])
joint_frame_orn = th.tensor([0, 0, 0, 1.0])
eef_link_pos, eef_link_orn = self.eef_links[arm].get_position_orientation()
parent_frame_pos, parent_frame_orn = T.relative_pose_transform(
joint_frame_pos, joint_frame_orn, eef_link_pos, eef_link_orn
@ -1328,7 +1328,7 @@ class ManipulationRobot(BaseRobot):
"""
Same as _calculate_in_hand_object_rigid, except for cloth. Only one should be used at any given time.
Calculates which object to assisted-grasp for arm @arm. Returns a (BaseObject, RigidPrim, th.Tensor) tuple or
Calculates which object to assisted-grasp for arm @arm. Returns a (BaseObject, RigidPrim, th.tensor) tuple or
None if no valid AG-enabled object can be found.
1) Check if the gripper is closed enough
@ -1344,7 +1344,7 @@ class ManipulationRobot(BaseRobot):
Returns:
None or 3-tuple: If a valid assisted-grasp object is found,
returns the corresponding (object, object_link, attachment_point_position), i.e.
((BaseObject, RigidPrim, th.Tensor)) to the contacted in-hand object. Otherwise, returns None
((BaseObject, RigidPrim, th.tensor)) to the contacted in-hand object. Otherwise, returns None
"""
# TODO (eric): Assume joint_pos = 0 means fully closed
GRIPPER_FINGER_CLOSE_THRESHOLD = 0.03
@ -1385,7 +1385,7 @@ class ManipulationRobot(BaseRobot):
arm (str): specific arm to establish grasp.
Default is "default" which corresponds to the first entry in self.arm_names
ag_data (None or 3-tuple): If specified, should be the corresponding
(object, object_link, attachment_point_position), i.e. ((BaseObject, RigidPrim, th.Tensor)) to the
(object, object_link, attachment_point_position), i.e. ((BaseObject, RigidPrim, th.tensor)) to the
contacted in-hand object
"""
arm = self.default_arm if arm == "default" else arm
@ -1524,9 +1524,9 @@ class ManipulationRobot(BaseRobot):
Rotational offset that will be applied for teleoperation
such that [0, 0, 0, 1] as action will keep the robot eef pointing at +x axis
"""
return {arm: th.Tensor([0, 0, 0, 1]) for arm in self.arm_names}
return {arm: th.tensor([0, 0, 0, 1]) for arm in self.arm_names}
def teleop_data_to_action(self, teleop_action) -> th.Tensor:
def teleop_data_to_action(self, teleop_action) -> th.tensor:
"""
Generate action data from teleoperation action data
NOTE: This implementation only supports IK/OSC controller for arm and MultiFingerGripperController for gripper.
@ -1534,7 +1534,7 @@ class ManipulationRobot(BaseRobot):
Args:
teleop_action (TeleopAction): teleoperation action data
Returns:
th.Tensor: array of action data for arm and gripper
th.tensor: array of action data for arm and gripper
"""
action = super().teleop_data_to_action(teleop_action)
hands = ["left", "right"] if self.n_arms == 2 else ["right"]


@ -510,13 +510,13 @@ class BaseRobot(USDObject, ControllableObject, GymObservable):
"""
return self._reset_joint_pos_aabb_extent
def teleop_data_to_action(self, teleop_action) -> th.Tensor:
def teleop_data_to_action(self, teleop_action) -> th.tensor:
"""
Generate action data from teleoperation action data
Args:
teleop_action (TeleopAction): teleoperation action data
Returns:
th.Tensor: array of action data filled with update value
th.tensor: array of action data filled with update value
"""
return th.zeros(self.action_dim)


@ -201,10 +201,10 @@ class Tiago(ManipulationRobot, LocomotionRobot, ActiveCameraRobot):
# Keep the current joint positions for the base joints
pos[self.base_idx] = self.get_joint_positions()[self.base_idx]
pos[self.trunk_control_idx] = 0
pos[self.camera_control_idx] = th.Tensor([0.0, 0.0])
pos[self.camera_control_idx] = th.tensor([0.0, 0.0])
for arm in self.arm_names:
pos[self.gripper_control_idx[arm]] = th.Tensor([0.045, 0.045]) # open gripper
pos[self.arm_control_idx[arm]] = th.Tensor([-1.10, 1.47, 2.71, 1.71, -1.57, 1.39, 0])
pos[self.gripper_control_idx[arm]] = th.tensor([0.045, 0.045]) # open gripper
pos[self.arm_control_idx[arm]] = th.tensor([-1.10, 1.47, 2.71, 1.71, -1.57, 1.39, 0])
return pos
@property
@ -213,28 +213,28 @@ class Tiago(ManipulationRobot, LocomotionRobot, ActiveCameraRobot):
# Keep the current joint positions for the base joints
pos[self.base_idx] = self.get_joint_positions()[self.base_idx]
pos[self.trunk_control_idx] = 0.02 + self.default_trunk_offset
pos[self.camera_control_idx] = th.Tensor([0.0, -0.45])
pos[self.camera_control_idx] = th.tensor([0.0, -0.45])
# Choose arm joint pos based on setting
for arm in self.arm_names:
pos[self.gripper_control_idx[arm]] = th.Tensor([0.045, 0.045]) # open gripper
pos[self.gripper_control_idx[arm]] = th.tensor([0.045, 0.045]) # open gripper
if self.default_arm_pose == "vertical":
pos[self.arm_control_idx[arm]] = th.Tensor(
pos[self.arm_control_idx[arm]] = th.tensor(
[0.85846, -0.14852, 1.81008, 1.63368, 0.13764, -1.32488, -0.68415]
)
elif self.default_arm_pose == "diagonal15":
pos[self.arm_control_idx[arm]] = th.Tensor(
pos[self.arm_control_idx[arm]] = th.tensor(
[0.90522, -0.42811, 2.23505, 1.64627, 0.76867, -0.79464, 2.05251]
)
elif self.default_arm_pose == "diagonal30":
pos[self.arm_control_idx[arm]] = th.Tensor(
pos[self.arm_control_idx[arm]] = th.tensor(
[0.71883, -0.02787, 1.86002, 1.52897, 0.52204, -0.99741, 2.03113]
)
elif self.default_arm_pose == "diagonal45":
pos[self.arm_control_idx[arm]] = th.Tensor(
pos[self.arm_control_idx[arm]] = th.tensor(
[0.66058, -0.14251, 1.77547, 1.43345, 0.65988, -1.02741, 1.81302]
)
elif self.default_arm_pose == "horizontal":
pos[self.arm_control_idx[arm]] = th.Tensor(
pos[self.arm_control_idx[arm]] = th.tensor(
[0.61511, 0.49229, 1.46306, 1.24919, 1.08282, -1.28865, 1.50910]
)
else:
@ -317,7 +317,7 @@ class Tiago(ManipulationRobot, LocomotionRobot, ActiveCameraRobot):
lin_vel_global, _ = T.pose_transform([0, 0, 0], cur_orn, u_vec[self.base_idx[:3]], [0, 0, 0, 1])
ang_vel_global, _ = T.pose_transform([0, 0, 0], cur_orn, u_vec[self.base_idx[3:]], [0, 0, 0, 1])
u_vec[self.base_control_idx] = th.Tensor([lin_vel_global[0], lin_vel_global[1], ang_vel_global[2]])
u_vec[self.base_control_idx] = th.tensor([lin_vel_global[0], lin_vel_global[1], ang_vel_global[2]])
return u_vec, u_type_vec
def _get_proprioception_dict(self):
@ -351,7 +351,7 @@ class Tiago(ManipulationRobot, LocomotionRobot, ActiveCameraRobot):
fcns = super().get_control_dict()
native_fcn = fcns.get_fcn("eef_right_pos_relative")
fcns["eef_right_pos_relative"] = lambda: (
native_fcn() + th.Tensor([0, 0, -self.get_joint_positions()[self.trunk_control_idx[0]]])
native_fcn() + th.tensor([0, 0, -self.get_joint_positions()[self.trunk_control_idx[0]]])
)
return fcns
@ -459,7 +459,7 @@ class Tiago(ManipulationRobot, LocomotionRobot, ActiveCameraRobot):
n-array: Indices in low-level control vector corresponding to the three controllable 1DoF base joints
"""
joints = list(self.joints.keys())
return th.Tensor([joints.index(f"base_footprint_{component}_joint") for component in ["x", "y", "rz"]])
return th.tensor([joints.index(f"base_footprint_{component}_joint") for component in ["x", "y", "rz"]])
@property
def base_idx(self):
@ -468,7 +468,7 @@ class Tiago(ManipulationRobot, LocomotionRobot, ActiveCameraRobot):
n-array: Indices in low-level control vector corresponding to the six 1DoF base joints
"""
joints = list(self.joints.keys())
return th.Tensor(
return th.tensor(
[joints.index(f"base_footprint_{component}_joint") for component in ["x", "y", "z", "rx", "ry", "rz"]]
)
@ -478,7 +478,7 @@ class Tiago(ManipulationRobot, LocomotionRobot, ActiveCameraRobot):
Returns:
n-array: Indices in low-level control vector corresponding to trunk joint.
"""
return th.Tensor([6])
return th.tensor([6])
@property
def camera_control_idx(self):
@ -486,19 +486,19 @@ class Tiago(ManipulationRobot, LocomotionRobot, ActiveCameraRobot):
Returns:
n-array: Indices in low-level control vector corresponding to [tilt, pan] camera joints.
"""
return th.Tensor([9, 12])
return th.tensor([9, 12])
@property
def arm_control_idx(self):
return {
"left": th.Tensor([7, 10, 13, 15, 17, 19, 21]),
"right": th.Tensor([8, 11, 14, 16, 18, 20, 22]),
"combined": th.Tensor([7, 8, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]),
"left": th.tensor([7, 10, 13, 15, 17, 19, 21]),
"right": th.tensor([8, 11, 14, 16, 18, 20, 22]),
"combined": th.tensor([7, 8, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]),
}
@property
def gripper_control_idx(self):
return {"left": th.Tensor([23, 24]), "right": th.Tensor([25, 26])}
return {"left": th.tensor([23, 24]), "right": th.tensor([25, 26])}
@property
def finger_lengths(self):
@ -694,8 +694,8 @@ class Tiago(ManipulationRobot, LocomotionRobot, ActiveCameraRobot):
@property
def arm_workspace_range(self):
return {
"left": [th.deg2rad(th.Tensor([15])).item(), th.deg2rad(th.Tensor([75])).item()],
"right": [th.deg2rad(th.Tensor([-75])).item(), th.deg2rad(th.Tensor([-15])).item()],
"left": [th.deg2rad(th.tensor([15])).item(), th.deg2rad(th.tensor([75])).item()],
"right": [th.deg2rad(th.tensor([-75])).item(), th.deg2rad(th.tensor([-15])).item()],
}
def get_position_orientation(self):
@ -708,7 +708,7 @@ class Tiago(ManipulationRobot, LocomotionRobot, ActiveCameraRobot):
position = current_position
if orientation is None:
orientation = current_orientation
position, orientation = th.Tensor(position), th.Tensor(orientation)
position, orientation = th.tensor(position), th.tensor(orientation)
assert math.isclose(
th.norm(orientation), 1, abs_tol=1e-3
), f"{self.name} desired orientation {orientation} is not a unit quaternion."
@ -742,7 +742,7 @@ class Tiago(ManipulationRobot, LocomotionRobot, ActiveCameraRobot):
lazy.pxr.Gf.Quatf(*orientation[[3, 0, 1, 2]].tolist())
)
def set_linear_velocity(self, velocity: th.Tensor):
def set_linear_velocity(self, velocity: th.tensor):
# Transform the desired linear velocity from the world frame to the root_link ("base_footprint_x") frame
# Note that this will also set the target to be the desired linear velocity (i.e. the robot will try to maintain
# such velocity), which is different from the default behavior of set_linear_velocity for all other objects.
@ -752,11 +752,11 @@ class Tiago(ManipulationRobot, LocomotionRobot, ActiveCameraRobot):
self.joints["base_footprint_y_joint"].set_vel(velocity_in_root_link[1], drive=False)
self.joints["base_footprint_z_joint"].set_vel(velocity_in_root_link[2], drive=False)
def get_linear_velocity(self) -> th.Tensor:
def get_linear_velocity(self) -> th.tensor:
# Note that the link we are interested in is self.base_footprint_link, not self.root_link
return self.base_footprint_link.get_linear_velocity()
def set_angular_velocity(self, velocity: th.Tensor) -> None:
def set_angular_velocity(self, velocity: th.tensor) -> None:
# See comments of self.set_linear_velocity
orn = self.root_link.get_orientation()
velocity_in_root_link = T.quat2mat(orn).T @ velocity
@ -764,7 +764,7 @@ class Tiago(ManipulationRobot, LocomotionRobot, ActiveCameraRobot):
self.joints["base_footprint_ry_joint"].set_vel(velocity_in_root_link[1], drive=False)
self.joints["base_footprint_rz_joint"].set_vel(velocity_in_root_link[2], drive=False)
def get_angular_velocity(self) -> th.Tensor:
def get_angular_velocity(self) -> th.tensor:
# Note that the link we are interested in is self.base_footprint_link, not self.root_link
return self.base_footprint_link.get_angular_velocity()
@ -775,7 +775,7 @@ class Tiago(ManipulationRobot, LocomotionRobot, ActiveCameraRobot):
for arm in self.arm_names
}
def teleop_data_to_action(self, teleop_action) -> th.Tensor:
def teleop_data_to_action(self, teleop_action) -> th.tensor:
action = ManipulationRobot.teleop_data_to_action(self, teleop_action)
action[self.base_action_idx] = teleop_action.base * 0.1
return action
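Tiago's holonomic base interprets world-frame velocity commands by rotating them into the root link's frame with the transpose (inverse) of the base rotation, as in `T.quat2mat(orn).T @ velocity` above. A self-contained sketch of that change of frame:

```python
import torch as th

def quat2mat(q):
    """Rotation matrix from a unit quaternion in (x, y, z, w) order."""
    x, y, z, w = q.tolist()
    return th.tensor([
        [1 - 2 * (y * y + z * z), 2 * (x * y - w * z), 2 * (x * z + w * y)],
        [2 * (x * y + w * z), 1 - 2 * (x * x + z * z), 2 * (y * z - w * x)],
        [2 * (x * z - w * y), 2 * (y * z + w * x), 1 - 2 * (x * x + y * y)],
    ])

orn = th.tensor([0.0, 0.0, 0.7071068, 0.7071068])  # base yawed 90 deg
world_vel = th.tensor([1.0, 0.0, 0.0])
root_vel = quat2mat(orn).T @ world_vel             # ~tensor([0., -1., 0.])
```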


@ -27,7 +27,7 @@ class Turtlebot(TwoWheelRobot):
Returns:
n-array: Indices in low-level control vector corresponding to [Left, Right] wheel joints.
"""
return th.Tensor([0, 1])
return th.tensor([0, 1])
@property
def _default_joint_pos(self):


@ -78,8 +78,8 @@ class TwoWheelRobot(LocomotionRobot):
ang_vel = (r_vel - l_vel) / self.wheel_axle_length
# Add info
dic["dd_base_lin_vel"] = th.Tensor([lin_vel])
dic["dd_base_ang_vel"] = th.Tensor([ang_vel])
dic["dd_base_lin_vel"] = th.tensor([lin_vel])
dic["dd_base_ang_vel"] = th.tensor([ang_vel])
return dic
@ -151,7 +151,7 @@ class TwoWheelRobot(LocomotionRobot):
classes.add("TwoWheelRobot")
return classes
def teleop_data_to_action(self, teleop_action) -> th.Tensor:
def teleop_data_to_action(self, teleop_action) -> th.tensor:
"""
Generate action data from teleoperation action data
NOTE: This implementation only supports DifferentialDriveController.
@ -159,11 +159,11 @@ class TwoWheelRobot(LocomotionRobot):
Args:
teleop_action (TeleopAction): teleoperation action data
Returns:
th.Tensor: array of action data
th.tensor: array of action data
"""
action = super().teleop_data_to_action(teleop_action)
assert isinstance(
self._controllers["base"], DifferentialDriveController
), "Only DifferentialDriveController is supported!"
action[self.base_action_idx] = th.Tensor([teleop_action.base[0], teleop_action.base[2]]) * 0.3
action[self.base_action_idx] = th.tensor([teleop_action.base[0], teleop_action.base[2]]) * 0.3
return action
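`dd_base_lin_vel` and `dd_base_ang_vel` are the differential-drive base velocities recovered from the two wheel speeds. A sketch with hypothetical wheel geometry (the forward-speed line is not shown in the hunk above, but follows the standard kinematics):

```python
import torch as th

wheel_radius, wheel_axle_length = 0.038, 0.23  # hypothetical geometry (m)
l_joint_vel, r_joint_vel = 2.0, 3.0            # wheel spin rates (rad/s)

l_vel = l_joint_vel * wheel_radius             # rim (linear) speeds
r_vel = r_joint_vel * wheel_radius
lin_vel = (l_vel + r_vel) / 2.0                # base forward speed
ang_vel = (r_vel - l_vel) / wheel_axle_length  # base yaw rate

dic = {"dd_base_lin_vel": th.tensor([lin_vel]), "dd_base_ang_vel": th.tensor([ang_vel])}
```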


@ -139,7 +139,7 @@ class VX300S(ManipulationRobot):
@property
def _default_joint_pos(self):
return th.Tensor([0.0, -0.849879, 0.258767, 0.0, 1.2831712, 0.0, 0.057, 0.057])
return th.tensor([0.0, -0.849879, 0.258767, 0.0, 1.2831712, 0.0, 0.057, 0.057])
@property
def finger_lengths(self):


@ -149,8 +149,8 @@ class SceneGraphBuilder(object):
# Update the position of everything that's already in the scene using our pose relative to the last frame.
old_desired_to_new_desired = world_to_desired_frame @ self._last_desired_frame_to_world
nodes = list(self._G.nodes)
poses = th.Tensor([self._G.nodes[obj]["pose"] for obj in nodes])
bbox_poses = th.Tensor([self._G.nodes[obj]["bbox_pose"] for obj in nodes])
poses = th.tensor([self._G.nodes[obj]["pose"] for obj in nodes])
bbox_poses = th.tensor([self._G.nodes[obj]["bbox_pose"] for obj in nodes])
updated_poses = old_desired_to_new_desired @ poses
updated_bbox_poses = old_desired_to_new_desired @ bbox_poses
for i, obj in enumerate(nodes):
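The update multiplies a single 4x4 frame change against the whole (N, 4, 4) stack of poses in one broadcasted matmul. One caveat for the tensorized version: `th.tensor` only consumes nested scalars, so if the stored per-node poses are already tensors, batching them needs `th.stack` instead. A shape sketch:

```python
import torch as th

frame_change = th.eye(4)
frame_change[:3, 3] = th.tensor([1.0, 0.0, 0.0])  # hypothetical +x frame shift

poses = th.stack([th.eye(4) for _ in range(3)])   # (N, 4, 4) stacked poses
updated = frame_change @ poses                    # broadcasts over the batch
assert updated.shape == (3, 4, 4) and updated[0, 0, 3] == 1.0
```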


@ -120,7 +120,7 @@ class StaticTraversableScene(TraversableScene):
height_adjustment = height - self.floor_heights[floor]
else:
height_adjustment = self.floor_heights[floor] - self._scene_prim.get_position()[2] + additional_elevation
self._scene_prim.set_position(th.Tensor([0, 0, height_adjustment]))
self._scene_prim.set_position(th.tensor([0, 0, height_adjustment]))
def get_floor_height(self, floor=0):
"""


@ -166,13 +166,13 @@ class ScanSensor(BaseSensor):
# Grab vector of corresponding angles for each scan line
angles = th.arange(
-th.deg2rad(th.Tensor([self.horizontal_fov / 2])).item(),
th.deg2rad(th.Tensor([self.horizontal_fov / 2])).item(),
th.deg2rad(th.Tensor([self.horizontal_resolution])).item(),
-th.deg2rad(th.tensor([self.horizontal_fov / 2])).item(),
th.deg2rad(th.tensor([self.horizontal_fov / 2])).item(),
th.deg2rad(th.tensor([self.horizontal_resolution])).item(),
)
# Convert into 3D unit vectors for each angle
unit_vector_laser = th.Tensor([[th.cos(ang), th.sin(ang), 0.0] for ang in angles])
unit_vector_laser = th.tensor([[th.cos(ang), th.sin(ang), 0.0] for ang in angles])
# Scale unit vectors by corresponding laser scan distances
assert ((scan >= 0.0) & (scan <= 1.0)).all(), "scan out of valid range [0, 1]"
@ -186,7 +186,7 @@ class ScanSensor(BaseSensor):
base_pos, base_ori = self.occupancy_grid_local_link.get_position_orientation()
scan_local = quat2mat(base_ori).T.dot((scan_world - base_pos).T).T
scan_local = scan_local[:, :2]
scan_local = th.cat([th.Tensor([[0, 0]]), scan_local, th.Tensor([[0, 0]])], dim=0)
scan_local = th.cat([th.tensor([[0, 0]]), scan_local, th.tensor([[0, 0]])], dim=0)
# flip y axis
scan_local[:, 1] *= -1
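Each normalized range reading is paired with a per-ray unit direction before being lifted to a 3D point. The per-angle list comprehension above can also be written in vectorized form; a sketch with hypothetical FOV and resolution values:

```python
import torch as th

horizontal_fov, horizontal_resolution = 220.0, 1.0  # hypothetical, in degrees
half = th.deg2rad(th.tensor([horizontal_fov / 2])).item()
step = th.deg2rad(th.tensor([horizontal_resolution])).item()
angles = th.arange(-half, half, step)

unit_vector_laser = th.stack(
    [th.cos(angles), th.sin(angles), th.zeros_like(angles)], dim=-1
)  # (n_rays, 3): one planar unit direction per scan line
```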


@ -25,10 +25,10 @@ class BaseSensorNoise(Registerable, metaclass=ABCMeta):
identical call to self.corrupt(...)
Args:
obs (th.Tensor): observation numpy array of values of arbitrary dimension normalized to range [0.0, 1.0]
obs (th.tensor): observation numpy array of values of arbitrary dimension normalized to range [0.0, 1.0]
Returns:
th.Tensor: Corrupted observation numpy array if self.enabled is True, otherwise this is a pass-through
th.tensor: Corrupted observation numpy array if self.enabled is True, otherwise this is a pass-through
"""
return self.corrupt(obs=obs)
@ -37,10 +37,10 @@ class BaseSensorNoise(Registerable, metaclass=ABCMeta):
If this noise is enabled, corrupts observation @obs by adding sensor noise to sensor reading.
Args:
obs (th.Tensor): observation numpy array of values of arbitrary dimension normalized to range [0.0, 1.0]
obs (th.tensor): observation numpy array of values of arbitrary dimension normalized to range [0.0, 1.0]
Returns:
th.Tensor: Corrupted observation numpy array if self.enabled is True, otherwise this is a pass-through
th.tensor: Corrupted observation numpy array if self.enabled is True, otherwise this is a pass-through
"""
# Run sanity check to make sure obs is in acceptable range
assert len(obs[(obs < 0.0) | (obs > 1.0)]) == 0, "sensor reading has to be between [0.0, 1.0]"
@ -53,10 +53,10 @@ class BaseSensorNoise(Registerable, metaclass=ABCMeta):
Corrupts observation @obs by adding sensor noise to sensor reading
Args:
obs (th.Tensor): observation numpy array of values of arbitrary dimension normalized to range [0.0, 1.0]
obs (th.tensor): observation numpy array of values of arbitrary dimension normalized to range [0.0, 1.0]
Returns:
th.Tensor: Corrupted observation numpy array
th.tensor: Corrupted observation numpy array
"""
raise NotImplementedError()


@ -313,10 +313,10 @@ class VisionSensor(BaseSensor):
Also, correct the id_to_labels input with the labels from semantic_class_name_to_id() and return it.
Args:
img (th.Tensor): Semantic segmentation image to remap
img (th.tensor): Semantic segmentation image to remap
id_to_labels (dict): Dictionary of semantic IDs to class labels
Returns:
th.Tensor: Remapped semantic segmentation image
th.tensor: Remapped semantic segmentation image
dict: Corrected id_to_labels dictionary
"""
# Preprocess id_to_labels to feed into the remapper
@ -354,13 +354,13 @@ class VisionSensor(BaseSensor):
Also, correct the id_to_labels input with our new labels and return it.
Args:
img (th.Tensor): Instance segmentation image to remap
img (th.tensor): Instance segmentation image to remap
id_to_labels (dict): Dictionary of instance IDs to class labels
semantic_img (th.Tensor): Semantic segmentation image to use for instance registry
semantic_img (th.tensor): Semantic segmentation image to use for instance registry
semantic_labels (dict): Dictionary of semantic IDs to class labels
id (bool): Whether to remap for instance ID segmentation
Returns:
th.Tensor: Remapped instance segmentation image
th.tensor: Remapped instance segmentation image
dict: Corrected id_to_labels dictionary
"""
# Sometimes 0 and 1 show up in the image, but they are not in the id_to_labels mapping
@ -524,24 +524,24 @@ class VisionSensor(BaseSensor):
Returns a dictionary of keyword-mapped relevant intrinsic and extrinsic camera parameters for this vision sensor.
The returned dictionary includes the following keys and their corresponding data types:
- "cameraAperture": th.Tensor (float32) - Camera aperture dimensions.
- "cameraApertureOffset": th.Tensor (float32) - Offset of the camera aperture.
- "cameraFisheyeLensP": th.Tensor (float32) - Fisheye lens P parameter.
- "cameraFisheyeLensS": th.Tensor (float32) - Fisheye lens S parameter.
- "cameraAperture": th.tensor (float32) - Camera aperture dimensions.
- "cameraApertureOffset": th.tensor (float32) - Offset of the camera aperture.
- "cameraFisheyeLensP": th.tensor (float32) - Fisheye lens P parameter.
- "cameraFisheyeLensS": th.tensor (float32) - Fisheye lens S parameter.
- "cameraFisheyeMaxFOV": float - Maximum field of view for fisheye lens.
- "cameraFisheyeNominalHeight": int - Nominal height for fisheye lens.
- "cameraFisheyeNominalWidth": int - Nominal width for fisheye lens.
- "cameraFisheyeOpticalCentre": th.Tensor (float32) - Optical center for fisheye lens.
- "cameraFisheyePolynomial": th.Tensor (float32) - Polynomial parameters for fisheye lens distortion.
- "cameraFisheyeOpticalCentre": th.tensor (float32) - Optical center for fisheye lens.
- "cameraFisheyePolynomial": th.tensor (float32) - Polynomial parameters for fisheye lens distortion.
- "cameraFocalLength": float - Focal length of the camera.
- "cameraFocusDistance": float - Focus distance of the camera.
- "cameraFStop": float - F-stop value of the camera.
- "cameraModel": str - Camera model identifier.
- "cameraNearFar": th.Tensor (float32) - Near and far plane distances.
- "cameraProjection": th.Tensor (float32) - Camera projection matrix.
- "cameraViewTransform": th.Tensor (float32) - Camera view transformation matrix.
- "cameraNearFar": th.tensor (float32) - Near and far plane distances.
- "cameraProjection": th.tensor (float32) - Camera projection matrix.
- "cameraViewTransform": th.tensor (float32) - Camera view transformation matrix.
- "metersPerSceneUnit": float - Scale factor from scene units to meters.
- "renderProductResolution": th.Tensor (int32) - Resolution of the rendered product.
- "renderProductResolution": th.tensor (int32) - Resolution of the rendered product.
Returns:
dict: Keyword-mapped relevant intrinsic and extrinsic camera parameters for this vision sensor.
@ -652,7 +652,7 @@ class VisionSensor(BaseSensor):
Returns:
2-tuple: [min, max] value of the sensor's clipping range, in meters
"""
return th.Tensor(self.get_attribute("clippingRange"))
return th.tensor(self.get_attribute("clippingRange"))
@clipping_range.setter
def clipping_range(self, limits):
@ -723,7 +723,7 @@ class VisionSensor(BaseSensor):
cx = width / 2
cy = height / 2
intrinsic_matrix = th.Tensor([[fx, 0.0, cx], [0.0, fy, cy], [0.0, 0.0, 1.0]])
intrinsic_matrix = th.tensor([[fx, 0.0, cx], [0.0, fy, cy], [0.0, 0.0, 1.0]])
return intrinsic_matrix
@property
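The pinhole intrinsic matrix assembled above maps camera-frame points to pixel coordinates. A quick worked use with hypothetical parameters:

```python
import torch as th

width, height = 640, 480        # hypothetical resolution
fx = fy = 600.0                 # hypothetical focal lengths (pixels)
cx, cy = width / 2, height / 2  # principal point at the image center
K = th.tensor([[fx, 0.0, cx], [0.0, fy, cy], [0.0, 0.0, 1.0]])

point = th.tensor([0.1, -0.05, 2.0])   # camera-frame point, z > 0
u, v, w = K @ point
print((u / w).item(), (v / w).item())  # 350.0 225.0
```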


@ -364,8 +364,8 @@ def launch_simulator(*args, **kwargs):
if gm.RENDER_VIEWER_CAMERA:
self._set_viewer_camera()
self.viewer_camera.set_position_orientation(
position=th.Tensor(m.DEFAULT_VIEWER_CAMERA_POS),
orientation=th.Tensor(m.DEFAULT_VIEWER_CAMERA_QUAT),
position=th.tensor(m.DEFAULT_VIEWER_CAMERA_POS),
orientation=th.tensor(m.DEFAULT_VIEWER_CAMERA_QUAT),
)
self.viewer_width = viewer_width
self.viewer_height = viewer_height
@ -531,7 +531,7 @@ def launch_simulator(*args, **kwargs):
name="ground_plane",
z_position=0,
size=None,
color=None if floor_plane_color is None else th.Tensor(floor_plane_color),
color=None if floor_plane_color is None else th.tensor(floor_plane_color),
visible=floor_plane_visible,
# TODO: update with new PhysicsMaterial API
# static_friction=static_friction,


@ -158,7 +158,7 @@ class MacroParticleSystem(BaseSystem):
def _dump_state(self):
state = super()._dump_state()
state["scales"] = (
th.Tensor([particle.scale for particle in self.particles.values()])
th.tensor([particle.scale for particle in self.particles.values()])
if self.particles is not None
else th.empty(0)
)
@ -340,7 +340,7 @@ class MacroParticleSystem(BaseSystem):
@property
def color(self):
return th.Tensor(self._color)
return th.tensor(self._color)
class MacroVisualParticleSystem(MacroParticleSystem, VisualParticleSystem):
@ -398,7 +398,7 @@ class MacroVisualParticleSystem(MacroParticleSystem, VisualParticleSystem):
self._particles_local_mat = None
# Maps group name to array of face_ids where particles are located if the group object is a cloth type
# Maps group name to th.Tensor of face IDs (int) that particles are attached to
# Maps group name to th.tensor of face IDs (int) that particles are attached to
self._cloth_face_ids = None
# Default behavior for this class -- whether to clip generated particles halfway into objects when sampling
@ -448,7 +448,7 @@ class MacroVisualParticleSystem(MacroParticleSystem, VisualParticleSystem):
orientations = T.axisangle2quat(T.vecs2axisangle(z_up, normals))
if not self._CLIP_INTO_OBJECTS and z_extent > 0:
z_offsets = (
th.Tensor([z_extent * particle.scale[2] for particle in self._group_particles[group].values()])
th.tensor([z_extent * particle.scale[2] for particle in self._group_particles[group].values()])
/ 2.0
)
# Shift the particles halfway up
@ -659,9 +659,9 @@ class MacroVisualParticleSystem(MacroParticleSystem, VisualParticleSystem):
if success:
self.generate_group_particles(
group=group,
positions=th.Tensor(positions),
orientations=th.Tensor(orientations),
scales=th.Tensor(scales),
positions=th.tensor(positions),
orientations=th.tensor(orientations),
scales=th.tensor(scales),
link_prim_paths=link_prim_paths,
)
# If we're a cloth, store the face_id as well
@ -776,7 +776,7 @@ class MacroVisualParticleSystem(MacroParticleSystem, VisualParticleSystem):
pos, ori = self._compute_batch_particles_position_orientation(particles=particles, local=local)
positions = pos if positions is None else positions
orientations = ori if orientations is None else orientations
lens = th.Tensor([len(particles), len(positions), len(orientations)])
lens = th.tensor([len(particles), len(positions), len(orientations)])
assert lens.min() == lens.max(), "Got mismatched particles, positions, and orientations!"
particle_local_poses_batch = th.zeros((n_particles, 4, 4))
@ -889,7 +889,7 @@ class MacroVisualParticleSystem(MacroParticleSystem, VisualParticleSystem):
ignore_scale (bool): Whether to ignore the parent_link scale when computing the local transform
Returns:
th.Tensor: (4, 4) homogeneous transform matrix
th.tensor: (4, 4) homogeneous transform matrix
"""
particle = self.particles[name]
parent_obj = self._particles_info[name]["obj"]
@ -998,7 +998,7 @@ class MacroVisualParticleSystem(MacroParticleSystem, VisualParticleSystem):
# Also store the cloth face IDs as a vector
if is_cloth:
self._cloth_face_ids[self.get_group_name(obj)] = th.Tensor(
self._cloth_face_ids[self.get_group_name(obj)] = th.tensor(
[self._particles_info[particle_name]["face_id"] for particle_name in self._group_particles[name]]
)
@ -1063,7 +1063,7 @@ class MacroVisualParticleSystem(MacroParticleSystem, VisualParticleSystem):
particle_idns.append(info["particle_idns"])
particle_attached_references.append(info["particle_attached_references"])
else:
indices_to_remove = th.cat((indices_to_remove, th.Tensor(info["particle_indices"], dtype=int)))
indices_to_remove = th.cat((indices_to_remove, th.tensor(info["particle_indices"], dtype=int)))
self._sync_particle_groups(
group_objects=group_objects,
particle_idns=particle_idns,
@ -1232,7 +1232,7 @@ class MacroPhysicalParticleSystem(MacroParticleSystem, PhysicalParticleSystem):
# Compute particle radius
vertices = (
th.Tensor(self.particle_object.get_attribute("points"))
th.tensor(self.particle_object.get_attribute("points"))
* self.particle_object.scale
* self.max_scale.reshape(1, 3)
)
@ -1330,7 +1330,7 @@ class MacroPhysicalParticleSystem(MacroParticleSystem, PhysicalParticleSystem):
pos, ori = self.get_particle_position_orientation(idx=idx)
orientation = ori if orientation is None else orientation
position = pos if position is None else (position - T.quat2mat(orientation) @ self._particle_offset)
self.particles_view.set_transforms(th.cat([position, orientation]).reshape(1, -1), indices=th.Tensor([idx]))
self.particles_view.set_transforms(th.cat([position, orientation]).reshape(1, -1), indices=th.tensor([idx]))
def set_particle_local_pose(self, idx, position=None, orientation=None):
self.set_particle_position_orientation(idx=idx, position=position, orientation=orientation)
@ -1384,7 +1384,7 @@ class MacroPhysicalParticleSystem(MacroParticleSystem, PhysicalParticleSystem):
l_vel, a_vel = self.get_particles_velocities()
lin_vel = l_vel if lin_vel is None else lin_vel
ang_vel = a_vel if ang_vel is None else ang_vel
self.particles_view.set_velocities(th.cat([lin_vel, ang_vel]).reshape(1, -1), indices=th.Tensor([idx]))
self.particles_view.set_velocities(th.cat([lin_vel, ang_vel]).reshape(1, -1), indices=th.tensor([idx]))
@property
def particle_radius(self):
@ -1417,14 +1417,14 @@ class MacroPhysicalParticleSystem(MacroParticleSystem, PhysicalParticleSystem):
Generates new particles
Args:
positions (th.Tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions
orientations (None or th.Tensor): (n_particles, 4) shaped array specifying per-particle (x,y,z,w) quaternion
positions (th.tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions
orientations (None or th.tensor): (n_particles, 4) shaped array specifying per-particle (x,y,z,w) quaternion
orientations. If not specified, all will be sampled randomly
velocities (None or th.Tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) velocities.
velocities (None or th.tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) velocities.
If not specified, all will be set to 0
angular_velocities (None or th.Tensor): (n_particles, 3) shaped array specifying per-particle (ax,ay,az)
angular_velocities (None or th.tensor): (n_particles, 3) shaped array specifying per-particle (ax,ay,az)
angular velocities. If not specified, all will be set to 0
scales (None or th.Tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) scales.
scales (None or th.tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) scales.
If not specified, will be uniformly randomly sampled from (self.min_scale, self.max_scale)
**kwargs (dict): Any additional keyword-specific arguments required by subclass implementation
"""


@ -126,12 +126,12 @@ class PhysxParticleInstancer(BasePrim):
"""
Adds particles to this particle instancer.
positions (th.Tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions.
velocities (None or th.Tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) velocities.
positions (th.tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions.
velocities (None or th.tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) velocities.
If not specified, all will be set to 0
orientations (None or th.Tensor): (n_particles, 4) shaped array specifying per-particle (x,y,z,w) quaternion
orientations (None or th.tensor): (n_particles, 4) shaped array specifying per-particle (x,y,z,w) quaternion
orientations. If not specified, all will be set to canonical orientation (0, 0, 0, 1)
scales (None or th.Tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) scales.
scales (None or th.tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) scales.
If not specified, will be scale [1, 1, 1] by default
prototype_indices (None or list of int): If specified, should specify which prototype should be used for
each particle. If None, will use all 0s (i.e.: the first prototype created)
@ -156,7 +156,7 @@ class PhysxParticleInstancer(BasePrim):
Remove particles from this instancer, specified by their indices @idxs in the data array
Args:
idxs (list or th.Tensor of int): IDs corresponding to the indices of specific particles to remove from this
idxs (list or th.tensor of int): IDs corresponding to the indices of specific particles to remove from this
instancer
"""
if len(idxs) > 0:
@ -206,10 +206,10 @@ class PhysxParticleInstancer(BasePrim):
def particle_positions(self):
"""
Returns:
th.Tensor: (N, 3) numpy array, where each of the N particles' positions are expressed in (x,y,z)
th.tensor: (N, 3) numpy array, where each of the N particles' positions are expressed in (x,y,z)
cartesian coordinates relative to this instancer's parent prim
"""
return th.Tensor(self.get_attribute(attr="positions"))
return th.tensor(self.get_attribute(attr="positions"))
@particle_positions.setter
def particle_positions(self, pos):
@ -217,7 +217,7 @@ class PhysxParticleInstancer(BasePrim):
Set the particle positions for this instancer
Args:
th.Tensor: (N, 3) numpy array, where each of the N particles' desired positions are expressed in (x,y,z)
th.tensor: (N, 3) numpy array, where each of the N particles' desired positions are expressed in (x,y,z)
cartesian coordinates relative to this instancer's parent prim
"""
self.set_attribute(attr="positions", val=lazy.pxr.Vt.Vec3fArray.FromNumpy(pos.float()))
@ -226,10 +226,10 @@ class PhysxParticleInstancer(BasePrim):
def particle_orientations(self):
"""
Returns:
th.Tensor: (N, 4) numpy array, where each of the N particles' orientations are expressed in (x,y,z,w)
th.tensor: (N, 4) numpy array, where each of the N particles' orientations are expressed in (x,y,z,w)
quaternion coordinates relative to this instancer's parent prim
"""
return th.Tensor(self.get_attribute(attr="orientations"))
return th.tensor(self.get_attribute(attr="orientations"))
@particle_orientations.setter
def particle_orientations(self, quat):
@ -237,7 +237,7 @@ class PhysxParticleInstancer(BasePrim):
Set the particle orientations for this instancer
Args:
th.Tensor: (N, 4) numpy array, where each of the N particles' desired orientations are expressed in (x,y,z,w)
th.tensor: (N, 4) numpy array, where each of the N particles' desired orientations are expressed in (x,y,z,w)
quaternion coordinates relative to this instancer's parent prim
"""
assert (
@ -253,10 +253,10 @@ class PhysxParticleInstancer(BasePrim):
def particle_velocities(self):
"""
Returns:
th.Tensor: (N, 3) numpy array, where each of the N particles' velocities are expressed in (x,y,z)
th.tensor: (N, 3) numpy array, where each of the N particles' velocities are expressed in (x,y,z)
cartesian coordinates relative to this instancer's parent prim
"""
return th.Tensor(self.get_attribute(attr="velocities"))
return th.tensor(self.get_attribute(attr="velocities"))
@particle_velocities.setter
def particle_velocities(self, vel):
@ -264,7 +264,7 @@ class PhysxParticleInstancer(BasePrim):
Set the particle velocities for this instancer
Args:
th.Tensor: (N, 3) numpy array, where each of the N particles' desired velocities are expressed in (x,y,z)
th.tensor: (N, 3) numpy array, where each of the N particles' desired velocities are expressed in (x,y,z)
cartesian coordinates relative to this instancer's parent prim
"""
assert (
@ -276,10 +276,10 @@ class PhysxParticleInstancer(BasePrim):
def particle_scales(self):
"""
Returns:
th.Tensor: (N, 3) numpy array, where each of the N particles' scales are expressed in (x,y,z)
th.tensor: (N, 3) numpy array, where each of the N particles' scales are expressed in (x,y,z)
cartesian coordinates relative to this instancer's parent prim
"""
return th.Tensor(self.get_attribute(attr="scales"))
return th.tensor(self.get_attribute(attr="scales"))
@particle_scales.setter
def particle_scales(self, scales):
@ -287,7 +287,7 @@ class PhysxParticleInstancer(BasePrim):
Set the particle scales for this instancer
Args:
th.Tensor: (N, 3) numpy array, where each of the N particles' desired scales are expressed in (x,y,z)
th.tensor: (N, 3) numpy array, where each of the N particles' desired scales are expressed in (x,y,z)
cartesian coordinates relative to this instancer's parent prim
"""
assert (
@ -299,10 +299,10 @@ class PhysxParticleInstancer(BasePrim):
def particle_prototype_ids(self):
"""
Returns:
th.Tensor: (N,) numpy array, where each of the N particles' prototype_id (i.e.: which prototype is being used
th.tensor: (N,) numpy array, where each of the N particles' prototype_id (i.e.: which prototype is being used
for that particle)
"""
return th.Tensor(self.get_attribute(attr="protoIndices"))
return th.tensor(self.get_attribute(attr="protoIndices"))
@particle_prototype_ids.setter
def particle_prototype_ids(self, prototype_ids):
@ -310,7 +310,7 @@ class PhysxParticleInstancer(BasePrim):
Set the particle prototype_ids for this instancer
Args:
th.Tensor: (N,) numpy array, where each of the N particles' desired prototype_id
th.tensor: (N,) numpy array, where each of the N particles' desired prototype_id
(i.e.: which prototype is being used for that particle)
"""
assert (
@ -338,9 +338,9 @@ class PhysxParticleInstancer(BasePrim):
idn=self._idn,
particle_group=self.particle_group,
n_particles=self.n_particles,
particle_positions=th.Tensor(local_positions),
particle_positions=th.tensor(local_positions),
particle_velocities=self.particle_velocities,
particle_orientations=th.Tensor(local_orientations),
particle_orientations=th.tensor(local_orientations),
particle_scales=self.particle_scales,
particle_prototype_ids=self.particle_prototype_ids,
)
@ -356,16 +356,16 @@ class PhysxParticleInstancer(BasePrim):
f"instancer when loading state! Should be: {self.particle_group}, got: {state['particle_group']}."
)
local_positions = th.Tensor(state["particle_positions"])
local_orientations = th.Tensor(state["particle_orientations"])
local_positions = th.tensor(state["particle_positions"])
local_orientations = th.tensor(state["particle_orientations"])
global_positions, global_orientations = zip(
*[
T.pose_transform(*self.scene.prim.get_position_orientation(), local_pos, local_ori)
for local_pos, local_ori in zip(local_positions, local_orientations)
]
)
setattr(self, "particle_positions", th.Tensor(global_positions))
setattr(self, "particle_orientations", th.Tensor(global_orientations))
setattr(self, "particle_positions", th.tensor(global_positions))
setattr(self, "particle_orientations", th.tensor(global_orientations))
# Set values appropriately
keys = (
@ -376,7 +376,7 @@ class PhysxParticleInstancer(BasePrim):
for key in keys:
# Make sure the loaded state is a numpy array, it could have been accidentally casted into a list during
# JSON-serialization
val = th.Tensor(state[key]) if not isinstance(state[key], th.Tensor) else state[key]
val = th.tensor(state[key]) if not isinstance(state[key], th.Tensor) else state[key]
setattr(self, key, val)
def serialize(self, state):
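The state values being re-wrapped above may have been JSON-serialized into plain lists, hence the tensor round-trip. As with the settings checks earlier, the `isinstance` target has to be the class `th.Tensor`, never the `th.tensor` function. A compact sketch of the pattern:

```python
import torch as th

state = {"particle_velocities": [[0.0, 0.0, 0.1]]}  # lists after a JSON round-trip
key = "particle_velocities"

# Re-wrap as a tensor only when needed; th.Tensor (the class) is the valid
# isinstance target, while th.tensor (a function) would raise TypeError here.
val = th.tensor(state[key]) if not isinstance(state[key], th.Tensor) else state[key]
assert isinstance(val, th.Tensor)
```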
@ -449,7 +449,7 @@ class MicroParticleSystem(BaseSystem):
# Color of the generated material. Default is white [1.0, 1.0, 1.0]
# (NOTE: external queries should call self.color)
self._color = th.Tensor([1.0, 1.0, 1.0])
self._color = th.tensor([1.0, 1.0, 1.0])
def initialize(self, scene):
# Run super first
@ -491,7 +491,7 @@ class MicroParticleSystem(BaseSystem):
self.system_prim = None
self._material = None
self._color = th.Tensor([1.0, 1.0, 1.0])
self._color = th.tensor([1.0, 1.0, 1.0])
@property
def particle_radius(self):
@ -858,7 +858,7 @@ class MicroPhysicalParticleSystem(MicroParticleSystem, PhysicalParticleSystem):
Removes pre-existing particles from instancer @instancer_idn
Args:
idxs (th.Tensor): (n_particles,) shaped array specifying IDs of particles to delete
idxs (th.tensor): (n_particles,) shaped array specifying IDs of particles to delete
instancer_idn (None or int): Unique identification number of the particle instancer to delete the particles
from. If None, this system will delete particles from the default particle instancer
"""
@ -889,7 +889,7 @@ class MicroPhysicalParticleSystem(MicroParticleSystem, PhysicalParticleSystem):
of a newly generated instancer.
Args:
positions (th.Tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions
positions (th.tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions
instancer_idn (None or int): Unique identification number of the particle instancer to assign the generated
particles to. This is used to deterministically reproduce individual particle instancer states
dynamically, even if we delete / add additional ones at runtime during simulation. If there is no
@ -898,11 +898,11 @@ class MicroPhysicalParticleSystem(MicroParticleSystem, PhysicalParticleSystem):
particle_group (int): ID for this particle set. Particles from different groups will automatically collide
with each other. Particles in the same group will have collision behavior dictated by
@self.self_collision
velocities (None or th.Tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) velocities.
velocities (None or th.tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) velocities.
If not specified, all will be set to 0
orientations (None or th.Tensor): (n_particles, 4) shaped array specifying per-particle (x,y,z,w) quaternion
orientations (None or th.tensor): (n_particles, 4) shaped array specifying per-particle (x,y,z,w) quaternion
orientations. If not specified, all will be set to canonical orientation (0, 0, 0, 1)
scales (None or th.Tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) scales.
scales (None or th.tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) scales.
If not specified, will be uniformly randomly sampled from (self.min_scale, self.max_scale)
prototype_indices (None or list of int): If specified, should specify which prototype should be used for
each particle. If None, will randomly sample from all available prototypes
@ -922,7 +922,7 @@ class MicroPhysicalParticleSystem(MicroParticleSystem, PhysicalParticleSystem):
prototype_indices = (
th.ones(n_particles, dtype=int) * prototype_indices
if isinstance(prototype_indices, int)
else th.Tensor(prototype_indices, dtype=int)
else th.tensor(prototype_indices, dtype=int)
)
else:
prototype_indices = th.randint(len(self.particle_prototypes), (n_particles,))
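For the dtype arguments above: th.tensor accepts Python builtins as dtype aliases. A small sketch of the equivalences this relies on (standard PyTorch behavior):

import torch as th

assert th.tensor([1, 2], dtype=int).dtype == th.int64      # builtin int -> th.int64
assert th.tensor([1.0], dtype=float).dtype == th.float64   # builtin float -> th.float64
idx = th.tensor([0, 1, 2], dtype=th.int32)                 # explicit torch dtype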
@ -979,13 +979,13 @@ class MicroPhysicalParticleSystem(MicroParticleSystem, PhysicalParticleSystem):
particle_group (int): ID for this particle set. Particles from different groups will automatically collide
with each other. Particles in the same group will have collision behavior dictated by
@self.self_collision
positions (None or th.Tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions.
positions (None or th.tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions.
If not specified, will be set to the origin by default
velocities (None or th.Tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) velocities.
velocities (None or th.tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) velocities.
If not specified, all will be set to 0
orientations (None or th.Tensor): (n_particles, 4) shaped array specifying per-particle (x,y,z,w) quaternion
orientations (None or th.tensor): (n_particles, 4) shaped array specifying per-particle (x,y,z,w) quaternion
orientations. If not specified, all will be set to canonical orientation (0, 0, 0, 1)
scales (None or th.Tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) scales.
scales (None or th.tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) scales.
If not specified, will be uniformly randomly sampled from (self.min_scale, self.max_scale)
prototype_indices (None or list of int): If specified, should specify which prototype should be used for
each particle. If None, will use all 0s (i.e.: the first prototype created)
@ -1414,7 +1414,7 @@ class FluidSystem(MicroPhysicalParticleSystem):
total_weight = base_color_weight + transmission_weight
if total_weight == 0.0:
# If the fluid doesn't have any color, we add a "blue" tint by default
color = th.Tensor([0.0, 0.0, 1.0])
color = th.tensor([0.0, 0.0, 1.0])
else:
base_color_weight /= total_weight
transmission_weight /= total_weight
@ -1484,7 +1484,7 @@ class FluidSystem(MicroPhysicalParticleSystem):
def customize_particle_material_factory(attr, value):
def func(mat):
setattr(mat, attr, th.Tensor(value))
setattr(mat, attr, th.tensor(value))
return func
@ -1596,7 +1596,7 @@ class GranularSystem(MicroPhysicalParticleSystem):
# Store the contact offset based on a minimum sphere
# Threshold the lower-bound to avoid super small particles
vertices = th.Tensor(prototype.get_attribute("points")) * prototype.scale
vertices = th.tensor(prototype.get_attribute("points")) * prototype.scale
_, particle_contact_offset = trimesh.nsphere.minimum_nsphere(trimesh.Trimesh(vertices=vertices))
if particle_contact_offset < m.MIN_PARTICLE_CONTACT_OFFSET:
prototype.scale *= m.MIN_PARTICLE_CONTACT_OFFSET / particle_contact_offset
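The contact offset above is the radius of the minimum enclosing sphere of the prototype's scaled vertices; a small standalone sketch of that computation (the random points and the 0.005 lower bound are illustrative stand-ins, not values from this codebase):

import numpy as np
import trimesh

points = np.random.rand(64, 3)  # stand-in for the prototype's scaled vertices
# minimum_nsphere returns the center and radius of the smallest enclosing sphere
center, radius = trimesh.nsphere.minimum_nsphere(points)
contact_offset = max(float(radius), 0.005)  # threshold against a minimum offset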
@ -1662,7 +1662,7 @@ class Cloth(MicroParticleSystem):
tm = mesh_prim_to_trimesh_mesh(
mesh_prim=mesh_prim, include_normals=True, include_texcoord=True, world_frame=False
)
texcoord = th.Tensor(mesh_prim.GetAttribute("primvars:st").Get()) if has_uv_mapping else None
texcoord = th.tensor(mesh_prim.GetAttribute("primvars:st").Get()) if has_uv_mapping else None
else:
# We will remesh in pymeshlab, but it doesn't allow programmatic construction of a mesh with texcoords so
# we convert our mesh into a trimesh mesh, then export it to a temp file, then load it into pymeshlab
@ -1730,7 +1730,7 @@ class Cloth(MicroParticleSystem):
new_faces = cm.face_matrix()
new_vertices = cm.vertex_matrix()
new_normals = cm.vertex_normal_matrix()
texcoord = th.Tensor(cm.wedge_tex_coord_matrix()) if has_uv_mapping else None
texcoord = th.tensor(cm.wedge_tex_coord_matrix()) if has_uv_mapping else None
tm = trimesh.Trimesh(
vertices=new_vertices,
faces=new_faces,
@ -1740,7 +1740,7 @@ class Cloth(MicroParticleSystem):
tm.apply_transform(th.linalg.inv_ex(scaled_world_transform).inverse)
# Update the mesh prim
face_vertex_counts = th.Tensor([len(face) for face in tm.faces], dtype=int)
face_vertex_counts = th.tensor([len(face) for face in tm.faces], dtype=int)
mesh_prim.GetAttribute("faceVertexCounts").Set(face_vertex_counts)
mesh_prim.GetAttribute("points").Set(lazy.pxr.Vt.Vec3fArray.FromNumpy(tm.vertices))
mesh_prim.GetAttribute("faceVertexIndices").Set(tm.faces.flatten())

View File

@ -160,7 +160,7 @@ class BaseSystem(Serializable):
Removes pre-existing particles
Args:
idxs (th.Tensor): (n_particles,) shaped array specifying IDs of particles to delete
idxs (th.tensor): (n_particles,) shaped array specifying IDs of particles to delete
**kwargs (dict): Any additional keyword-specific arguments required by subclass implementation
"""
raise NotImplementedError()
@ -178,10 +178,10 @@ class BaseSystem(Serializable):
Args:
scene (Scene): Scene object to generate particles in
positions (th.Tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions
orientations (None or th.Tensor): (n_particles, 4) shaped array specifying per-particle (x,y,z,w) quaternion
positions (th.tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions
orientations (None or th.tensor): (n_particles, 4) shaped array specifying per-particle (x,y,z,w) quaternion
orientations. If not specified, all will be set to canonical orientation (0, 0, 0, 1)
scales (None or th.Tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) scales.
scales (None or th.tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) scales.
If not specified, will be uniformly randomly sampled from (self.min_scale, self.max_scale)
**kwargs (dict): Any additional keyword-specific arguments required by subclass implementation
"""
@ -595,8 +595,8 @@ class VisualParticleSystem(BaseSystem):
# Convert these into scaling factors for the x and y axes for our particle object
particle_bbox = self.particle_object.aabb_extent
minimum = th.Tensor([bbox_lower_limit / particle_bbox[0], bbox_lower_limit / particle_bbox[1], 1.0])
maximum = th.Tensor([bbox_upper_limit / particle_bbox[0], bbox_upper_limit / particle_bbox[1], 1.0])
minimum = th.tensor([bbox_lower_limit / particle_bbox[0], bbox_lower_limit / particle_bbox[1], 1.0])
maximum = th.tensor([bbox_upper_limit / particle_bbox[0], bbox_upper_limit / particle_bbox[1], 1.0])
return minimum, maximum
@ -659,10 +659,10 @@ class VisualParticleSystem(BaseSystem):
Args:
group (str): Object on which to sample particle locations
positions (th.Tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions
orientations (None or th.Tensor): (n_particles, 4) shaped array specifying per-particle (x,y,z,w) quaternion
positions (th.tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions
orientations (None or th.tensor): (n_particles, 4) shaped array specifying per-particle (x,y,z,w) quaternion
orientations. If not specified, all will be set to canonical orientation (0, 0, 0, 1)
scales (None or th.Tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) scaling in its
scales (None or th.tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) scaling in its
local frame. If not specified, all will be randomly sampled based on @self.min_scale and @self.max_scale
link_prim_paths (None or list of str): Determines which link each generated particle will
be attached to. If not specified, all will be attached to the group object's prim, NOT a link
@ -810,7 +810,7 @@ class PhysicalParticleSystem(BaseSystem):
object physically interacts with its non-uniform geometry.
Args:
positions (th.Tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions
positions (th.tensor): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions
Returns:
n-array: (n_particles,) boolean array, True if in contact, otherwise False
@ -936,13 +936,13 @@ class PhysicalParticleSystem(BaseSystem):
# assume the particles are extremely small - sample cuboids of size 0 for better performance
cuboid_dimensions=th.zeros(3),
# raycast start inside the aabb in x-y plane and outside the aabb in the z-axis
aabb_offset=th.Tensor([-radius, -radius, radius]),
aabb_offset=th.tensor([-radius, -radius, radius]),
# bottom padding should be the same as the particle radius
cuboid_bottom_padding=radius,
# undo_cuboid_bottom_padding should be False - the sampled positions are above the surface by its radius
undo_cuboid_bottom_padding=False,
)
particle_positions = th.Tensor([result[0] for result in results if result[0] is not None])
particle_positions = th.tensor([result[0] for result in results if result[0] is not None])
# Also potentially sub-sample if we're past our limit
if max_samples is not None and len(particle_positions) > max_samples:
particle_positions = particle_positions[th.randperm(len(particle_positions))[:max_samples]]
@ -1087,7 +1087,7 @@ def create_system_from_metadata(system_name):
def generate_customize_particle_material_fcn(mat_kwargs):
def customize_mat(mat):
for attr, val in mat_kwargs.items():
setattr(mat, attr, th.Tensor(val) if isinstance(val, list) else val)
setattr(mat, attr, th.tensor(val) if isinstance(val, list) else val)
return customize_mat

View File

@ -107,7 +107,7 @@ class BehaviorTask(BaseTask):
self.future_obj_instances = None # set of str
# Info for demonstration collection
self.instruction_order = None # th.Tensor of int
self.instruction_order = None # th.tensor of int
self.currently_viewed_index = None # int
self.currently_viewed_instruction = None # tuple of str
self.activity_natural_language_goal_conditions = None # str
@ -383,9 +383,9 @@ class BehaviorTask(BaseTask):
# Batch rpy calculations for much better efficiency
objs_exist = {obj: obj.exists for obj in self.object_scope.values() if not obj.is_system}
objs_rpy = T.quat2euler(
th.Tensor(
th.stack(
[
obj.states[Pose].get_value()[1] if obj_exist else th.Tensor([0, 0, 0, 1.0])
obj.states[Pose].get_value()[1] if obj_exist else th.tensor([0, 0, 0, 1.0])
for obj, obj_exist in objs_exist.items()
]
)
@ -403,14 +403,14 @@ class BehaviorTask(BaseTask):
# TODO: May need to update checking here to USDObject? Or even baseobject?
# TODO: How to handle systems as part of obs?
if obj_exist:
low_dim_obs[f"{obj.bddl_inst}_real"] = th.Tensor([1.0])
low_dim_obs[f"{obj.bddl_inst}_real"] = th.tensor([1.0])
low_dim_obs[f"{obj.bddl_inst}_pos"] = obj.states[Pose].get_value()[0]
low_dim_obs[f"{obj.bddl_inst}_ori_cos"] = obj_rpy_cos
low_dim_obs[f"{obj.bddl_inst}_ori_sin"] = obj_rpy_sin
if obj.name != agent.name:
for arm in agent.arm_names:
grasping_object = agent.is_grasping(arm=arm, candidate_obj=obj.wrapped_obj)
low_dim_obs[f"{obj.bddl_inst}_in_gripper_{arm}"] = th.Tensor([float(grasping_object)])
low_dim_obs[f"{obj.bddl_inst}_in_gripper_{arm}"] = th.tensor([float(grasping_object)])
else:
low_dim_obs[f"{obj.bddl_inst}_real"] = th.zeros(1)
low_dim_obs[f"{obj.bddl_inst}_pos"] = th.zeros(3)

View File

@ -89,8 +89,8 @@ class GraspTask(BaseTask):
joint_control_idx = th.cat([robot.trunk_control_idx, robot.arm_control_idx[robot.default_arm]])
robot_pose = random.choice(self._reset_poses)
robot.set_joint_positions(robot_pose["joint_pos"], joint_control_idx)
robot_pos = th.Tensor(robot_pose["base_pos"])
robot_orn = th.Tensor(robot_pose["base_ori"])
robot_pos = th.tensor(robot_pose["base_pos"])
robot_orn = th.tensor(robot_pose["base_ori"])
# Move it to the appropriate scene. TODO: The scene should provide a function for this.
robot_pos, robot_orn = T.pose_transform(*robot.scene.prim.get_position_orientation(), robot_pos, robot_orn)
robot.set_position_orientation(robot_pos, robot_orn)
@ -106,11 +106,11 @@ class GraspTask(BaseTask):
# For Tiago
if "combined" in robot.robot_arm_descriptor_yamls:
joint_combined_idx = th.cat([robot.trunk_control_idx, robot.arm_control_idx["combined"]])
initial_joint_pos = th.Tensor(robot.get_joint_positions()[joint_combined_idx])
initial_joint_pos = th.tensor(robot.get_joint_positions()[joint_combined_idx])
control_idx_in_joint_pos = th.where(th.isin(joint_combined_idx, joint_control_idx))[0]
# For Fetch
else:
initial_joint_pos = th.Tensor(robot.get_joint_positions()[joint_control_idx])
initial_joint_pos = th.tensor(robot.get_joint_positions()[joint_control_idx])
control_idx_in_joint_pos = th.arange(dim)
with PlanningContext(
@ -149,7 +149,7 @@ class GraspTask(BaseTask):
# Check if the robot has toppled
rotation = R.from_quat(robot.get_orientation())
robot_up = rotation.apply(th.Tensor([0, 0, 1]))
robot_up = rotation.apply(th.tensor([0, 0, 1]))
if robot_up[2] < 0.75:
raise ValueError("Robot has toppled over")
@ -202,7 +202,7 @@ class GraspTask(BaseTask):
def _get_random_joint_position(self, robot):
joint_positions = []
joint_control_idx = th.cat([robot.trunk_control_idx, robot.arm_control_idx[robot.default_arm]])
joints = th.Tensor([joint for joint in robot.joints.values()])
joints = list(robot.joints.values())
arm_joints = [joints[int(i)] for i in joint_control_idx]
for i, joint in enumerate(arm_joints):
val = random.uniform(joint.lower_limit, joint.upper_limit)
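The topple check above leans on scipy's Rotation accepting CPU torch tensors as array-likes; a minimal sketch of that interop under that assumption:

import torch as th
from scipy.spatial.transform import Rotation as R

quat = th.tensor([0.0, 0.0, 0.0, 1.0])            # (x, y, z, w), identity rotation
robot_up = R.from_quat(quat).apply(th.tensor([0.0, 0.0, 1.0]))
toppled = robot_up[2] < 0.75                      # same tilt threshold as above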

View File

@ -87,9 +87,9 @@ class PointNavigationTask(BaseTask):
# Store inputs
self._robot_idn = robot_idn
self._floor = floor
self._initial_pos = initial_pos if initial_pos is None else th.Tensor(initial_pos)
self._initial_quat = initial_quat if initial_quat is None else th.Tensor(initial_quat)
self._goal_pos = goal_pos if goal_pos is None else th.Tensor(goal_pos)
self._initial_pos = initial_pos if initial_pos is None else th.tensor(initial_pos)
self._initial_quat = initial_quat if initial_quat is None else th.tensor(initial_quat)
self._goal_pos = goal_pos if goal_pos is None else th.tensor(goal_pos)
self._goal_tolerance = goal_tolerance
self._goal_in_polar = goal_in_polar
self._path_range = path_range
@ -174,7 +174,7 @@ class PointNavigationTask(BaseTask):
radius=self._goal_tolerance,
height=self._goal_height,
visual_only=True,
rgba=th.Tensor([1, 0, 0, 0.3]),
rgba=th.tensor([1, 0, 0, 0.3]),
)
self._goal_pos_marker = PrimitiveObject(
relative_prim_path="/task_goal_pos_marker",
@ -183,7 +183,7 @@ class PointNavigationTask(BaseTask):
radius=self._goal_tolerance,
height=self._goal_height,
visual_only=True,
rgba=th.Tensor([0, 0, 1, 0.3]),
rgba=th.tensor([0, 0, 1, 0.3]),
)
# Load the objects into the simulator
@ -201,7 +201,7 @@ class PointNavigationTask(BaseTask):
radius=self._waypoint_width,
height=self._waypoint_height,
visual_only=True,
rgba=th.Tensor([0, 1, 0, 0.3]),
rgba=th.tensor([0, 1, 0, 0.3]),
)
env.scene.add_object(waypoint)
waypoints.append(waypoint)
@ -234,7 +234,7 @@ class PointNavigationTask(BaseTask):
# Possibly sample initial ori
quat_lo, quat_hi = 0, math.pi * 2
initial_quat = (
T.euler2quat(th.Tensor([0, 0, (th.rand(1) * (quat_hi - quat_lo) + quat_lo).item()]))
T.euler2quat(th.tensor([0, 0, (th.rand(1) * (quat_hi - quat_lo) + quat_lo).item()]))
if self._randomize_initial_quat
else self._initial_quat
)
@ -382,14 +382,14 @@ class PointNavigationTask(BaseTask):
Returns:
3-array: (x,y,z) position in self._robot_idn agent's local frame
"""
delta_pos_global = th.Tensor(pos) - env.robots[self._robot_idn].states[Pose].get_value()[0]
delta_pos_global = th.tensor(pos) - env.robots[self._robot_idn].states[Pose].get_value()[0]
return T.quat2mat(env.robots[self._robot_idn].states[Pose].get_value()[1]).T @ delta_pos_global
def _get_obs(self, env):
# Get relative position of goal with respect to the current agent position
xy_pos_to_goal = self._global_pos_to_robot_frame(env, self._goal_pos)[:2]
if self._goal_in_polar:
xy_pos_to_goal = th.Tensor(T.cartesian_to_polar(*xy_pos_to_goal))
xy_pos_to_goal = th.tensor(T.cartesian_to_polar(*xy_pos_to_goal))
# linear velocity and angular velocity
ori_t = T.quat2mat(env.robots[self._robot_idn].states[Pose].get_value()[1]).T
@ -460,10 +460,10 @@ class PointNavigationTask(BaseTask):
num_nodes = min(self._n_vis_waypoints, shortest_path.shape[0])
for i in range(num_nodes):
self._waypoint_markers[i].set_position(
position=th.Tensor([shortest_path[i][0], shortest_path[i][1], floor_height])
position=th.tensor([shortest_path[i][0], shortest_path[i][1], floor_height])
)
for i in range(num_nodes, self._n_vis_waypoints):
self._waypoint_markers[i].set_position(position=th.Tensor([0.0, 0.0, 100.0]))
self._waypoint_markers[i].set_position(position=th.tensor([0.0, 0.0, 100.0]))
def step(self, env, action):
# Run super method first
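The polar goal observation above assumes T.cartesian_to_polar maps a relative (x, y) offset to a distance and heading; a hypothetical sketch of that conversion (the helper below mirrors the call in _get_obs and is not the library's implementation):

import torch as th

def cartesian_to_polar(x, y):
    # distance and bearing of the goal in the robot frame
    return th.sqrt(x * x + y * y), th.atan2(y, x)

rho, phi = cartesian_to_polar(*th.tensor([1.0, 1.0]))  # rho ~= 1.414, phi ~= pi/4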

View File

@ -38,7 +38,7 @@ class Falling(FailureCondition):
# Terminate if the robot has toppled over
if self._topple:
rotation = R.from_quat(env.scene.robots[self._robot_idn].get_orientation())
robot_up = rotation.apply(th.Tensor([0, 0, 1]))
robot_up = rotation.apply(th.tensor([0, 0, 1]))
if robot_up[2] < self._tilt_tolerance:
return True

View File

@ -794,7 +794,7 @@ class WasherDryerRule(BaseTransitionRule):
"""
# Compute all obj
objects = [obj for scene in og.sim.scenes for obj in scene.objects]
obj_positions = th.Tensor([obj.aabb_center for obj in objects])
obj_positions = th.stack([obj.aabb_center for obj in objects])
return dict(obj_positions=obj_positions)
@classmethod
@ -818,7 +818,7 @@ class WasherDryerRule(BaseTransitionRule):
in_volume = container.states[ContainedParticles].check_in_volume(obj_positions)
objects = [obj for scene in og.sim.scenes for obj in scene.objects]
in_volume_objs = list(th.Tensor(objects)[in_volume])
in_volume_objs = [obj for obj, in_vol in zip(objects, in_volume) if in_vol]
# Remove the container itself
if container in in_volume_objs:
in_volume_objs.remove(container)
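Because arbitrary scene objects cannot be packed into a tensor, the containment mask has to be applied to the plain Python list, as above; a minimal sketch of the pattern with illustrative stand-in objects:

import torch as th

objects = ["washer", "sock", "towel"]          # stand-ins for scene objects
in_volume = th.tensor([False, True, True])     # per-object containment mask
inside = [obj for obj, keep in zip(objects, in_volume) if keep]
assert inside == ["sock", "towel"]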
@ -969,8 +969,8 @@ class SlicingRule(BaseTransitionRule):
# List of dicts gets replaced by {'0':dict, '1':dict, ...}
# Get bounding box info
part_bb_pos = th.Tensor(part["bb_pos"])
part_bb_orn = th.Tensor(part["bb_orn"])
part_bb_pos = th.tensor(part["bb_pos"])
part_bb_orn = th.tensor(part["bb_orn"])
# Determine the relative scale to apply to the object part from the original object
# Note that proper (rotated) scaling can only be applied when the relative orientation of
@ -1275,7 +1275,7 @@ class RecipeRule(BaseTransitionRule):
@classmethod
def _filter_input_objects_by_unary_and_binary_system_states(cls, recipe):
# Filter input objects based on a subset of input states (unary states and binary system states)
# Map object categories (str) to valid indices (th.Tensor)
# Map object categories (str) to valid indices (th.tensor)
category_to_valid_indices = dict()
for obj_category in recipe["input_objects"]:
if obj_category not in recipe["input_states"]:
@ -1306,7 +1306,9 @@ class RecipeRule(BaseTransitionRule):
category_to_valid_indices[obj_category].append(idx)
# Convert to tensor for faster indexing
category_to_valid_indices[obj_category] = th.Tensor(category_to_valid_indices[obj_category], dtype=int)
category_to_valid_indices[obj_category] = th.tensor(
category_to_valid_indices[obj_category], dtype=th.int32
)
return category_to_valid_indices
@classmethod
@ -1617,7 +1619,7 @@ class RecipeRule(BaseTransitionRule):
dict: Keyword-mapped global rule information
"""
# Compute all relevant object AABB positions
obj_positions = th.Tensor([obj.aabb_center for obj in cls._OBJECTS])
obj_positions = th.stack([obj.aabb_center for obj in cls._OBJECTS])
return dict(obj_positions=obj_positions)
@classmethod
@ -1641,7 +1643,7 @@ class RecipeRule(BaseTransitionRule):
# Compute in volume for all relevant object positions
# We check for either the object AABB being contained OR the object being on top of the container, in the
# case that the container is too flat for the volume to contain the object
in_volume = container.states[ContainedParticles].check_in_volume(obj_positions) | th.Tensor(
in_volume = container.states[ContainedParticles].check_in_volume(obj_positions) | th.tensor(
[obj.states[OnTop].get_value(container) for obj in cls._OBJECTS]
)
@ -1683,7 +1685,7 @@ class RecipeRule(BaseTransitionRule):
i += 1
# Keep relevant objects as a list; a tensor cannot hold arbitrary Python objects
cls._OBJECTS = th.Tensor(cls._OBJECTS)
cls._OBJECTS = list(cls._OBJECTS)
@classproperty
def candidate_filters(cls):
@ -1798,7 +1800,7 @@ class RecipeRule(BaseTransitionRule):
for system_name, particle_idxs in execution_info["relevant_systems"].items():
system = get_system(system_name)
volume += len(particle_idxs) * math.pi * (system.particle_radius**3) * 4 / 3
system.remove_particles(idxs=th.Tensor(list(particle_idxs)))
system.remove_particles(idxs=th.tensor(list(particle_idxs)))
if not cls.is_multi_instance:
# Remove either all objects or only the ones specified in the input objects of the recipe
@ -1826,7 +1828,7 @@ class RecipeRule(BaseTransitionRule):
log.warning(
f"Failed to spawn object {obj.name} in container {container.name}! Directly placing on top instead."
)
pos = th.Tensor(container.aabb_center) + th.Tensor(
pos = th.tensor(container.aabb_center) + th.tensor(
[0, 0, container.aabb_extent[2] / 2.0 + obj.aabb_extent[2] / 2.0]
)
obj.set_bbox_center_position_orientation(position=pos)

View File

@ -1284,7 +1284,7 @@ class BDDLSampler:
self._env.scene.add_object(simulator_obj)
# Set these objects to be far-away locations
simulator_obj.set_position(th.Tensor([100.0, 100.0, -100.0]) + th.ones(3) * num_new_obj * 5.0)
simulator_obj.set_position(th.tensor([100.0, 100.0, -100.0]) + th.ones(3) * num_new_obj * 5.0)
self._sampled_objects.add(simulator_obj)
self._object_scope[obj_inst] = BDDLEntity(bddl_inst=obj_inst, entity=simulator_obj)

View File

@ -74,6 +74,6 @@ def load_default_config():
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, th.Tensor):
if isinstance(obj, th.Tensor):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
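With the isinstance target kept as the th.Tensor class, the encoder falls back to tolist() for tensors; a self-contained usage sketch of the same pattern (TensorEncoder is a stand-in name):

import json
import torch as th

class TensorEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, th.Tensor):  # the class, never the th.tensor factory
            return obj.tolist()
        return json.JSONEncoder.default(self, obj)

print(json.dumps({"q": th.tensor([0.0, 1.0])}, cls=TensorEncoder))  # {"q": [0.0, 1.0]}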

View File

@ -108,12 +108,12 @@ class IKSolver:
None or n-array: Joint positions for reaching desired target_pos and target_quat, otherwise None if no
solution was found
"""
pos = th.Tensor(target_pos, dtype=th.float64).reshape(3, 1)
rot = th.Tensor(T.quat2mat(th.Tensor([0, 0, 0, 1.0]) if target_quat is None else target_quat), dtype=th.float64)
pos = th.tensor(target_pos, dtype=th.float64).reshape(3, 1)
rot = th.tensor(T.quat2mat(th.tensor([0, 0, 0, 1.0]) if target_quat is None else target_quat), dtype=th.float64)
ik_target_pose = lazy.lula.Pose3(lazy.lula.Rotation3(rot), pos)
# Set the cspace seed and tolerance
initial_joint_pos = self.reset_joint_pos if initial_joint_pos is None else th.Tensor(initial_joint_pos)
initial_joint_pos = self.reset_joint_pos if initial_joint_pos is None else th.tensor(initial_joint_pos)
self.config.cspace_seeds = [initial_joint_pos]
self.config.position_tolerance = tolerance_pos
self.config.orientation_tolerance = 100.0 if target_quat is None else tolerance_quat
@ -125,7 +125,7 @@ class IKSolver:
# Compute target joint positions
ik_results = lazy.lula.compute_ik_ccd(self.kinematics, ik_target_pose, self.eef_name, self.config)
if ik_results.success:
return th.Tensor(ik_results.cspace_position)
return th.tensor(ik_results.cspace_position)
else:
return None

View File

@ -174,19 +174,19 @@ class ArticulationView(_ArticulationView):
def set_joint_limits(
self,
values: Union[th.Tensor, torch.Tensor],
indices: Optional[Union[th.Tensor, List, torch.Tensor, wp.array]] = None,
joint_indices: Optional[Union[th.Tensor, List, torch.Tensor, wp.array]] = None,
) -> None:
"""Sets joint limits for articulation joints in the view.
Args:
values (Union[th.Tensor, torch.Tensor, wp.array]): joint limits for articulations in the view. shape (M, K, 2).
indices (Optional[Union[th.Tensor, List, torch.Tensor, wp.array]], optional): indicies to specify which prims
values (Union[th.tensor, torch.Tensor, wp.array]): joint limits for articulations in the view. shape (M, K, 2).
indices (Optional[Union[th.tensor, List, torch.Tensor, wp.array]], optional): indices to specify which prims
to manipulate. Shape (M,).
Where M <= size of the encapsulated prims in the view.
Defaults to None (i.e: all prims in the view).
joint_indices (Optional[Union[th.Tensor, List, torch.Tensor, wp.array]], optional): joint indicies to specify which joints
joint_indices (Optional[Union[th.tensor, List, torch.Tensor, wp.array]], optional): joint indices to specify which joints
to manipulate. Shape (K,).
Where K <= num of dofs.
Defaults to None (i.e: all dofs).
@ -230,25 +230,25 @@ class ArticulationView(_ArticulationView):
def get_joint_limits(
self,
indices: Optional[Union[th.Tensor, List, torch.Tensor, wp.array]] = None,
joint_indices: Optional[Union[th.Tensor, List, torch.Tensor, wp.array]] = None,
clone: bool = True,
) -> Union[th.Tensor, torch.Tensor, wp.array]:
"""Gets joint limits for articulation in the view.
Args:
indices (Optional[Union[th.Tensor, List, torch.Tensor, wp.array]], optional): indicies to specify which prims
indices (Optional[Union[th.tensor, List, torch.Tensor, wp.array]], optional): indices to specify which prims
to query. Shape (M,).
Where M <= size of the encapsulated prims in the view.
Defaults to None (i.e: all prims in the view).
joint_indices (Optional[Union[th.Tensor, List, torch.Tensor, wp.array]], optional): joint indicies to specify which joints
joint_indices (Optional[Union[th.tensor, List, torch.Tensor, wp.array]], optional): joint indices to specify which joints
to query. Shape (K,).
Where K <= num of dofs.
Defaults to None (i.e: all dofs).
clone (Optional[bool]): True to return a clone of the internal buffer. Otherwise False. Defaults to True.
Returns:
Union[th.Tensor, torch.Tensor, wp.indexedarray]: joint limits for articulations in the view. shape (M, K).
Union[th.tensor, torch.Tensor, wp.indexedarray]: joint limits for articulations in the view. shape (M, K).
"""
if not self._is_initialized:
carb.log_warn("ArticulationView needs to be initialized.")
@ -288,19 +288,19 @@ class ArticulationView(_ArticulationView):
def set_max_velocities(
self,
values: Union[th.Tensor, torch.Tensor, wp.array],
indices: Optional[Union[th.Tensor, List, torch.Tensor, wp.array]] = None,
joint_indices: Optional[Union[th.Tensor, List, torch.Tensor, wp.array]] = None,
) -> None:
"""Sets maximum velocities for articulation in the view.
Args:
values (Union[th.Tensor, torch.Tensor, wp.array]): maximum velocities for articulations in the view. shape (M, K).
indices (Optional[Union[th.Tensor, List, torch.Tensor, wp.array]], optional): indicies to specify which prims
values (Union[th.tensor, torch.Tensor, wp.array]): maximum velocities for articulations in the view. shape (M, K).
indices (Optional[Union[th.tensor, List, torch.Tensor, wp.array]], optional): indices to specify which prims
to manipulate. Shape (M,).
Where M <= size of the encapsulated prims in the view.
Defaults to None (i.e: all prims in the view).
joint_indices (Optional[Union[th.Tensor, List, torch.Tensor, wp.array]], optional): joint indicies to specify which joints
joint_indices (Optional[Union[th.tensor, List, torch.Tensor, wp.array]], optional): joint indices to specify which joints
to manipulate. Shape (K,).
Where K <= num of dofs.
Defaults to None (i.e: all dofs).
@ -339,25 +339,25 @@ class ArticulationView(_ArticulationView):
def get_max_velocities(
self,
indices: Optional[Union[th.Tensor, List, torch.Tensor, wp.array]] = None,
joint_indices: Optional[Union[th.Tensor, List, torch.Tensor, wp.array]] = None,
clone: bool = True,
) -> Union[th.Tensor, torch.Tensor, wp.indexedarray]:
"""Gets maximum velocities for articulation in the view.
Args:
indices (Optional[Union[th.Tensor, List, torch.Tensor, wp.array]], optional): indicies to specify which prims
indices (Optional[Union[th.tensor, List, torch.Tensor, wp.array]], optional): indices to specify which prims
to query. Shape (M,).
Where M <= size of the encapsulated prims in the view.
Defaults to None (i.e: all prims in the view).
joint_indices (Optional[Union[th.Tensor, List, torch.Tensor, wp.array]], optional): joint indicies to specify which joints
joint_indices (Optional[Union[th.tensor, List, torch.Tensor, wp.array]], optional): joint indices to specify which joints
to query. Shape (K,).
Where K <= num of dofs.
Defaults to None (i.e: all dofs).
clone (Optional[bool]): True to return a clone of the internal buffer. Otherwise False. Defaults to True.
Returns:
Union[th.Tensor, torch.Tensor, wp.indexedarray]: maximum velocities for articulations in the view. shape (M, K).
Union[th.tensor, torch.Tensor, wp.indexedarray]: maximum velocities for articulations in the view. shape (M, K).
"""
if not self._is_initialized:
carb.log_warn("ArticulationView needs to be initialized.")
@ -396,9 +396,9 @@ class ArticulationView(_ArticulationView):
def set_joint_positions(
self,
positions: Optional[Union[th.Tensor, torch.Tensor, wp.array]],
indices: Optional[Union[th.Tensor, List, torch.Tensor, wp.array]] = None,
joint_indices: Optional[Union[th.Tensor, List, torch.Tensor, wp.array]] = None,
) -> None:
"""Set the joint positions of articulations in the view
@ -408,13 +408,13 @@ class ArticulationView(_ArticulationView):
Use the ``set_joint_position_targets`` or the ``apply_action`` methods to control the articulation joints.
Args:
positions (Optional[Union[th.Tensor, torch.Tensor, wp.array]]): joint positions of articulations in the view to be set to in the next frame.
positions (Optional[Union[th.tensor, torch.Tensor, wp.array]]): joint positions of articulations in the view to be set to in the next frame.
shape is (M, K).
indices (Optional[Union[th.Tensor, List, torch.Tensor, wp.array]], optional): indices to specify which prims
indices (Optional[Union[th.tensor, List, torch.Tensor, wp.array]], optional): indices to specify which prims
to manipulate. Shape (M,).
Where M <= size of the encapsulated prims in the view.
Defaults to None (i.e: all prims in the view).
joint_indices (Optional[Union[th.Tensor, List, torch.Tensor, wp.array]], optional): joint indices to specify which joints
joint_indices (Optional[Union[th.tensor, List, torch.Tensor, wp.array]], optional): joint indices to specify which joints
to manipulate. Shape (K,).
Where K <= num of dofs.
Defaults to None (i.e: all dofs).
@ -432,13 +432,13 @@ class ArticulationView(_ArticulationView):
>>> # set all the articulation joints.
>>> # Since there are 5 envs, the joint positions are repeated 5 times
>>> positions = th.tile(th.Tensor([0.0, -1.0, 0.0, -2.2, 0.0, 2.4, 0.8, 0.04, 0.04]), (num_envs, 1))
>>> positions = th.tile(th.tensor([0.0, -1.0, 0.0, -2.2, 0.0, 2.4, 0.8, 0.04, 0.04]), (num_envs, 1))
>>> prims.set_joint_positions(positions)
>>>
>>> # set only the fingers in closed position: panda_finger_joint1 (7) and panda_finger_joint2 (8) to 0.0
>>> # for the first, middle and last of the 5 envs
>>> positions = th.tile(th.Tensor([0.0, 0.0]), (3, 1))
>>> prims.set_joint_positions(positions, indices=th.Tensor([0, 2, 4]), joint_indices=th.Tensor([7, 8]))
>>> positions = th.tile(th.tensor([0.0, 0.0]), (3, 1))
>>> prims.set_joint_positions(positions, indices=th.tensor([0, 2, 4]), joint_indices=th.tensor([7, 8]))
"""
if not self._is_initialized:
carb.log_warn("ArticulationView needs to be initialized.")
@ -462,9 +462,9 @@ class ArticulationView(_ArticulationView):
def set_joint_velocities(
self,
velocities: Optional[Union[th.Tensor, torch.Tensor, wp.array]],
indices: Optional[Union[th.Tensor, List, torch.Tensor, wp.array]] = None,
joint_indices: Optional[Union[th.Tensor, List, torch.Tensor, wp.array]] = None,
) -> None:
"""Set the joint velocities of articulations in the view
@ -474,13 +474,13 @@ class ArticulationView(_ArticulationView):
Use the ``set_joint_velocity_targets`` or the ``apply_action`` methods to control the articulation joints.
Args:
velocities (Optional[Union[th.Tensor, torch.Tensor, wp.array]]): joint velocities of articulations in the view to be set to in the next frame.
velocities (Optional[Union[th.tensor, torch.Tensor, wp.array]]): joint velocities of articulations in the view to be set to in the next frame.
shape is (M, K).
indices (Optional[Union[th.Tensor, List, torch.Tensor, wp.array]], optional): indices to specify which prims
indices (Optional[Union[th.tensor, List, torch.Tensor, wp.array]], optional): indices to specify which prims
to manipulate. Shape (M,).
Where M <= size of the encapsulated prims in the view.
Defaults to None (i.e: all prims in the view).
joint_indices (Optional[Union[th.Tensor, List, torch.Tensor, wp.array]], optional): joint indices to specify which joints
joint_indices (Optional[Union[th.tensor, List, torch.Tensor, wp.array]], optional): joint indices to specify which joints
to manipulate. Shape (K,).
Where K <= num of dofs.
Defaults to None (i.e: all dofs).
@ -498,13 +498,13 @@ class ArticulationView(_ArticulationView):
>>> # set the velocities for all the articulation joints to the indicated values.
>>> # Since there are 5 envs, the joint velocities are repeated 5 times
>>> velocities = th.tile(th.Tensor([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]), (num_envs, 1))
>>> velocities = th.tile(th.tensor([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]), (num_envs, 1))
>>> prims.set_joint_velocities(velocities)
>>>
>>> # set the fingers velocities: panda_finger_joint1 (7) and panda_finger_joint2 (8) to -0.1
>>> # for the first, middle and last of the 5 envs
>>> velocities = th.tile(th.Tensor([-0.1, -0.1]), (3, 1))
>>> prims.set_joint_velocities(velocities, indices=th.Tensor([0, 2, 4]), joint_indices=th.Tensor([7, 8]))
>>> velocities = th.tile(th.tensor([-0.1, -0.1]), (3, 1))
>>> prims.set_joint_velocities(velocities, indices=th.tensor([0, 2, 4]), joint_indices=th.tensor([7, 8]))
"""
if not self._is_initialized:
carb.log_warn("ArticulationView needs to be initialized.")
@ -529,9 +529,9 @@ class ArticulationView(_ArticulationView):
def set_joint_efforts(
self,
efforts: Optional[Union[th.Tensor, torch.Tensor, wp.array]],
indices: Optional[Union[th.Tensor, List, torch.Tensor, wp.array]] = None,
joint_indices: Optional[Union[th.Tensor, List, torch.Tensor, wp.array]] = None,
) -> None:
"""Set the joint efforts of articulations in the view
@ -541,13 +541,13 @@ class ArticulationView(_ArticulationView):
or the stiffness and damping must be set to zero.
Args:
efforts (Optional[Union[th.Tensor, torch.Tensor, wp.array]]): efforts of articulations in the view to be set to in the next frame.
efforts (Optional[Union[th.tensor, torch.Tensor, wp.array]]): efforts of articulations in the view to be set to in the next frame.
shape is (M, K).
indices (Optional[Union[th.Tensor, List, torch.Tensor, wp.array]], optional): indices to specify which prims
indices (Optional[Union[th.tensor, List, torch.Tensor, wp.array]], optional): indices to specify which prims
to manipulate. Shape (M,).
Where M <= size of the encapsulated prims in the view.
Defaults to None (i.e: all prims in the view).
joint_indices (Optional[Union[th.Tensor, List, torch.Tensor, wp.array]], optional): joint indices to specify which joints
joint_indices (Optional[Union[th.tensor, List, torch.Tensor, wp.array]], optional): joint indices to specify which joints
to manipulate. Shape (K,).
Where K <= num of dofs.
Defaults to None (i.e: all dofs).
@ -565,13 +565,13 @@ class ArticulationView(_ArticulationView):
>>> # set the efforts for all the articulation joints to the indicated values.
>>> # Since there are 5 envs, the joint efforts are repeated 5 times
>>> efforts = th.tile(th.Tensor([10, 20, 30, 40, 50, 60, 70, 80, 90]), (num_envs, 1))
>>> efforts = th.tile(th.tensor([10, 20, 30, 40, 50, 60, 70, 80, 90]), (num_envs, 1))
>>> prims.set_joint_efforts(efforts)
>>>
>>> # set the fingers efforts: panda_finger_joint1 (7) and panda_finger_joint2 (8) to 10
>>> # for the first, middle and last of the 5 envs
>>> efforts = th.tile(th.Tensor([10, 10]), (3, 1))
>>> prims.set_joint_efforts(efforts, indices=th.Tensor([0, 2, 4]), joint_indices=th.Tensor([7, 8]))
>>> efforts = th.tile(th.tensor([10, 10]), (3, 1))
>>> prims.set_joint_efforts(efforts, indices=th.tensor([0, 2, 4]), joint_indices=th.tensor([7, 8]))
"""
if not self._is_initialized:
carb.log_warn("ArticulationView needs to be initialized.")
@ -605,11 +605,11 @@ class ArticulationView(_ArticulationView):
class RigidPrimView(_RigidPrimView):
def enable_gravities(self, indices: Optional[Union[th.Tensor, list, torch.Tensor, wp.array]] = None) -> None:
"""Enable gravity on rigid bodies (enabled by default).
Args:
indices (Optional[Union[th.Tensor, list, torch.Tensor, wp.array]], optional): indicies to specify which prims
indices (Optional[Union[th.tensor, list, torch.Tensor, wp.array]], optional): indices to specify which prims
to manipulate. Shape (M,).
Where M <= size of the encapsulated prims in the view.
Defaults to None (i.e: all prims in the view).
@ -633,11 +633,11 @@ class RigidPrimView(_RigidPrimView):
self._physx_rigid_body_apis[i] = rigid_api
self._physx_rigid_body_apis[i].GetDisableGravityAttr().Set(False)
def disable_gravities(self, indices: Optional[Union[th.Tensor, list, torch.Tensor, wp.array]] = None) -> None:
"""Disable gravity on rigid bodies (enabled by default).
Args:
indices (Optional[Union[th.Tensor, list, torch.Tensor, wp.array]], optional): indicies to specify which prims
indices (Optional[Union[th.tensor, list, torch.Tensor, wp.array]], optional): indices to specify which prims
to manipulate. Shape (M,).
Where M <= size of the encapsulated prims in the view.
Defaults to None (i.e: all prims in the view).
@ -681,7 +681,7 @@ def colorize_bboxes(bboxes_2d_data, bboxes_2d_rgb, num_channels=3):
for bbox_2d in bboxes_2d_data:
semantic_id_list.append(bbox_2d["semanticId"])
bbox_2d_list.append(bbox_2d)
semantic_id_list_np = th.unique(th.Tensor(semantic_id_list))
semantic_id_list_np = th.unique(th.tensor(semantic_id_list))
color_list = random_colours(len(semantic_id_list_np.tolist()), True, num_channels)
for bbox_2d in bbox_2d_list:
index = th.where(semantic_id_list_np == bbox_2d["semanticId"])[0][0]
@ -697,5 +697,5 @@ def colorize_bboxes(bboxes_2d_data, bboxes_2d_rgb, num_channels=3):
rgb_img_draw.rectangle(
[(bbox_2d["x_min"], bbox_2d["y_min"]), (bbox_2d["x_max"], bbox_2d["y_max"])], outline=outline, width=2
)
bboxes_2d_rgb = th.Tensor(rgb_img)
bboxes_2d_rgb = th.tensor(rgb_img)
return bboxes_2d_rgb

View File

@ -258,11 +258,11 @@ def _generate_convex_hull_volume_checker_functions(convex_hull_mesh):
in_volume = lambda mesh, particle_positions: check_points_in_convex_hull_mesh(
mesh_face_centroids=face_centroids,
mesh_face_normals=face_normals,
pos=th.Tensor(mesh.GetAttribute("xformOp:translate").Get()),
quat=th.Tensor(
pos=th.tensor(mesh.GetAttribute("xformOp:translate").Get()),
quat=th.tensor(
[*(mesh.GetAttribute("xformOp:orient").Get().imaginary), mesh.GetAttribute("xformOp:orient").Get().real]
),
scale=th.Tensor(mesh.GetAttribute("xformOp:scale").Get()),
scale=th.tensor(mesh.GetAttribute("xformOp:scale").Get()),
particle_positions=particle_positions,
)
calc_volume = lambda mesh: trimesh_mesh.volume if trimesh_mesh.is_volume else trimesh_mesh.convex_hull.volume
@ -321,53 +321,53 @@ def generate_points_in_volume_checker_function(obj, volume_link, use_visual_mesh
elif mesh_type == "Sphere":
fcn = lambda mesh, particle_positions: check_points_in_sphere(
size=mesh.GetAttribute("radius").Get(),
pos=th.Tensor(mesh.GetAttribute("xformOp:translate").Get()),
quat=th.Tensor(
pos=th.tensor(mesh.GetAttribute("xformOp:translate").Get()),
quat=th.tensor(
[
*(mesh.GetAttribute("xformOp:orient").Get().imaginary),
mesh.GetAttribute("xformOp:orient").Get().real,
]
),
scale=th.Tensor(mesh.GetAttribute("xformOp:scale").Get()),
scale=th.tensor(mesh.GetAttribute("xformOp:scale").Get()),
particle_positions=particle_positions,
)
elif mesh_type == "Cylinder":
fcn = lambda mesh, particle_positions: check_points_in_cylinder(
size=[mesh.GetAttribute("radius").Get(), mesh.GetAttribute("height").Get()],
pos=th.Tensor(mesh.GetAttribute("xformOp:translate").Get()),
quat=th.Tensor(
pos=th.tensor(mesh.GetAttribute("xformOp:translate").Get()),
quat=th.tensor(
[
*(mesh.GetAttribute("xformOp:orient").Get().imaginary),
mesh.GetAttribute("xformOp:orient").Get().real,
]
),
scale=th.Tensor(mesh.GetAttribute("xformOp:scale").Get()),
scale=th.tensor(mesh.GetAttribute("xformOp:scale").Get()),
particle_positions=particle_positions,
)
elif mesh_type == "Cone":
fcn = lambda mesh, particle_positions: check_points_in_cone(
size=[mesh.GetAttribute("radius").Get(), mesh.GetAttribute("height").Get()],
pos=th.Tensor(mesh.GetAttribute("xformOp:translate").Get()),
quat=th.Tensor(
pos=th.tensor(mesh.GetAttribute("xformOp:translate").Get()),
quat=th.tensor(
[
*(mesh.GetAttribute("xformOp:orient").Get().imaginary),
mesh.GetAttribute("xformOp:orient").Get().real,
]
),
scale=th.Tensor(mesh.GetAttribute("xformOp:scale").Get()),
scale=th.tensor(mesh.GetAttribute("xformOp:scale").Get()),
particle_positions=particle_positions,
)
elif mesh_type == "Cube":
fcn = lambda mesh, particle_positions: check_points_in_cube(
size=mesh.GetAttribute("size").Get(),
pos=th.Tensor(mesh.GetAttribute("xformOp:translate").Get()),
quat=th.Tensor(
pos=th.tensor(mesh.GetAttribute("xformOp:translate").Get()),
quat=th.tensor(
[
*(mesh.GetAttribute("xformOp:orient").Get().imaginary),
mesh.GetAttribute("xformOp:orient").Get().real,
]
),
scale=th.Tensor(mesh.GetAttribute("xformOp:scale").Get()),
scale=th.tensor(mesh.GetAttribute("xformOp:scale").Get()),
particle_positions=particle_positions,
)
else:

View File

@ -34,7 +34,7 @@ def get_grasp_poses_for_object_sticky(target_obj):
visual=False
)
grasp_center_pos = bbox_center_in_world + th.Tensor([0, 0, th.max(bbox_extent_in_base_frame) + 0.05])
grasp_center_pos = bbox_center_in_world + th.tensor([0, 0, th.max(bbox_extent_in_base_frame) + 0.05])
towards_object_in_world_frame = bbox_center_in_world - grasp_center_pos
towards_object_in_world_frame /= th.norm(towards_object_in_world_frame)
@ -65,7 +65,7 @@ def get_grasp_poses_for_object_sticky_from_arbitrary_direction(target_obj):
approach_direction = random.choice([-1, 1]) if approach_axis != 2 else 1
constant_dimension_in_base_frame = approach_direction * bbox_extent_in_base_frame * th.eye(3)[approach_axis]
randomizable_dimensions_in_base_frame = bbox_extent_in_base_frame - th.abs(constant_dimension_in_base_frame)
dim_lo, dim_hi = th.Tensor([-1, -1, 0]), th.Tensor([1, 1, 1])
dim_lo, dim_hi = th.tensor([-1, -1, 0]), th.tensor([1, 1, 1])
random_dimensions_in_base_frame = (dim_hi - dim_lo) * th.rand(
dim_lo.size()
) + dim_lo # note that we don't allow going below center
@ -76,7 +76,7 @@ def get_grasp_poses_for_object_sticky_from_arbitrary_direction(target_obj):
grasp_center_pos = T.mat2pose(
T.pose2mat((bbox_center_in_world, bbox_quat_in_world)) # base frame to world frame
@ T.pose2mat((grasp_center_in_base_frame, [0, 0, 0, 1])) # grasp pose in base frame
)[0] + th.Tensor([0, 0, 0.02])
)[0] + th.tensor([0, 0, 0.02])
towards_object_in_world_frame = bbox_center_in_world - grasp_center_pos
towards_object_in_world_frame /= th.norm(towards_object_in_world_frame)
@ -89,7 +89,7 @@ def get_grasp_poses_for_object_sticky_from_arbitrary_direction(target_obj):
grasp_y /= th.norm(grasp_y)
grasp_z = th.cross(grasp_x, grasp_y)
grasp_z /= th.norm(grasp_z)
grasp_mat = th.Tensor([grasp_x, grasp_y, grasp_z]).T
grasp_mat = th.stack([grasp_x, grasp_y, grasp_z]).T
grasp_quat = R.from_matrix(grasp_mat).as_quat()
grasp_pose = (grasp_center_pos, grasp_quat)
@ -195,7 +195,7 @@ def grasp_position_for_open_on_prismatic_joint(robot, target_obj, relevant_joint
# Pick the closer of the two faces along the push axis as our favorite.
points_along_push_axis = (
th.Tensor([canonical_push_axis, -canonical_push_axis]) * bbox_extent_in_link_frame[push_axis_idx] / 2
th.stack([canonical_push_axis, -canonical_push_axis]) * bbox_extent_in_link_frame[push_axis_idx] / 2
)
(
push_axis_closer_side_idx,
@ -303,7 +303,7 @@ def interpolate_waypoints(start_pose, end_pose, num_waypoints="default"):
pos_waypoints = start_pos + (end_pose[0] - start_pos) * th.linspace(0, 1, num_waypoints).unsqueeze(-1)
# Also interpolate the rotations
combined_rotation = R.from_quat(th.Tensor([start_orn, end_pose[1]]))
combined_rotation = R.from_quat(th.stack([start_orn, end_pose[1]]))
slerp = Slerp([0, 1], combined_rotation)
orn_waypoints = slerp(th.linspace(0, 1, num_waypoints))
quat_waypoints = [x.as_quat() for x in orn_waypoints]
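A standalone sketch of the interpolation pattern above, assuming tensor-valued poses: positions interpolate linearly per dimension and orientations via scipy's Slerp:

import torch as th
from scipy.spatial.transform import Rotation as R, Slerp

start_pos, end_pos = th.zeros(3), th.tensor([1.0, 0.0, 0.0])
start_orn = th.tensor([0.0, 0.0, 0.0, 1.0])
end_orn = th.tensor([0.0, 0.0, 0.7071068, 0.7071068])  # 90 degrees about z
num_waypoints = 5

t = th.linspace(0, 1, num_waypoints).unsqueeze(-1)
pos_waypoints = start_pos + (end_pos - start_pos) * t
slerp = Slerp([0, 1], R.from_quat(th.stack([start_orn, end_orn])))
quat_waypoints = [r.as_quat() for r in slerp(th.linspace(0, 1, num_waypoints))]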
@ -337,7 +337,7 @@ def grasp_position_for_open_on_revolute_joint(robot, target_obj, relevant_joint,
)
bbox_quat_in_world = link.get_orientation()
bbox_extent_in_link_frame = th.Tensor(
bbox_extent_in_link_frame = th.tensor(
target_obj.native_link_bboxes[link_name]["collision"]["axis_aligned"]["extent"]
)
bbox_wrt_origin = T.relative_pose_transform(
@ -350,7 +350,7 @@ def grasp_position_for_open_on_revolute_joint(robot, target_obj, relevant_joint,
)[[1, 2, 3, 0]]
joint_axis = R.from_quat(joint_orientation).apply([1, 0, 0])
joint_axis /= th.norm(joint_axis)
origin_towards_bbox = th.Tensor(bbox_wrt_origin[0])
origin_towards_bbox = th.tensor(bbox_wrt_origin[0])
open_direction = th.cross(joint_axis, origin_towards_bbox)
open_direction /= th.norm(open_direction)
lateral_axis = th.cross(open_direction, joint_axis)
@ -365,7 +365,7 @@ def grasp_position_for_open_on_revolute_joint(robot, target_obj, relevant_joint,
canonical_open_direction = th.eye(3)[open_axis_idx]
points_along_open_axis = (
th.Tensor([canonical_open_direction, -canonical_open_direction]) * bbox_extent_in_link_frame[open_axis_idx] / 2
th.stack([canonical_open_direction, -canonical_open_direction]) * bbox_extent_in_link_frame[open_axis_idx] / 2
)
current_yaw = relevant_joint.get_state()[0][0]
closed_yaw = relevant_joint.lower_limit
@ -383,7 +383,7 @@ def grasp_position_for_open_on_revolute_joint(robot, target_obj, relevant_joint,
canonical_joint_axis = th.eye(3)[joint_axis_idx]
lateral_away_from_origin = th.eye(3)[lateral_axis_idx] * th.sign(origin_towards_bbox[lateral_axis_idx])
min_lateral_pos_wrt_surface_center = (
lateral_away_from_origin * -th.Tensor(origin_wrt_bbox[0])
lateral_away_from_origin * -th.tensor(origin_wrt_bbox[0])
- canonical_joint_axis * bbox_extent_in_link_frame[lateral_axis_idx] / 2
)
max_lateral_pos_wrt_surface_center = (
@ -446,7 +446,7 @@ def grasp_position_for_open_on_revolute_joint(robot, target_obj, relevant_joint,
)
# Decide whether a grasp is required. If approach direction and displacement are similar, no need to grasp.
movement_in_world_frame = th.Tensor(targets[-1][0]) - th.Tensor(offset_grasp_pose_in_world_frame[0])
movement_in_world_frame = th.tensor(targets[-1][0]) - th.tensor(offset_grasp_pose_in_world_frame[0])
grasp_required = th.dot(movement_in_world_frame, approach_direction_in_world_frame) < 0
return (
@ -464,10 +464,10 @@ def _get_orientation_facing_vector_with_random_yaw(vector):
axes to be random.
Args:
vector (th.Tensor): The vector to face.
vector (th.tensor): The vector to face.
Returns:
th.Tensor: A quaternion representing the orientation.
th.tensor: A quaternion representing the orientation.
"""
forward = vector / th.norm(vector)
rand_vec = th.rand(3)
@ -476,7 +476,7 @@ def _get_orientation_facing_vector_with_random_yaw(vector):
side /= th.norm(side)
up = th.cross(forward, side)
# assert th.isclose(th.norm(up), 1, atol=1e-3)
rotmat = th.Tensor([forward, side, up]).T
rotmat = th.stack([forward, side, up]).T
return R.from_matrix(rotmat).as_quat()
@ -489,7 +489,7 @@ def _rotate_point_around_axis(point_wrt_arbitrary_frame, arbitrary_frame_wrt_ori
Args:
point_wrt_arbitrary_frame (tuple): The point in the arbitrary frame.
arbitrary_frame_wrt_origin (tuple): The pose of the arbitrary frame in the origin frame.
joint_axis (th.Tensor): The axis to rotate around.
joint_axis (th.tensor): The axis to rotate around.
yaw_change (float): The amount to rotate by.
Returns:
@ -519,14 +519,14 @@ def _get_closest_point_to_point_in_world_frame(
Returns:
tuple: The index of the closest vector, the closest vector in the arbitrary frame, and the closest vector in the world frame.
"""
vectors_in_world = th.Tensor(
vectors_in_world = th.stack(
[
T.pose_transform(*arbitrary_frame_to_world_frame, vector, [0, 0, 0, 1])[0]
for vector in vectors_in_arbitrary_frame
]
)
vector_distances_to_point = th.norm(vectors_in_world - th.Tensor(point_in_world)[None, :], dim=1)
vector_distances_to_point = th.norm(vectors_in_world - th.tensor(point_in_world)[None, :], dim=1)
closer_option_idx = th.argmin(vector_distances_to_point)
vector_in_arbitrary_frame = vectors_in_arbitrary_frame[closer_option_idx]
vector_in_world_frame = vectors_in_world[closer_option_idx]
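Several constructions in this file assemble a matrix from tensors that already exist; note that th.tensor only consumes raw Python or NumPy data, so rows that are themselves tensors must go through th.stack. A minimal sketch of the distinction:

import torch as th

x, y, z = th.eye(3)                # three existing 1-D tensors
mat = th.stack([x, y, z]).T        # correct: stacks tensors into a (3, 3) matrix
# th.tensor([x, y, z]) raises ValueError: only one element tensors can be
# converted to Python scalars
raw = th.tensor([[1.0, 0.0], [0.0, 1.0]])  # fine: nested lists of scalars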

Some files were not shown because too many files have changed in this diff.