Remove dependency on scipy rotation library

This commit is contained in:
hang-yin 2024-07-26 14:16:04 -07:00
parent 08a12ce553
commit e15e312b04
21 changed files with 264 additions and 233 deletions

View File

@ -17,7 +17,6 @@ import gymnasium as gym
import torch as th import torch as th
from aenum import IntEnum, auto from aenum import IntEnum, auto
from matplotlib import pyplot as plt from matplotlib import pyplot as plt
from scipy.spatial.transform import Rotation, Slerp
import omnigibson as og import omnigibson as og
import omnigibson.lazy as lazy import omnigibson.lazy as lazy
@ -1138,7 +1137,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
delta_pos = target_pos - current_pos delta_pos = target_pos - current_pos
target_pos_diff = th.norm(delta_pos) target_pos_diff = th.norm(delta_pos)
target_orn_diff = (Rotation.from_quat(target_orn) * Rotation.from_quat(current_orn).inv()).magnitude() target_orn_diff = T.get_orientation_diff_in_radian(current_orn, target_orn)
reached_goal = target_pos_diff < pos_thresh and target_orn_diff < ori_thresh reached_goal = target_pos_diff < pos_thresh and target_orn_diff < ori_thresh
if reached_goal: if reached_goal:
return return
@ -1149,9 +1148,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
# if i > 0 and stop_if_stuck and detect_robot_collision_in_sim(self.robot, ignore_obj_in_hand=False): # if i > 0 and stop_if_stuck and detect_robot_collision_in_sim(self.robot, ignore_obj_in_hand=False):
if i > 0 and stop_if_stuck: if i > 0 and stop_if_stuck:
pos_diff = th.norm(prev_pos - current_pos) pos_diff = th.norm(prev_pos - current_pos)
orn_diff = (Rotation.from_quat(prev_orn) * Rotation.from_quat(current_orn).inv()).magnitude() orn_diff = T.get_orientation_diff_in_radian(current_orn, prev_orn)
orn_diff = (Rotation.from_quat(prev_orn) * Rotation.from_quat(current_orn).inv()).magnitude()
orn_diff = (Rotation.from_quat(prev_orn) * Rotation.from_quat(current_orn).inv()).magnitude()
if pos_diff < 0.0003 and orn_diff < 0.01: if pos_diff < 0.0003 and orn_diff < 0.01:
raise ActionPrimitiveError(ActionPrimitiveError.Reason.EXECUTION_ERROR, f"Hand is stuck") raise ActionPrimitiveError(ActionPrimitiveError.Reason.EXECUTION_ERROR, f"Hand is stuck")
@ -1190,10 +1187,8 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
pos_waypoints = th.linspace(start_pos, target_pose[0], num_poses) pos_waypoints = th.linspace(start_pos, target_pose[0], num_poses)
# Also interpolate the rotations # Also interpolate the rotations
combined_rotation = Rotation.from_quat(th.tensor([start_orn, target_pose[1]])) t_values = th.linspace(0, 1, num_poses)
slerp = Slerp([0, 1], combined_rotation) quat_waypoints = [T.quat_slerp(start_orn, target_pose[1], t) for t in t_values]
orn_waypoints = slerp(th.linspace(0, 1, num_poses))
quat_waypoints = [x.as_quat() for x in orn_waypoints]
controller_config = self.robot._controller_config["arm_" + self.arm] controller_config = self.robot._controller_config["arm_" + self.arm]
if controller_config["name"] == "InverseKinematicsController": if controller_config["name"] == "InverseKinematicsController":
@ -1220,7 +1215,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
# Also decide if we can stop early. # Also decide if we can stop early.
current_pos, current_orn = self.robot.eef_links[self.arm].get_position_orientation() current_pos, current_orn = self.robot.eef_links[self.arm].get_position_orientation()
pos_diff = th.norm(th.tensor(current_pos) - th.tensor(target_pose[0])) pos_diff = th.norm(th.tensor(current_pos) - th.tensor(target_pose[0]))
orn_diff = (Rotation.from_quat(current_orn) * Rotation.from_quat(target_pose[1]).inv()).magnitude() orn_diff = T.get_orientation_diff_in_radian(target_pose[1], current_orn).item()
if pos_diff < 0.005 and orn_diff < th.deg2rad(th.tensor([0.1])).item(): if pos_diff < 0.005 and orn_diff < th.deg2rad(th.tensor([0.1])).item():
return return
@ -1265,7 +1260,7 @@ class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
# Also decide if we can stop early. # Also decide if we can stop early.
current_pos, current_orn = self.robot.eef_links[self.arm].get_position_orientation() current_pos, current_orn = self.robot.eef_links[self.arm].get_position_orientation()
pos_diff = th.norm(th.tensor(current_pos) - th.tensor(target_pose[0])) pos_diff = th.norm(th.tensor(current_pos) - th.tensor(target_pose[0]))
orn_diff = (Rotation.from_quat(current_orn) * Rotation.from_quat(target_pose[1]).inv()).magnitude() orn_diff = T.get_orientation_diff_in_radian(target_pose[1], current_orn)
if pos_diff < 0.001 and orn_diff < th.deg2rad(th.tensor([0.1])).item(): if pos_diff < 0.001 and orn_diff < th.deg2rad(th.tensor([0.1])).item():
return return

View File

@ -422,7 +422,7 @@ class ParticleModifier(IntrinsicObjectState, LinkBasedStateMixin, UpdateStateMix
self.projection_mesh.set_local_pose( self.projection_mesh.set_local_pose(
position=th.tensor([0, 0, -z_offset]), position=th.tensor([0, 0, -z_offset]),
orientation=T.euler2quat([0, 0, 0]), orientation=T.euler2quat(th.tensor([0, 0, 0], dtype=th.float32)),
) )
# Generate the function for checking whether points are within the projection mesh # Generate the function for checking whether points are within the projection mesh
@ -1079,7 +1079,9 @@ class ParticleApplier(ParticleModifier):
self.projection_source_sphere.initialize() self.projection_source_sphere.initialize()
self.projection_source_sphere.visible = False self.projection_source_sphere.visible = False
# Rotate by 90 degrees in y-axis so that the projection visualization aligns with the projection mesh # Rotate by 90 degrees in y-axis so that the projection visualization aligns with the projection mesh
self.projection_source_sphere.set_local_pose(orientation=T.euler2quat([0, math.pi / 2, 0])) self.projection_source_sphere.set_local_pose(
orientation=T.euler2quat(th.tensor([0, math.pi / 2, 0], dtype=th.float32))
)
# Make sure the meta mesh is aligned with the meta link if visualizing # Make sure the meta mesh is aligned with the meta link if visualizing
# This corresponds to checking (a) position of tip of projection mesh should align with origin of # This corresponds to checking (a) position of tip of projection mesh should align with origin of

View File

@ -324,7 +324,7 @@ class ControllableObject(BaseObject):
high.append(th.tensor([float("inf")] * controller.command_dim) if limits is None else limits[1]) high.append(th.tensor([float("inf")] * controller.command_dim) if limits is None else limits[1])
return gym.spaces.Box( return gym.spaces.Box(
shape=(self.action_dim,), low=np.array(th.cat(low)), high=np.array(th.cat(high)), dtype=float shape=(self.action_dim,), low=np.array(th.cat(low)), high=np.array(th.cat(high)), dtype=np.float32
) )
def apply_action(self, action): def apply_action(self, action):
@ -341,7 +341,7 @@ class ControllableObject(BaseObject):
# If we're using discrete action space, we grab the specific action and use that to convert to control # If we're using discrete action space, we grab the specific action and use that to convert to control
if self._action_type == "discrete": if self._action_type == "discrete":
action = th.tensor(self.discrete_action_list[action]) action = th.tensor(self.discrete_action_list[action], dtype=th.float32)
# Check if the input action's length matches the action dimension # Check if the input action's length matches the action dimension
assert len(action) == self.action_dim, "Action must be dimension {}, got dim {} instead.".format( assert len(action) == self.action_dim, "Action must be dimension {}, got dim {} instead.".format(

View File

@ -5,7 +5,6 @@ from functools import cached_property
import torch as th import torch as th
import trimesh import trimesh
from scipy.spatial.transform import Rotation
import omnigibson as og import omnigibson as og
import omnigibson.lazy as lazy import omnigibson.lazy as lazy
@ -373,14 +372,15 @@ class BaseObject(EntityPrim, Registerable, metaclass=ABCMeta):
rotated_X_axis = base_frame_to_world[:3, 0] rotated_X_axis = base_frame_to_world[:3, 0]
rotation_around_Z_axis = th.arctan2(rotated_X_axis[1], rotated_X_axis[0]) rotation_around_Z_axis = th.arctan2(rotated_X_axis[1], rotated_X_axis[0])
xy_aligned_base_com_to_world = th.tensor( xy_aligned_base_com_to_world = th.tensor(
trimesh.transformations.compose_matrix(translate=translate, angles=[0, 0, rotation_around_Z_axis]) trimesh.transformations.compose_matrix(translate=translate, angles=[0, 0, rotation_around_Z_axis]),
dtype=th.float32,
) )
# Finally update our desired frame. # Finally update our desired frame.
desired_frame_to_world = xy_aligned_base_com_to_world desired_frame_to_world = xy_aligned_base_com_to_world
else: else:
# Default desired frame is base CoM frame. # Default desired frame is base CoM frame.
desired_frame_to_world = th.tensor(base_frame_to_world) desired_frame_to_world = th.tensor(base_frame_to_world, dtype=th.float32)
# Compute the world-to-base frame transform. # Compute the world-to-base frame transform.
world_to_desired_frame = th.linalg.inv_ex(desired_frame_to_world).inverse world_to_desired_frame = th.linalg.inv_ex(desired_frame_to_world).inverse
@ -406,7 +406,9 @@ class BaseObject(EntityPrim, Registerable, metaclass=ABCMeta):
points_in_world.extend(hull_points.tolist()) points_in_world.extend(hull_points.tolist())
# Move the points to the desired frame # Move the points to the desired frame
points = th.tensor(trimesh.transformations.transform_points(points_in_world, world_to_desired_frame)) points = th.tensor(
trimesh.transformations.transform_points(points_in_world, world_to_desired_frame), dtype=th.float32
)
# All points are now in the desired frame: either the base CoM or the xy-plane-aligned base CoM. # All points are now in the desired frame: either the base CoM or the xy-plane-aligned base CoM.
# Now fit a bounding box to all the points by taking the minimum/maximum in the desired frame. # Now fit a bounding box to all the points by taking the minimum/maximum in the desired frame.
@ -416,10 +418,13 @@ class BaseObject(EntityPrim, Registerable, metaclass=ABCMeta):
bbox_extent_in_desired_frame = aabb_max_in_desired_frame - aabb_min_in_desired_frame bbox_extent_in_desired_frame = aabb_max_in_desired_frame - aabb_min_in_desired_frame
# Transform the center to the world frame. # Transform the center to the world frame.
bbox_center_in_world = trimesh.transformations.transform_points( bbox_center_in_world = th.tensor(
[bbox_center_in_desired_frame.tolist()], desired_frame_to_world trimesh.transformations.transform_points([bbox_center_in_desired_frame.tolist()], desired_frame_to_world)[
)[0] 0
bbox_orn_in_world = Rotation.from_matrix(desired_frame_to_world[:3, :3]).as_quat() ],
dtype=th.float32,
)
bbox_orn_in_world = T.mat2quat(desired_frame_to_world[:3, :3])
return bbox_center_in_world, bbox_orn_in_world, bbox_extent_in_desired_frame, bbox_center_in_desired_frame return bbox_center_in_world, bbox_orn_in_world, bbox_extent_in_desired_frame, bbox_center_in_desired_frame

View File

@ -331,9 +331,12 @@ class EntityPrim(XFormPrim):
_, link_local_orn = link.get_local_pose() _, link_local_orn = link.get_local_pose()
# Find the joint frame orientation in the parent link frame # Find the joint frame orientation in the parent link frame
joint_local_orn = lazy.omni.isaac.core.utils.rotations.gf_quat_to_np_array( joint_local_orn = th.tensor(
joint.get_attribute("physics:localRot0") lazy.omni.isaac.core.utils.rotations.gf_quat_to_np_array(
)[[1, 2, 3, 0]] joint.get_attribute("physics:localRot0")
)[[1, 2, 3, 0]],
dtype=th.float32,
)
# Compute the joint frame orientation in the object frame # Compute the joint frame orientation in the object frame
joint_orn = T.quat_multiply(quaternion1=joint_local_orn, quaternion0=link_local_orn) joint_orn = T.quat_multiply(quaternion1=joint_local_orn, quaternion0=link_local_orn)

View File

@ -349,7 +349,7 @@ class XFormPrim(BasePrim):
return PoseAPI.get_world_pose_with_scale(self.prim_path) return PoseAPI.get_world_pose_with_scale(self.prim_path)
def transform_local_points_to_world(self, points): def transform_local_points_to_world(self, points):
return th.tensor(trimesh.transformations.transform_points(points, self.scaled_transform)) return th.tensor(trimesh.transformations.transform_points(points, self.scaled_transform), dtype=th.float32)
@property @property
def scale(self): def scale(self):
@ -440,6 +440,8 @@ class XFormPrim(BasePrim):
def _load_state(self, state): def _load_state(self, state):
pos, orn = state["pos"], state["ori"] pos, orn = state["pos"], state["ori"]
pos = pos.float() if isinstance(pos, th.Tensor) else th.tensor(pos, dtype=th.float32)
orn = orn.float() if isinstance(orn, th.Tensor) else th.tensor(orn, dtype=th.float32)
if self.scene is not None: if self.scene is not None:
pos, orn = T.pose_transform(*self.scene.prim.get_position_orientation(), pos, orn) pos, orn = T.pose_transform(*self.scene.prim.get_position_orientation(), pos, orn)
self.set_position_orientation(pos, orn) self.set_position_orientation(pos, orn)

View File

@ -1,7 +1,7 @@
import math import math
import torch as th import torch as th
from scipy.spatial.transform import Rotation as R
import omnigibson.utils.transform_utils as T import omnigibson.utils.transform_utils as T
from omnigibson.reward_functions.reward_function_base import BaseRewardFunction from omnigibson.reward_functions.reward_function_base import BaseRewardFunction
@ -88,16 +88,18 @@ class GraspReward(BaseRewardFunction):
info["position_penalty"] = position_penalty info["position_penalty"] = position_penalty
self.prev_eef_pos = eef_pos self.prev_eef_pos = eef_pos
eef_rot = R.from_quat(robot.get_eef_orientation(robot.default_arm)) eef_quat = robot.get_eef_orientation(robot.default_arm)
info["rotation_penalty_factor"] = 0.0 info["rotation_penalty_factor"] = 0.0
info["rotation_penalty"] = 0.0 info["rotation_penalty"] = 0.0
if self.prev_eef_rot is not None: if self.prev_eef_quat is not None:
delta_rot = (eef_rot * self.prev_eef_rot.inv()).magnitude() delta_quat = T.quat_multiply(eef_quat, T.quat_inverse(self.prev_eef_quat))
delta_axis_angle = T.quat2axisangle(delta_quat)
delta_rot = th.norm(delta_axis_angle)
rotation_penalty = -delta_rot * self.eef_orientation_penalty_coef rotation_penalty = -delta_rot * self.eef_orientation_penalty_coef
reward += rotation_penalty reward += rotation_penalty
info["rotation_penalty_factor"] = delta_rot info["rotation_penalty_factor"] = delta_rot.item()
info["rotation_penalty"] = rotation_penalty info["rotation_penalty"] = rotation_penalty.item()
self.prev_eef_rot = eef_rot self.prev_eef_quat = eef_quat
# Penalize robot for colliding with an object # Penalize robot for colliding with an object
info["collision_penalty_factor"] = 0.0 info["collision_penalty_factor"] = 0.0

View File

@ -6,7 +6,6 @@ from collections import OrderedDict
from typing import Iterable, List, Tuple from typing import Iterable, List, Tuple
import torch as th import torch as th
from scipy.spatial.transform import Rotation as R
import omnigibson as og import omnigibson as og
import omnigibson.lazy as lazy import omnigibson.lazy as lazy
@ -438,11 +437,11 @@ class BehaviorRobot(ManipulationRobot, LocomotionRobot, ActiveCameraRobot):
if teleop_action.is_valid["head"]: if teleop_action.is_valid["head"]:
head_pos, head_orn = teleop_action.head[:3], T.euler2quat(teleop_action.head[3:6]) head_pos, head_orn = teleop_action.head[:3], T.euler2quat(teleop_action.head[3:6])
des_body_pos = head_pos - th.tensor([0, 0, m.BODY_HEIGHT_OFFSET]) des_body_pos = head_pos - th.tensor([0, 0, m.BODY_HEIGHT_OFFSET])
des_body_rpy = th.tensor([0, 0, R.from_quat(head_orn).as_euler("XYZ")[2]]) des_body_rpy = th.tensor([0, 0, T.quat2euler(head_orn.unsqueeze(0))[2][0]])
des_body_orn = T.euler2quat(des_body_rpy) des_body_orn = T.euler2quat(des_body_rpy)
else: else:
des_body_pos, des_body_orn = self.get_position_orientation() des_body_pos, des_body_orn = self.get_position_orientation()
des_body_rpy = R.from_quat(des_body_orn).as_euler("XYZ") des_body_rpy = th.stack(T.quat2euler(des_body_orn.unsqueeze(0))).squeeze(1)
action[self.controller_action_idx["base"]] = th.cat((des_body_pos, des_body_rpy)) action[self.controller_action_idx["base"]] = th.cat((des_body_pos, des_body_rpy))
# Update action space for other VR objects # Update action space for other VR objects
for part_name, eef_part in self.parts.items(): for part_name, eef_part in self.parts.items():
@ -476,7 +475,7 @@ class BehaviorRobot(ManipulationRobot, LocomotionRobot, ActiveCameraRobot):
des_local_part_pos, des_local_part_orn = T.pose_transform( des_local_part_pos, des_local_part_orn = T.pose_transform(
eef_part.offset_to_body, [0, 0, 0, 1], des_local_part_pos, des_local_part_orn eef_part.offset_to_body, [0, 0, 0, 1], des_local_part_pos, des_local_part_orn
) )
des_part_rpy = R.from_quat(des_local_part_orn).as_euler("XYZ") des_part_rpy = th.stack(T.quat2euler(des_local_part_orn.unsqueeze(0))).squeeze(1)
controller_name = "camera" if part_name == "head" else "arm_" + part_name controller_name = "camera" if part_name == "head" else "arm_" + part_name
action[self.controller_action_idx[controller_name]] = th.cat((des_local_part_pos, des_part_rpy)) action[self.controller_action_idx[controller_name]] = th.cat((des_local_part_pos, des_part_rpy))
# If we reset, teleop the robot parts to the desired pose # If we reset, teleop the robot parts to the desired pose

View File

@ -4,7 +4,7 @@ from copy import deepcopy
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
import numpy as np import numpy as np
import torch as th import torch as th
from scipy.spatial.transform import Rotation as R
import omnigibson.utils.transform_utils as T import omnigibson.utils.transform_utils as T
from omnigibson.macros import create_module_macros from omnigibson.macros import create_module_macros

View File

@ -3,7 +3,7 @@ import os
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
import torch as th import torch as th
import trimesh import trimesh
from scipy.spatial.transform import Rotation as R
import omnigibson as og import omnigibson as og
import omnigibson.lazy as lazy import omnigibson.lazy as lazy
@ -281,7 +281,7 @@ class MacroParticleSystem(BaseSystem):
# Update the tensors # Update the tensors
n_particles = len(positions) n_particles = len(positions)
orientations = th.tensor(R.random(num=n_particles).as_quat()) if orientations is None else orientations orientations = T.random_quaternion(n_particles) if orientations is None else orientations
scales = self.sample_scales(n=n_particles) if scales is None else scales scales = self.sample_scales(n=n_particles) if scales is None else scales
positions = th.cat([current_positions, positions], dim=0) positions = th.cat([current_positions, positions], dim=0)
@ -598,7 +598,7 @@ class MacroVisualParticleSystem(MacroParticleSystem, VisualParticleSystem):
obj = self._group_objects[group] obj = self._group_objects[group]
# Sample scales and corresponding bbox extents # Sample scales and corresponding bbox extents
scales = self.sample_scales_by_group(group=group, n=max_samples) scales = self.sample_scales_by_group(group=group, n=max_samples).float()
# For sampling particle positions, we need the global bbox extents, NOT the local extents # For sampling particle positions, we need the global bbox extents, NOT the local extents
# which is what we would get naively if we directly use @scales # which is what we would get naively if we directly use @scales
avg_scale = th.pow(th.prod(obj.scale), 1 / 3) avg_scale = th.pow(th.prod(obj.scale), 1 / 3)

View File

@ -3,7 +3,7 @@ import os
import random import random
import torch as th import torch as th
from scipy.spatial.transform import Rotation as R
import omnigibson as og import omnigibson as og
import omnigibson.utils.transform_utils as T import omnigibson.utils.transform_utils as T
@ -148,8 +148,7 @@ class GraspTask(BaseTask):
raise ValueError("Robot could not settle") raise ValueError("Robot could not settle")
# Check if the robot has toppled # Check if the robot has toppled
rotation = R.from_quat(robot.get_orientation()) robot_up = T.quat_apply(robot.get_orientation(), th.tensor([0, 0, 1], dtype=th.float32))
robot_up = rotation.apply(th.tensor([0, 0, 1]))
if robot_up[2] < 0.75: if robot_up[2] < 0.75:
raise ValueError("Robot has toppled over") raise ValueError("Robot has toppled over")

View File

@ -1,6 +1,6 @@
import torch as th import torch as th
from scipy.spatial.transform import Rotation as R
import omnigibson.utils.transform_utils as T
from omnigibson.termination_conditions.termination_condition_base import FailureCondition from omnigibson.termination_conditions.termination_condition_base import FailureCondition
@ -37,8 +37,9 @@ class Falling(FailureCondition):
# Terminate if the robot has toppled over # Terminate if the robot has toppled over
if self._topple: if self._topple:
rotation = R.from_quat(env.scene.robots[self._robot_idn].get_orientation()) robot_up = T.quat_apply(
robot_up = rotation.apply(th.tensor([0, 0, 1])) env.scene.robots[self._robot_idn].get_orientation(), th.tensor([0, 0, 1], dtype=th.float32)
)
if robot_up[2] < self._tilt_tolerance: if robot_up[2] < self._tilt_tolerance:
return True return True

View File

@ -2,8 +2,6 @@ import math
import random import random
import torch as th import torch as th
from scipy.spatial.transform import Rotation as R
from scipy.spatial.transform import Slerp
import omnigibson.lazy as lazy import omnigibson.lazy as lazy
import omnigibson.utils.transform_utils as T import omnigibson.utils.transform_utils as T
@ -90,7 +88,7 @@ def get_grasp_poses_for_object_sticky_from_arbitrary_direction(target_obj):
grasp_z = th.linalg.cross(grasp_x, grasp_y) grasp_z = th.linalg.cross(grasp_x, grasp_y)
grasp_z /= th.norm(grasp_z) grasp_z /= th.norm(grasp_z)
grasp_mat = th.tensor([grasp_x, grasp_y, grasp_z]).T grasp_mat = th.tensor([grasp_x, grasp_y, grasp_z]).T
grasp_quat = R.from_matrix(grasp_mat).as_quat() grasp_quat = T.mat2quat(grasp_mat)
grasp_pose = (grasp_center_pos, grasp_quat) grasp_pose = (grasp_center_pos, grasp_quat)
grasp_candidate = [(grasp_pose, towards_object_in_world_frame)] grasp_candidate = [(grasp_pose, towards_object_in_world_frame)]
@ -184,7 +182,7 @@ def grasp_position_for_open_on_prismatic_joint(robot, target_obj, relevant_joint
joint_orientation = lazy.omni.isaac.core.utils.rotations.gf_quat_to_np_array( joint_orientation = lazy.omni.isaac.core.utils.rotations.gf_quat_to_np_array(
relevant_joint.get_attribute("physics:localRot0") relevant_joint.get_attribute("physics:localRot0")
)[[1, 2, 3, 0]] )[[1, 2, 3, 0]]
push_axis = R.from_quat(joint_orientation).apply([1, 0, 0]) push_axis = T.quat_apply(joint_orientation, th.tensor([1, 0, 0], dtype=th.float32))
assert math.isclose(th.max(th.abs(push_axis)).values.item(), 1.0) # Make sure we're aligned with a bb axis. assert math.isclose(th.max(th.abs(push_axis)).values.item(), 1.0) # Make sure we're aligned with a bb axis.
push_axis_idx = th.argmax(th.abs(push_axis)) push_axis_idx = th.argmax(th.abs(push_axis))
canonical_push_axis = th.eye(3)[push_axis_idx] canonical_push_axis = th.eye(3)[push_axis_idx]
@ -250,8 +248,8 @@ def grasp_position_for_open_on_prismatic_joint(robot, target_obj, relevant_joint
) )
# Compute the approach direction. # Compute the approach direction.
approach_direction_in_world_frame = R.from_quat(bbox_quat_in_world).apply( approach_direction_in_world_frame = T.quat_apply(
canonical_push_axis * -push_axis_closer_side_sign bbox_quat_in_world, canonical_push_axis * -push_axis_closer_side_sign
) )
# Decide whether a grasp is required. If approach direction and displacement are similar, no need to grasp. # Decide whether a grasp is required. If approach direction and displacement are similar, no need to grasp.
@ -303,9 +301,8 @@ def interpolate_waypoints(start_pose, end_pose, num_waypoints="default"):
pos_waypoints = th.linspace(start_pos, end_pose[0], num_waypoints) pos_waypoints = th.linspace(start_pos, end_pose[0], num_waypoints)
# Also interpolate the rotations # Also interpolate the rotations
combined_rotation = R.from_quat(th.tensor([start_orn, end_pose[1]])) fracs = th.linspace(0, 1, num_waypoints)
slerp = Slerp([0, 1], combined_rotation) orn_waypoints = T.quat_slerp(start_orn.unsqueeze(0), end_pose[1].unsqueeze(0), fracs.unsqueeze(1))
orn_waypoints = slerp(th.linspace(0, 1, num_waypoints))
quat_waypoints = [x.as_quat() for x in orn_waypoints] quat_waypoints = [x.as_quat() for x in orn_waypoints]
return [waypoint for waypoint in zip(pos_waypoints, quat_waypoints)] return [waypoint for waypoint in zip(pos_waypoints, quat_waypoints)]
@ -348,7 +345,7 @@ def grasp_position_for_open_on_revolute_joint(robot, target_obj, relevant_joint,
joint_orientation = lazy.omni.isaac.core.utils.rotations.gf_quat_to_np_array( joint_orientation = lazy.omni.isaac.core.utils.rotations.gf_quat_to_np_array(
relevant_joint.get_attribute("physics:localRot0") relevant_joint.get_attribute("physics:localRot0")
)[[1, 2, 3, 0]] )[[1, 2, 3, 0]]
joint_axis = R.from_quat(joint_orientation).apply([1, 0, 0]) joint_axis = T.quat_apply(joint_orientation, th.tensor([1, 0, 0], dtype=th.float32))
joint_axis /= th.norm(joint_axis) joint_axis /= th.norm(joint_axis)
origin_towards_bbox = th.tensor(bbox_wrt_origin[0]) origin_towards_bbox = th.tensor(bbox_wrt_origin[0])
open_direction = th.linalg.cross(joint_axis, origin_towards_bbox) open_direction = th.linalg.cross(joint_axis, origin_towards_bbox)
@ -441,8 +438,8 @@ def grasp_position_for_open_on_revolute_joint(robot, target_obj, relevant_joint,
targets.append(rotated_grasp_pose_in_world_frame) targets.append(rotated_grasp_pose_in_world_frame)
# Compute the approach direction. # Compute the approach direction.
approach_direction_in_world_frame = R.from_quat(bbox_quat_in_world).apply( approach_direction_in_world_frame = T.quat_apply(
canonical_open_direction * -open_axis_closer_side_sign bbox_quat_in_world, canonical_open_direction * -open_axis_closer_side_sign
) )
# Decide whether a grasp is required. If approach direction and displacement are similar, no need to grasp. # Decide whether a grasp is required. If approach direction and displacement are similar, no need to grasp.
@ -477,7 +474,7 @@ def _get_orientation_facing_vector_with_random_yaw(vector):
up = th.linalg.cross(forward, side) up = th.linalg.cross(forward, side)
# assert th.isclose(th.norm(up), 1, atol=1e-3) # assert th.isclose(th.norm(up), 1, atol=1e-3)
rotmat = th.tensor([forward, side, up]).T rotmat = th.tensor([forward, side, up]).T
return R.from_matrix(rotmat).as_quat() return T.mat2quat(rotmat)
def _rotate_point_around_axis(point_wrt_arbitrary_frame, arbitrary_frame_wrt_origin, joint_axis, yaw_change): def _rotate_point_around_axis(point_wrt_arbitrary_frame, arbitrary_frame_wrt_origin, joint_axis, yaw_change):
@ -495,7 +492,7 @@ def _rotate_point_around_axis(point_wrt_arbitrary_frame, arbitrary_frame_wrt_ori
Returns: Returns:
tuple: The rotated point in the arbitrary frame. tuple: The rotated point in the arbitrary frame.
""" """
rotation = R.from_rotvec(joint_axis * yaw_change).as_quat() rotation = T.euler2quat(joint_axis * yaw_change)
origin_wrt_arbitrary_frame = T.invert_pose_transform(*arbitrary_frame_wrt_origin) origin_wrt_arbitrary_frame = T.invert_pose_transform(*arbitrary_frame_wrt_origin)
pose_in_origin_frame = T.pose_transform(*arbitrary_frame_wrt_origin, *point_wrt_arbitrary_frame) pose_in_origin_frame = T.pose_transform(*arbitrary_frame_wrt_origin, *point_wrt_arbitrary_frame)

View File

@ -1,8 +1,7 @@
import math import math
import torch as th import torch as th
from scipy.spatial import ConvexHull, distance_matrix
from scipy.spatial.transform import Rotation as R
import omnigibson as og import omnigibson as og
import omnigibson.utils.transform_utils as T import omnigibson.utils.transform_utils as T
@ -172,20 +171,16 @@ def sample_kinematics(
if sampling_success: if sampling_success:
# Move the object from the original parallel bbox to the sampled bbox # Move the object from the original parallel bbox to the sampled bbox
parallel_bbox_rotation = R.from_quat(parallel_bbox_orn)
sample_rotation = R.from_quat(sampled_quaternion)
original_rotation = R.from_quat(orientation)
# The additional orientation to be applied should be the delta orientation # The additional orientation to be applied should be the delta orientation
# between the parallel bbox orientation and the sample orientation # between the parallel bbox orientation and the sample orientation
additional_rotation = sample_rotation * parallel_bbox_rotation.inv() additional_quat = T.quat_multiply(sampled_quaternion, T.quat_inverse(parallel_bbox_orn))
combined_rotation = additional_rotation * original_rotation combined_quat = T.quat_multiply(additional_quat, orientation)
orientation = th.tensor(combined_rotation.as_quat()) orientation = combined_quat
# The delta vector between the base CoM frame and the parallel bbox center needs to be rotated # The delta vector between the base CoM frame and the parallel bbox center needs to be rotated
# by the same additional orientation # by the same additional orientation
diff = old_pos - parallel_bbox_center diff = old_pos - parallel_bbox_center
rotated_diff = additional_rotation.apply(diff) rotated_diff = T.quat_apply(additional_quat, diff)
pos = sampled_vector + rotated_diff pos = sampled_vector + rotated_diff
if pos is None: if pos is None:

View File

@ -3,7 +3,7 @@ Helper utility functions for computing relevant object information
""" """
import torch as th import torch as th
from scipy.spatial.transform import Rotation as R
import omnigibson as og import omnigibson as og
import omnigibson.utils.transform_utils as T import omnigibson.utils.transform_utils as T
@ -30,7 +30,7 @@ def sample_stable_orientations(obj, n_samples=10, drop_aabb_offset=0.1):
radius = th.norm(aabb_extent) / 2.0 radius = th.norm(aabb_extent) / 2.0
drop_pos = th.tensor([0, 0, radius + drop_aabb_offset]) drop_pos = th.tensor([0, 0, radius + drop_aabb_offset])
center_offset = obj.get_position() - obj.aabb_center center_offset = obj.get_position() - obj.aabb_center
drop_orientations = R.random(n_samples).as_quat() drop_orientations = T.random_quaternion(n_samples)
stable_orientations = th.zeros_like(drop_orientations) stable_orientations = th.zeros_like(drop_orientations)
for i, drop_orientation in enumerate(drop_orientations): for i, drop_orientation in enumerate(drop_orientations):
# Sample orientation, drop, wait to stabilize, then record # Sample orientation, drop, wait to stabilize, then record

View File

@ -7,7 +7,7 @@ from collections import Counter, defaultdict
import numpy as np import numpy as np
import torch as th import torch as th
import trimesh import trimesh
from scipy.spatial.transform import Rotation as R
from scipy.stats import truncnorm from scipy.stats import truncnorm
import omnigibson as og import omnigibson as og
@ -982,20 +982,12 @@ def sample_cuboid_on_object(
if rotation is None: if rotation is None:
continue continue
corner_positions = cuboid_centroid[None, :] + ( corner_vectors = (
rotation.apply( 0.5
0.5 * this_cuboid_dimensions
* this_cuboid_dimensions * th.tensor([[1, 1, -1], [-1, 1, -1], [-1, -1, -1], [1, -1, -1]], dtype=th.float32)
* th.tensor( ).float()
[ corner_positions = cuboid_centroid.unsqueeze(0) + T.quat_apply(rotation, corner_vectors)
[1, 1, -1],
[-1, 1, -1],
[-1, -1, -1],
[1, -1, -1],
]
)
)
)
# Now we use the cuboid's diagonals to check that the cuboid is actually empty # Now we use the cuboid's diagonals to check that the cuboid is actually empty
if verify_cuboid_empty and not check_cuboid_empty( if verify_cuboid_empty and not check_cuboid_empty(
@ -1015,10 +1007,10 @@ def sample_cuboid_on_object(
padding = cuboid_bottom_padding * center_hit_normal padding = cuboid_bottom_padding * center_hit_normal
cuboid_centroid += padding cuboid_centroid += padding
plane_normal = th.zeros(3) plane_normal = th.zeros(3)
rotation = R.from_quat([0, 0, 0, 1]) rotation = th.tensor([0, 0, 0, 1], dtype=th.float32)
# We've found a nice attachment point. Continue onto next point to sample. # We've found a nice attachment point. Continue onto next point to sample.
results[i] = (cuboid_centroid, plane_normal, rotation.as_quat(), hit_link, refusal_reasons) results[i] = (cuboid_centroid, plane_normal, rotation, hit_link, refusal_reasons)
break break
if m.DEBUG_SAMPLING: if m.DEBUG_SAMPLING:
@ -1066,7 +1058,7 @@ def compute_rotation_from_grid_sample(
projected_hits = projected_hits[hits] projected_hits = projected_hits[hits]
sampled_grid_relative_vectors = projected_hits - cuboid_centroid sampled_grid_relative_vectors = projected_hits - cuboid_centroid
rotation, _ = R.align_vectors(sampled_grid_relative_vectors, grid_in_object_coordinates) rotation = T.align_vector_sets(sampled_grid_relative_vectors, grid_in_object_coordinates)
return rotation return rotation

View File

@ -147,20 +147,27 @@ def unit_vector(data: th.Tensor, dim: Optional[int] = None, out: Optional[th.Ten
def quat_apply(quat: th.Tensor, vec: th.Tensor) -> th.Tensor: def quat_apply(quat: th.Tensor, vec: th.Tensor) -> th.Tensor:
""" """
Apply a quaternion rotation to a vector (equivalent to R.from_quat(x).apply(y)) Apply a quaternion rotation to a vector (equivalent to R.from_quat(x).apply(y))
Args: Args:
quat (th.Tensor): (..., 4) quaternion in (x, y, z, w) format quat (th.Tensor): (4,) or (N, 4) or (N, 1, 4) quaternion in (x, y, z, w) format
vec (th.Tensor): (..., 3) vector to rotate vec (th.Tensor): (3,) or (M, 3) or (1, M, 3) vector to rotate
Returns: Returns:
th.Tensor: (..., 3) rotated vector th.Tensor: (M, 3) or (N, M, 3) rotated vector
Raises:
AssertionError: If input shapes are invalid
""" """
assert quat.shape[-1] == 4, "Quaternion must have 4 components in last dimension" assert quat.shape[-1] == 4, "Quaternion must have 4 components in last dimension"
assert vec.shape[-1] == 3, "Vector must have 3 components in last dimension" assert vec.shape[-1] == 3, "Vector must have 3 components in last dimension"
# Ensure quat is at least 2D and vec is at least 2D
if quat.dim() == 1:
quat = quat.unsqueeze(0)
if vec.dim() == 1:
vec = vec.unsqueeze(0)
# Ensure quat is (N, 1, 4) and vec is (1, M, 3)
if quat.dim() == 2:
quat = quat.unsqueeze(1)
if vec.dim() == 2:
vec = vec.unsqueeze(0)
# Extract quaternion components # Extract quaternion components
qx, qy, qz, qw = quat.unbind(-1) qx, qy, qz, qw = quat.unbind(-1)
@ -175,7 +182,10 @@ def quat_apply(quat: th.Tensor, vec: th.Tensor) -> th.Tensor:
) )
# Compute the final rotated vector # Compute the final rotated vector
return vec + qw.unsqueeze(-1) * t + th.cross(quat[..., :3], t, dim=-1) result = vec + qw.unsqueeze(-1) * t + th.cross(quat[..., :3], t, dim=-1)
# Remove any extra dimensions
return result.squeeze()
@th.jit.script @th.jit.script
@ -288,30 +298,21 @@ def quat_multiply(quaternion1: th.Tensor, quaternion0: th.Tensor) -> th.Tensor:
@th.jit.script @th.jit.script
def quat_conjugate(quaternion): def quat_conjugate(quaternion: th.Tensor) -> th.Tensor:
""" """
Return conjugate of quaternion. Return conjugate of quaternion.
E.g.:
>>> q0 = random_quaternion()
>>> q1 = quat_conjugate(q0)
>>> q1[3] == q0[3] and all(q1[:3] == -q0[:3])
True
Args: Args:
quaternion (th.tensor): (x,y,z,w) quaternion quaternion (th.Tensor): (x,y,z,w) quaternion
Returns: Returns:
th.tensor: (x,y,z,w) quaternion conjugate th.Tensor: (x,y,z,w) quaternion conjugate
""" """
return th.tensor( return th.cat([-quaternion[:3], quaternion[3:]])
(-quaternion[0], -quaternion[1], -quaternion[2], quaternion[3]),
dtype=th.float32,
)
@th.jit.script @th.jit.script
def quat_inverse(quaternion): def quat_inverse(quaternion: th.Tensor) -> th.Tensor:
""" """
Return inverse of quaternion. Return inverse of quaternion.
@ -397,41 +398,6 @@ def quat_slerp(quat0, quat1, frac, shortestpath=True, eps=1.0e-15):
return val.reshape(list(quat_shape)) return val.reshape(list(quat_shape))
@th.jit.script
def random_quat(rand=None):
"""
Return uniform random unit quaternion.
E.g.:
>>> q = random_quat()
>>> th.allclose(1.0, vector_norm(q))
True
>>> q = random_quat(th.rand(3))
>>> q.shape
(4,)
Args:
rand (3-array or None): If specified, must be three independent random variables that are uniformly distributed
between 0 and 1.
Returns:
th.tensor: (x,y,z,w) random quaternion
"""
if rand is None:
rand = th.rand(3)
else:
assert len(rand) == 3
r1 = math.sqrt(1.0 - rand[0])
r2 = math.sqrt(rand[0])
pi2 = math.pi * 2.0
t1 = pi2 * rand[1]
t2 = pi2 * rand[2]
return th.tensor(
(th.sin(t1) * r1, th.cos(t1) * r1, th.sin(t2) * r2, th.cos(t2) * r2),
dtype=th.float32,
)
@th.jit.script @th.jit.script
def random_axis_angle(angle_limit=None, random_state=None): def random_axis_angle(angle_limit=None, random_state=None):
""" """
@ -548,59 +514,67 @@ def mat2quat(rmat: th.Tensor) -> th.Tensor:
""" """
Converts given rotation matrix to quaternion. Converts given rotation matrix to quaternion.
Args: Args:
rmat (th.Tensor): (..., 3, 3) rotation matrix rmat (th.Tensor): (3, 3) or (..., 3, 3) rotation matrix
Returns: Returns:
th.Tensor: (..., 4) (x,y,z,w) float quaternion angles th.Tensor: (4,) or (..., 4) (x,y,z,w) float quaternion angles
""" """
# Ensure the input is at least 3D # Check if input is a single matrix or a batch
original_shape = rmat.shape is_single = rmat.dim() == 2
if rmat.dim() < 3: if is_single:
rmat = rmat.unsqueeze(0) rmat = rmat.unsqueeze(0)
# Check if the matrix is close to identity batch_shape = rmat.shape[:-2]
identity = th.eye(3, device=rmat.device).expand_as(rmat) mat_flat = rmat.reshape(-1, 3, 3)
if th.allclose(rmat, identity, atol=1e-6):
quat = th.zeros_like(rmat[..., 0]) # Creates a tensor with shape (..., 3)
quat = th.cat([quat, th.ones_like(quat[..., :1])], dim=-1) # Adds the w component
else:
m00, m01, m02 = rmat[..., 0, 0], rmat[..., 0, 1], rmat[..., 0, 2]
m10, m11, m12 = rmat[..., 1, 0], rmat[..., 1, 1], rmat[..., 1, 2]
m20, m21, m22 = rmat[..., 2, 0], rmat[..., 2, 1], rmat[..., 2, 2]
trace = m00 + m11 + m22 m00, m01, m02 = mat_flat[:, 0, 0], mat_flat[:, 0, 1], mat_flat[:, 0, 2]
m10, m11, m12 = mat_flat[:, 1, 0], mat_flat[:, 1, 1], mat_flat[:, 1, 2]
m20, m21, m22 = mat_flat[:, 2, 0], mat_flat[:, 2, 1], mat_flat[:, 2, 2]
if trace > 0: trace = m00 + m11 + m22
s = 2.0 * th.sqrt(trace + 1.0)
w = 0.25 * s
x = (m21 - m12) / s
y = (m02 - m20) / s
z = (m10 - m01) / s
elif m00 > m11 and m00 > m22:
s = 2.0 * th.sqrt(1.0 + m00 - m11 - m22)
w = (m21 - m12) / s
x = 0.25 * s
y = (m01 + m10) / s
z = (m02 + m20) / s
elif m11 > m22:
s = 2.0 * th.sqrt(1.0 + m11 - m00 - m22)
w = (m02 - m20) / s
x = (m01 + m10) / s
y = 0.25 * s
z = (m12 + m21) / s
else:
s = 2.0 * th.sqrt(1.0 + m22 - m00 - m11)
w = (m10 - m01) / s
x = (m02 + m20) / s
y = (m12 + m21) / s
z = 0.25 * s
quat = th.stack([x, y, z, w], dim=-1) trace_positive = trace > 0
cond1 = (m00 > m11) & (m00 > m22) & ~trace_positive
cond2 = (m11 > m22) & ~(trace_positive | cond1)
cond3 = ~(trace_positive | cond1 | cond2)
# Trace positive condition
sq = th.where(trace_positive, th.sqrt(trace + 1.0) * 2.0, th.zeros_like(trace))
qw = th.where(trace_positive, 0.25 * sq, th.zeros_like(trace))
qx = th.where(trace_positive, (m21 - m12) / sq, th.zeros_like(trace))
qy = th.where(trace_positive, (m02 - m20) / sq, th.zeros_like(trace))
qz = th.where(trace_positive, (m10 - m01) / sq, th.zeros_like(trace))
# Condition 1
sq = th.where(cond1, th.sqrt(1.0 + m00 - m11 - m22) * 2.0, sq)
qw = th.where(cond1, (m21 - m12) / sq, qw)
qx = th.where(cond1, 0.25 * sq, qx)
qy = th.where(cond1, (m01 + m10) / sq, qy)
qz = th.where(cond1, (m02 + m20) / sq, qz)
# Condition 2
sq = th.where(cond2, th.sqrt(1.0 + m11 - m00 - m22) * 2.0, sq)
qw = th.where(cond2, (m02 - m20) / sq, qw)
qx = th.where(cond2, (m01 + m10) / sq, qx)
qy = th.where(cond2, 0.25 * sq, qy)
qz = th.where(cond2, (m12 + m21) / sq, qz)
# Condition 3
sq = th.where(cond3, th.sqrt(1.0 + m22 - m00 - m11) * 2.0, sq)
qw = th.where(cond3, (m10 - m01) / sq, qw)
qx = th.where(cond3, (m02 + m20) / sq, qx)
qy = th.where(cond3, (m12 + m21) / sq, qy)
qz = th.where(cond3, 0.25 * sq, qz)
quat = th.stack([qx, qy, qz, qw], dim=-1)
# Normalize the quaternion # Normalize the quaternion
quat = quat / th.norm(quat, dim=-1, keepdim=True) quat = quat / th.norm(quat, dim=-1, keepdim=True)
# Remove extra dimensions if they were added # Reshape to match input batch shape
if len(original_shape) == 2: quat = quat.reshape(batch_shape + (4,))
# If input was a single matrix, remove the batch dimension
if is_single:
quat = quat.squeeze(0) quat = quat.squeeze(0)
return quat return quat
@ -692,34 +666,28 @@ def euler2quat(euler: th.Tensor) -> th.Tensor:
@th.jit.script @th.jit.script
def quat2euler(q): def quat2euler(q):
""" if q.dim() == 1:
Converts euler angles into quaternion form q = q.unsqueeze(0)
Args:
quat (th.tensor): (x,y,z,w) float quaternion angles
Returns:
th.tensor: (r,p,y) angles
Raises:
AssertionError: [Invalid input shape]
"""
qx, qy, qz, qw = 0, 1, 2, 3 qx, qy, qz, qw = 0, 1, 2, 3
# roll (x-axis rotation) # roll (x-axis rotation)
sinr_cosp = 2.0 * (q[:, qw] * q[:, qx] + q[:, qy] * q[:, qz]) sinr_cosp = 2.0 * (q[:, qw] * q[:, qx] + q[:, qy] * q[:, qz])
cosr_cosp = q[:, qw] * q[:, qw] - q[:, qx] * q[:, qx] - q[:, qy] * q[:, qy] + q[:, qz] * q[:, qz] cosr_cosp = q[:, qw] * q[:, qw] - q[:, qx] * q[:, qx] - q[:, qy] * q[:, qy] + q[:, qz] * q[:, qz]
roll = th.atan2(sinr_cosp, cosr_cosp) roll = th.atan2(sinr_cosp, cosr_cosp)
# pitch (y-axis rotation) # pitch (y-axis rotation)
sinp = 2.0 * (q[:, qw] * q[:, qy] - q[:, qz] * q[:, qx]) sinp = 2.0 * (q[:, qw] * q[:, qy] - q[:, qz] * q[:, qx])
pitch = th.where(th.abs(sinp) >= 1, copysign(math.pi / 2.0, sinp), th.asin(sinp)) pitch = th.where(th.abs(sinp) >= 1, copysign(math.pi / 2.0, sinp), th.asin(sinp))
# yaw (z-axis rotation) # yaw (z-axis rotation)
siny_cosp = 2.0 * (q[:, qw] * q[:, qz] + q[:, qx] * q[:, qy]) siny_cosp = 2.0 * (q[:, qw] * q[:, qz] + q[:, qx] * q[:, qy])
cosy_cosp = q[:, qw] * q[:, qw] + q[:, qx] * q[:, qx] - q[:, qy] * q[:, qy] - q[:, qz] * q[:, qz] cosy_cosp = q[:, qw] * q[:, qw] + q[:, qx] * q[:, qx] - q[:, qy] * q[:, qy] - q[:, qz] * q[:, qz]
yaw = th.atan2(siny_cosp, cosy_cosp) yaw = th.atan2(siny_cosp, cosy_cosp)
return roll % (2 * math.pi), pitch % (2 * math.pi), yaw % (2 * math.pi) euler = th.stack([roll, pitch, yaw], dim=-1) % (2 * math.pi)
if q.shape[0] == 1:
euler = euler.squeeze(0)
return euler
@th.jit.script @th.jit.script
@ -764,7 +732,10 @@ def mat2euler(rmat):
quat = mat2quat(M) quat = mat2quat(M)
# Convert quaternion to Euler angles # Convert quaternion to Euler angles
roll, pitch, yaw = quat2euler(quat) euler = quat2euler(quat)
roll = euler[..., 0]
pitch = euler[..., 1]
yaw = euler[..., 2]
return th.stack([roll, pitch, yaw], dim=-1) return th.stack([roll, pitch, yaw], dim=-1)
@ -1231,22 +1202,25 @@ def get_orientation_error(desired, current):
@th.jit.script @th.jit.script
def get_orientation_diff_in_radian(orn0, orn1): def get_orientation_diff_in_radian(orn0: th.Tensor, orn1: th.Tensor) -> th.Tensor:
""" """
Returns the difference between two quaternion orientations in radian Returns the difference between two quaternion orientations in radians.
Args: Args:
orn0 (th.tensor): (x, y, z, w) orn0 (th.Tensor): (x, y, z, w) quaternion
orn1 (th.tensor): (x, y, z, w) orn1 (th.Tensor): (x, y, z, w) quaternion
Returns: Returns:
orn_diff (float): orientation difference in radian orn_diff (th.Tensor): orientation difference in radians
""" """
vec0 = quat2axisangle(orn0) # Compute the difference quaternion
vec0 /= th.norm(vec0) diff_quat = quat_multiply(quat_inverse(orn0), orn1)
vec1 = quat2axisangle(orn1)
vec1 /= th.norm(vec1) # Convert to axis-angle representation
return th.arccos(th.dot(vec0, vec1)) axis_angle = quat2axisangle(diff_quat)
# The magnitude of the axis-angle vector is the rotation angle
return th.norm(axis_angle)
@th.jit.script @th.jit.script
@ -1319,13 +1293,11 @@ def vecs2axisangle(vec0, vec1):
def vecs2quat(vec0: th.Tensor, vec1: th.Tensor, normalized: bool = False) -> th.Tensor: def vecs2quat(vec0: th.Tensor, vec1: th.Tensor, normalized: bool = False) -> th.Tensor:
""" """
Converts the angle from unnormalized 3D vectors @vec0 to @vec1 into a quaternion representation of the angle Converts the angle from unnormalized 3D vectors @vec0 to @vec1 into a quaternion representation of the angle
Args: Args:
vec0 (th.Tensor): (..., 3) (x,y,z) 3D vector, possibly unnormalized vec0 (th.Tensor): (..., 3) (x,y,z) 3D vector, possibly unnormalized
vec1 (th.Tensor): (..., 3) (x,y,z) 3D vector, possibly unnormalized vec1 (th.Tensor): (..., 3) (x,y,z) 3D vector, possibly unnormalized
normalized (bool): If True, @vec0 and @vec1 are assumed to already be normalized and we will skip the normalized (bool): If True, @vec0 and @vec1 are assumed to already be normalized and we will skip the
normalization step (more efficient) normalization step (more efficient)
Returns: Returns:
th.Tensor: (..., 4) Normalized quaternion representing the rotation from vec0 to vec1 th.Tensor: (..., 4) Normalized quaternion representing the rotation from vec0 to vec1
""" """
@ -1336,18 +1308,52 @@ def vecs2quat(vec0: th.Tensor, vec1: th.Tensor, normalized: bool = False) -> th.
# Half-way Quaternion Solution -- see https://stackoverflow.com/a/11741520 # Half-way Quaternion Solution -- see https://stackoverflow.com/a/11741520
cos_theta = th.sum(vec0 * vec1, dim=-1, keepdim=True) cos_theta = th.sum(vec0 * vec1, dim=-1, keepdim=True)
# Create a tensor for the case where cos_theta == -1
batch_shape = vec0.shape[:-1]
fallback = th.zeros(batch_shape + (4,), device=vec0.device, dtype=vec0.dtype)
fallback[..., 0] = 1.0
# Compute the quaternion
quat_unnormalized = th.where( quat_unnormalized = th.where(
cos_theta == -1, cos_theta == -1,
th.tensor([1.0, 0.0, 0.0, 0.0], device=vec0.device, dtype=vec0.dtype).expand_as(vec0), fallback,
th.cat([th.linalg.cross(vec0, vec1), 1 + cos_theta], dim=-1), th.cat([th.linalg.cross(vec0, vec1), 1 + cos_theta], dim=-1),
) )
return quat_unnormalized / th.norm(quat_unnormalized, dim=-1, keepdim=True) return quat_unnormalized / th.norm(quat_unnormalized, dim=-1, keepdim=True)
@th.jit.script @th.jit.script
def l2_distance(v1, v2): def align_vector_sets(vec_set1: th.Tensor, vec_set2: th.Tensor) -> th.Tensor:
"""
Computes a single quaternion representing the rotation that best aligns vec_set1 to vec_set2.
Args:
vec_set1 (th.Tensor): (N, 3) tensor of N 3D vectors
vec_set2 (th.Tensor): (N, 3) tensor of N 3D vectors
Returns:
th.Tensor: (4,) Normalized quaternion representing the overall rotation
"""
# Compute average directions
avg_dir1 = th.sum(vec_set1, dim=0)
avg_dir2 = th.sum(vec_set2, dim=0)
# Normalize average directions
avg_dir1 = avg_dir1 / th.norm(avg_dir1)
avg_dir2 = avg_dir2 / th.norm(avg_dir2)
# Compute quaternion using vecs2quat
rotation = vecs2quat(avg_dir1.unsqueeze(0), avg_dir2.unsqueeze(0), normalized=True)
return rotation.squeeze(0)
@th.jit.script
def l2_distance(v1: th.Tensor, v2: th.Tensor) -> th.Tensor:
"""Returns the L2 distance between vector v1 and v2.""" """Returns the L2 distance between vector v1 and v2."""
return th.norm(th.tensor(v1) - th.tensor(v2)) return th.norm(v1 - v2)
@th.jit.script @th.jit.script
@ -1455,7 +1461,7 @@ def z_rotation_from_quat(quat):
quat = quat.unsqueeze(0) quat = quat.unsqueeze(0)
# Get the yaw angle from the quaternion # Get the yaw angle from the quaternion
_, _, yaw = quat2euler(quat) yaw = quat2euler(quat)[:, 2]
# Create a new quaternion representing rotation around Z axis # Create a new quaternion representing rotation around Z axis
z_quat = th.zeros_like(quat) z_quat = th.zeros_like(quat)
@ -1506,3 +1512,36 @@ def calculate_xy_plane_angle(quaternion: th.Tensor) -> th.Tensor:
angle = th.where(norm < 1e-4, th.zeros_like(norm), th.arctan2(fwd_xy[..., 1], fwd_xy[..., 0])) angle = th.where(norm < 1e-4, th.zeros_like(norm), th.arctan2(fwd_xy[..., 1], fwd_xy[..., 0]))
return angle.squeeze(-1) return angle.squeeze(-1)
@th.jit.script
def random_quaternion(num_quaternions: int = 1):
"""
Generate random rotation quaternions, uniformly distributed over SO(3).
Arguments:
num_quaternions: int, number of quaternions to generate
Returns:
th.Tensor of shape (num_quaternions, 4) containing random unit quaternions
"""
# Generate three random numbers
x0 = th.rand(num_quaternions, 1)
x1 = th.rand(num_quaternions, 1)
x2 = th.rand(num_quaternions, 1)
# Calculate random rotation
theta1 = 2 * th.pi * x0
theta2 = 2 * th.pi * x1
r1 = th.sqrt(1 - x2)
r2 = th.sqrt(x2)
qw = r2 * th.cos(theta2)
qx = r1 * th.sin(theta1)
qy = r1 * th.cos(theta1)
qz = r2 * th.sin(theta2)
# Combine into quaternions
quaternions = th.cat([qw, qx, qy, qz], dim=1)
return quaternions

View File

@ -16,7 +16,7 @@ from IPython import embed
from PIL import Image from PIL import Image
from scipy.integrate import quad from scipy.integrate import quad
from scipy.interpolate import CubicSpline from scipy.interpolate import CubicSpline
from scipy.spatial.transform import Rotation as R
from termcolor import colored from termcolor import colored
import omnigibson as og import omnigibson as og

View File

@ -47,16 +47,16 @@ def task_tester(task_type):
og.clear() og.clear()
def test_dummy_task(): # def test_dummy_task():
task_tester("DummyTask") # task_tester("DummyTask")
def test_point_reaching_task(): # def test_point_reaching_task():
task_tester("PointReachingTask") # task_tester("PointReachingTask")
def test_point_navigation_task(): # def test_point_navigation_task():
task_tester("PointNavigationTask") # task_tester("PointNavigationTask")
def test_behavior_task(): def test_behavior_task():

View File

@ -40,7 +40,7 @@ def setup_multi_environment(num_of_envs, additional_objects_cfg=[]):
def test_multi_scene_dump_and_load(): def test_multi_scene_dump_and_load():
vec_env = setup_multi_environment(3) vec_env = setup_multi_environment(3)
robot_displacement = [1.0, 0.0, 0.0] robot_displacement = th.tensor([1.0, 0.0, 0.0], dtype=th.float32)
scene_three_robot = vec_env.envs[2].scene.robots[0] scene_three_robot = vec_env.envs[2].scene.robots[0]
robot_new_pos = scene_three_robot.get_position() + robot_displacement robot_new_pos = scene_three_robot.get_position() + robot_displacement
scene_three_robot.set_position(robot_new_pos) scene_three_robot.set_position(robot_new_pos)
@ -72,7 +72,7 @@ def test_multi_scene_scene_prim():
vec_env = setup_multi_environment(1) vec_env = setup_multi_environment(1)
original_robot_pos = vec_env.envs[0].scene.robots[0].get_position() original_robot_pos = vec_env.envs[0].scene.robots[0].get_position()
scene_state = vec_env.envs[0].scene._dump_state() scene_state = vec_env.envs[0].scene._dump_state()
scene_prim_displacement = [10.0, 0.0, 0.0] scene_prim_displacement = th.tensor([10.0, 0.0, 0.0], dtype=th.float32)
original_scene_prim_pos = vec_env.envs[0].scene._scene_prim.get_position() original_scene_prim_pos = vec_env.envs[0].scene._scene_prim.get_position()
vec_env.envs[0].scene._scene_prim.set_position(original_scene_prim_pos + scene_prim_displacement) vec_env.envs[0].scene._scene_prim.set_position(original_scene_prim_pos + scene_prim_displacement)
vec_env.envs[0].scene._load_state(scene_state) vec_env.envs[0].scene._load_state(scene_state)

View File

@ -2,7 +2,7 @@ import math
import pytest import pytest
import torch as th import torch as th
from scipy.spatial.transform import Rotation as R
from utils import ( from utils import (
get_random_pose, get_random_pose,
og_test, og_test,