Pre-commit hook for numpy import

parent 7aa81c3e86
commit 9a402b2570

@@ -8,4 +8,20 @@ repos:
     rev: 5.13.2
     hooks:
       - id: isort
         name: isort (python)
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.4.0
+    hooks:
+      - id: no-commit-to-branch
+        name: Check for numpy imports
+        entry: grep -R "import numpy" .
+        language: system
+        types: [python]
+        exclude: |
+          (?x)^(
+            \.pre-commit-config\.yaml|
+            omnigibson/utils/deprecated_utils\.py| # Keep Numpy import for deprecated Omniverse utils
+            omnigibson/utils/numpy_utils\.py| # Utilities specifically for numpy operations and dtype
+            tests/test_transform_utils\.py # This test file uses Scipy and Numpy
+          )$
+        stages: [commit]
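
Note: the new hook wires a plain grep into pre-commit as a system hook, so a commit fails whenever a staged Python file outside the excluded paths contains "import numpy". A rough standalone equivalent of that check is sketched below for illustration only; the script, its name, and its directory traversal are not part of the commit — only the pattern and the exclude list are taken from the config above.

    # check_numpy_imports.py -- hypothetical standalone version of the grep hook above.
    import pathlib
    import re
    import sys

    # Paths where a numpy import is still allowed, mirroring the exclude block in the config.
    EXCLUDED = {
        "omnigibson/utils/deprecated_utils.py",
        "omnigibson/utils/numpy_utils.py",
        "tests/test_transform_utils.py",
    }

    def find_numpy_imports(root="."):
        offenders = []
        for path in pathlib.Path(root).rglob("*.py"):
            rel = path.as_posix()
            if any(rel.endswith(allowed) for allowed in EXCLUDED):
                continue
            if re.search(r"^\s*import numpy", path.read_text(errors="ignore"), flags=re.M):
                offenders.append(rel)
        return offenders

    if __name__ == "__main__":
        bad = find_numpy_imports()
        if bad:
            print("numpy imported in:")
            print("\n".join(f"  {p}" for p in bad))
            sys.exit(1)
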
@@ -2,11 +2,12 @@ import os
 
 import cv2
 import matplotlib.pyplot as plt
-import numpy as np
+import torch as th
 from PIL import Image
 
 import omnigibson as og
 from omnigibson.utils.asset_utils import get_available_og_scenes, get_og_scene_path
+from omnigibson.utils.numpy_utils import to_numpy
 from omnigibson.utils.ui_utils import choose_from_options
 
 
@@ -27,7 +28,7 @@ def main(random_selection=False, headless=False, short_exec=False):
 
     trav_map = Image.open(os.path.join(get_og_scene_path(scene_model), "layout", "floor_trav_0.png"))
     trav_map = trav_map.resize((trav_map_size, trav_map_size))
-    trav_map = cv2.erode(np.array(trav_map), np.ones((trav_map_erosion, trav_map_erosion)))
+    trav_map = cv2.erode(to_numpy(trav_map), th.ones((trav_map_erosion, trav_map_erosion)).cpu().numpy())
 
     if not headless:
         plt.figure(figsize=(12, 12))
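
Note: cv2.erode only operates on numpy arrays, so with the numpy import gone the conversion now happens right at the OpenCV boundary: to_numpy(trav_map) produces the numpy image, and the erosion kernel is built with th.ones and immediately handed over via .cpu().numpy(). A minimal sketch of that round trip (the map here is a random stand-in, not the scene's traversability map):

    # Illustrative torch <-> numpy boundary around an OpenCV call.
    import cv2
    import torch as th

    from omnigibson.utils.numpy_utils import to_numpy

    trav_map = th.randint(0, 256, (200, 200), dtype=th.uint8)  # stand-in traversability image
    kernel = th.ones((3, 3)).cpu().numpy()                     # erosion kernel handed to OpenCV as numpy
    eroded = cv2.erode(to_numpy(trav_map), kernel)             # cv2 works purely on numpy arrays
    trav_map = th.tensor(eroded)                               # back to torch for the rest of the pipeline
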
@@ -1,6 +1,5 @@
 import os
 
-import numpy as np
 import torch as th
 from PIL import Image
 
@@ -10,6 +9,7 @@ Image.MAX_IMAGE_PIXELS = None
 import omnigibson as og
 from omnigibson.macros import gm
 from omnigibson.maps.map_base import BaseMap
+from omnigibson.utils.numpy_utils import to_numpy
 from omnigibson.utils.python_utils import torch_delete
 from omnigibson.utils.ui_utils import create_module_logger
 
@@ -64,8 +64,8 @@ class SegmentationMap(BaseMap):
         assert height == width, "room seg map is not a square"
         assert img_ins.size == img_sem.size, "semantic and instance seg maps have different sizes"
         map_size = int(height * self.map_default_resolution / self.map_resolution)
-        img_ins = th.tensor(np.array(img_ins.resize((map_size, map_size), Image.NEAREST)))
-        img_sem = th.tensor(np.array(img_sem.resize((map_size, map_size), Image.NEAREST)))
+        img_ins = th.tensor(to_numpy(img_ins.resize((map_size, map_size), Image.NEAREST)))
+        img_sem = th.tensor(to_numpy(img_sem.resize((map_size, map_size), Image.NEAREST)))
 
         room_categories = os.path.join(gm.DATASET_PATH, "metadata", "room_categories.txt")
         with open(room_categories, "r") as fp:
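
Note: th.tensor does not accept a PIL Image directly, so the resized segmentation image is first materialized as a numpy array through to_numpy (a thin np.array wrapper) and then wrapped into a tensor. A short sketch of that bridge, with a synthetic image standing in for the room-segmentation map:

    # Illustrative PIL -> numpy -> torch bridge; the image is synthetic, not the dataset map.
    import torch as th
    from PIL import Image

    from omnigibson.utils.numpy_utils import to_numpy

    img = Image.new("L", (100, 100))             # stand-in for the instance/semantic segmentation map
    img = img.resize((200, 200), Image.NEAREST)  # nearest-neighbor keeps label ids intact
    img_t = th.tensor(to_numpy(img))             # PIL Image -> numpy array -> torch tensor
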
@@ -2,7 +2,6 @@ import math
 import os
 
 import cv2
-import numpy as np
 import torch as th
 from PIL import Image
 
@@ -11,6 +10,7 @@ Image.MAX_IMAGE_PIXELS = None
 
 from omnigibson.maps.map_base import BaseMap
 from omnigibson.utils.motion_planning_utils import astar
+from omnigibson.utils.numpy_utils import to_numpy
 from omnigibson.utils.ui_utils import create_module_logger
 
 # Create module logger
@@ -78,10 +78,10 @@ class TraversableMap(BaseMap):
         for floor in range(len(self.floor_heights)):
             if self.trav_map_with_objects:
                 # TODO: Shouldn't this be generated dynamically?
-                trav_map = th.tensor(np.array(Image.open(os.path.join(maps_path, "floor_trav_{}.png".format(floor)))))
+                trav_map = th.tensor(to_numpy(Image.open(os.path.join(maps_path, "floor_trav_{}.png".format(floor)))))
             else:
                 trav_map = th.tensor(
-                    np.array(Image.open(os.path.join(maps_path, "floor_trav_no_obj_{}.png".format(floor))))
+                    to_numpy(Image.open(os.path.join(maps_path, "floor_trav_no_obj_{}.png".format(floor))))
                 )
 
             # If we do not initialize the original size of the traversability map, we obtain it from the image
@@ -119,7 +119,7 @@ class TraversableMap(BaseMap):
         else:
             radius = self.default_erosion_radius
         radius_pixel = int(math.ceil(radius / self.map_resolution))
-        trav_map = th.tensor(cv2.erode(trav_map.cpu().numpy(), np.ones((radius_pixel, radius_pixel))))
+        trav_map = th.tensor(cv2.erode(trav_map.cpu().numpy(), th.ones((radius_pixel, radius_pixel)).cpu().numpy()))
         return trav_map
 
     def get_random_point(self, floor=None, reference_point=None, robot=None):
@@ -4,7 +4,6 @@ from copy import deepcopy
 from functools import cached_property
 
 import gymnasium as gym
-import numpy as np
 import torch as th
 
 import omnigibson as og
@@ -12,6 +11,7 @@ from omnigibson.controllers import create_controller
 from omnigibson.controllers.controller_base import ControlType
 from omnigibson.objects.object_base import BaseObject
 from omnigibson.utils.constants import PrimType
+from omnigibson.utils.numpy_utils import NumpyTypes
 from omnigibson.utils.python_utils import CachedFunctions, assert_valid_key, merge_nested_dicts
 from omnigibson.utils.ui_utils import create_module_logger
 from omnigibson.utils.usd_utils import ControllableObjectViewAPI
@@ -324,7 +324,10 @@ class ControllableObject(BaseObject):
             high.append(th.tensor([float("inf")] * controller.command_dim) if limits is None else limits[1])
 
         return gym.spaces.Box(
-            shape=(self.action_dim,), low=th.cat(low).cpu().numpy(), high=th.cat(high).cpu().numpy(), dtype=np.float32
+            shape=(self.action_dim,),
+            low=th.cat(low).cpu().numpy(),
+            high=th.cat(high).cpu().numpy(),
+            dtype=NumpyTypes.FLOAT32,
         )
 
     def apply_action(self, action):
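
Note: gym.spaces.Box expects numpy bounds and a numpy dtype, so the per-controller limits are concatenated in torch and converted only at this boundary; the commit also replaces the literal np.float32 with the shared NumpyTypes.FLOAT32 constant. A reduced sketch with made-up limits (np.float32 is used directly here just to keep the snippet self-contained):

    # Reduced sketch of assembling a Box action space from per-controller command limits.
    import gymnasium as gym
    import numpy as np
    import torch as th

    low = [th.tensor([-1.0, -1.0]), th.tensor([-0.5])]   # lower command limits, one entry per controller
    high = [th.tensor([1.0, 1.0]), th.tensor([0.5])]     # upper command limits, one entry per controller
    action_space = gym.spaces.Box(
        shape=(3,),
        low=th.cat(low).cpu().numpy(),
        high=th.cat(high).cpu().numpy(),
        dtype=np.float32,
    )
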
@@ -2,7 +2,6 @@ from abc import abstractmethod
 from copy import deepcopy
 
 import matplotlib.pyplot as plt
-import numpy as np
 import torch as th
 
 import omnigibson.utils.transform_utils as T
@@ -19,6 +18,7 @@ from omnigibson.sensors import (
 )
 from omnigibson.utils.constants import PrimType
 from omnigibson.utils.gym_utils import GymObservable
+from omnigibson.utils.numpy_utils import NumpyTypes
 from omnigibson.utils.python_utils import classproperty, merge_nested_dicts
 from omnigibson.utils.usd_utils import (
     ControllableObjectViewAPI,
@@ -387,7 +387,7 @@ class BaseRobot(USDObject, ControllableObject, GymObservable):
         # Have to handle proprio separately since it's not an actual sensor
         if "proprio" in self._obs_modalities:
             obs_space["proprio"] = self._build_obs_box_space(
-                shape=(self.proprioception_dim,), low=-float("inf"), high=float("inf"), dtype=np.float64
+                shape=(self.proprioception_dim,), low=-float("inf"), high=float("inf"), dtype=NumpyTypes.FLOAT32
             )
 
         return obs_space
@@ -2,13 +2,13 @@ import math
 from collections.abc import Iterable
 
 import cv2
-import numpy as np
 import torch as th
-from transforms3d.quaternions import quat2mat
 
 import omnigibson.lazy as lazy
+import omnigibson.utils.transform_utils as T
 from omnigibson.sensors.sensor_base import BaseSensor
 from omnigibson.utils.constants import OccupancyGridState
+from omnigibson.utils.numpy_utils import NumpyTypes
 from omnigibson.utils.python_utils import classproperty
 
 
@@ -146,8 +146,13 @@ class ScanSensor(BaseSensor):
         # Set the remaining modalities' values
         # (obs modality, shape, low, high)
         obs_space_mapping = dict(
-            scan=((self.n_horizontal_rays, self.n_vertical_rays), 0.0, 1.0, np.float32),
-            occupancy_grid=((self.occupancy_grid_resolution, self.occupancy_grid_resolution, 1), 0.0, 1.0, np.float32),
+            scan=((self.n_horizontal_rays, self.n_vertical_rays), 0.0, 1.0, NumpyTypes.FLOAT32),
+            occupancy_grid=(
+                (self.occupancy_grid_resolution, self.occupancy_grid_resolution, 1),
+                0.0,
+                1.0,
+                NumpyTypes.FLOAT32,
+            ),
         )
 
         return obs_space_mapping
@@ -185,11 +190,11 @@
 
         # Convert scans from laser frame to world frame
         pos, ori = self.get_position_orientation()
-        scan_world = th.matmul(th.tensor(quat2mat(ori)), scan_laser.T).T + pos
+        scan_world = th.matmul(T.quat2mat(ori), scan_laser.T).T + pos
 
         # Convert scans from world frame to local base frame
         base_pos, base_ori = self.occupancy_grid_local_link.get_position_orientation()
-        scan_local = th.matmul(th.tensor(quat2mat(base_ori)).T, (scan_world - base_pos).T).T
+        scan_local = th.matmul(T.quat2mat(base_ori).T, (scan_world - base_pos).T).T
         scan_local = scan_local[:, :2]
         scan_local = th.cat([th.tensor([[0, 0]]), scan_local, th.tensor([[0, 0]])], dim=0)
 
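
Note: omnigibson.utils.transform_utils.quat2mat consumes a torch (x, y, z, w) quaternion and returns a torch rotation matrix, so the transforms3d call, which worked in numpy and therefore needed the th.tensor(...) wrapper, can be dropped entirely. A minimal sketch of the laser-to-world transform with random stand-in values:

    # Illustrative laser-frame -> world-frame transform; pose and points are random stand-ins.
    import torch as th

    import omnigibson.utils.transform_utils as T

    pos = th.randn(3)               # frame origin in world coordinates
    q = th.randn(4)
    ori = q / th.norm(q)            # unit (x, y, z, w) quaternion
    scan_laser = th.randn(128, 3)   # points expressed in the laser frame

    scan_world = th.matmul(T.quat2mat(ori), scan_laser.T).T + pos
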
@@ -2,7 +2,6 @@ import math
 import time
 
 import gymnasium as gym
-import numpy as np
 import torch as th
 
 import omnigibson as og
@@ -15,6 +14,7 @@ from omnigibson.utils.constants import (
     semantic_class_id_to_name,
     semantic_class_name_to_id,
 )
+from omnigibson.utils.numpy_utils import NumpyTypes
 from omnigibson.utils.python_utils import assert_valid_key, classproperty
 from omnigibson.utils.sim_utils import set_carb_setting
 from omnigibson.utils.ui_utils import dock_window
@@ -737,15 +737,17 @@ class VisionSensor(BaseSensor):
         bbox_3d_space = gym.spaces.Sequence(
             space=gym.spaces.Tuple(
                 (
-                    gym.spaces.Box(low=0, high=MAX_CLASS_COUNT, shape=(), dtype=np.uint32),  # semanticId
-                    gym.spaces.Box(low=-np.inf, high=np.inf, shape=(), dtype=np.float32),  # x_min
-                    gym.spaces.Box(low=-np.inf, high=np.inf, shape=(), dtype=np.float32),  # y_min
-                    gym.spaces.Box(low=-np.inf, high=np.inf, shape=(), dtype=np.float32),  # z_min
-                    gym.spaces.Box(low=-np.inf, high=np.inf, shape=(), dtype=np.float32),  # x_max
-                    gym.spaces.Box(low=-np.inf, high=np.inf, shape=(), dtype=np.float32),  # y_max
-                    gym.spaces.Box(low=-np.inf, high=np.inf, shape=(), dtype=np.float32),  # z_max
-                    gym.spaces.Box(low=-np.inf, high=np.inf, shape=(4, 4), dtype=np.float32),  # transform
-                    gym.spaces.Box(low=-1.0, high=1.0, shape=(), dtype=np.float32),  # occlusion ratio
+                    gym.spaces.Box(low=0, high=MAX_CLASS_COUNT, shape=(), dtype=NumpyTypes.UINT32),  # semanticId
+                    gym.spaces.Box(low=-float("inf"), high=float("inf"), shape=(), dtype=NumpyTypes.FLOAT32),  # x_min
+                    gym.spaces.Box(low=-float("inf"), high=float("inf"), shape=(), dtype=NumpyTypes.FLOAT32),  # y_min
+                    gym.spaces.Box(low=-float("inf"), high=float("inf"), shape=(), dtype=NumpyTypes.FLOAT32),  # z_min
+                    gym.spaces.Box(low=-float("inf"), high=float("inf"), shape=(), dtype=NumpyTypes.FLOAT32),  # x_max
+                    gym.spaces.Box(low=-float("inf"), high=float("inf"), shape=(), dtype=NumpyTypes.FLOAT32),  # y_max
+                    gym.spaces.Box(low=-float("inf"), high=float("inf"), shape=(), dtype=NumpyTypes.FLOAT32),  # z_max
+                    gym.spaces.Box(
+                        low=-float("inf"), high=float("inf"), shape=(4, 4), dtype=NumpyTypes.FLOAT32
+                    ),  # transform
+                    gym.spaces.Box(low=-1.0, high=1.0, shape=(), dtype=NumpyTypes.FLOAT32),  # occlusion ratio
                 )
             )
         )
@@ -753,25 +755,25 @@ class VisionSensor(BaseSensor):
         bbox_2d_space = gym.spaces.Sequence(
             space=gym.spaces.Tuple(
                 (
-                    gym.spaces.Box(low=0, high=MAX_CLASS_COUNT, shape=(), dtype=np.uint32),  # semanticId
-                    gym.spaces.Box(low=0, high=MAX_VIEWER_SIZE, shape=(), dtype=np.int32),  # x_min
-                    gym.spaces.Box(low=0, high=MAX_VIEWER_SIZE, shape=(), dtype=np.int32),  # y_min
-                    gym.spaces.Box(low=0, high=MAX_VIEWER_SIZE, shape=(), dtype=np.int32),  # x_max
-                    gym.spaces.Box(low=0, high=MAX_VIEWER_SIZE, shape=(), dtype=np.int32),  # y_max
-                    gym.spaces.Box(low=-1.0, high=1.0, shape=(), dtype=np.float32),  # occlusion ratio
+                    gym.spaces.Box(low=0, high=MAX_CLASS_COUNT, shape=(), dtype=NumpyTypes.UINT32),  # semanticId
+                    gym.spaces.Box(low=0, high=MAX_VIEWER_SIZE, shape=(), dtype=NumpyTypes.INT32),  # x_min
+                    gym.spaces.Box(low=0, high=MAX_VIEWER_SIZE, shape=(), dtype=NumpyTypes.INT32),  # y_min
+                    gym.spaces.Box(low=0, high=MAX_VIEWER_SIZE, shape=(), dtype=NumpyTypes.INT32),  # x_max
+                    gym.spaces.Box(low=0, high=MAX_VIEWER_SIZE, shape=(), dtype=NumpyTypes.INT32),  # y_max
+                    gym.spaces.Box(low=-1.0, high=1.0, shape=(), dtype=NumpyTypes.FLOAT32),  # occlusion ratio
                 )
             )
         )
 
         obs_space_mapping = dict(
-            rgb=((self.image_height, self.image_width, 4), 0, 255, np.uint8),
-            depth=((self.image_height, self.image_width), 0.0, np.inf, np.float32),
-            depth_linear=((self.image_height, self.image_width), 0.0, np.inf, np.float32),
-            normal=((self.image_height, self.image_width, 4), -1.0, 1.0, np.float32),
-            seg_semantic=((self.image_height, self.image_width), 0, MAX_CLASS_COUNT, np.uint32),
-            seg_instance=((self.image_height, self.image_width), 0, MAX_INSTANCE_COUNT, np.uint32),
-            seg_instance_id=((self.image_height, self.image_width), 0, MAX_INSTANCE_COUNT, np.uint32),
-            flow=((self.image_height, self.image_width, 4), -np.inf, np.inf, np.float32),
+            rgb=((self.image_height, self.image_width, 4), 0, 255, NumpyTypes.UINT8),
+            depth=((self.image_height, self.image_width), 0.0, float("inf"), NumpyTypes.FLOAT32),
+            depth_linear=((self.image_height, self.image_width), 0.0, float("inf"), NumpyTypes.FLOAT32),
+            normal=((self.image_height, self.image_width, 4), -1.0, 1.0, NumpyTypes.FLOAT32),
+            seg_semantic=((self.image_height, self.image_width), 0, MAX_CLASS_COUNT, NumpyTypes.UINT32),
+            seg_instance=((self.image_height, self.image_width), 0, MAX_INSTANCE_COUNT, NumpyTypes.UINT32),
+            seg_instance_id=((self.image_height, self.image_width), 0, MAX_INSTANCE_COUNT, NumpyTypes.UINT32),
+            flow=((self.image_height, self.image_width, 4), -float("inf"), float("inf"), NumpyTypes.FLOAT32),
             bbox_2d_tight=bbox_2d_space,
             bbox_2d_loose=bbox_2d_space,
             bbox_3d=bbox_3d_space,
@@ -4,7 +4,6 @@ import tempfile
 import uuid
 from pathlib import Path
 
-import numpy as np
 import torch as th
 import trimesh
 
@@ -16,6 +15,7 @@ from omnigibson.prims.geom_prim import VisualGeomPrim
 from omnigibson.prims.material_prim import MaterialPrim
 from omnigibson.prims.prim_base import BasePrim
 from omnigibson.systems.system_base import BaseSystem, PhysicalParticleSystem
+from omnigibson.utils.numpy_utils import NumpyTypes, to_numpy
 from omnigibson.utils.physx_utils import create_physx_particle_system, create_physx_particleset_pointinstancer
 from omnigibson.utils.python_utils import assert_valid_key, torch_delete
 from omnigibson.utils.ui_utils import create_module_logger
@@ -239,7 +239,7 @@ class PhysxParticleInstancer(BasePrim):
             th.tensor: (N, 4) numpy array, where each of the N particles' orientations are expressed in (x,y,z,w)
                 quaternion coordinates relative to this instancer's parent prim
         """
-        return th.from_numpy(np.array(self.get_attribute(attr="orientations"), dtype=np.float32))
+        return th.from_numpy(to_numpy(self.get_attribute(attr="orientations"), dtype=NumpyTypes.FLOAT32))
 
     @particle_orientations.setter
     def particle_orientations(self, quat):
@@ -1,10 +1,10 @@
 from abc import ABCMeta, abstractmethod
 from copy import deepcopy
 
-import numpy as np
 import torch as th
 
 from omnigibson.utils.gym_utils import GymObservable
+from omnigibson.utils.numpy_utils import NumpyTypes
 from omnigibson.utils.python_utils import Registerable, classproperty
 
 REGISTERED_TASKS = dict()
@@ -99,7 +99,7 @@ class BaseTask(GymObservable, Registerable, metaclass=ABCMeta):
         # Create the low dim obs space and add to the main obs space dict -- make sure we're flattening low dim obs
         if self._low_dim_obs_dim > 0:
             obs_space["low_dim"] = self._build_obs_box_space(
-                shape=(self._low_dim_obs_dim,), low=-np.inf, high=np.inf, dtype=np.float64
+                shape=(self._low_dim_obs_dim,), low=-float("inf"), high=float("inf"), dtype=NumpyTypes.FLOAT32
             )
 
         return obs_space
@@ -0,0 +1,14 @@
+from enum import Enum
+
+import numpy as np
+
+
+class NumpyTypes(Enum):
+    FLOAT32 = np.float32
+    INT32 = np.int32
+    UINT8 = np.uint8
+    UINT32 = np.uint32
+
+
+def to_numpy(arr, dtype=None):
+    return np.array(arr, dtype=dtype)
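
Note: this new module is the single remaining numpy touch point: to_numpy is a thin np.array wrapper used at library boundaries (PIL images, USD attribute values, torch tensors), and NumpyTypes collects the dtype constants used in gym observation spaces. One caveat worth flagging: because NumpyTypes is declared as an Enum here, NumpyTypes.FLOAT32 is an enum member rather than np.float32 itself, and anything that ends up calling np.dtype(...) on it needs the underlying value; the sketch below therefore unwraps the member defensively. Names and values are illustrative only.

    # Usage sketch for the new helpers; defensive about whether NumpyTypes is an Enum or a plain class.
    from enum import Enum

    import numpy as np
    import torch as th

    from omnigibson.utils.numpy_utils import NumpyTypes, to_numpy

    t = th.arange(6, dtype=th.float32).reshape(2, 3)
    a = to_numpy(t)                            # torch.Tensor -> np.ndarray (thin np.array wrapper)
    b = to_numpy([1, 2, 3], dtype=np.float32)  # any array-like, with an optional dtype

    f32 = NumpyTypes.FLOAT32.value if isinstance(NumpyTypes.FLOAT32, Enum) else NumpyTypes.FLOAT32
    c = np.zeros(4, dtype=f32)                 # a real numpy dtype either way
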
@@ -8,6 +8,7 @@ from scipy.spatial.transform import Rotation as R
 from scipy.spatial.transform import Slerp
 from torch.testing import assert_close
 
+from omnigibson.utils.numpy_utils import NumpyTypes
 from omnigibson.utils.transform_utils import *
 
 # Set the seed for PyTorch
@@ -45,7 +46,7 @@ class TestQuaternionOperations:
         q_np = q.cpu().numpy()
         scipy_mat = R.from_quat(q_np).as_matrix()
         our_mat = quat2mat(q)
-        assert_close(our_mat, th.from_numpy(scipy_mat.astype(np.float32)))
+        assert_close(our_mat, th.from_numpy(scipy_mat.astype(NumpyTypes.FLOAT32)))
 
     def test_quat_mul(self):
         q1, q2 = random_quaternion().squeeze(), random_quaternion().squeeze()
@@ -54,18 +55,18 @@ class TestQuaternionOperations:
         scipy_result = R.from_quat(q1_scipy) * R.from_quat(q2_scipy)
         scipy_quat = scipy_result.as_quat()
         our_quat = quat_mul(q1, q2)
-        assert quaternions_close(our_quat, th.from_numpy(scipy_quat.astype(np.float32)))
+        assert quaternions_close(our_quat, th.from_numpy(scipy_quat.astype(NumpyTypes.FLOAT32)))
 
     def test_quat_conjugate(self):
         q = random_quaternion().squeeze()
         q_scipy = q.cpu().numpy()
         scipy_conj = R.from_quat(q_scipy).inv().as_quat()
         our_conj = quat_conjugate(q)
-        assert quaternions_close(our_conj, th.from_numpy(scipy_conj.astype(np.float32)))
+        assert quaternions_close(our_conj, th.from_numpy(scipy_conj.astype(NumpyTypes.FLOAT32)))
 
     def test_quat_inverse(self):
         q = random_quaternion().squeeze()
-        scipy_inv = R.from_quat(q.cpu().numpy()).inv().as_quat().astype(np.float32)
+        scipy_inv = R.from_quat(q.cpu().numpy()).inv().as_quat().astype(NumpyTypes.FLOAT32)
         our_inv = quat_inverse(q)
         assert quaternions_close(our_inv, th.from_numpy(scipy_inv))
         q_identity = quat_mul(q, our_inv)
@@ -76,7 +77,7 @@ class TestQuaternionOperations:
         # r1 = R.from_quat(q1.cpu().numpy())
         # r2 = R.from_quat(q2.cpu().numpy())
         # r_diff = r1.inv() * r2
-        # scipy_dist = r_diff.as_quat().astype(np.float32)
+        # scipy_dist = r_diff.as_quat().astype(NumpyTypes.FLOAT32)
         # our_dist = quat_distance(q1, q2)
         # assert quaternions_close(our_dist, th.from_numpy(scipy_dist))
         # assert our_dist.shape == (4,)
@@ -103,16 +104,16 @@ class TestMatrixOperations:
     def test_rotation_matrix_properties(self):
         rand_quat = random_quaternion().squeeze()
         R_mat = quat2mat(rand_quat)
-        scipy_R = R.from_quat(rand_quat.cpu().numpy()).as_matrix().astype(np.float32)
+        scipy_R = R.from_quat(rand_quat.cpu().numpy()).as_matrix().astype(NumpyTypes.FLOAT32)
         assert_close(R_mat, th.from_numpy(scipy_R))
         assert_close(th.matmul(R_mat, R_mat.t()), th.eye(3))
         assert_close(th.det(R_mat), th.tensor(1.0))
 
-    @pytest.mark.parametrize("angle", [0, np.pi / 4, np.pi / 2, np.pi])
+    @pytest.mark.parametrize("angle", [0, math.pi / 4, math.pi / 2, math.pi])
     def test_rotation_matrix(self, angle):
         direction = normalize(random_vector())
         R_mat = rotation_matrix(angle, direction)
-        scipy_R = R.from_rotvec(angle * direction.cpu().numpy()).as_matrix().astype(np.float32)
+        scipy_R = R.from_rotvec(angle * direction.cpu().numpy()).as_matrix().astype(NumpyTypes.FLOAT32)
         assert_close(R_mat, th.from_numpy(scipy_R))
 
         identity = th.eye(3, dtype=R_mat.dtype, device=R_mat.device)
@@ -133,15 +134,15 @@ class TestMatrixOperations:
         cos_angle = th.dot(perpendicular, rotated_perpendicular)
         assert_close(cos_angle, th.cos(th.tensor(angle)))
 
-    @pytest.mark.parametrize("angle", [0, np.pi / 4, np.pi / 2, np.pi])
+    @pytest.mark.parametrize("angle", [0, math.pi / 4, math.pi / 2, math.pi])
     def test_transformation_matrix(self, angle):
         direction = normalize(random_vector())
         point = th.randn(3, dtype=th.float32)
         T = transformation_matrix(angle, direction, point)
 
         direction_np = direction.cpu().numpy()
-        scipy_R = R.from_rotvec(angle * direction_np).as_matrix().astype(np.float32)
-        scipy_T = np.eye(4, dtype=np.float32)
+        scipy_R = R.from_rotvec(angle * direction_np).as_matrix().astype(NumpyTypes.FLOAT32)
+        scipy_T = np.eye(4, dtype=NumpyTypes.FLOAT32)
         scipy_T[:3, :3] = scipy_R
         scipy_T[:3, 3] = point.cpu().numpy() - np.dot(scipy_R, point.cpu().numpy())
         assert_close(T, th.from_numpy(scipy_T))
@@ -155,11 +156,11 @@ class TestMatrixOperations:
 
     def test_transformation_matrix_no_point(self):
         direction = normalize(random_vector())
-        angle = np.pi / 4
+        angle = math.pi / 4
         T = transformation_matrix(angle, direction)
 
-        scipy_R = R.from_rotvec(angle * direction.cpu().numpy()).as_matrix().astype(np.float32)
-        scipy_T = np.eye(4, dtype=np.float32)
+        scipy_R = R.from_rotvec(angle * direction.cpu().numpy()).as_matrix().astype(NumpyTypes.FLOAT32)
+        scipy_T = np.eye(4, dtype=NumpyTypes.FLOAT32)
         scipy_T[:3, :3] = scipy_R
         assert_close(T, th.from_numpy(scipy_T))
 
@@ -170,7 +171,7 @@ class TestMatrixOperations:
     def test_matrix_inverse(self):
         M = random_matrix()
         M_inv = matrix_inverse(M)
-        scipy_M_inv = np.linalg.inv(M.cpu().numpy()).astype(np.float32)
+        scipy_M_inv = np.linalg.inv(M.cpu().numpy()).astype(NumpyTypes.FLOAT32)
         assert_close(M_inv, th.from_numpy(scipy_M_inv), atol=1e-3, rtol=1e-3)
         assert_close(th.matmul(M, M_inv), th.eye(3))
 
@@ -190,7 +191,7 @@ class TestPoseTransformations:
         T = pose2mat((pos, orn))
 
         scipy_R = R.from_quat(orn.cpu().numpy())
-        scipy_T = np.eye(4, dtype=np.float32)
+        scipy_T = np.eye(4, dtype=NumpyTypes.FLOAT32)
         scipy_T[:3, :3] = scipy_R.as_matrix()
         scipy_T[:3, 3] = pos.cpu().numpy()
 
@@ -206,7 +207,7 @@ class TestPoseTransformations:
         T_inv = pose_inv(T)
 
         scipy_R = R.from_quat(orn.cpu().numpy())
-        scipy_T = np.eye(4, dtype=np.float32)
+        scipy_T = np.eye(4, dtype=NumpyTypes.FLOAT32)
         scipy_T[:3, :3] = scipy_R.as_matrix()
         scipy_T[:3, 3] = pos.cpu().numpy()
         scipy_T_inv = np.linalg.inv(scipy_T)
@@ -224,24 +225,24 @@ class TestPoseTransformations:
         scipy_rel_R = scipy_R0.inv() * scipy_R1
         scipy_rel_pos = scipy_R0.inv().apply(pos1.cpu().numpy() - pos0.cpu().numpy())
 
-        assert_close(rel_pos, th.from_numpy(scipy_rel_pos.astype(np.float32)))
-        assert quaternions_close(rel_orn, th.from_numpy(scipy_rel_R.as_quat().astype(np.float32)))
+        assert_close(rel_pos, th.from_numpy(scipy_rel_pos.astype(NumpyTypes.FLOAT32)))
+        assert quaternions_close(rel_orn, th.from_numpy(scipy_rel_R.as_quat().astype(NumpyTypes.FLOAT32)))
 
 
 class TestAxisAngleConversions:
-    @pytest.mark.parametrize("angle", [0.0, np.pi / 4, np.pi / 2, np.pi])
+    @pytest.mark.parametrize("angle", [0.0, math.pi / 4, math.pi / 2, math.pi])
     def test_axisangle2quat_and_quat2axisangle(self, angle):
         axis = normalize(random_vector())
         axisangle = axis * angle
         quat = axisangle2quat(axisangle)
 
         scipy_R = R.from_rotvec(axisangle.cpu().numpy())
-        scipy_quat = scipy_R.as_quat().astype(np.float32)
+        scipy_quat = scipy_R.as_quat().astype(NumpyTypes.FLOAT32)
 
         assert quaternions_close(quat, th.from_numpy(scipy_quat))
 
         recovered_axisangle = quat2axisangle(quat)
-        scipy_recovered_axisangle = scipy_R.as_rotvec().astype(np.float32)
+        scipy_recovered_axisangle = scipy_R.as_rotvec().astype(NumpyTypes.FLOAT32)
 
         assert th.allclose(recovered_axisangle, th.from_numpy(scipy_recovered_axisangle)) or th.allclose(
             recovered_axisangle, -th.from_numpy(scipy_recovered_axisangle)
@@ -253,7 +254,7 @@ class TestAxisAngleConversions:
         axisangle = vecs2axisangle(vec1, vec2)
 
         scipy_R = R.align_vectors(vec2.unsqueeze(0).cpu().numpy(), vec1.unsqueeze(0).cpu().numpy())[0]
-        scipy_axisangle = scipy_R.as_rotvec().astype(np.float32)
+        scipy_axisangle = scipy_R.as_rotvec().astype(NumpyTypes.FLOAT32)
 
         assert_close(axisangle, th.from_numpy(scipy_axisangle))
 
@@ -263,7 +264,7 @@ class TestAxisAngleConversions:
         quat = vecs2quat(vec1, vec2)
 
         scipy_R = R.align_vectors(vec2.unsqueeze(0).cpu().numpy(), vec1.unsqueeze(0).cpu().numpy())[0]
-        scipy_quat = scipy_R.as_quat().astype(np.float32)
+        scipy_quat = scipy_R.as_quat().astype(NumpyTypes.FLOAT32)
 
         assert quaternions_close(quat, th.from_numpy(scipy_quat))
 
@@ -273,34 +274,34 @@ class TestEulerAngleConversions:
         "euler",
         [
             th.tensor([0.0, 0.0, 0.0]),
-            th.tensor([np.pi / 4, np.pi / 3, np.pi / 2]),
+            th.tensor([math.pi / 4, math.pi / 3, math.pi / 2]),
         ],
     )
     def test_euler2quat_and_quat2euler(self, euler):
         quat = euler2quat(euler)
         scipy_R = R.from_euler("xyz", euler.cpu().numpy())
-        scipy_quat = scipy_R.as_quat().astype(np.float32)
+        scipy_quat = scipy_R.as_quat().astype(NumpyTypes.FLOAT32)
         assert quaternions_close(quat, th.from_numpy(scipy_quat))
 
         recovered_euler = quat2euler(quat)
-        scipy_recovered_euler = scipy_R.as_euler("xyz").astype(np.float32)
+        scipy_recovered_euler = scipy_R.as_euler("xyz").astype(NumpyTypes.FLOAT32)
         assert_close(recovered_euler, th.from_numpy(scipy_recovered_euler))
 
     @pytest.mark.parametrize(
         "euler",
        [
            th.tensor([0.0, 0.0, 0.0]),
-            th.tensor([np.pi / 4, np.pi / 3, np.pi / 2]),
+            th.tensor([math.pi / 4, math.pi / 3, math.pi / 2]),
        ],
    )
     def test_euler2mat_and_mat2euler(self, euler):
         mat = euler2mat(euler)
         scipy_R = R.from_euler("xyz", euler.cpu().numpy())
-        scipy_mat = scipy_R.as_matrix().astype(np.float32)
+        scipy_mat = scipy_R.as_matrix().astype(NumpyTypes.FLOAT32)
         assert_close(mat, th.from_numpy(scipy_mat))
 
         recovered_euler = mat2euler(mat)
-        scipy_recovered_euler = scipy_R.as_euler("xyz").astype(np.float32)
+        scipy_recovered_euler = scipy_R.as_euler("xyz").astype(NumpyTypes.FLOAT32)
         assert_close(recovered_euler, th.from_numpy(scipy_recovered_euler))
 
 
@@ -311,7 +312,7 @@ class TestQuaternionApplications:
         rotated_vec = quat_apply(quat, vec)
 
         scipy_R = R.from_quat(quat.cpu().numpy())
-        scipy_rotated_vec = scipy_R.apply(vec.cpu().numpy()).astype(np.float32)
+        scipy_rotated_vec = scipy_R.apply(vec.cpu().numpy()).astype(NumpyTypes.FLOAT32)
 
         assert rotated_vec.shape == (3,)
         assert_close(rotated_vec, th.from_numpy(scipy_rotated_vec))
@@ -325,7 +326,7 @@ class TestQuaternionApplications:
         key_rots = R.from_quat(np.stack([q1.cpu().numpy(), q2.cpu().numpy()]))
         key_times = [0, 1]
         slerp = Slerp(key_times, key_rots)
-        scipy_q_slerp = slerp([t]).as_quat()[0].astype(np.float32)
+        scipy_q_slerp = slerp([t]).as_quat()[0].astype(NumpyTypes.FLOAT32)
 
         assert quaternions_close(q_slerp, th.from_numpy(scipy_q_slerp))
         assert_close(th.norm(q_slerp), th.tensor(1.0))