Refactor panorama and lidar scan (#422)

* clean up panorama code

* minor fix

* fix lidar scanner bug

* add option to use robot camera
Fei Xia 2021-08-05 10:27:19 -07:00 committed by GitHub
parent f32bf3b27a
commit 8d503e2d12
4 changed files with 159 additions and 25 deletions

View File

@@ -1,6 +1,11 @@
import os
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import igibson
from igibson.objects.ycb_object import YCBObject
from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
from igibson.render.profiler import Profiler
from igibson.robots.turtlebot_robot import Turtlebot
@@ -12,20 +17,24 @@ from igibson.utils.utils import parse_config
def main():
    config = parse_config(os.path.join(igibson.example_config_path, "turtlebot_demo.yaml"))
    settings = MeshRendererSettings()
    s = Simulator(mode="gui", image_width=256, image_height=256, rendering_settings=settings)
    s = Simulator(mode="headless", image_width=256, image_height=256, rendering_settings=settings)
    scene = StaticIndoorScene("Rs", build_graph=True, pybullet_load_texture=True)
    s.import_scene(scene)
    turtlebot = Turtlebot(config)
    s.import_robot(turtlebot)
    for i in range(10000):
        with Profiler("Simulator step"):
            turtlebot.apply_action([0.1, -0.1])
            s.step()
            lidar = s.renderer.get_lidar_all()
            print(lidar.shape)
            # TODO: visualize lidar scan
    turtlebot.apply_action([0.1, -0.1])
    s.step()
    lidar = s.renderer.get_lidar_all()
    print(lidar.shape)
    fig = plt.figure()
    ax = Axes3D(fig)
    ax.scatter(lidar[:, 0], lidar[:, 1], lidar[:, 2], s=3)
    ax.set_xlabel("X")
    ax.set_ylabel("Y")
    ax.set_zlabel("Z")
    plt.show()
    s.disconnect()
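
Note: get_lidar_all() returns an (N, 3) point cloud that, after the frame remap introduced later in this commit, uses x right, y forward, z up. A minimal sketch of a complementary top-down view, assuming `lidar` is the array from the example above (illustrative only, not part of the commit):

import matplotlib.pyplot as plt

# Bird's-eye view of the scan: with z up, the floor plan is just the x/y columns.
plt.scatter(lidar[:, 0], lidar[:, 1], s=1)
plt.gca().set_aspect("equal")
plt.xlabel("X (right)")
plt.ylabel("Y (forward)")
plt.show()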

View File

@@ -0,0 +1,45 @@
import os
import sys
import matplotlib.pyplot as plt
import numpy as np
from igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRenderer
from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
from igibson.utils.assets_utils import get_scene_path
def main():
    global _mouse_ix, _mouse_iy, down, view_direction
    if len(sys.argv) > 1:
        model_path = sys.argv[1]
    else:
        model_path = os.path.join(get_scene_path("Rs"), "mesh_z_up.obj")
    settings = MeshRendererSettings(enable_pbr=False)
    renderer = MeshRenderer(width=512, height=512, rendering_settings=settings)
    renderer.load_object(model_path)
    renderer.add_instance(0)
    print(renderer.visual_objects, renderer.instances)
    print(renderer.materials_mapping, renderer.mesh_materials)
    px = 0
    py = 0.2
    camera_pose = np.array([px, py, 0.5])
    view_direction = np.array([0, -1, -0.3])
    renderer.set_camera(camera_pose, camera_pose + view_direction, [0, 0, 1])
    renderer.set_fov(90)
    img = renderer.get_equi()
    print(img.shape)
    plt.imshow(img)
    plt.show()
    renderer.release()


if __name__ == "__main__":
    main()
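
Note: a hedged follow-up to the new panorama example, saving the image instead of displaying it. Assumes `img` is the float array returned by renderer.get_equi() above; the output file name is illustrative, not part of the commit:

import numpy as np
import matplotlib.pyplot as plt

# get_equi() returns float pixel values; clip to [0, 1] before writing to disk.
plt.imsave("panorama.png", np.clip(img, 0.0, 1.0))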

View File

@@ -5,6 +5,7 @@ import shutil
import sys
import numpy as np
import py360convert
from PIL import Image
import igibson
@@ -1773,17 +1774,19 @@ class MeshRenderer(object):
        lidar_readings = lidar_readings[self.x_samples, self.y_samples, :3]
        dist = np.linalg.norm(lidar_readings, axis=1)
        lidar_readings = lidar_readings[dist > 0]
        lidar_readings[:, 2] = -lidar_readings[:, 2]  # make z pointing out
        return lidar_readings

    def get_lidar_all(self, offset_with_camera=np.array([0, 0, 0])):
        """
        Get complete LiDAR readings by patching together partial ones

        :param offset_with_camera: optionally place the lidar scanner
            with an offset to the camera
        :return: complete 360 degree LiDAR readings
        """
        for instance in self.instances:
            if isinstance(instance, Robot):
                camera_pos = instance.robot.eyes.get_position()
                camera_pos = instance.robot.eyes.get_position() + offset_with_camera
                orn = instance.robot.eyes.get_orientation()
                mat = quat2rotmat(xyzw2wxyz(orn))[:3, :3]
                view_direction = mat.dot(np.array([1, 0, 0]))
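
Note: the block above turns the robot's xyzw eye quaternion into a forward view direction by rotating the camera-frame x axis. A self-contained sketch of the same math, using SciPy in place of iGibson's quat2rotmat/xyzw2wxyz helpers (illustrative only):

import numpy as np
from scipy.spatial.transform import Rotation

# A 45-degree yaw about z, in the xyzw order pybullet reports orientations.
orn = np.array([0.0, 0.0, np.sin(np.pi / 8), np.cos(np.pi / 8)])
view_direction = Rotation.from_quat(orn).apply([1.0, 0.0, 0.0])
print(view_direction)  # ~[0.707, 0.707, 0.0]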
@@ -1792,27 +1795,103 @@ class MeshRenderer(object):
        original_fov = self.vertical_fov
        self.set_fov(90)
        lidar_readings = []
        view_direction = np.array([1, 0, 0])
        r2 = np.array(
            [[np.cos(-np.pi / 2), -np.sin(-np.pi / 2), 0], [np.sin(-np.pi / 2), np.cos(-np.pi / 2), 0], [0, 0, 1]]
        r = np.array(
            [
                [
                    np.cos(-np.pi / 2),
                    0,
                    -np.sin(-np.pi / 2),
                    0,
                ],
                [0, 1, 0, 0],
                [np.sin(-np.pi / 2), 0, np.cos(-np.pi / 2), 0],
                [0, 0, 0, 1],
            ]
        )
        r3 = np.array(
            [[np.cos(-np.pi / 2), 0, -np.sin(-np.pi / 2)], [0, 1, 0], [np.sin(-np.pi / 2), 0, np.cos(-np.pi / 2)]]
        )
        transformatiom_matrix = np.eye(3)
        transformation_matrix = np.eye(4)
        for i in range(4):
            self.set_camera(
                np.array(self.camera) + offset_with_camera,
                np.array(self.camera) + offset_with_camera + view_direction,
                [0, 0, 1],
            )
            lidar_one_view = self.get_lidar_from_depth()
            lidar_readings.append(lidar_one_view.dot(transformatiom_matrix))
            view_direction = r2.dot(view_direction)
            transformatiom_matrix = r3.dot(transformatiom_matrix)
            lidar_readings.append(lidar_one_view.dot(transformation_matrix[:3, :3]))
            self.V = r.dot(self.V)
            transformation_matrix = np.linalg.inv(r).dot(transformation_matrix)
        lidar_readings = np.concatenate(lidar_readings, axis=0)
        # currently, the lidar scan is in camera frame (z forward, x right, y up)
        # it seems more intuitive to change it to (z up, x right, y forward)
        lidar_readings = lidar_readings.dot(np.array([[1, 0, 0], [0, 0, 1], [0, 1, 0]]))
        self.set_fov(original_fov)
        return lidar_readings
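
Note: the refactored loop replaces the old pair of 3x3 rotations r2/r3 (and the misspelled transformatiom_matrix) with a single 4x4 camera-space rotation r. Each iteration turns the view matrix V by 90 degrees and accumulates inv(r), so every partial scan is expressed in the first view's frame. A standalone sketch of why the four views stitch together (illustrative, not part of the commit):

import numpy as np

# Same r as in get_lidar_all above: a -90 degree rotation in camera space.
c, s = np.cos(-np.pi / 2), np.sin(-np.pi / 2)
r = np.array([[c, 0, -s, 0], [0, 1, 0, 0], [s, 0, c, 0], [0, 0, 0, 1]])

# Four quarter turns return the camera to its starting heading.
assert np.allclose(np.linalg.matrix_power(r, 4), np.eye(4))
# inv(r) (the transpose, since r is orthonormal) undoes each step, which is
# what maps every partial scan back into the frame of the first view.
assert np.allclose(np.linalg.inv(r), r.T)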
    def get_cube(self, mode="rgb", use_robot_camera=False):
        """
        :param mode: simulator rendering mode, 'rgb' or '3d'
        :param use_robot_camera: use the camera pose from robot
        :return: List of sensor readings, normalized to [0.0, 1.0], ordered as [F, R, B, L, U, D] * n_cameras
        """
        orig_fov = self.vertical_fov
        self.set_fov(90)
        org_V = np.copy(self.V)
        if use_robot_camera:
            for instance in self.instances:
                if isinstance(instance, Robot):
                    camera_pos = instance.robot.eyes.get_position()
                    orn = instance.robot.eyes.get_orientation()
                    mat = quat2rotmat(xyzw2wxyz(orn))[:3, :3]
                    view_direction = mat.dot(np.array([1, 0, 0]))
                    self.set_camera(camera_pos, camera_pos + view_direction, [0, 0, 1])

        def render_cube():
            frames = []
            r = np.array(
                [
                    [
                        np.cos(-np.pi / 2),
                        0,
                        -np.sin(-np.pi / 2),
                        0,
                    ],
                    [0, 1, 0, 0],
                    [np.sin(-np.pi / 2), 0, np.cos(-np.pi / 2), 0],
                    [0, 0, 0, 1],
                ]
            )
            for i in range(4):
                frames.append(self.render(modes=(mode))[0])
                self.V = r.dot(self.V)
            # Up
            r_up = np.array([[1, 0, 0, 0], [0, 0, -1, 0], [0, -1, 0, 0], [0, 0, 0, 1]])
            self.V = r_up.dot(org_V)
            frames.append(self.render(modes=(mode))[0])
            r_down = np.array([[1, 0, 0, 0], [0, 0, -1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
            # Down
            self.V = r_down.dot(org_V)
            frames.append(self.render(modes=(mode))[0])
            return frames

        frames = render_cube()
        self.V = org_V
        self.set_fov(orig_fov)
        return frames
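
Note: a hedged usage sketch for the new use_robot_camera flag. It assumes a Simulator `s` with an imported robot, as in the lidar example in the first file; nothing here is part of the commit:

# Render the six cube faces from the robot's eye pose.
frames = s.renderer.get_cube(mode="rgb", use_robot_camera=True)
for name, face in zip(["F", "R", "B", "L", "U", "D"], frames):
    print(name, face.shape)  # one image array per face, values in [0.0, 1.0]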
    def get_equi(self, mode="rgb", use_robot_camera=False):
        """
        :param mode: simulator rendering mode, 'rgb' or '3d'
        :param use_robot_camera: use the camera pose from robot
        :return: an equirectangular panorama image, normalized to [0.0, 1.0]
        """
        frames = self.get_cube(mode=mode, use_robot_camera=use_robot_camera)
        frames = [frames[0], frames[1][:, ::-1, :], frames[2][:, ::-1, :], frames[3], frames[4], frames[5]]
        equi = py360convert.c2e(cubemap=frames, h=frames[0].shape[0], w=frames[0].shape[0] * 2, cube_format="list")
        return equi
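
Note: get_equi() hands the six faces to py360convert.c2e with cube_format="list" after mirroring the R and B faces. A standalone sketch of that call on dummy data, to show the shapes involved (illustrative only):

import numpy as np
import py360convert

# Six 256x256 RGB faces in [F, R, B, L, U, D] order, as get_cube returns them.
faces = [np.random.rand(256, 256, 3) for _ in range(6)]
equi = py360convert.c2e(cubemap=faces, h=256, w=512, cube_format="list")
print(equi.shape)  # (256, 512, 3)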

View File

@@ -159,6 +159,7 @@ setup(
"sphinx_rtd_theme",
"h5py",
"gitpython",
"py360convert",
],
ext_modules=[CMakeExtension("MeshRendererContext", sourcedir="igibson/render")],
cmdclass=dict(build_ext=CMakeBuild),
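
Note: py360convert is now a hard dependency of the panorama path. A hedged sketch of an import guard for environments installed before this commit; the error message is illustrative, not part of the commit:

try:
    import py360convert  # new dependency added in setup.py above
except ImportError as exc:
    raise ImportError(
        "py360convert is required for get_equi()/panorama rendering; "
        "run `pip install py360convert` or reinstall iGibson"
    ) from exc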