Merged the newest PBR code and adapted the VR code to fit the new directory structure.
commit 19ba7ea306
@@ -1,4 +1,5 @@
*.pyc
*.pyd
*.pkl
*.so
*.egg-info/

@@ -34,6 +35,9 @@ physics/models
0_VRDemoSettings.txt

# VR
# This is the folder where the pyd files get put after setup.py builds gibson
Release

cnn_policy*
*ppo_policy*
@@ -1,9 +1,6 @@
[submodule "gibson2/render/pybind11"]
	path = gibson2/render/pybind11
	url = https://github.com/pybind/pybind11.git
[submodule "gibson2/render/glfw"]
	path = gibson2/render/glfw
	url = https://github.com/glfw/glfw
[submodule "gibson2/render/glm"]
	path = gibson2/render/glm
	url = https://github.com/g-truc/glm
[submodule "gibson2/render/openvr"]
	path = gibson2/render/openvr
	url = https://github.com/ValveSoftware/openvr
@@ -0,0 +1,120 @@
# Gibson in VR

Instructions for installing Gibson and its VR integration on Windows 10.
These instructions assume a fresh install of Windows.

They partially overlap with the general Gibson installation instructions
http://svl.stanford.edu/gibson2/docs/installation.html#installation-method
but are tailored to running the VR components on Windows.


VR Station
==========

Install Steam and SteamVR, connect the VR headset and base stations, set up the VR room,
and run the Steam performance test:

https://www.vive.com/eu/support/vive/category_howto/setting-up-for-the-first-time.html


Dependencies and environment
============================

* git
https://git-scm.com/download/win

* Python
https://www.python.org/downloads/release/python-377/

* Anaconda
https://www.anaconda.com/distribution/#download-section

Make sure Anaconda is added to the PATH as follows:
C:\Users\C\anaconda3
C:\Users\C\anaconda3\Scripts
C:\Users\C\anaconda3\Library\bin

Lack of the last entry produced the following error:
HTTP 000 CONNECTION FAILED for url <https://repo.anaconda.com/pkgs/main/win-64/current_repodata.json> Elapsed
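
If conda fails with that error, you can sanity-check the PATH entries from any Python prompt. A minimal sketch (the `C:\Users\C` prefix is just the example user from above; adjust it to your machine):

```
import os
import shutil

# The three Anaconda directories listed above
expected = [
    r"C:\Users\C\anaconda3",
    r"C:\Users\C\anaconda3\Scripts",
    r"C:\Users\C\anaconda3\Library\bin",
]

# PATH is a single os.pathsep-separated string
path_entries = os.environ["PATH"].split(os.pathsep)
for entry in expected:
    print(entry, "on PATH:", entry in path_entries)

# shutil.which returns None when conda cannot be resolved from PATH
print("conda resolves to:", shutil.which("conda"))
```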

* Build Tools for Visual Studio:
Microsoft Visual C++ 14.0 is required. Get it with "Build Tools for Visual Studio":
https://visualstudio.microsoft.com/downloads/
This is needed to build bullet.

* cmake:
https://cmake.org/download/
Needed for building MeshRendererContext and pybind11.

* SRAnipal (needed for eye tracking):
https://hub.vive.com/en-US/download

Download the VIVE_SRanipalInstaller msi file and install SRAnipal.


Gibson
======

* Get the codebase and assets:

```
$ git clone https://github.com/fxia22/iGibson.git --recursive
$ cd iGibson
$ git checkout vr
$ git submodule update --recursive
```

After this you should have content at:
core/render/glfw
core/render/pybind11
core/render/openvr

Download the Gibson assets and copy them to iGibson/gibson2/assets/
Download the environments (scenes) and copy them to iGibson/gibson2/assets/dataset

* Create an anaconda env:

```
$ conda create -n gibsonvr python=3.6
```
Activate the conda env:
```
$ source activate gibsonvr
```

* Install Gibson in the anaconda env:
```
$ cd iGibson
```
- If you followed the instructions, iGibson is on the vr branch
```
$ pip install -e .
```

Installation should finish by printing 'Successfully installed gibson2'

* Copy dlls into the mesh_renderer/Release folder:
Copy and paste the following dll files into iGibson/gibson2/core/render/mesh_renderer/Release:
1) openvr_api.dll, found in render/openvr/bin/win64
2) All the dll files in render/sranipal/bin
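
A minimal sketch of this copy step, assuming it is run from the iGibson repository root (the sranipal path mirrors the instructions above and may differ on your setup):

```
import glob
import shutil

# Destination folder named in the instructions above
release_dir = r"gibson2\core\render\mesh_renderer\Release"

# 1) openvr_api.dll from the openvr submodule
shutil.copy(r"gibson2\core\render\openvr\bin\win64\openvr_api.dll", release_dir)

# 2) every dll shipped with SRAnipal
for dll in glob.glob(r"gibson2\core\render\sranipal\bin\*.dll"):
    shutil.copy(dll, release_dir)
```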

You can find all the VR demos in iGibson/examples/demo

To see the features of the VR software, run:

$ python vr_demo_hand.py (for a scene with an interactive hand)

or

$ python vr_demo_rs.py (for the current state-of-the-art Gibson graphics)

To use the gripper asset featured in the interaction demos, download the 'gripper' folder at this link: https://drive.google.com/drive/folders/1-lHTtUuEgs9zzcievvvVdjHP0BdN7Du4?usp=sharing and put it in assets/models (wherever your assets folder is).
To use the VR hand asset, download and unzip the asset and put it into assets/models under the folder name 'vr_hand'.
Link to VR hand: https://drive.google.com/file/d/117qb1r_YHHVdQuwLD83N_nd0la57j9hZ/view?usp=sharing

Have fun in VR!

Helpful tips:
Press ESCAPE to force the fullscreen rendering window to close during program execution.
Before using SRAnipal eye tracking, you may want to re-calibrate the eye tracker. Go to the Vive system settings to perform this calibration.
@@ -0,0 +1,53 @@
# Replays the data stored by vr_demo_save.py
import pybullet as p
import numpy as np

from gibson2.core.physics.scene import BuildingScene
from gibson2.core.physics.interactive_objects import InteractiveObj, VrHand, YCBObject
from gibson2.core.simulator import Simulator
from gibson2 import assets_path
from gibson2.utils.vr_logging import VRLogReader

model_path = assets_path + '\\models'
sample_urdf_folder = model_path + '\\sample_urdfs\\'

optimize = True

# Note: the set-up code is all the same as in vr_demo_save
# Make sure to set vrMode to False when doing data replay!
s = Simulator(mode='vr', timestep=1/90.0, msaa=True, vrFullscreen=False, vrEyeTracking=False, optimize_render=optimize, vrMode=False)
scene = BuildingScene('Placida', is_interactive=False)
scene.sleep = optimize
s.import_scene(scene)

rHand = VrHand(start_pos=[0.0, 0.5, 1.5], leftHand=False, replayMode=True)
s.import_articulated_object(rHand)

# Heavy mustard
heavy_bottle = YCBObject('006_mustard_bottle')
s.import_object(heavy_bottle)
heavy_bottle.set_position([1, -0.4, 1])
p.changeDynamics(heavy_bottle.body_id, -1, mass=500)

# Light mustard
light_bottle = YCBObject('006_mustard_bottle')
s.import_object(light_bottle)
light_bottle.set_position([1, -0.6, 1])

if optimize:
    s.optimize_data()

# TODO: Remove this later!
camera_pose = np.array([0, 0, 1.2])
view_direction = np.array([1, 0, 0])
s.renderer.set_camera(camera_pose, camera_pose + view_direction, [0, 0, 1])
s.renderer.set_fov(90)

vr_log_path = 'vr_logs/vr_demo_save.h5'
vr_reader = VRLogReader(log_filepath=vr_log_path, playback_fps=90)

while vr_reader.get_data_left_to_read():
    vr_reader.read_frame(s)

    # Call step to render the frame data that has just been read
    s.step(shouldPrintTime=False)
@@ -0,0 +1,66 @@
# Stores PyBullet and VR data using the hdf5 file format
import pybullet as p
import numpy as np

from gibson2.core.physics.scene import BuildingScene
from gibson2.core.physics.interactive_objects import InteractiveObj, VrHand, YCBObject
from gibson2.core.simulator import Simulator
from gibson2 import assets_path
from gibson2.utils.vr_logging import VRLogWriter

model_path = assets_path + '\\models'

optimize = True

s = Simulator(mode='vr', timestep=1/90.0, msaa=True, vrFullscreen=False, vrEyeTracking=False, optimize_render=optimize)
scene = BuildingScene('Placida', is_interactive=False)
scene.sleep = optimize
s.import_scene(scene)

rHand = VrHand(start_pos=[0.0, 0.5, 1.5], leftHand=False)
s.import_articulated_object(rHand)

# Heavy mustard
heavy_bottle = YCBObject('006_mustard_bottle')
s.import_object(heavy_bottle)
heavy_bottle.set_position([1, -0.4, 1])
p.changeDynamics(heavy_bottle.body_id, -1, mass=500)

# Light mustard
light_bottle = YCBObject('006_mustard_bottle')
s.import_object(light_bottle)
light_bottle.set_position([1, -0.6, 1])

if optimize:
    s.optimize_data()

s.setVROffset([1.0, 0, -0.4])

# The VRLogWriter has a simple interface: initialize it, then call
# process_frame at the end of each frame to record data. Data is
# automatically flushed to hdf5 every frames_before_write frames.
vr_log_path = 'vr_logs/vr_demo_save.h5'
# Saves every 2 seconds or so (200 frames at 90 fps)
vr_writer = VRLogWriter(frames_before_write=200, log_filepath=vr_log_path, profiling_mode=True)

# 2000 frames corresponds to approximately 20-30 seconds of data collection
for i in range(2000):
    s.step(shouldPrintTime=False)

    rIsValid, rTrans, rRot = s.getDataForVRDevice('right_controller')
    rTrig, rTouchX, rTouchY = s.getButtonDataForController('right_controller')

    if rIsValid:
        rHand.move_hand(rTrans, rRot)
        rHand.toggle_finger_state(rTrig)

    vr_writer.process_frame(s)

# Note: always call this after the simulation is over to close the log file
# and clean up the resources used.
vr_writer.end_log_session()

s.disconnect()
@@ -0,0 +1,104 @@
import cv2
import sys
import os
import numpy as np
from gibson2.core.render.mesh_renderer.mesh_renderer_cpu import MeshRenderer
from gibson2.core.render.profiler import Profiler
from gibson2.utils.assets_utils import get_model_path
import gibson2
import time


def main():
    global _mouse_ix, _mouse_iy, down, view_direction

    if len(sys.argv) > 1:
        model_path = sys.argv[1]
    else:
        model_path = os.path.join(get_model_path('Rs'), 'mesh_z_up.obj')

    for interval in [3, 2, 1, 0.5, 0.4, 0.3]:
        renderer = MeshRenderer(width=512, height=512, optimize=False)
        renderer.load_object(model_path)
        renderer.add_instance(0)

        renderer.load_object(os.path.join(gibson2.assets_path, 'models/ycb/002_master_chef_can/textured_simple.obj'))
        for i in np.arange(-2, 2, interval):
            for j in np.arange(-2, 2, interval):
                renderer.add_instance(1)
                renderer.instances[-1].set_position([i, j, 0.5])

        renderer.load_object(os.path.join(gibson2.assets_path, 'models/ycb/003_cracker_box/textured_simple.obj'))
        for i in np.arange(-2, 2, interval):
            for j in np.arange(-2, 2, interval):
                renderer.add_instance(2)
                renderer.instances[-1].set_position([i, j, 0.8])

        if renderer.optimize:
            renderer.optimize_vertex_and_texture()

        print(renderer.visual_objects, renderer.instances)
        print(renderer.materials_mapping, renderer.mesh_materials)
        camera_pose = np.array([0, 0, 1.2])
        view_direction = np.array([1, 0, 0])
        renderer.set_camera(camera_pose, camera_pose + view_direction, [0, 0, 1])
        renderer.set_fov(90)

        # px = 0
        # py = 0

        # _mouse_ix, _mouse_iy = -1, -1
        # down = False

        # def change_dir(event, x, y, flags, param):
        #     global _mouse_ix, _mouse_iy, down, view_direction
        #     if event == cv2.EVENT_LBUTTONDOWN:
        #         _mouse_ix, _mouse_iy = x, y
        #         down = True
        #     if event == cv2.EVENT_MOUSEMOVE:
        #         if down:
        #             dx = (x - _mouse_ix) / 100.0
        #             dy = (y - _mouse_iy) / 100.0
        #             _mouse_ix = x
        #             _mouse_iy = y
        #             r1 = np.array([[np.cos(dy), 0, np.sin(dy)], [0, 1, 0], [-np.sin(dy), 0, np.cos(dy)]])
        #             r2 = np.array([[np.cos(-dx), -np.sin(-dx), 0], [np.sin(-dx), np.cos(-dx), 0], [0, 0, 1]])
        #             view_direction = r1.dot(r2).dot(view_direction)
        #     elif event == cv2.EVENT_LBUTTONUP:
        #         down = False

        # cv2.namedWindow('test')
        # cv2.setMouseCallback('test', change_dir)

        # while True:
        #     with Profiler('Render'):
        #         frame = renderer.render(modes=('rgb', 'normal', '3d'))
        #     cv2.imshow('test', cv2.cvtColor(np.concatenate(frame, axis=1), cv2.COLOR_RGB2BGR))
        #     q = cv2.waitKey(1)
        #     if q == ord('w'):
        #         px += 0.05
        #     elif q == ord('s'):
        #         px -= 0.05
        #     elif q == ord('a'):
        #         py += 0.05
        #     elif q == ord('d'):
        #         py -= 0.05
        #     elif q == ord('q'):
        #         break
        #     camera_pose = np.array([px, py, 1.2])
        #     renderer.set_camera(camera_pose, camera_pose + view_direction, [0, 0, 1])

        start = time.time()

        for i in range(100):
            frame = renderer.render(modes=('rgb',))

        elapsed = time.time() - start
        print("num objects {} fps {}".format(len(renderer.instances), 100 / elapsed))

        renderer.release()


if __name__ == '__main__':
    main()
@@ -72,6 +72,14 @@ def main():
        camera_pose = np.array([px, py, 0.5])
        renderer.set_camera(camera_pose, camera_pose + view_direction, [0, 0, 1])

        # start = time.time()

        # for i in range(100):
        #     frame = renderer.render(modes=('rgb', 'normal', '3d'))

        # elapsed = time.time() - start
        # print("num objects {} fps {}".format(len(renderer.instances), 100/elapsed))

    renderer.release()
@@ -0,0 +1,59 @@
from gibson2.core.physics.robot_locomotors import Turtlebot
from gibson2.core.simulator import Simulator
from gibson2.core.physics.scene import BuildingScene
from gibson2.core.physics.interactive_objects import YCBObject
from gibson2.utils.utils import parse_config
import pybullet as p
import numpy as np
from gibson2.core.render.profiler import Profiler
import matplotlib.pyplot as plt
import time

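# Enable sleeping so the physics engine can deactivate bodies that have come
# to rest instead of simulating them every step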
activationState = p.ACTIVATION_STATE_ENABLE_SLEEPING  # + p.ACTIVATION_STATE_SLEEP
N = 10
object_positions = np.random.uniform(low=0, high=2, size=(N, 3))


def test_time(sleep=True):
    config = parse_config('../configs/turtlebot_demo.yaml')
    s = Simulator(mode='headless', image_width=512, image_height=512)
    scene = BuildingScene('Rs',
                          build_graph=True,
                          pybullet_load_texture=True)
    s.import_scene(scene)
    #turtlebot = Turtlebot(config)
    #s.import_robot(turtlebot)
    #p.changeDynamics(scene.mesh_body_id, -1, activationState=activationState)
    #p.changeDynamics(scene.floor_body_ids[0], -1, activationState=activationState)

    for i in range(N):
        obj = YCBObject('003_cracker_box')
        s.import_object(obj)
        obj.set_position_orientation(object_positions[i, :], [0, 0, 0, 1])
        #print(obj.body_id)
        if sleep:
            p.changeDynamics(obj.body_id, -1, activationState=activationState)

    frame_times = []
    for i in range(3000):
        start = time.time()
        s.step()
        elapsed = time.time() - start
        frame_times.append(elapsed)

    s.disconnect()

    return frame_times


if __name__ == '__main__':
    frame_times_sleep = test_time(True)
    #print(1/np.mean(frame_times_sleep))
    frame_times_no_sleep = test_time(False)

    box_pts = 10
    box = np.ones(box_pts) / box_pts

    plt.plot(np.convolve(frame_times_sleep, box, mode='same'), label='With sleeping')
    plt.plot(np.convolve(frame_times_no_sleep, box, mode='same'), label='Without sleeping')
    plt.legend()
    plt.show()
@@ -0,0 +1,32 @@
from gibson2.core.simulator import Simulator
from gibson2.core.physics.scene import BuildingScene, StadiumScene
import pybullet as p
import numpy as np
import os
import gibson2
import time

# Simple rendering test for VR without a VR system (just rendering with GLFW)
optimize = True

s = Simulator(mode='vr', timestep=1/90.0, optimize_render=optimize)
scene = BuildingScene('Bolton', is_interactive=True)
scene.sleep = optimize
s.import_scene(scene)

if optimize:
    s.optimize_data()

frame_time_sum = 0
n = 1000
for i in range(n):
    start = time.time()
    s.step()
    elapsed = time.time() - start
    frame_time_sum += elapsed

av_fps = 1 / (float(frame_time_sum) / float(n))
print("Average fps:")
print(av_fps)

s.disconnect()
@@ -0,0 +1,99 @@
import pybullet as p
import numpy as np
import time

from gibson2.core.physics.robot_locomotors import Fetch
from gibson2.core.physics.scene import BuildingScene
from gibson2.core.physics.interactive_objects import YCBObject, InteractiveObj, GripperObj
from gibson2.core.simulator import Simulator
from gibson2 import assets_path, dataset_path
from gibson2.utils.utils import parse_config

model_path = assets_path + '\\models\\'
gripper_folder = model_path + '\\gripper\\'
configs_folder = '..\\configs\\'
fetch_config = parse_config(configs_folder + 'fetch_p2p_nav.yaml')

optimize = True
# Toggle this to only use renderer without VR, for testing purposes
vrMode = True

# Timestep should always be set to 1/90 to match VR system's 90fps
s = Simulator(mode='vr', timestep=1/90.0, msaa=True, vrFullscreen=False, vrEyeTracking=False, optimize_render=optimize, vrMode=vrMode)
scene = BuildingScene('Placida', is_interactive=False)
scene.sleep = optimize
s.import_scene(scene)

# Fetch robot in scene
#fetch = Fetch(fetch_config)
#s.import_robot(fetch)
#fetch.set_position([0,0,0])
#fetch.robot_specific_reset()

# Grippers represent hands
lGripper = GripperObj(gripper_folder + 'gripper.urdf')
s.import_articulated_object(lGripper)
lGripper.set_position([0.0, 0.0, 1.5])

rGripper = GripperObj(gripper_folder + 'gripper.urdf')
s.import_articulated_object(rGripper)
rGripper.set_position([0.0, 0.0, 1.0])

# Load objects in the environment
for i in range(5):
    bottle = YCBObject('006_mustard_bottle')
    s.import_object(bottle)
    _, org_orn = p.getBasePositionAndOrientation(bottle.body_id)
    bottle_pos = [1, 0 - 0.2 * i, 1]
    p.resetBasePositionAndOrientation(bottle.body_id, bottle_pos, org_orn)

# Controls how closed each gripper is (maximally open to start)
leftGripperFraction = 0.0
rightGripperFraction = 0.0

if optimize:
    s.optimize_data()

# Runs simulation
while True:
    start = time.time()

    if vrMode:
        eventList = s.pollVREvents()
        for event in eventList:
            deviceType, eventType = event
            if deviceType == 'left_controller':
                if eventType == 'trigger_press':
                    leftGripperFraction = 0.8
                elif eventType == 'trigger_unpress':
                    leftGripperFraction = 0.0
            elif deviceType == 'right_controller':
                if eventType == 'trigger_press':
                    rightGripperFraction = 0.8
                elif eventType == 'trigger_unpress':
                    rightGripperFraction = 0.0

    s.step(shouldTime=True)

    if vrMode:
        hmdIsValid, hmdTrans, hmdRot = s.getDataForVRDevice('hmd')
        lIsValid, lTrans, lRot = s.getDataForVRDevice('left_controller')
        rIsValid, rTrans, rRot = s.getDataForVRDevice('right_controller')

        if lIsValid:
            lGripper.move_gripper(lTrans, lRot)
            lGripper.set_close_fraction(leftGripperFraction)

        if rIsValid:
            rGripper.move_gripper(rTrans, rRot)
            rGripper.set_close_fraction(rightGripperFraction)

    elapsed = time.time() - start
    if elapsed > 0:
        curr_fps = 1 / elapsed
    else:
        curr_fps = 2000

    print("Current fps: %f" % curr_fps)

s.disconnect()
@@ -0,0 +1,102 @@
import pybullet as p
import numpy as np
import time

from gibson2.core.physics.robot_locomotors import Fetch
from gibson2.core.physics.scene import BuildingScene
from gibson2.core.physics.interactive_objects import YCBObject, InteractiveObj, GripperObj
from gibson2.core.simulator import Simulator
from gibson2 import assets_path, dataset_path
from gibson2.utils.utils import parse_config

model_path = assets_path + '\\models\\'
gripper_folder = model_path + '\\gripper\\'
configs_folder = '..\\configs\\'
fetch_config = parse_config(configs_folder + 'fetch_p2p_nav.yaml')

optimize = True
# Toggle this to only use renderer without VR, for testing purposes
vrMode = True

# Timestep should always be set to 1/90 to match VR system's 90fps
s = Simulator(mode='vr', timestep=1/90.0, msaa=True, vrFullscreen=False, vrEyeTracking=False, optimize_render=optimize, vrMode=vrMode)
scene = BuildingScene('Placida', is_interactive=False)
scene.sleep = optimize
s.import_scene(scene)

# Fetch robot in scene
#fetch = Fetch(fetch_config)
#s.import_robot(fetch)
#fetch.set_position([0,0,0])
#fetch.robot_specific_reset()

# Grippers represent hands
lGripper = GripperObj(gripper_folder + 'gripper.urdf')
s.import_articulated_object(lGripper)
lGripper.set_position([0.0, 0.0, 1.5])

rGripper = GripperObj(gripper_folder + 'gripper.urdf')
s.import_articulated_object(rGripper)
rGripper.set_position([0.0, 0.0, 1.0])

# Load objects in the environment
for i in range(5):
    bottle = YCBObject('006_mustard_bottle')
    s.import_object(bottle)
    _, org_orn = p.getBasePositionAndOrientation(bottle.body_id)
    bottle_pos = [1, 0 - 0.2 * i, 1]
    p.resetBasePositionAndOrientation(bottle.body_id, bottle_pos, org_orn)

# Controls how closed each gripper is (maximally open to start)
leftGripperFraction = 0.0
rightGripperFraction = 0.0

if optimize:
    s.optimize_data()

# Set VR system transform
s.setVROffset([0, 1.0, -1.0])

# Runs simulation
while True:
    start = time.time()

    if vrMode:
        eventList = s.pollVREvents()
        for event in eventList:
            deviceType, eventType = event
            if deviceType == 'left_controller':
                if eventType == 'trigger_press':
                    leftGripperFraction = 0.8
                elif eventType == 'trigger_unpress':
                    leftGripperFraction = 0.0
            elif deviceType == 'right_controller':
                if eventType == 'trigger_press':
                    rightGripperFraction = 0.8
                elif eventType == 'trigger_unpress':
                    rightGripperFraction = 0.0

    s.step(shouldTime=True)

    if vrMode:
        hmdIsValid, hmdTrans, hmdRot = s.getDataForVRDevice('hmd')
        lIsValid, lTrans, lRot = s.getDataForVRDevice('left_controller')
        rIsValid, rTrans, rRot = s.getDataForVRDevice('right_controller')

        if lIsValid:
            lGripper.move_gripper(lTrans, lRot)
            lGripper.set_close_fraction(leftGripperFraction)

        if rIsValid:
            rGripper.move_gripper(rTrans, rRot)
            rGripper.set_close_fraction(rightGripperFraction)

    elapsed = time.time() - start
    if elapsed > 0:
        curr_fps = 1 / elapsed
    else:
        curr_fps = 2000

    print("Current fps: %f" % curr_fps)

s.disconnect()
@@ -0,0 +1,96 @@
import pybullet as p
import yaml
from gibson2.core.physics.robot_locomotors import Turtlebot, Fetch
from gibson2.core.physics.scene import StadiumScene, BuildingScene, EmptyScene
from gibson2.core.physics.interactive_objects import YCBObject, InteractiveObj, BuildingObj, GripperObj
from gibson2.core.simulator import Simulator
from gibson2.utils.utils import parse_config
from gibson2 import assets_path, dataset_path
import numpy as np
import os
import pybullet_data

configs_folder = '..\\configs\\'
ohopee_path = dataset_path + '\\Ohoopee\\Ohoopee_mesh_texture.obj'
model_path = assets_path + '\\models\\'
bullet_obj_folder = model_path + '\\bullet_models\\data\\'
gripper_folder = model_path + '\\gripper\\'
sample_urdf_folder = model_path + '\\sample_urdfs\\'
config = parse_config(configs_folder + 'fetch_p2p_nav.yaml')

s = Simulator(mode='vr')
p.setGravity(0, 0, -9.81)

# Import Ohoopee manually for simple demo
building = BuildingObj(ohopee_path)
s.import_object(building)

# Grippers represent hands
lGripper = GripperObj(gripper_folder + 'gripper.urdf')
s.import_articulated_object(lGripper)
lGripper.set_position([0.0, 0.0, 1.5])

rGripper = GripperObj(gripper_folder + 'gripper.urdf')
s.import_articulated_object(rGripper)
rGripper.set_position([0.0, 0.0, 1.0])

# Load objects in the environment
bottle = YCBObject('006_mustard_bottle')
s.import_object(bottle)
_, org_orn = p.getBasePositionAndOrientation(bottle.body_id)
bottle_pos = [-0.3, -1.0, 1]
p.resetBasePositionAndOrientation(bottle.body_id, bottle_pos, org_orn)

bottle2 = YCBObject('006_mustard_bottle')
s.import_object(bottle2)
_, org_orn = p.getBasePositionAndOrientation(bottle2.body_id)
bottle_pos = [-0.4, -1.0, 1]
p.resetBasePositionAndOrientation(bottle2.body_id, bottle_pos, org_orn)

can = YCBObject('002_master_chef_can')
s.import_object(can)
_, org_orn = p.getBasePositionAndOrientation(can.body_id)
can_pos = [-0.5, -1.0, 1]
p.resetBasePositionAndOrientation(can.body_id, can_pos, org_orn)

basket = InteractiveObj(sample_urdf_folder + 'object_2eZY2JqYPQE.urdf')
s.import_object(basket)
basket.set_position([-0.8, 0.65, 1])

# Controls how closed each gripper is (maximally open to start)
leftGripperFraction = 0.0
rightGripperFraction = 0.0

# Runs simulation
while True:
    eventList = s.pollVREvents()
    for event in eventList:
        deviceType, eventType = event
        if deviceType == 'left_controller':
            if eventType == 'trigger_press':
                leftGripperFraction = 0.8
            elif eventType == 'trigger_unpress':
                leftGripperFraction = 0.0
        elif deviceType == 'right_controller':
            if eventType == 'trigger_press':
                rightGripperFraction = 0.8
            elif eventType == 'trigger_unpress':
                rightGripperFraction = 0.0

    s.step()

    hmdIsValid, hmdTrans, hmdRot, _ = s.getDataForVRDevice('hmd')
    lIsValid, lTrans, lRot, _ = s.getDataForVRDevice('left_controller')
    rIsValid, rTrans, rRot, _ = s.getDataForVRDevice('right_controller')

    if lIsValid:
        p.changeConstraint(lGripper.cid, lTrans, lRot, maxForce=500)
        lGripper.set_close_fraction(leftGripperFraction)

    if rIsValid:
        p.changeConstraint(rGripper.cid, rTrans, rRot, maxForce=500)
        rGripper.set_close_fraction(rightGripperFraction)

s.disconnect()
@@ -0,0 +1,59 @@
import pybullet as p
from gibson2.core.physics.scene import BuildingScene
from gibson2.core.physics.interactive_objects import GripperObj
from gibson2.core.simulator import Simulator
from gibson2 import assets_path

configs_folder = '..\\configs\\'
model_path = assets_path + '\\models'
gripper_folder = model_path + '\\gripper\\'

s = Simulator(mode='vr', vrMsaa=True)

rs = BuildingScene('Rs_interactive', build_graph=False, is_interactive=True)
s.import_scene(rs)

# Grippers represent hands
lGripper = GripperObj(gripper_folder + 'gripper.urdf')
s.import_articulated_object(lGripper)
lGripper.set_position([0.0, 0.0, 1.5])

rGripper = GripperObj(gripper_folder + 'gripper.urdf')
s.import_articulated_object(rGripper)
rGripper.set_position([0.0, 0.0, 1.0])

# Controls how closed each gripper is (maximally open to start)
leftGripperFraction = 0.0
rightGripperFraction = 0.0

# Runs simulation
while True:
    eventList = s.pollVREvents()
    for event in eventList:
        deviceType, eventType = event
        if deviceType == 'left_controller':
            if eventType == 'trigger_press':
                leftGripperFraction = 0.8
            elif eventType == 'trigger_unpress':
                leftGripperFraction = 0.0
        elif deviceType == 'right_controller':
            if eventType == 'trigger_press':
                rightGripperFraction = 0.8
            elif eventType == 'trigger_unpress':
                rightGripperFraction = 0.0

    s.step()

    hmdIsValid, hmdTrans, hmdRot, _ = s.getDataForVRDevice('hmd')
    lIsValid, lTrans, lRot, _ = s.getDataForVRDevice('left_controller')
    rIsValid, rTrans, rRot, _ = s.getDataForVRDevice('right_controller')

    if lIsValid:
        p.changeConstraint(lGripper.cid, lTrans, lRot, maxForce=500)
        lGripper.set_close_fraction(leftGripperFraction)

    if rIsValid:
        p.changeConstraint(rGripper.cid, rTrans, rRot, maxForce=500)
        rGripper.set_close_fraction(rightGripperFraction)

s.disconnect()
@@ -0,0 +1,105 @@
import pybullet as p
import yaml
from gibson2.core.physics.robot_locomotors import Turtlebot, Fetch
from gibson2.core.physics.scene import StadiumScene, BuildingScene
from gibson2.core.physics.interactive_objects import YCBObject, InteractiveObj, GripperObj
from gibson2.core.simulator import Simulator
from gibson2.utils.utils import parse_config
from gibson2 import assets_path
import numpy as np

configs_folder = '..\\configs\\'
model_path = assets_path + '\\models'
bullet_obj_folder = model_path + '\\bullet_models\\data\\'
gripper_folder = model_path + '\\gripper\\'
sample_urdf_folder = model_path + '\\sample_urdfs\\'
config = parse_config(configs_folder + 'fetch_p2p_nav.yaml')

# Utility function for multiplying quaternions stored as lists
def multQuatLists(q0, q1):
    x0, y0, z0, w0 = q0
    x1, y1, z1, w1 = q1

    x = x0*w1 + y0*z1 - z0*y1 + w0*x1
    y = -x0*z1 + y0*w1 + z0*x1 + w0*y1
    z = x0*y1 - y0*x1 + z0*w1 + w0*z1
    w = -x0*x1 - y0*y1 - z0*z1 + w0*w1

    return [x, y, z, w]
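# For example (a sanity check, assuming pybullet's [x, y, z, w] quaternion layout):
#   multQuatLists(q, [0, 0, 0, 1]) returns q unchanged, and composing a
#   90-degree yaw with itself gives a 180-degree yaw:
#   qz90 = p.getQuaternionFromEuler([0, 0, np.pi / 2])
#   multQuatLists(qz90, qz90)  # ~[0, 0, 1, 0]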

s = Simulator(mode='vr')
scene = StadiumScene()
s.import_scene(scene)

fetch = Fetch(config)
s.import_robot(fetch)
fetch.set_position([0, 1, 0])

lGripper = GripperObj(gripper_folder + 'gripper.urdf')
s.import_articulated_object(lGripper)
lGripper.set_position([0.0, 0.0, 1.5])

rGripper = GripperObj(gripper_folder + 'gripper.urdf')
s.import_articulated_object(rGripper)

# Gravity set to 0 to show off interaction
p.setGravity(0, 0, 0)

# Load objects in the environment
bottle = YCBObject('006_mustard_bottle')
s.import_object(bottle)
_, org_orn = p.getBasePositionAndOrientation(bottle.body_id)
bottle_pos = [0.7, 0, 1]
p.resetBasePositionAndOrientation(bottle.body_id, bottle_pos, org_orn)

can = YCBObject('002_master_chef_can')
s.import_object(can)
_, org_orn = p.getBasePositionAndOrientation(can.body_id)
can_pos = [0, -0.7, 1]
p.resetBasePositionAndOrientation(can.body_id, can_pos, org_orn)

basket = InteractiveObj(sample_urdf_folder + 'object_2eZY2JqYPQE.urdf')
s.import_object(basket)
basket.set_position([-0.7, 0, 1])

# Gripper correction quaternion - not needed in this example, but helpful if you want
# to correct the rotation of another object (e.g. a hand urdf for the controllers)
gripper_correction_quat = p.getQuaternionFromEuler([0, 0, 0])

# Controls how closed each gripper is (maximally open to start)
leftGripperFraction = 0.0
rightGripperFraction = 0.0

# Runs simulation
while True:
    eventList = s.pollVREvents()
    for event in eventList:
        deviceType, eventType = event
        if deviceType == 'left_controller':
            if eventType == 'trigger_press':
                leftGripperFraction = 0.8
            elif eventType == 'trigger_unpress':
                leftGripperFraction = 0.0
        elif deviceType == 'right_controller':
            if eventType == 'trigger_press':
                rightGripperFraction = 0.8
            elif eventType == 'trigger_unpress':
                rightGripperFraction = 0.0

    s.step()

    hmdIsValid, hmdTrans, hmdRot, _ = s.getDataForVRDevice('hmd')
    lIsValid, lTrans, lRot, _ = s.getDataForVRDevice('left_controller')
    rIsValid, rTrans, rRot, _ = s.getDataForVRDevice('right_controller')

    if lIsValid:
        final_rot = multQuatLists(lRot, gripper_correction_quat)
        p.changeConstraint(lGripper.cid, lTrans, final_rot, maxForce=500)
        lGripper.set_close_fraction(leftGripperFraction)

    if rIsValid:
        final_rot = multQuatLists(rRot, gripper_correction_quat)
        p.changeConstraint(rGripper.cid, rTrans, final_rot, maxForce=500)
        rGripper.set_close_fraction(rightGripperFraction)

s.disconnect()
@@ -0,0 +1,121 @@
import pybullet as p
import yaml
from gibson2.core.physics.robot_locomotors import Turtlebot, Fetch
from gibson2.core.physics.scene import StadiumScene, BuildingScene, EmptyScene
from gibson2.core.physics.interactive_objects import YCBObject, InteractiveObj, BuildingObj
from gibson2.core.simulator import Simulator
from gibson2.utils.utils import parse_config
from gibson2 import assets_path, dataset_path
import numpy as np
import os
import pybullet_data

# TODO: More work needs to be done to make Ohoopee have physics!
configs_folder = '..\\configs\\'
ohopee_path = dataset_path + '\\Ohoopee\\Ohoopee_mesh_texture.obj'
bullet_obj_folder = assets_path + '\\models\\bullet_models\\data\\'
gripper_folder = assets_path + '\\models\\pybullet_gripper\\'
models_path = assets_path + '\\models\\'

sample_urdf_folder = assets_path + '\\models\\sample_urdfs\\'
config = parse_config(configs_folder + 'fetch_interactive_nav.yaml')

s = Simulator(mode='vr')
p.setGravity(0, 0, -9.81)

# Import Ohoopee manually for simple demo
building = BuildingObj(ohopee_path)
s.import_object(building)

fetch = Fetch(config)
fetch_id = s.import_robot(fetch)[0]
print("Fetch robot id:")
print(fetch_id)
fetch.set_position([-0.2, -0.1, 0])
fetch.robot_specific_reset()
fetch_parts = fetch.parts
eye_part = fetch_parts['eyes']
gripper_part = fetch_parts['gripper_link']
gripper_part_link_index = gripper_part.body_part_index

fetch_height = 1.08

# Utility function for multiplying quaternions stored as lists
def multQuatLists(q0, q1):
    x0, y0, z0, w0 = q0
    x1, y1, z1, w1 = q1

    x = x0*w1 + y0*z1 - z0*y1 + w0*x1
    y = -x0*z1 + y0*w1 + z0*x1 + w0*y1
    z = x0*y1 - y0*x1 + z0*w1 + w0*z1
    w = -x0*x1 - y0*y1 - z0*z1 + w0*w1

    return [x, y, z, w]

# Load objects in the environment
bottle = YCBObject('006_mustard_bottle')
s.import_object(bottle)
_, org_orn = p.getBasePositionAndOrientation(bottle.body_id)
bottle_pos = [1.1, 0.5, 1]
p.resetBasePositionAndOrientation(bottle.body_id, bottle_pos, org_orn)

can = YCBObject('002_master_chef_can')
s.import_object(can)
_, org_orn = p.getBasePositionAndOrientation(can.body_id)
can_pos = [1.1, 0.7, 1]
p.resetBasePositionAndOrientation(can.body_id, can_pos, org_orn)

basket = InteractiveObj(sample_urdf_folder + 'object_2eZY2JqYPQE.urdf')
s.import_object(basket)
basket.set_position([-0.8, 0.8, 1])

controllerTestObj = YCBObject('006_mustard_bottle')
s.import_object(controllerTestObj)
test_id = controllerTestObj.body_id

def subtract_vector_list(v1, v2):
    return [v1[0] - v2[0], v1[1] - v2[1], v1[2] - v2[2]]

def add_vector_list(v1, v2):
    return [v1[0] + v2[0], v1[1] + v2[1], v1[2] + v2[2]]

while True:
    # Always call before step
    eventList = s.pollVREvents()
    for event in eventList:
        deviceType, eventType = event
        print("Device " + deviceType + " had event " + eventType)

    # Set should_measure_fps to True to measure the current fps
    s.step()

    # Always call after step
    hmdIsValid, hmdTrans, hmdRot, hmdActualPos = s.getDataForVRDevice('hmd')
    rIsValid, rTrans, rRot, _ = s.getDataForVRDevice('right_controller')

    # Set HMD to Fetch's eyes
    eye_pos = eye_part.get_position()
    s.setVRCamera(eye_pos)

    # Control Fetch arm with only the right controller
    if rIsValid:
        # Subtract headset position from controller to get position adjusted for new HMD location
        hmdDiffVec = subtract_vector_list(hmdTrans, hmdActualPos)
        rTransAdjusted = add_vector_list(rTrans, hmdDiffVec)

        #p.resetBasePositionAndOrientation(test_id, rTransAdjusted, rRot)
        # TODO: Add in inverse kinematics later!
        #joint_pos = p.calculateInverseKinematics(fetch_id, gripper_part_link_index, rTransAdjusted, rRot)

        #for i in range(len(joint_pos)):
        #    p.setJointMotorControl2(fetch_id,
        #                            i,
        #                            p.POSITION_CONTROL,
        #                            targetPosition=joint_pos[i],
        #                            targetVelocity=0,
        #                            positionGain=0.15,
        #                            velocityGain=1.0,
        #                            force=500)

s.disconnect()
@@ -0,0 +1,120 @@
import pybullet as p
import numpy as np
import time

from gibson2.core.physics.robot_locomotors import Fetch
from gibson2.core.physics.scene import BuildingScene
from gibson2.core.physics.interactive_objects import YCBObject, InteractiveObj, GripperObj, VisualMarker
from gibson2.core.simulator import Simulator
from gibson2 import assets_path, dataset_path
from gibson2.utils.utils import parse_config

model_path = assets_path + '\\models\\'
gripper_folder = model_path + '\\gripper\\'
configs_folder = '..\\configs\\'
fetch_config = parse_config(configs_folder + 'fetch_p2p_nav.yaml')

optimize = True
# Toggle this to only use renderer without VR, for testing purposes
vrMode = True

# Timestep should always be set to 1/90 to match VR system's 90fps
s = Simulator(mode='vr', timestep=1/90.0, msaa=True, vrFullscreen=False, vrEyeTracking=True, optimize_render=optimize, vrMode=vrMode)
scene = BuildingScene('Placida', is_interactive=False)
scene.sleep = optimize
s.import_scene(scene)

# Grippers represent hands
lGripper = GripperObj()
s.import_articulated_object(lGripper)
lGripper.set_position([0.0, 0.0, 1.5])

rGripper = GripperObj()
s.import_articulated_object(rGripper)
rGripper.set_position([0.0, 0.0, 1.0])

# Load objects in the environment
for i in range(5):
    bottle = YCBObject('006_mustard_bottle')
    s.import_object(bottle)
    _, org_orn = p.getBasePositionAndOrientation(bottle.body_id)
    bottle_pos = [1, 0 - 0.2 * i, 1]
    p.resetBasePositionAndOrientation(bottle.body_id, bottle_pos, org_orn)

# Load eye gaze marker
gaze_marker = VisualMarker(radius=0.03)
s.import_object(gaze_marker)
gaze_marker.set_marker_pos([0, 0, 1.5])
marker_pos, _ = p.getBasePositionAndOrientation(gaze_marker.body_id)
print("Marker starting pos: ", marker_pos)

# Controls how closed each gripper is (maximally open to start)
leftGripperFraction = 0.0
rightGripperFraction = 0.0

if optimize:
    s.optimize_data()

# Controls printing of fps data for rendering and physics
shouldTime = False
# Controls whether the gaze marker is moved while the right trigger is held
shouldMoveMarker = False

# Runs simulation
while True:
    start = time.time()

    if vrMode:
        eventList = s.pollVREvents()
        for event in eventList:
            deviceType, eventType = event
            if deviceType == 'left_controller':
                if eventType == 'trigger_press':
                    leftGripperFraction = 0.8
                elif eventType == 'trigger_unpress':
                    leftGripperFraction = 0.0
            elif deviceType == 'right_controller':
                if eventType == 'trigger_press':
                    rightGripperFraction = 0.8
                    shouldMoveMarker = True
                elif eventType == 'trigger_unpress':
                    rightGripperFraction = 0.0
                    shouldMoveMarker = False

    s.step(shouldPrintTime=shouldTime)

    if vrMode:
        hmdIsValid, hmdTrans, hmdRot = s.getDataForVRDevice('hmd')
        lIsValid, lTrans, lRot = s.getDataForVRDevice('left_controller')
        rIsValid, rTrans, rRot = s.getDataForVRDevice('right_controller')

        is_eye_data_valid, origin, dir, left_pupil_diameter, right_pupil_diameter = s.getEyeTrackingData()

        if is_eye_data_valid and shouldMoveMarker:
            updated_marker_pos = [origin[0] + dir[0], origin[1] + dir[1], origin[2] + dir[2]]
            gaze_marker.set_marker_pos(updated_marker_pos)
            #print("Current hmd pos: ", hmdTrans)
            #print("Gaze origin in world space", origin)
            #print("Gaze dir", dir)
            #print("Updated marker pos: ", updated_marker_pos)
            #print("------------------")

        if lIsValid:
            lGripper.move_gripper(lTrans, lRot)
            lGripper.set_close_fraction(leftGripperFraction)

        if rIsValid:
            rGripper.move_gripper(rTrans, rRot)
            rGripper.set_close_fraction(rightGripperFraction)
            #print("Right controller x and y: ", rTrans[0], rTrans[1])

    elapsed = time.time() - start
    if elapsed > 0:
        curr_fps = 1 / elapsed
    else:
        curr_fps = 2000

    if shouldTime:
        print("Current fps: %f" % curr_fps)

s.disconnect()
@@ -0,0 +1,53 @@
import pybullet as p
import numpy as np
import time

from gibson2.core.physics.scene import BuildingScene
from gibson2.core.physics.interactive_objects import YCBObject, InteractiveObj, VrHand
from gibson2.core.simulator import Simulator
from gibson2 import assets_path

model_path = assets_path + '\\models'
sample_urdf_folder = model_path + '\\sample_urdfs\\'

optimize = True

s = Simulator(mode='vr', timestep=1/90.0, vrFullscreen=False, vrEyeTracking=False, optimize_render=optimize, vrMode=True)
scene = BuildingScene('Placida', is_interactive=False)
scene.sleep = optimize
s.import_scene(scene)

# VR hand objects
lHand = VrHand(start_pos=[0.0, 0.0, 1.5])
s.import_articulated_object(lHand)

rHand = VrHand(start_pos=[0.0, 0.5, 1.5])
s.import_articulated_object(rHand)

# Object to pick up
model = InteractiveObj(sample_urdf_folder + 'object_H3ygj6efM8V.urdf')
s.import_object(model)
model.set_position([1, 0, 1])

if optimize:
    s.optimize_data()

# Start user close to counter for interaction
s.setVROffset([1.0, 0, -0.4])

while True:
    s.step(shouldPrintTime=False)

    hmdIsValid, hmdTrans, hmdRot = s.getDataForVRDevice('hmd')
    lIsValid, lTrans, lRot = s.getDataForVRDevice('left_controller')
    rIsValid, rTrans, rRot = s.getDataForVRDevice('right_controller')

    lTrig, lTouchX, lTouchY = s.getButtonDataForController('left_controller')
    rTrig, rTouchX, rTouchY = s.getButtonDataForController('right_controller')

    # TODO: Get a left hand model!
    if rIsValid:
        rHand.move_hand(rTrans, rRot)
        rHand.toggle_finger_state(rTrig)

s.disconnect()
@@ -0,0 +1,85 @@
import pybullet as p
import numpy as np
import time

from gibson2.core.physics.scene import BuildingScene
from gibson2.core.physics.interactive_objects import YCBObject, InteractiveObj, VrHand
from gibson2.core.simulator import Simulator
from gibson2 import assets_path
from gibson2.utils.vr_utils import translate_vr_position_by_vecs

model_path = assets_path + '\\models'
sample_urdf_folder = model_path + '\\sample_urdfs\\'

optimize = True

s = Simulator(mode='vr', timestep=1/90.0, msaa=True, vrFullscreen=False, vrEyeTracking=False, optimize_render=optimize)
scene = BuildingScene('Placida', is_interactive=False)
scene.sleep = optimize
s.import_scene(scene)

# VR hand object
rHand = VrHand(start_pos=[0.0, 0.5, 1.5])
s.import_articulated_object(rHand)

# Heavy robot
heavy_robot = InteractiveObj(sample_urdf_folder + 'object_H3ygj6efM8V.urdf')
s.import_object(heavy_robot)
heavy_robot.set_position([1, 0.2, 1])
# Robot is just a base, so use -1 as the link index for changing dynamics
p.changeDynamics(heavy_robot.body_id, -1, mass=500)

# Medium robot
medium_robot = InteractiveObj(sample_urdf_folder + 'object_H3ygj6efM8V.urdf')
s.import_object(medium_robot)
medium_robot.set_position([1, 0, 1])
# Robot is just a base, so use -1 as the link index for changing dynamics
p.changeDynamics(medium_robot.body_id, -1, mass=50)

# Light robot
light_robot = InteractiveObj(sample_urdf_folder + 'object_H3ygj6efM8V.urdf')
s.import_object(light_robot)
light_robot.set_position([1, -0.2, 1])

# Heavy mustard
heavy_bottle = YCBObject('006_mustard_bottle')
s.import_object(heavy_bottle)
heavy_bottle.set_position([1, -0.4, 1])
p.changeDynamics(heavy_bottle.body_id, -1, mass=500)

# Light mustard
light_bottle = YCBObject('006_mustard_bottle')
s.import_object(light_bottle)
light_bottle.set_position([1, -0.6, 1])

if optimize:
    s.optimize_data()

# Start user close to counter for interaction
s.setVROffset([1.0, 0, -0.4])

# Haptic pulse strength (0 is weakest, 1 is strongest)
haptic_pulse_strength = 1.0

while True:
    s.step(shouldPrintTime=False)

    rIsValid, rTrans, rRot = s.getDataForVRDevice('right_controller')
    rTrig, rTouchX, rTouchY = s.getButtonDataForController('right_controller')

    relative_device = 'hmd'
    right, _, forward = s.getDeviceCoordinateSystem(relative_device)

    if rIsValid:
        # Test varying haptic strength with the rTrig fraction
        # A closed trigger causes 0 vibration - open it to increase strength
        print('Trigger close fraction: ', rTrig)
        s.triggerHapticPulse('right_controller', 1 - rTrig)

        rHand.move_hand(rTrans, rRot)
        rHand.toggle_finger_state(rTrig)

        new_offset = translate_vr_position_by_vecs(rTouchX, rTouchY, right, forward, s.getVROffset(), 0.01)
        s.setVROffset(new_offset)

s.disconnect()
@@ -0,0 +1,89 @@
import pybullet as p
import numpy as np
import time

from gibson2.core.physics.robot_locomotors import Fetch
from gibson2.core.physics.scene import BuildingScene
from gibson2.core.physics.interactive_objects import YCBObject, InteractiveObj, GripperObj
from gibson2.core.simulator import Simulator
from gibson2 import assets_path, dataset_path
from gibson2.utils.utils import parse_config
from gibson2.utils.vr_utils import translate_vr_position_by_vecs
from math import sqrt

model_path = assets_path + '\\models\\'
gripper_folder = model_path + '\\gripper\\'
configs_folder = '..\\configs\\'
fetch_config = parse_config(configs_folder + 'fetch_p2p_nav.yaml')

optimize = True
# Toggle this to only use renderer without VR, for testing purposes
vrMode = True
# Possible types: hmd_relative, torso_relative
movement_type = 'torso_relative'

# Timestep should always be set to 1/90 to match VR system's 90fps
s = Simulator(mode='vr', timestep=1/90.0, msaa=False, vrFullscreen=False, vrEyeTracking=False, optimize_render=optimize, vrMode=vrMode)
scene = BuildingScene('Placida', is_interactive=False)
scene.sleep = optimize
s.import_scene(scene)

# Grippers represent hands
lGripper = GripperObj()
s.import_articulated_object(lGripper)
lGripper.set_position([0.0, 0.0, 1.5])

rGripper = GripperObj()
s.import_articulated_object(rGripper)
rGripper.set_position([0.0, 0.0, 1.0])

# Load objects in the environment
for i in range(5):
    bottle = YCBObject('006_mustard_bottle')
    s.import_object(bottle)
    _, org_orn = p.getBasePositionAndOrientation(bottle.body_id)
    bottle_pos = [1, 0 - 0.2 * i, 1]
    p.resetBasePositionAndOrientation(bottle.body_id, bottle_pos, org_orn)

# Controls how closed each gripper is (maximally open to start)
leftGripperFraction = 0.0
rightGripperFraction = 0.0

if optimize:
    s.optimize_data()

# Account for Gibson floors not being at z=0 - shift user height down by 0.2m
s.setVROffset([0, 0, -0.2])

while True:
    s.step(shouldPrintTime=True)

    if vrMode:
        hmdIsValid, hmdTrans, hmdRot = s.getDataForVRDevice('hmd')
        lIsValid, lTrans, lRot = s.getDataForVRDevice('left_controller')
        rIsValid, rTrans, rRot = s.getDataForVRDevice('right_controller')
        lTrig, lTouchX, lTouchY = s.getButtonDataForController('left_controller')
        rTrig, rTouchX, rTouchY = s.getButtonDataForController('right_controller')

        if lIsValid:
            lGripper.move_gripper(lTrans, lRot)
            lGripper.set_close_fraction(lTrig)

        if rIsValid:
            rGripper.move_gripper(rTrans, rRot)
            rGripper.set_close_fraction(rTrig)

        relative_device = 'hmd'
        if movement_type == 'torso_relative':
            relative_device = 'right_controller'
        right, up, forward = s.getDeviceCoordinateSystem(relative_device)

        # Move the VR player in the direction of the analog stick
        # In this implementation, +ve x corresponds to right and +ve y corresponds to forward
        # relative to the HMD
        # Only uses data from the right controller
        if rIsValid:
            new_offset = translate_vr_position_by_vecs(rTouchX, rTouchY, right, forward, s.getVROffset(), 0.01)
            s.setVROffset(new_offset)

s.disconnect()
@@ -0,0 +1,77 @@
import pybullet as p
import numpy as np
import time

from gibson2.core.physics.scene import BuildingScene
from gibson2.core.physics.interactive_objects import YCBObject, InteractiveObj, VrHand
from gibson2.core.simulator import Simulator
from gibson2 import assets_path
from gibson2.utils.vr_utils import translate_vr_position_by_vecs

model_path = assets_path + '\\models'
sample_urdf_folder = model_path + '\\sample_urdfs\\'

optimize = True

s = Simulator(mode='vr', timestep=1/90.0, msaa=True, vrFullscreen=False, vrEyeTracking=False, optimize_render=optimize)
scene = BuildingScene('Placida', is_interactive=False)
scene.sleep = optimize
s.import_scene(scene)

# VR hand object
rHand = VrHand(start_pos=[0.0, 0.5, 1.5])
s.import_articulated_object(rHand)

# Heavy robot
heavy_robot = InteractiveObj(sample_urdf_folder + 'object_H3ygj6efM8V.urdf')
s.import_object(heavy_robot)
heavy_robot.set_position([1, 0.2, 1])
# Robot is just a base, so use -1 as the link index for changing dynamics
p.changeDynamics(heavy_robot.body_id, -1, mass=500)

# Medium robot
medium_robot = InteractiveObj(sample_urdf_folder + 'object_H3ygj6efM8V.urdf')
s.import_object(medium_robot)
medium_robot.set_position([1, 0, 1])
# Robot is just a base, so use -1 as the link index for changing dynamics
p.changeDynamics(medium_robot.body_id, -1, mass=50)

# Light robot
light_robot = InteractiveObj(sample_urdf_folder + 'object_H3ygj6efM8V.urdf')
s.import_object(light_robot)
light_robot.set_position([1, -0.2, 1])

# Heavy mustard
heavy_bottle = YCBObject('006_mustard_bottle')
s.import_object(heavy_bottle)
heavy_bottle.set_position([1, -0.4, 1])
p.changeDynamics(heavy_bottle.body_id, -1, mass=500)

# Light mustard
light_bottle = YCBObject('006_mustard_bottle')
s.import_object(light_bottle)
light_bottle.set_position([1, -0.6, 1])

if optimize:
    s.optimize_data()

# Start user close to counter for interaction
s.setVROffset([1.0, 0, -0.4])

while True:
    s.step(shouldPrintTime=False)

    rIsValid, rTrans, rRot = s.getDataForVRDevice('right_controller')
    rTrig, rTouchX, rTouchY = s.getButtonDataForController('right_controller')

    relative_device = 'hmd'
    right, _, forward = s.getDeviceCoordinateSystem(relative_device)

    if rIsValid:
        rHand.move_hand(rTrans, rRot)
        rHand.toggle_finger_state(rTrig)

        new_offset = translate_vr_position_by_vecs(rTouchX, rTouchY, right, forward, s.getVROffset(), 0.01)
        s.setVROffset(new_offset)

s.disconnect()
@ -0,0 +1,162 @@
|
|||
import pybullet as p
|
||||
import numpy as np
|
||||
import time
|
||||
|
||||
from gibson2.core.physics.robot_locomotors import Fetch
|
||||
from gibson2.core.physics.scene import BuildingScene, StadiumScene
|
||||
from gibson2.core.physics.interactive_objects import YCBObject, InteractiveObj, GripperObj, VisualMarker
|
||||
from gibson2.core.simulator import Simulator
|
||||
from gibson2 import assets_path, dataset_path
|
||||
from gibson2.utils.utils import parse_config
|
||||
from gibson2.utils.vr_utils import get_normalized_translation_vec, translate_vr_position_by_vecs
|
||||
from math import sqrt
|
||||
|
||||
model_path = assets_path + '\\models\\'
|
||||
gripper_folder = model_path + '\\gripper\\'
|
||||
configs_folder = '..\\configs\\'
|
||||
fetch_config = parse_config(configs_folder + 'fetch_p2p_nav.yaml')
|
||||
|
||||
optimize = True
|
||||
# Toggle this to only use renderer without VR, for testing purposes
|
||||
vrMode = True
|
||||
|
||||
# Timestep should always be set to 1/90 to match VR system's 90fps
|
||||
s = Simulator(mode='vr', timestep = 1/90.0, msaa=False, vrFullscreen=False, vrEyeTracking=False, optimize_render=optimize, vrMode=vrMode)
|
||||
scene = BuildingScene('Placida', is_interactive=False)
|
||||
scene.sleep = optimize
|
||||
s.import_scene(scene)
|
||||
|
||||
# User controls fetch in this demo
|
||||
fetch = Fetch(fetch_config, vr_mode=True)
|
||||
s.import_robot(fetch)
|
||||
# Set differential drive to control wheels
|
||||
fetch.set_position([0,-1.5,0])
|
||||
fetch.robot_specific_reset()
|
||||
fetch.keep_still()
|
||||
|
||||
# Load robot end-effector-tracker
|
||||
effector_marker = VisualMarker(rgba_color = [1, 0, 1, 0.2], radius=0.05)
|
||||
s.import_object(effector_marker)
|
||||
# Hide marker upon initialization
|
||||
effector_marker.set_marker_pos([0,0,-5])
|
||||
|
||||
# Load objects in the environment
|
||||
for i in range(5):
|
||||
bottle = YCBObject('006_mustard_bottle')
|
||||
s.import_object(bottle)
|
||||
_, org_orn = p.getBasePositionAndOrientation(bottle.body_id)
|
||||
bottle_pos = [1, -0.2 * i, 1]
|
||||
p.resetBasePositionAndOrientation(bottle.body_id, bottle_pos, org_orn)
|
||||
|
||||
if optimize:
|
||||
s.optimize_data()
|
||||
|
||||
fetch_height = 1.2
|
||||
|
||||
wheel_axle_half = 0.18738 # half of the distance between the wheels
|
||||
wheel_radius = 0.054 # radius of the wheels themselves
|
||||
|
||||
r_wheel_joint = fetch.ordered_joints[0]
|
||||
l_wheel_joint = fetch.ordered_joints[1]
|
||||
|
||||
fetch_lin_vel_multiplier = 100
|
||||
|
||||
# Variables used in IK to move end effector
|
||||
fetch_body_id = fetch.get_fetch_body_id()
|
||||
fetch_joint_num = p.getNumJoints(fetch_body_id)
|
||||
effector_link_id = 19
|
||||
|
||||
# Setting to determine whether IK should also solve for end effector orientation
|
||||
# based on the VR controller
|
||||
# TODO: Decide which type of control to use
|
||||
set_effector_orn = True
|
||||
|
||||
while True:
|
||||
s.step(shouldPrintTime=False)
|
||||
|
||||
if vrMode:
|
||||
hmdIsValid, hmdTrans, hmdRot = s.getDataForVRDevice('hmd')
|
||||
rIsValid, rTrans, rRot = s.getDataForVRDevice('right_controller')
|
||||
rTrig, rTouchX, rTouchY = s.getButtonDataForController('right_controller')
|
||||
|
||||
# Only use z angle to rotate fetch around vertical axis
|
||||
# Set the orientation directly, to avoid lag when turning and the motion sickness that results
|
||||
_, _, hmd_z = p.getEulerFromQuaternion(hmdRot)
|
||||
fetch_rot = p.getQuaternionFromEuler([0, 0, hmd_z])
|
||||
fetch.set_orientation(fetch_rot)
|
||||
|
||||
hmd_world_pos = s.getHmdWorldPos()
|
||||
fetch_pos = fetch.get_position()
|
||||
|
||||
# Calculate x and y offset to get to fetch position
|
||||
# z offset is to the desired hmd height, corresponding to fetch head height
|
||||
offset_to_fetch = [fetch_pos[0] - hmd_world_pos[0],
|
||||
fetch_pos[1] - hmd_world_pos[1],
|
||||
fetch_height - hmd_world_pos[2]]
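# E.g. (hypothetical numbers): with the hmd at world position [0, 0, 1.7] and
# fetch at [1, 0, 0], the offset is [1, 0, fetch_height - 1.7] = [1, 0, -0.5],
# which places the user's viewpoint at the robot's head.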
|
||||
|
||||
s.setVROffset(offset_to_fetch)
|
||||
|
||||
# Fetch only has one end effector, so we can control entirely with the right controller
|
||||
if rIsValid:
|
||||
# Move marker to indicate where the end effector should go
|
||||
effector_marker.set_marker_state(rTrans, rRot)
|
||||
|
||||
# Linear velocity is relative to current direction fetch is pointing,
|
||||
# so we only need to know how fast to travel in that direction (the touchpad's Y axis supplies this)
|
||||
lin_vel = fetch_lin_vel_multiplier * rTouchY
|
||||
ang_vel = 0
|
||||
|
||||
left_wheel_ang_vel = (lin_vel - ang_vel * wheel_axle_half) / wheel_radius
|
||||
right_wheel_ang_vel = (lin_vel + ang_vel * wheel_axle_half) / wheel_radius
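# Worked example (hypothetical input): pushing the touchpad fully forward gives
# rTouchY = 1.0, so lin_vel = 100. With ang_vel = 0 the axle term vanishes and
# both wheels get the same target, lin_vel / wheel_radius, so the base drives
# straight ahead.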
|
||||
|
||||
l_wheel_joint.set_motor_velocity(left_wheel_ang_vel)
|
||||
r_wheel_joint.set_motor_velocity(right_wheel_ang_vel)
|
||||
|
||||
# Ignore the sideways roll dimension of the controller (x axis), since fetch can't "roll" its arm
|
||||
r_euler_rot = p.getEulerFromQuaternion(rRot)
|
||||
r_rot_no_x = p.getQuaternionFromEuler([0, r_euler_rot[1], r_euler_rot[2]])
|
||||
|
||||
# Iteration and residual threshold values are based on recommendations from PyBullet
|
||||
jointPoses = None
|
||||
if set_effector_orn:
|
||||
jointPoses = p.calculateInverseKinematics(fetch_body_id,
|
||||
effector_link_id,
|
||||
rTrans,
|
||||
r_rot_no_x,
|
||||
solver=0,
|
||||
maxNumIterations=100,
|
||||
residualThreshold=.01)
|
||||
else:
|
||||
jointPoses = p.calculateInverseKinematics(fetch_body_id,
|
||||
effector_link_id,
|
||||
rTrans,
|
||||
solver=0)
|
||||
|
||||
# TODO: Hide the arm from the user? Sometimes gets in the way of seeing the end effector
|
||||
if jointPoses is not None:
|
||||
for i in range(len(jointPoses)):
|
||||
next_pose = jointPoses[i]
|
||||
next_joint = fetch.ordered_joints[i]
|
||||
|
||||
# Set wheel joint back to original position so IK calculation does not affect movement
|
||||
# Note: PyBullet does not currently expose the root of the IK calculation
|
||||
# TODO: Create our own IK that allows for a user-defined root?
|
||||
if next_joint.joint_name == 'r_wheel_joint' or next_joint.joint_name == 'l_wheel_joint':
|
||||
next_pose, _, _ = next_joint.get_state()
|
||||
|
||||
p.resetJointState(fetch_body_id, next_joint.joint_index, next_pose)
|
||||
|
||||
# TODO: Arm is not moving with this function - debug!
|
||||
# TODO: This could be causing some problems with movement
|
||||
#p.setJointMotorControl2(bodyIndex=fetch_body_id,
|
||||
# jointIndex=next_joint.joint_index,
|
||||
# controlMode=p.POSITION_CONTROL,
|
||||
# targetPosition=next_pose,
|
||||
# force=500)
|
||||
|
||||
# Open/close the end effectors
|
||||
fetch.set_fetch_gripper_fraction(rTrig)
|
||||
|
||||
|
||||
|
||||
s.disconnect()
|
|
@ -0,0 +1,85 @@
|
|||
import pybullet as p
|
||||
import numpy as np
|
||||
import time
|
||||
|
||||
from gibson2.core.physics.robot_locomotors import Fetch
|
||||
from gibson2.core.physics.scene import BuildingScene
|
||||
from gibson2.core.physics.interactive_objects import YCBObject, InteractiveObj, GripperObj
|
||||
from gibson2.core.simulator import Simulator
|
||||
from gibson2 import assets_path, dataset_path
|
||||
from gibson2.utils.utils import parse_config
|
||||
|
||||
model_path = assets_path + '\\models\\'
|
||||
gripper_folder = model_path + 'gripper\\'
|
||||
configs_folder = '..\\configs\\'
|
||||
fetch_config = parse_config(configs_folder + 'fetch_p2p_nav.yaml')
|
||||
|
||||
optimize = True
|
||||
# Toggle this to only use renderer without VR, for testing purposes
|
||||
vrMode = True
|
||||
|
||||
# Timestep should always be set to 1/90 to match VR system's 90fps
|
||||
s = Simulator(mode='vr', timestep = 1/90.0, msaa=True, vrFullscreen=False, optimize_render=optimize, vrMode=vrMode)
|
||||
scene = BuildingScene('Rs_interactive', build_graph=False, is_interactive=True)
|
||||
scene.sleep = optimize
|
||||
s.import_scene(scene)
|
||||
|
||||
# Grippers represent hands
|
||||
lGripper = GripperObj()
|
||||
s.import_articulated_object(lGripper)
|
||||
lGripper.set_position([0.0, 0.0, 1.5])
|
||||
|
||||
rGripper = GripperObj()
|
||||
s.import_articulated_object(rGripper)
|
||||
rGripper.set_position([0.0, 0.0, 1.0])
|
||||
|
||||
# Controls how closed each gripper is (maximally open to start)
|
||||
leftGripperFraction = 0.0
|
||||
rightGripperFraction = 0.0
|
||||
|
||||
if optimize:
|
||||
s.optimize_data()
|
||||
|
||||
# Runs simulation
|
||||
while True:
|
||||
start = time.time()
|
||||
|
||||
if vrMode:
|
||||
eventList = s.pollVREvents()
|
||||
for event in eventList:
|
||||
deviceType, eventType = event
|
||||
if deviceType == 'left_controller':
|
||||
if eventType == 'trigger_press':
|
||||
leftGripperFraction = 0.8
|
||||
elif eventType == 'trigger_unpress':
|
||||
leftGripperFraction = 0.0
|
||||
elif deviceType == 'right_controller':
|
||||
if eventType == 'trigger_press':
|
||||
rightGripperFraction = 0.8
|
||||
elif eventType == 'trigger_unpress':
|
||||
rightGripperFraction = 0.0
|
||||
|
||||
s.step(shouldPrintTime=True)
|
||||
|
||||
if vrMode:
|
||||
hmdIsValid, hmdTrans, hmdRot = s.getDataForVRDevice('hmd')
|
||||
lIsValid, lTrans, lRot = s.getDataForVRDevice('left_controller')
|
||||
rIsValid, rTrans, rRot = s.getDataForVRDevice('right_controller')
|
||||
|
||||
if lIsValid:
|
||||
lGripper.move_gripper(lTrans, lRot)
|
||||
lGripper.set_close_fraction(leftGripperFraction)
|
||||
|
||||
if rIsValid:
|
||||
rGripper.move_gripper(rTrans, rRot)
|
||||
rGripper.set_close_fraction(rightGripperFraction)
|
||||
|
||||
elapsed = time.time() - start
|
||||
if (elapsed > 0):
|
||||
curr_fps = 1/elapsed
|
||||
else:
|
||||
curr_fps = 2000
|
||||
|
||||
print("Current fps: %f" % curr_fps)
|
||||
|
||||
s.disconnect()
|
|
@ -0,0 +1,764 @@
|
|||
import pybullet as p
|
||||
import os
|
||||
import pybullet_data
|
||||
import gibson2
|
||||
import numpy as np
|
||||
import random
|
||||
import json
|
||||
|
||||
from gibson2.utils.assets_utils import get_model_path, get_texture_file, get_ig_scene_path, get_ig_model_path, get_ig_category_path
|
||||
import xml.etree.ElementTree as ET
|
||||
from gibson2.utils.utils import rotate_vector_3d
|
||||
from gibson2 import assets_path
|
||||
from gibson2.utils.utils import multQuatLists
|
||||
|
||||
import logging
|
||||
import math
|
||||
from IPython import embed
|
||||
import trimesh
|
||||
|
||||
gripper_path = assets_path + '\\models\\gripper\\gripper.urdf'
|
||||
vr_hand_left_path = assets_path + '\\models\\vr_hand\\vr_hand_left.urdf'
|
||||
vr_hand_right_path = assets_path + '\\models\\vr_hand\\vr_hand_right.urdf'
|
||||
|
||||
class Object(object):
|
||||
def __init__(self):
|
||||
self.body_id = None
|
||||
self.loaded = False
|
||||
|
||||
def load(self):
|
||||
if self.loaded:
|
||||
return self.body_id
|
||||
self.body_id = self._load()
|
||||
self.loaded = True
|
||||
return self.body_id
|
||||
|
||||
def get_position(self):
|
||||
pos, _ = p.getBasePositionAndOrientation(self.body_id)
|
||||
return pos
|
||||
|
||||
def get_orientation(self):
|
||||
"""Return object orientation
|
||||
:return: quaternion in xyzw
|
||||
"""
|
||||
_, orn = p.getBasePositionAndOrientation(self.body_id)
|
||||
return orn
|
||||
|
||||
def set_position(self, pos):
|
||||
_, old_orn = p.getBasePositionAndOrientation(self.body_id)
|
||||
p.resetBasePositionAndOrientation(self.body_id, pos, old_orn)
|
||||
|
||||
def set_orientation(self, orn):
|
||||
old_pos, _ = p.getBasePositionAndOrientation(self.body_id)
|
||||
p.resetBasePositionAndOrientation(self.body_id, old_pos, orn)
|
||||
|
||||
def set_position_orientation(self, pos, orn):
|
||||
p.resetBasePositionAndOrientation(self.body_id, pos, orn)
|
||||
|
||||
|
||||
class YCBObject(Object):
|
||||
def __init__(self, name, scale=1):
|
||||
super(YCBObject, self).__init__()
|
||||
self.visual_filename = os.path.join(gibson2.assets_path, 'models', 'ycb', name,
|
||||
'textured_simple.obj')
|
||||
self.collision_filename = os.path.join(gibson2.assets_path, 'models', 'ycb', name,
|
||||
'textured_simple_vhacd.obj')
|
||||
self.scale = scale
|
||||
|
||||
def _load(self):
|
||||
collision_id = p.createCollisionShape(p.GEOM_MESH,
|
||||
fileName=self.collision_filename,
|
||||
meshScale=self.scale)
|
||||
visual_id = p.createVisualShape(p.GEOM_MESH,
|
||||
fileName=self.visual_filename,
|
||||
meshScale=self.scale)
|
||||
|
||||
body_id = p.createMultiBody(baseCollisionShapeIndex=collision_id,
|
||||
baseVisualShapeIndex=visual_id,
|
||||
basePosition=[0.2, 0.2, 1.5],
|
||||
baseMass=0.1,
|
||||
useMaximalCoordinates=True)
|
||||
return body_id
|
||||
|
||||
|
||||
class ShapeNetObject(Object):
|
||||
def __init__(self, path, scale=1., position=[0, 0, 0], orientation=[0, 0, 0]):
|
||||
super(ShapeNetObject, self).__init__()
|
||||
self.filename = path
|
||||
self.scale = scale
|
||||
self.position = position
|
||||
self.orientation = orientation
|
||||
|
||||
self._default_mass = 3.
|
||||
self._default_transform = {
|
||||
'position': [0, 0, 0],
|
||||
'orientation_quat': [1. / np.sqrt(2), 0, 0, 1. / np.sqrt(2)],
|
||||
}
|
||||
pose = p.multiplyTransforms(positionA=self.position,
|
||||
orientationA=p.getQuaternionFromEuler(
|
||||
self.orientation),
|
||||
positionB=self._default_transform['position'],
|
||||
orientationB=self._default_transform['orientation_quat'])
|
||||
self.pose = {
|
||||
'position': pose[0],
|
||||
'orientation_quat': pose[1],
|
||||
}
|
||||
|
||||
def _load(self):
|
||||
collision_id = p.createCollisionShape(p.GEOM_MESH,
|
||||
fileName=self.filename,
|
||||
meshScale=self.scale)
|
||||
body_id = p.createMultiBody(basePosition=self.pose['position'],
|
||||
baseOrientation=self.pose['orientation_quat'],
|
||||
baseMass=self._default_mass,
|
||||
baseCollisionShapeIndex=collision_id,
|
||||
baseVisualShapeIndex=-1)
|
||||
return body_id
|
||||
|
||||
|
||||
class Pedestrian(Object):
|
||||
def __init__(self, style='standing', pos=[0, 0, 0]):
|
||||
super(Pedestrian, self).__init__()
|
||||
self.collision_filename = os.path.join(
|
||||
gibson2.assets_path, 'models', 'person_meshes',
|
||||
'person_{}'.format(style), 'meshes', 'person_vhacd.obj')
|
||||
self.visual_filename = os.path.join(gibson2.assets_path, 'models', 'person_meshes',
|
||||
'person_{}'.format(style), 'meshes', 'person.obj')
|
||||
self.cid = None
|
||||
self.pos = pos
|
||||
|
||||
def _load(self):
|
||||
collision_id = p.createCollisionShape(
|
||||
p.GEOM_MESH, fileName=self.collision_filename)
|
||||
visual_id = p.createVisualShape(
|
||||
p.GEOM_MESH, fileName=self.visual_filename)
|
||||
body_id = p.createMultiBody(basePosition=[0, 0, 0],
|
||||
baseMass=60,
|
||||
baseCollisionShapeIndex=collision_id,
|
||||
baseVisualShapeIndex=visual_id)
|
||||
p.resetBasePositionAndOrientation(
|
||||
body_id, self.pos, [-0.5, -0.5, -0.5, 0.5])
|
||||
self.cid = p.createConstraint(body_id,
|
||||
-1,
|
||||
-1,
|
||||
-1,
|
||||
p.JOINT_FIXED, [0, 0, 0], [0, 0, 0],
|
||||
self.pos,
|
||||
parentFrameOrientation=[-0.5, -0.5, -0.5, 0.5]) # facing x axis
|
||||
|
||||
return body_id
|
||||
|
||||
def reset_position_orientation(self, pos, orn):
|
||||
p.changeConstraint(self.cid, pos, orn)
|
||||
|
||||
|
||||
class VisualMarker(Object):
|
||||
def __init__(self,
|
||||
visual_shape=p.GEOM_SPHERE,
|
||||
rgba_color=[1, 0, 0, 0.5],
|
||||
radius=1.0,
|
||||
half_extents=[1, 1, 1],
|
||||
length=1,
|
||||
initial_offset=[0, 0, 0]):
|
||||
"""
|
||||
create a visual shape to show in pybullet and MeshRenderer
|
||||
|
||||
:param visual_shape: pybullet.GEOM_BOX, pybullet.GEOM_CYLINDER, pybullet.GEOM_CAPSULE or pybullet.GEOM_SPHERE
|
||||
:param rgba_color: color
|
||||
:param radius: radius (for sphere)
|
||||
:param half_extents: half extents (for pybullet.GEOM_BOX)
|
||||
:param length: length (for pybullet.GEOM_CYLINDER or pybullet.GEOM_CAPSULE)
|
||||
:param initial_offset: visualFramePosition for the marker
|
||||
"""
|
||||
super(VisualMarker, self).__init__()
|
||||
self.visual_shape = visual_shape
|
||||
self.rgba_color = rgba_color
|
||||
self.radius = radius
|
||||
self.half_extents = half_extents
|
||||
self.length = length
|
||||
self.initial_offset = initial_offset
|
||||
|
||||
def _load(self):
|
||||
if self.visual_shape == p.GEOM_BOX:
|
||||
shape = p.createVisualShape(self.visual_shape,
|
||||
rgbaColor=self.rgba_color,
|
||||
halfExtents=self.half_extents,
|
||||
visualFramePosition=self.initial_offset)
|
||||
elif self.visual_shape in [p.GEOM_CYLINDER, p.GEOM_CAPSULE]:
|
||||
shape = p.createVisualShape(self.visual_shape,
|
||||
rgbaColor=self.rgba_color,
|
||||
radius=self.radius,
|
||||
length=self.length,
|
||||
visualFramePosition=self.initial_offset)
|
||||
else:
|
||||
shape = p.createVisualShape(self.visual_shape,
|
||||
rgbaColor=self.rgba_color,
|
||||
radius=self.radius,
|
||||
visualFramePosition=self.initial_offset)
|
||||
body_id = p.createMultiBody(
|
||||
baseVisualShapeIndex=shape, baseCollisionShapeIndex=-1)
|
||||
|
||||
return body_id
|
||||
|
||||
def set_color(self, color):
|
||||
p.changeVisualShape(self.body_id, -1, rgbaColor=color)
|
||||
|
||||
def set_marker_pos(self, pos):
|
||||
_, original_orn = p.getBasePositionAndOrientation(self.body_id)
|
||||
p.resetBasePositionAndOrientation(self.body_id, pos, original_orn)
|
||||
|
||||
def set_marker_orn(self, orn):
|
||||
original_pos, _ = p.getBasePositionAndOrientation(self.body_id)
|
||||
p.resetBasePositionAndOrientation(self.body_id, original_pos, orn)
|
||||
|
||||
def set_marker_state(self, pos, orn):
|
||||
p.resetBasePositionAndOrientation(self.body_id, pos, orn)
|
||||
|
||||
|
||||
class BoxShape(Object):
|
||||
def __init__(self, pos=[1, 2, 3], dim=[1, 2, 3], visual_only=False, mass=1000, color=[1, 1, 1, 1]):
|
||||
super(BoxShape, self).__init__()
|
||||
self.basePos = pos
|
||||
self.dimension = dim
|
||||
self.visual_only = visual_only
|
||||
self.mass = mass
|
||||
self.color = color
|
||||
|
||||
def _load(self):
|
||||
baseOrientation = [0, 0, 0, 1]
|
||||
colBoxId = p.createCollisionShape(
|
||||
p.GEOM_BOX, halfExtents=self.dimension)
|
||||
visualShapeId = p.createVisualShape(
|
||||
p.GEOM_BOX, halfExtents=self.dimension, rgbaColor=self.color)
|
||||
if self.visual_only:
|
||||
body_id = p.createMultiBody(baseCollisionShapeIndex=-1,
|
||||
baseVisualShapeIndex=visualShapeId)
|
||||
else:
|
||||
body_id = p.createMultiBody(baseMass=self.mass,
|
||||
baseCollisionShapeIndex=colBoxId,
|
||||
baseVisualShapeIndex=visualShapeId)
|
||||
|
||||
p.resetBasePositionAndOrientation(
|
||||
body_id, self.basePos, baseOrientation)
|
||||
|
||||
return body_id
|
||||
|
||||
|
||||
def round_up(n, decimals=0):
|
||||
multiplier = 10 ** decimals
|
||||
return math.ceil(n * multiplier) / multiplier
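# For instance, round_up(0.12301, 2) == 0.13: the value is rounded toward
# positive infinity at the requested number of decimals.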
|
||||
|
||||
|
||||
class URDFObject(Object):
|
||||
"""
|
||||
URDFObjects are instantiated from a URDF file. They can be composed of one or more links and joints, and they should be passive.
|
||||
We use this class to parse our modified link tag for URDFs that embed objects into scenes.
|
||||
"""
|
||||
|
||||
def __init__(self, xml_element, random_groups, avg_obj_dims=None):
|
||||
super(URDFObject, self).__init__()
|
||||
|
||||
category = xml_element.attrib["category"]
|
||||
model = xml_element.attrib['model']
|
||||
model_path = ""
|
||||
|
||||
print("Category", category)
|
||||
print("Model", model)
|
||||
|
||||
# Find the urdf file that defines this object
|
||||
if category == "building":
|
||||
model_path = get_ig_scene_path(model)
|
||||
filename = model_path + "/" + model + "_building.urdf"
|
||||
else:
|
||||
category_path = get_ig_category_path(category)
|
||||
assert len(os.listdir(category_path)) != 0, \
|
||||
"There are no models in category folder {}".format(
|
||||
category_path)
|
||||
|
||||
if model == 'random':
|
||||
# Using random group to assign the same model to a group of objects
|
||||
# E.g. we want to use the same model for a group of chairs around the same dining table
|
||||
if "random_group" in xml_element.attrib:
|
||||
# random_group is a unique integer within the category
|
||||
random_group = xml_element.attrib["random_group"]
|
||||
random_group_key = (category, random_group)
|
||||
|
||||
# if the model of this random group has already been selected
|
||||
# use that model.
|
||||
if random_group_key in random_groups:
|
||||
model = random_groups[random_group_key]
|
||||
|
||||
# otherwise, this is the first instance of this random group
|
||||
# select a random model and cache it
|
||||
else:
|
||||
model = random.choice(os.listdir(category_path))
|
||||
random_groups[random_group_key] = model
|
||||
else:
|
||||
# Using a random instance
|
||||
model = random.choice(os.listdir(category_path))
|
||||
else:
|
||||
model = xml_element.attrib['model']
|
||||
|
||||
model_path = get_ig_model_path(category, model)
|
||||
filename = model_path + "/" + model + ".urdf"
|
||||
|
||||
self.filename = filename
|
||||
logging.info("Loading " + filename)
|
||||
self.object_tree = ET.parse(filename) # Parse the URDF
|
||||
|
||||
# Change the mesh filenames to include the entire path
|
||||
for mesh in self.object_tree.iter("mesh"):
|
||||
mesh.attrib['filename'] = model_path + \
|
||||
"/" + mesh.attrib['filename']
|
||||
|
||||
# Apply the desired bounding box size / scale
|
||||
# First obtain the scaling factor
|
||||
if "bounding_box" in xml_element.keys() and "scale" in xml_element.keys():
|
||||
logging.error(
|
||||
"You cannot define both scale and bounding box size defined to embed a URDF")
|
||||
exit(-1)
|
||||
|
||||
meta_json = os.path.join(model_path, 'misc/metadata.json')
|
||||
bbox_json = os.path.join(model_path, 'misc/bbox.json')
|
||||
if os.path.isfile(meta_json):
|
||||
with open(meta_json, 'r') as f:
|
||||
meta_data = json.load(f)
|
||||
bbox_size = np.array(meta_data['bbox_size'])
|
||||
base_link_offset = np.array(meta_data['base_link_offset'])
|
||||
elif os.path.isfile(bbox_json):
|
||||
with open(bbox_json, 'r') as f:
|
||||
bbox_data = json.load(f)
|
||||
bbox_max = np.array(bbox_data['max'])
|
||||
bbox_min = np.array(bbox_data['min'])
|
||||
bbox_size = bbox_max - bbox_min
|
||||
base_link_offset = (bbox_min + bbox_max) / 2.0
|
||||
else:
|
||||
bbox_size = np.zeros(3)
|
||||
base_link_offset = np.zeros(3)
|
||||
|
||||
if "bounding_box" in xml_element.keys():
|
||||
# Obtain the scale as the ratio between the desired bounding box size and the normal bounding box size of the object at scale (1, 1, 1)
|
||||
bounding_box = np.array(
|
||||
[float(val) for val in xml_element.attrib["bounding_box"].split(" ")])
|
||||
scale = bounding_box / bbox_size
|
||||
elif "scale" in xml_element.keys():
|
||||
scale = np.array([float(val)
|
||||
for val in xml_element.attrib["scale"].split(" ")])
|
||||
else:
|
||||
scale = np.array([1., 1., 1.])
|
||||
logging.info("Scale: " + np.array2string(scale))
|
||||
|
||||
# We need to scale 1) the meshes, 2) the position of meshes, 3) the position of joints, 4) the orientation axis of joints
|
||||
# The problem is that those quantities are given wrt. its parent link frame, and this can be rotated wrt. the frame the scale was given in
|
||||
# Solution: parse the kin tree joint by joint, extract the rotation, rotate the scale, apply rotated scale to 1, 2, 3, 4 in the child link frame
|
||||
|
||||
# First, define the scale in each link reference frame
|
||||
# and apply it to the joint values
|
||||
scales_in_lf = {}
|
||||
scales_in_lf["base_link"] = scale
|
||||
all_processed = False
|
||||
while not all_processed:
|
||||
all_processed = True
|
||||
for joint in self.object_tree.iter("joint"):
|
||||
parent_link_name = joint.find("parent").attrib["link"]
|
||||
child_link_name = joint.find("child").attrib["link"]
|
||||
if parent_link_name in scales_in_lf and child_link_name not in scales_in_lf:
|
||||
scale_in_parent_lf = scales_in_lf[parent_link_name]
|
||||
# The location of the joint frame is scaled using the scale in the parent frame
|
||||
for origin in joint.iter("origin"):
|
||||
current_origin_xyz = np.array(
|
||||
[float(val) for val in origin.attrib["xyz"].split(" ")])
|
||||
new_origin_xyz = np.multiply(
|
||||
current_origin_xyz, scale_in_parent_lf)
|
||||
new_origin_xyz = np.array(
|
||||
[round_up(val, 10) for val in new_origin_xyz])
|
||||
origin.attrib['xyz'] = ' '.join(
|
||||
map(str, new_origin_xyz))
|
||||
|
||||
# Get the rotation of the joint frame and apply it to the scale
|
||||
if "rpy" in joint.keys():
|
||||
joint_frame_rot = np.array(
|
||||
[float(val) for val in joint.attrib['rpy'].split(" ")])
|
||||
# Rotate the scale
|
||||
scale_in_child_lf = rotate_vector_3d(
|
||||
scale_in_parent_lf, *joint_frame_rot, cck=True)
|
||||
scale_in_child_lf = np.absolute(scale_in_child_lf)
|
||||
else:
|
||||
scale_in_child_lf = scale_in_parent_lf
|
||||
|
||||
#print("Adding: ", joint.find("child").attrib["link"])
|
||||
|
||||
scales_in_lf[joint.find("child").attrib["link"]] = \
|
||||
scale_in_child_lf
|
||||
|
||||
# The axis of the joint is defined in the joint frame, we scale it after applying the rotation
|
||||
for axis in joint.iter("axis"):
|
||||
current_axis_xyz = np.array(
|
||||
[float(val) for val in axis.attrib["xyz"].split(" ")])
|
||||
new_axis_xyz = np.multiply(
|
||||
current_axis_xyz, scale_in_child_lf)
|
||||
new_axis_xyz /= np.linalg.norm(new_axis_xyz)
|
||||
new_axis_xyz = np.array(
|
||||
[round_up(val, 10) for val in new_axis_xyz])
|
||||
axis.attrib['xyz'] = ' '.join(map(str, new_axis_xyz))
|
||||
|
||||
# Iterate the for loop again, since we added new elements to the dictionary
|
||||
all_processed = False
|
||||
|
||||
all_links = self.object_tree.findall('link')
|
||||
# compute dynamics properties
|
||||
if category != "building":
|
||||
all_links_trimesh = []
|
||||
total_volume = 0.0
|
||||
for link in all_links:
|
||||
meshes = link.findall('collision/geometry/mesh')
|
||||
if len(meshes) == 0:
|
||||
all_links_trimesh.append(None)
|
||||
continue
|
||||
# assume one collision mesh per link
|
||||
assert len(meshes) == 1, (filename, link.attrib['name'])
|
||||
collision_mesh_path = os.path.join(model_path,
|
||||
meshes[0].attrib['filename'])
|
||||
trimesh_obj = trimesh.load(file_obj=collision_mesh_path)
|
||||
all_links_trimesh.append(trimesh_obj)
|
||||
volume = trimesh_obj.volume
|
||||
# a hack to artificially increase the density of the lamp base
|
||||
if link.attrib['name'] == 'base_link':
|
||||
if category in ['lamp']:
|
||||
volume *= 10.0
|
||||
total_volume += volume
|
||||
|
||||
# avg L x W x H and weight are given for this object category
|
||||
if avg_obj_dims is not None:
|
||||
avg_density = avg_obj_dims['density']
|
||||
|
||||
# otherwise, use the median density across all existing object categories
|
||||
else:
|
||||
avg_density = 67.0
|
||||
|
||||
# Scale the mass based on bounding box size
|
||||
# TODO: how to scale moment of inertia?
|
||||
total_mass = avg_density * \
|
||||
bounding_box[0] * bounding_box[1] * bounding_box[2]
|
||||
# print('total_mass', total_mass)
|
||||
|
||||
density = total_mass / total_volume
|
||||
# print('avg density', density)
|
||||
for trimesh_obj in all_links_trimesh:
|
||||
if trimesh_obj is not None:
|
||||
trimesh_obj.density = density
|
||||
|
||||
assert len(all_links_trimesh) == len(all_links)
|
||||
|
||||
# Now iterate over all links and scale the meshes and positions
|
||||
for i, link in enumerate(all_links):
|
||||
if category != "building":
|
||||
link_trimesh = all_links_trimesh[i]
|
||||
# assign dynamics properties
|
||||
if link_trimesh is not None:
|
||||
inertials = link.findall('inertial')
|
||||
if len(inertials) == 0:
|
||||
inertial = ET.SubElement(link, 'inertial')
|
||||
else:
|
||||
assert len(inertials) == 1
|
||||
inertial = inertials[0]
|
||||
|
||||
masses = inertial.findall('mass')
|
||||
if len(masses) == 0:
|
||||
mass = ET.SubElement(inertial, 'mass')
|
||||
else:
|
||||
assert len(masses) == 1
|
||||
mass = masses[0]
|
||||
|
||||
inertias = inertial.findall('inertia')
|
||||
if len(inertias) == 0:
|
||||
inertia = ET.SubElement(inertial, 'inertia')
|
||||
else:
|
||||
assert len(inertias) == 1
|
||||
inertia = inertias[0]
|
||||
|
||||
origins = inertial.findall('origin')
|
||||
if len(origins) == 0:
|
||||
origin = ET.SubElement(inertial, 'origin')
|
||||
else:
|
||||
assert len(origins) == 1
|
||||
origin = origins[0]
|
||||
|
||||
# a hack to artificially increase the density of the lamp base
|
||||
if link.attrib['name'] == 'base_link':
|
||||
if category in ['lamp']:
|
||||
link_trimesh.density *= 10.0
|
||||
|
||||
if link_trimesh.is_watertight:
|
||||
center = link_trimesh.center_mass
|
||||
else:
|
||||
center = link_trimesh.centroid
|
||||
|
||||
# The inertial frame origin will be scaled down below.
|
||||
# Here, it has the value BEFORE scaling
|
||||
origin.attrib['xyz'] = ' '.join(map(str, center))
|
||||
origin.attrib['rpy'] = ' '.join(map(str, [0.0, 0.0, 0.0]))
|
||||
|
||||
mass.attrib['value'] = str(round_up(link_trimesh.mass, 10))
|
||||
moment_of_inertia = link_trimesh.moment_inertia
|
||||
inertia.attrib['ixx'] = str(moment_of_inertia[0][0])
|
||||
inertia.attrib['ixy'] = str(moment_of_inertia[0][1])
|
||||
inertia.attrib['ixz'] = str(moment_of_inertia[0][2])
|
||||
inertia.attrib['iyy'] = str(moment_of_inertia[1][1])
|
||||
inertia.attrib['iyz'] = str(moment_of_inertia[1][2])
|
||||
inertia.attrib['izz'] = str(moment_of_inertia[2][2])
|
||||
|
||||
scale_in_lf = scales_in_lf[link.attrib["name"]]
|
||||
# Apply the scale to all mesh elements within the link (original scale and origin)
|
||||
for mesh in link.iter("mesh"):
|
||||
if "scale" in mesh.attrib:
|
||||
mesh_scale = np.array(
|
||||
[float(val) for val in mesh.attrib["scale"].split(" ")])
|
||||
new_scale = np.multiply(mesh_scale, scale_in_lf)
|
||||
new_scale = np.array([round_up(val, 10)
|
||||
for val in new_scale])
|
||||
mesh.attrib['scale'] = ' '.join(map(str, new_scale))
|
||||
else:
|
||||
new_scale = np.array([round_up(val, 10)
|
||||
for val in scale_in_lf])
|
||||
mesh.set('scale', ' '.join(map(str, new_scale)))
|
||||
for origin in link.iter("origin"):
|
||||
origin_xyz = np.array(
|
||||
[float(val) for val in origin.attrib["xyz"].split(" ")])
|
||||
new_origin_xyz = np.multiply(origin_xyz, scale_in_lf)
|
||||
new_origin_xyz = np.array(
|
||||
[round_up(val, 10) for val in new_origin_xyz])
|
||||
origin.attrib['xyz'] = ' '.join(map(str, new_origin_xyz))
|
||||
|
||||
# Finally, we need to know where the base_link origin is wrt. the bounding box center. That allows us to place the model
|
||||
# correctly since the joint transformations given in the scene urdf are for the bounding box center
|
||||
# Coordinates of the bounding box center in the base_link frame
|
||||
# We scale the location. We will subtract this from the joint location
|
||||
scale = scales_in_lf["base_link"]
|
||||
self.scaled_bbxc_in_blf = -scale * base_link_offset
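# E.g. (hypothetical numbers): with base_link_offset = [0.1, 0, 0] and a
# base-link scale of [2, 2, 2], scaled_bbxc_in_blf = [-0.2, 0, 0]: the scaled
# position of the base_link origin relative to the bounding box center.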
|
||||
|
||||
def _load(self):
|
||||
body_id = p.loadURDF(self.filename,
|
||||
flags=p.URDF_USE_MATERIAL_COLORS_FROM_MTL)
|
||||
self.mass = p.getDynamicsInfo(body_id, -1)[0]
|
||||
|
||||
return body_id
|
||||
|
||||
|
||||
class InteractiveObj(Object):
|
||||
"""
|
||||
Interactive objects are represented as a URDF, but don't have motors.
|
||||
"""
|
||||
|
||||
def __init__(self, filename, scale=1):
|
||||
super(InteractiveObj, self).__init__()
|
||||
self.filename = filename
|
||||
self.scale = scale
|
||||
|
||||
def _load(self):
|
||||
body_id = p.loadURDF(self.filename, globalScaling=self.scale,
|
||||
flags=p.URDF_USE_MATERIAL_COLORS_FROM_MTL)
|
||||
self.mass = p.getDynamicsInfo(body_id, -1)[0]
|
||||
|
||||
return body_id
|
||||
|
||||
class VrHand(InteractiveObj):
|
||||
"""
|
||||
Represents the human hand used for VR programs
|
||||
|
||||
Joint indices and names:
|
||||
|
||||
Joint 0 has name palm__base
|
||||
Joint 1 has name Rproximal__palm
|
||||
Joint 2 has name Rmiddle__Rproximal
|
||||
Joint 3 has name Rtip__Rmiddle
|
||||
Joint 4 has name Mproximal__palm
|
||||
Joint 5 has name Mmiddle__Mproximal
|
||||
Joint 6 has name Mtip__Mmiddle
|
||||
Joint 7 has name Pproximal__palm
|
||||
Joint 8 has name Pmiddle__Pproximal
|
||||
Joint 9 has name Ptip__Pmiddle
|
||||
Joint 10 has name palm__thumb_base
|
||||
Joint 11 has name Tproximal__thumb_base
|
||||
Joint 12 has name Tmiddle__Tproximal
|
||||
Joint 13 has name Ttip__Tmiddle
|
||||
Joint 14 has name Iproximal__palm
|
||||
Joint 15 has name Imiddle__Iproximal
|
||||
Joint 16 has name Itip__Imiddle
|
||||
|
||||
Link names in order:
|
||||
base
|
||||
palm
|
||||
Rproximal
|
||||
Rmiddle
|
||||
Rtip
|
||||
Mproximal
|
||||
Mmiddle
|
||||
Mtip
|
||||
Pproximal
|
||||
Pmiddle
|
||||
Ptip
|
||||
thumb_base
|
||||
Tproximal
|
||||
Tmiddle
|
||||
Ttip
|
||||
Iproximal
|
||||
Imiddle
|
||||
Itip
|
||||
"""
|
||||
|
||||
def __init__(self, scale=1, start_pos=[0,0,0], leftHand=False, replayMode=False):
|
||||
self.leftHand = leftHand
|
||||
# Indicates whether this is data replay or not
|
||||
self.replayMode = replayMode
|
||||
self.filename = vr_hand_left_path if leftHand else vr_hand_right_path
|
||||
super().__init__(self.filename)
|
||||
self.scale = scale
|
||||
self.start_pos = start_pos
|
||||
# Hand needs to be rotated to visually align with VR controller
|
||||
# TODO: Make this alignment better (will require some experimentation)
|
||||
self.base_rot = p.getQuaternionFromEuler([0, 160, -80])
|
||||
# Lists of joint indices for hand part
|
||||
self.base_idxs = [0]
|
||||
# Proximal indices for non-thumb fingers
|
||||
self.proximal_idxs = [1, 4, 7, 14]
|
||||
# Middle indices for non-thumb fingers
|
||||
self.middle_idxs = [2, 5, 8, 15]
|
||||
# Tip indices for non-thumb fingers
|
||||
self.tip_idxs = [3, 6, 9, 16]
|
||||
# Thumb base (rotates instead of contracting)
|
||||
self.thumb_base_idxs = [10]
|
||||
# Thumb indices (proximal, middle, tip)
|
||||
self.thumb_idxs = [11, 12, 13]
|
||||
# Open positions for all joints
|
||||
self.open_pos = [0, 0.2, 0.3, 0.4, 0.2, 0.3, 0.4, 0.2, 0.3, 0.4, 1.0, 0.1, 0.1, 0.1, 0.2, 0.3, 0.4]
|
||||
# Closed positions for all joints
|
||||
self.close_pos = [0, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 1.2, 0.5, 0.5, 0.5, 0.8, 0.8, 0.8]
|
||||
|
||||
def _load(self):
|
||||
self.body_id = super()._load()
|
||||
self.set_position(self.start_pos)
|
||||
for jointIndex in range(p.getNumJoints(self.body_id)):
|
||||
# Make masses larger for greater stability
|
||||
# Mass is in kg, friction is coefficient
|
||||
p.changeDynamics(self.body_id, jointIndex, mass=0.2, lateralFriction=1.2)
|
||||
open_pos = self.open_pos[jointIndex]
|
||||
p.resetJointState(self.body_id, jointIndex, open_pos)
|
||||
p.setJointMotorControl2(self.body_id, jointIndex, p.POSITION_CONTROL, targetPosition=open_pos, force=500)
|
||||
# Keep base light for easier hand movement
|
||||
p.changeDynamics(self.body_id, -1, mass=0.05, lateralFriction=0.8)
|
||||
# Only add constraints when we aren't replaying data (otherwise the constraints interfere with data replay)
|
||||
if not self.replayMode:
|
||||
self.movement_cid = p.createConstraint(self.body_id, -1, -1, -1, p.JOINT_FIXED, [0, 0, 0], [0, 0, 0], self.start_pos)
|
||||
|
||||
return self.body_id
|
||||
|
||||
def move_hand(self, trans, rot, maxForce=500):
|
||||
final_rot = multQuatLists(rot, self.base_rot)
|
||||
p.changeConstraint(self.movement_cid, trans, final_rot, maxForce=maxForce)
|
||||
|
||||
# Close frac of 1 indicates fully closed joint, and close frac of 0 indicates fully open joint
|
||||
# Joints move smoothly between their values in self.open_pos and self.close_pos
|
||||
def toggle_finger_state(self, close_frac, maxForce=500):
|
||||
for jointIndex in range(p.getNumJoints(self.body_id)):
|
||||
open_pos = self.open_pos[jointIndex]
|
||||
close_pos = self.close_pos[jointIndex]
|
||||
interp_frac = (close_pos - open_pos) * close_frac
|
||||
target_pos = open_pos + interp_frac
|
||||
p.setJointMotorControl2(self.body_id, jointIndex, p.POSITION_CONTROL, targetPosition=target_pos, force=maxForce)
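# Example (hypothetical trigger value): close_frac = 0.5 drives every joint to
# the midpoint of its range, e.g. joint 1 targets 0.2 + (0.8 - 0.2) * 0.5 = 0.5.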
|
||||
|
||||
class GripperObj(InteractiveObj):
|
||||
"""
|
||||
Represents the gripper used for VR controllers
|
||||
"""
|
||||
|
||||
def __init__(self, scale=1):
|
||||
super().__init__(gripper_path)
|
||||
self.filename = gripper_path
|
||||
self.scale = scale
|
||||
self.max_joint = 0.550569
|
||||
|
||||
def _load(self):
|
||||
self.body_id = super()._load()
|
||||
jointPositions = [0.550569, 0.000000, 0.549657, 0.000000]
|
||||
for jointIndex in range(p.getNumJoints(self.body_id)):
|
||||
joint_info = p.getJointInfo(self.body_id, jointIndex)
|
||||
print("Joint name %s and index %d" % (joint_info[1], joint_info[0]))
|
||||
p.resetJointState(self.body_id, jointIndex, jointPositions[jointIndex])
|
||||
p.setJointMotorControl2(self.body_id, jointIndex, p.POSITION_CONTROL, targetPosition=0, force=0)
|
||||
|
||||
self.cid = p.createConstraint(self.body_id, -1, -1, -1, p.JOINT_FIXED, [0, 0, 0], [0.2, 0, 0], [0.500000, 0.300006, 0.700000])
|
||||
|
||||
return self.body_id
|
||||
|
||||
def set_close_fraction(self, close_fraction):
|
||||
if close_fraction < 0.0 or close_fraction > 1.0:
|
||||
print("Can't set a close_fraction outside the range 0.0 to 1.0!")
|
||||
return
|
||||
|
||||
p.setJointMotorControl2(self.body_id,
|
||||
0,
|
||||
controlMode=p.POSITION_CONTROL,
|
||||
targetPosition=self.max_joint * (1 - close_fraction),
|
||||
force=1.0)
|
||||
p.setJointMotorControl2(self.body_id,
|
||||
2,
|
||||
controlMode=p.POSITION_CONTROL,
|
||||
targetPosition=self.max_joint * (1 - close_fraction),
|
||||
force=1.1)
|
||||
|
||||
def move_gripper(self, trans, rot, maxForce=500):
|
||||
p.changeConstraint(self.cid, trans, rot, maxForce=maxForce)
|
||||
|
||||
class SoftObject(Object):
|
||||
def __init__(self, filename, basePosition=[0, 0, 0], baseOrientation=[0, 0, 0, 1], scale=-1, mass=-1,
|
||||
collisionMargin=-1, useMassSpring=0, useBendingSprings=0, useNeoHookean=0, springElasticStiffness=1,
|
||||
springDampingStiffness=0.1, springBendingStiffness=0.1, NeoHookeanMu=1, NeoHookeanLambda=1,
|
||||
NeoHookeanDamping=0.1, frictionCoeff=0, useFaceContact=0, useSelfCollision=0):
|
||||
super(SoftObject, self).__init__()
|
||||
self.filename = filename
|
||||
self.scale = scale
|
||||
self.basePosition = basePosition
|
||||
self.baseOrientation = baseOrientation
|
||||
self.mass = mass
|
||||
self.collisionMargin = collisionMargin
|
||||
self.useMassSpring = useMassSpring
|
||||
self.useBendingSprings = useBendingSprings
|
||||
self.useNeoHookean = useNeoHookean
|
||||
self.springElasticStiffness = springElasticStiffness
|
||||
self.springDampingStiffness = springDampingStiffness
|
||||
self.springBendingStiffness = springBendingStiffness
|
||||
self.NeoHookeanMu = NeoHookeanMu
|
||||
self.NeoHookeanLambda = NeoHookeanLambda
|
||||
self.NeoHookeanDamping = NeoHookeanDamping
|
||||
self.frictionCoeff = frictionCoeff
|
||||
self.useFaceContact = useFaceContact
|
||||
self.useSelfCollision = useSelfCollision
|
||||
|
||||
def _load(self):
|
||||
body_id = p.loadSoftBody(self.filename, scale=self.scale, basePosition=self.basePosition,
|
||||
baseOrientation=self.baseOrientation, mass=self.mass,
|
||||
collisionMargin=self.collisionMargin, useMassSpring=self.useMassSpring,
|
||||
useBendingSprings=self.useBendingSprings, useNeoHookean=self.useNeoHookean,
|
||||
springElasticStiffness=self.springElasticStiffness,
|
||||
springDampingStiffness=self.springDampingStiffness,
|
||||
springBendingStiffness=self.springBendingStiffness,
|
||||
NeoHookeanMu=self.NeoHookeanMu, NeoHookeanLambda=self.NeoHookeanLambda,
|
||||
NeoHookeanDamping=self.NeoHookeanDamping, frictionCoeff=self.frictionCoeff,
|
||||
useFaceContact=self.useFaceContact, useSelfCollision=self.useSelfCollision)
|
||||
|
||||
# Set signed distance function voxel size (integrate to Simulator class?)
|
||||
p.setPhysicsEngineParameter(sparseSdfVoxelSize=0.1)
|
||||
|
||||
return body_id
|
||||
|
||||
def addAnchor(self, nodeIndex=-1, bodyUniqueId=-1, linkIndex=-1, bodyFramePosition=[0, 0, 0], physicsClientId=0):
|
||||
p.createSoftBodyAnchor(self.body_id, nodeIndex, bodyUniqueId,
|
||||
linkIndex, bodyFramePosition, physicsClientId)
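# Minimal usage sketch (hypothetical file name and node index):
# cloth = SoftObject('cloth.obj', mass=0.5, useMassSpring=1, useBendingSprings=1)
# cloth.load()
# cloth.addAnchor(nodeIndex=0)  # bodyUniqueId=-1 pins vertex 0 to the world frame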
|
||||
|
||||
|
||||
class RBOObject(InteractiveObj):
|
||||
def __init__(self, name, scale=1):
|
||||
filename = os.path.join(gibson2.assets_path, 'models', 'rbo', name, 'configuration',
|
||||
'{}.urdf'.format(name))
|
||||
super(RBOObject, self).__init__(filename, scale)
|
|
@ -0,0 +1,721 @@
|
|||
from gibson2.core.physics.robot_bases import BaseRobot
|
||||
from gibson2.utils.utils import rotate_vector_3d
|
||||
import numpy as np
|
||||
import pybullet as p
|
||||
import os
|
||||
import gym, gym.spaces
|
||||
from transforms3d.euler import euler2quat, euler2mat
|
||||
from transforms3d.quaternions import quat2mat, qmult
|
||||
import transforms3d.quaternions as quat
|
||||
import sys
|
||||
from gibson2.external.pybullet_tools.utils import set_base_values, joint_from_name, set_joint_position, \
|
||||
set_joint_positions, add_data_path, connect, plan_base_motion, plan_joint_motion, enable_gravity, \
|
||||
joint_controller, dump_body, load_model, joints_from_names, user_input, disconnect, get_joint_positions, \
|
||||
get_link_pose, link_from_name, HideOutput, get_pose, wait_for_user, dump_world, plan_nonholonomic_motion, \
|
||||
set_point, create_box, stable_z, control_joints, get_max_limits, get_min_limits, get_base_values
|
||||
|
||||
class LocomotorRobot(BaseRobot):
|
||||
""" Built on top of BaseRobot
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
filename, # robot file name
|
||||
action_dim, # action dimension
|
||||
base_name=None,
|
||||
scale=1.0,
|
||||
control='torque',
|
||||
is_discrete=True,
|
||||
torque_coef=1.0,
|
||||
velocity_coef=1.0,
|
||||
self_collision=False
|
||||
):
|
||||
BaseRobot.__init__(self, filename, base_name, scale, self_collision)
|
||||
self.control = control
|
||||
self.is_discrete = is_discrete
|
||||
|
||||
assert type(action_dim) == int, "Action dimension must be int, got {}".format(type(action_dim))
|
||||
self.action_dim = action_dim
|
||||
|
||||
if self.is_discrete:
|
||||
self.set_up_discrete_action_space()
|
||||
else:
|
||||
self.set_up_continuous_action_space()
|
||||
|
||||
self.torque_coef = torque_coef
|
||||
self.velocity_coef = velocity_coef
|
||||
self.scale = scale
|
||||
|
||||
def set_up_continuous_action_space(self):
|
||||
"""
|
||||
Each subclass implements their own continuous action spaces
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def set_up_discrete_action_space(self):
|
||||
"""
|
||||
Each subclass implements their own discrete action spaces
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def robot_specific_reset(self):
|
||||
for j in self.ordered_joints:
|
||||
j.reset_joint_state(0.0, 0.0)
|
||||
|
||||
def get_position(self):
|
||||
'''Get current robot position
|
||||
'''
|
||||
return self.robot_body.get_position()
|
||||
|
||||
def get_orientation(self):
|
||||
'''Return robot orientation
|
||||
:return: quaternion in xyzw
|
||||
'''
|
||||
return self.robot_body.get_orientation()
|
||||
|
||||
def get_rpy(self):
|
||||
return self.robot_body.get_rpy()
|
||||
|
||||
def set_position(self, pos):
|
||||
self.robot_body.set_position(pos)
|
||||
|
||||
def set_orientation(self, orn):
|
||||
self.robot_body.set_orientation(orn)
|
||||
|
||||
def set_position_orientation(self, pos, orn):
|
||||
self.robot_body.set_pose(pos, orn)
|
||||
|
||||
def get_linear_velocity(self):
|
||||
return self.robot_body.get_linear_velocity()
|
||||
|
||||
def get_angular_velocity(self):
|
||||
return self.robot_body.get_angular_velocity()
|
||||
|
||||
def move_by(self, delta):
|
||||
new_pos = np.array(delta) + self.get_position()
|
||||
self.robot_body.reset_position(new_pos)
|
||||
|
||||
def move_forward(self, forward=0.05):
|
||||
x, y, z, w = self.robot_body.get_orientation()
|
||||
self.move_by(quat2mat([w, x, y, z]).dot(np.array([forward, 0, 0])))
|
||||
|
||||
def move_backward(self, backward=0.05):
|
||||
x, y, z, w = self.robot_body.get_orientation()
|
||||
self.move_by(quat2mat([w, x, y, z]).dot(np.array([-backward, 0, 0])))
|
||||
|
||||
def turn_left(self, delta=0.03):
|
||||
orn = self.robot_body.get_orientation()
|
||||
new_orn = qmult((euler2quat(-delta, 0, 0)), orn)
|
||||
self.robot_body.set_orientation(new_orn)
|
||||
|
||||
def turn_right(self, delta=0.03):
|
||||
orn = self.robot_body.get_orientation()
|
||||
new_orn = qmult((euler2quat(delta, 0, 0)), orn)
|
||||
self.robot_body.set_orientation(new_orn)
|
||||
|
||||
def keep_still(self):
|
||||
for n, j in enumerate(self.ordered_joints):
|
||||
j.set_motor_velocity(0.0)
|
||||
|
||||
def apply_robot_action(self, action):
|
||||
if self.control == 'torque':
|
||||
for n, j in enumerate(self.ordered_joints):
|
||||
j.set_motor_torque(self.torque_coef * j.max_torque * float(np.clip(action[n], -1, +1)))
|
||||
elif self.control == 'velocity':
|
||||
for n, j in enumerate(self.ordered_joints):
|
||||
j.set_motor_velocity(self.velocity_coef * j.max_velocity * float(np.clip(action[n], -1, +1)))
|
||||
elif self.control == 'position':
|
||||
for n, j in enumerate(self.ordered_joints):
|
||||
j.set_motor_position(action[n])
|
||||
elif self.control == 'differential_drive':
|
||||
# assume self.ordered_joints = [left_wheel, right_wheel]
|
||||
assert action.shape[0] == 2 and len(self.ordered_joints) == 2, 'differential drive requires the first two joints to be two wheels'
|
||||
lin_vel, ang_vel = action
|
||||
if not hasattr(self, 'wheel_axle_half') or not hasattr(self, 'wheel_radius'):
|
||||
raise Exception('Trying to use differential drive, but wheel_axle_half and wheel_radius are not specified.')
|
||||
left_wheel_ang_vel = (lin_vel - ang_vel * self.wheel_axle_half) / self.wheel_radius
|
||||
right_wheel_ang_vel = (lin_vel + ang_vel * self.wheel_axle_half) / self.wheel_radius
|
||||
self.ordered_joints[0].set_motor_velocity(left_wheel_ang_vel)
|
||||
self.ordered_joints[1].set_motor_velocity(right_wheel_ang_vel)
|
||||
elif type(self.control) is list or type(self.control) is tuple:
|
||||
# if control is a list or tuple, set a different control type for each joint
|
||||
|
||||
if 'differential_drive' in self.control:
|
||||
# assume self.ordered_joints = [left_wheel, right_wheel, joint_1, joint_2, ...]
|
||||
assert action.shape[0] >= 2 and len(self.ordered_joints) >= 2, 'differential drive requires the first two joints to be two wheels'
|
||||
assert self.control[0] == self.control[1] == 'differential_drive', 'differential drive requires the first two joints to be two wheels'
|
||||
lin_vel, ang_vel = action[:2]
|
||||
if not hasattr(self, 'wheel_axle_half') or not hasattr(self, 'wheel_radius'):
|
||||
raise Exception('Trying to use differential drive, but wheel_axle_half and wheel_radius are not specified.')
|
||||
left_wheel_ang_vel = (lin_vel - ang_vel * self.wheel_axle_half) / self.wheel_radius
|
||||
right_wheel_ang_vel = (lin_vel + ang_vel * self.wheel_axle_half) / self.wheel_radius
|
||||
self.ordered_joints[0].set_motor_velocity(left_wheel_ang_vel)
|
||||
self.ordered_joints[1].set_motor_velocity(right_wheel_ang_vel)
|
||||
|
||||
for n, j in enumerate(self.ordered_joints):
|
||||
if self.control[n] == 'torque':
|
||||
j.set_motor_torque(self.torque_coef * j.max_torque * float(np.clip(action[n], -1, +1)))
|
||||
elif self.control[n] == 'velocity':
|
||||
j.set_motor_velocity(self.velocity_coef * j.max_velocity * float(np.clip(action[n], -1, +1)))
|
||||
elif self.control[n] == 'position':
|
||||
j.set_motor_position(action[n])
|
||||
else:
|
||||
raise Exception('unknown control type: {}'.format(self.control))
|
||||
|
||||
def policy_action_to_robot_action(self, action):
|
||||
if self.is_discrete:
|
||||
if isinstance(action, (list, np.ndarray)):
|
||||
assert len(action) == 1 and isinstance(action[0], (np.int64, int)), \
|
||||
"discrete action has incorrect format"
|
||||
action = action[0]
|
||||
real_action = self.action_list[action]
|
||||
else:
|
||||
# self.action_space should always be [-1, 1] for policy training
|
||||
action = np.clip(action, self.action_space.low, self.action_space.high)
|
||||
|
||||
# de-normalize action to the appropriate, robot-specific scale
|
||||
real_action = (self.action_high - self.action_low) / 2.0 * action + \
|
||||
(self.action_high + self.action_low) / 2.0
|
||||
return real_action
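# Worked example (hypothetical bounds): with action_low = -1.5 and
# action_high = 1.5, a normalized action of 0.5 de-normalizes to
# (1.5 - (-1.5)) / 2 * 0.5 + (1.5 + (-1.5)) / 2 = 0.75.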
|
||||
|
||||
def apply_action(self, action):
|
||||
real_action = self.policy_action_to_robot_action(action)
|
||||
self.apply_robot_action(real_action)
|
||||
|
||||
def calc_state(self):
|
||||
j = np.array([j.get_joint_relative_state() for j in self.ordered_joints]).astype(np.float32).flatten()
|
||||
self.joint_position = j[0::3]
|
||||
self.joint_velocity = j[1::3]
|
||||
self.joint_torque = j[2::3]
|
||||
self.joint_at_limit = np.count_nonzero(np.abs(self.joint_position) > 0.99)
|
||||
|
||||
pos = self.get_position()
|
||||
rpy = self.get_rpy()
|
||||
|
||||
# rotate linear and angular velocities to local frame
|
||||
lin_vel = rotate_vector_3d(self.get_linear_velocity(), *rpy)
|
||||
ang_vel = rotate_vector_3d(self.get_angular_velocity(), *rpy)
|
||||
|
||||
state = np.concatenate([pos, rpy, lin_vel, ang_vel, j])
|
||||
return state
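# The resulting layout is [pos(3), rpy(3), local lin_vel(3), local ang_vel(3)]
# followed by each joint's (position, velocity, torque) triple from
# get_joint_relative_state().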
|
||||
|
||||
|
||||
class Ant(LocomotorRobot):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.torque = config.get("torque", 1.0)
|
||||
LocomotorRobot.__init__(
|
||||
self,
|
||||
"ant/ant.xml",
|
||||
action_dim=8,
|
||||
torque_coef=2.5,
|
||||
scale=config.get("robot_scale", 1.0),
|
||||
is_discrete=config.get("is_discrete", False),
|
||||
control="torque",
|
||||
)
|
||||
|
||||
def set_up_continuous_action_space(self):
|
||||
self.action_space = gym.spaces.Box(shape=(self.action_dim,),
|
||||
low=-1.0,
|
||||
high=1.0,
|
||||
dtype=np.float32)
|
||||
self.action_high = self.torque * np.ones([self.action_dim])
|
||||
self.action_low = -self.action_high
|
||||
|
||||
def set_up_discrete_action_space(self):
|
||||
assert False, "Ant does not support discrete actions"
|
||||
|
||||
|
||||
class Humanoid(LocomotorRobot):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.torque = config.get("torque", 0.1)
|
||||
self.glass_id = None
|
||||
self.glass_offset = 0.3
|
||||
LocomotorRobot.__init__(
|
||||
self,
|
||||
"humanoid/humanoid.xml",
|
||||
action_dim=17,
|
||||
torque_coef=0.41,
|
||||
scale=config.get("robot_scale", 1.0),
|
||||
is_discrete=config.get("is_discrete", False),
|
||||
control="torque",
|
||||
self_collision=True,
|
||||
)
|
||||
|
||||
def set_up_continuous_action_space(self):
|
||||
self.action_space = gym.spaces.Box(shape=(self.action_dim,),
|
||||
low=-1.0,
|
||||
high=1.0,
|
||||
dtype=np.float32)
|
||||
self.action_high = self.torque * np.ones([self.action_dim])
|
||||
self.action_low = -self.action_high
|
||||
|
||||
def set_up_discrete_action_space(self):
|
||||
self.action_list = [[self.torque] * self.action_dim, [0.0] * self.action_dim]
|
||||
self.action_space = gym.spaces.Discrete(len(self.action_list))
|
||||
self.setup_keys_to_action()
|
||||
|
||||
def robot_specific_reset(self):
|
||||
humanoidId = -1
|
||||
numBodies = p.getNumBodies()
|
||||
for i in range(numBodies):
|
||||
bodyInfo = p.getBodyInfo(i)
|
||||
if bodyInfo[1].decode("ascii") == 'humanoid':
|
||||
humanoidId = i
|
||||
|
||||
## Spherical radiance/glass shield to protect the robot's camera
|
||||
super(Humanoid, self).robot_specific_reset()
|
||||
|
||||
if self.glass_id is None:
|
||||
glass_path = os.path.join(self.physics_model_dir, "humanoid/glass.xml")
|
||||
glass_id = p.loadMJCF(glass_path)[0]
|
||||
self.glass_id = glass_id
|
||||
p.changeVisualShape(self.glass_id, -1, rgbaColor=[0, 0, 0, 0])
|
||||
p.createMultiBody(baseVisualShapeIndex=glass_id, baseCollisionShapeIndex=-1)
|
||||
cid = p.createConstraint(humanoidId,
|
||||
-1,
|
||||
self.glass_id,
|
||||
-1,
|
||||
p.JOINT_FIXED,
|
||||
jointAxis=[0, 0, 0],
|
||||
parentFramePosition=[0, 0, self.glass_offset],
|
||||
childFramePosition=[0, 0, 0])
|
||||
|
||||
robot_pos = list(self.get_position())
|
||||
robot_pos[2] += self.glass_offset
|
||||
robot_orn = self.get_orientation()
|
||||
p.resetBasePositionAndOrientation(self.glass_id, robot_pos, robot_orn)
|
||||
|
||||
self.motor_names = ["abdomen_z", "abdomen_y", "abdomen_x"]
|
||||
self.motor_power = [100, 100, 100]
|
||||
self.motor_names += ["right_hip_x", "right_hip_z", "right_hip_y", "right_knee"]
|
||||
self.motor_power += [100, 100, 300, 200]
|
||||
self.motor_names += ["left_hip_x", "left_hip_z", "left_hip_y", "left_knee"]
|
||||
self.motor_power += [100, 100, 300, 200]
|
||||
self.motor_names += ["right_shoulder1", "right_shoulder2", "right_elbow"]
|
||||
self.motor_power += [75, 75, 75]
|
||||
self.motor_names += ["left_shoulder1", "left_shoulder2", "left_elbow"]
|
||||
self.motor_power += [75, 75, 75]
|
||||
self.motors = [self.jdict[n] for n in self.motor_names]
|
||||
|
||||
def apply_action(self, action):
|
||||
real_action = self.policy_action_to_robot_action(action)
|
||||
if self.is_discrete:
|
||||
self.apply_robot_action(real_action)
|
||||
else:
|
||||
for i, m, joint_torque_coef in zip(range(17), self.motors, self.motor_power):
|
||||
m.set_motor_torque(float(joint_torque_coef * self.torque_coef * real_action[i]))
|
||||
|
||||
def setup_keys_to_action(self):
|
||||
self.keys_to_action = {(ord('w'),): 0, (): 1}
|
||||
|
||||
|
||||
class Husky(LocomotorRobot):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.torque = config.get("torque", 0.03)
|
||||
LocomotorRobot.__init__(self,
|
||||
"husky/husky.urdf",
|
||||
action_dim=4,
|
||||
torque_coef=2.5,
|
||||
scale=config.get("robot_scale", 1.0),
|
||||
is_discrete=config.get("is_discrete", False),
|
||||
control="torque")
|
||||
|
||||
def set_up_continuous_action_space(self):
|
||||
self.action_space = gym.spaces.Box(shape=(self.action_dim,),
|
||||
low=-1.0,
|
||||
high=1.0,
|
||||
dtype=np.float32)
|
||||
self.action_high = self.torque * np.ones([self.action_dim])
|
||||
self.action_low = -self.action_high
|
||||
|
||||
def set_up_discrete_action_space(self):
|
||||
self.action_list = [[self.torque, self.torque, self.torque, self.torque],
|
||||
[-self.torque, -self.torque, -self.torque, -self.torque],
|
||||
[self.torque, -self.torque, self.torque, -self.torque],
|
||||
[-self.torque, self.torque, -self.torque, self.torque], [0, 0, 0, 0]]
|
||||
self.action_space = gym.spaces.Discrete(len(self.action_list))
|
||||
self.setup_keys_to_action()
|
||||
|
||||
def steering_cost(self, action):
|
||||
if not self.is_discrete:
|
||||
return 0
|
||||
if action == 2 or action == 3:
|
||||
return -0.1
|
||||
else:
|
||||
return 0
|
||||
|
||||
def alive_bonus(self, z, pitch):
|
||||
top_xyz = self.parts["top_bumper_link"].get_position()
|
||||
bottom_xyz = self.parts["base_link"].get_position()
|
||||
alive = top_xyz[2] > bottom_xyz[2]
|
||||
return +1 if alive else -100 # 0.25 is central sphere rad, die if it scrapes the ground
|
||||
|
||||
def setup_keys_to_action(self):
|
||||
self.keys_to_action = {
|
||||
(ord('w'),): 0, ## forward
|
||||
(ord('s'),): 1, ## backward
|
||||
(ord('d'),): 2, ## turn right
|
||||
(ord('a'),): 3, ## turn left
|
||||
(): 4
|
||||
}
|
||||
|
||||
|
||||
class Quadrotor(LocomotorRobot):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.torque = config.get("torque", 0.02)
|
||||
LocomotorRobot.__init__(self,
|
||||
"quadrotor/quadrotor.urdf",
|
||||
action_dim=6,
|
||||
torque_coef=2.5,
|
||||
scale=config.get("robot_scale", 1.0),
|
||||
is_discrete=config.get("is_discrete", False),
|
||||
control="torque")
|
||||
|
||||
def set_up_continuous_action_space(self):
|
||||
self.action_space = gym.spaces.Box(shape=(self.action_dim,),
|
||||
low=-1.0,
|
||||
high=1.0,
|
||||
dtype=np.float32)
|
||||
self.action_high = self.torque * np.ones([self.action_dim])
|
||||
self.action_low = -self.action_high
|
||||
|
||||
def set_up_discrete_action_space(self):
|
||||
self.action_list = [[self.torque, 0, 0, 0, 0, 0], [-self.torque, 0, 0, 0, 0, 0],
|
||||
[0, self.torque, 0, 0, 0, 0], [0, -self.torque, 0, 0, 0, 0],
|
||||
[0, 0, self.torque, 0, 0, 0], [0, 0, -self.torque, 0, 0, 0],
|
||||
[0, 0, 0, 0, 0, 0]]
|
||||
self.action_space = gym.spaces.Discrete(len(self.action_list))
|
||||
self.setup_keys_to_action()
|
||||
|
||||
def apply_action(self, action):
|
||||
real_action = self.policy_action_to_robot_action(action)
|
||||
p.setGravity(0, 0, 0)
|
||||
p.resetBaseVelocity(self.robot_ids[0], real_action[:3], real_action[3:])
|
||||
|
||||
def setup_keys_to_action(self):
|
||||
self.keys_to_action = {
|
||||
(ord('w'),): 0, ## +x
|
||||
(ord('s'),): 1, ## -x
|
||||
(ord('d'),): 2, ## +y
|
||||
(ord('a'),): 3, ## -y
|
||||
(ord('z'),): 4, ## +z
|
||||
(ord('x'),): 5, ## -z
|
||||
(): 6
|
||||
}
|
||||
|
||||
|
||||
class Turtlebot(LocomotorRobot):
    def __init__(self, config):
        self.config = config
        self.velocity = config.get("velocity", 1.0)
        LocomotorRobot.__init__(self,
                                "turtlebot/turtlebot.urdf",
                                action_dim=2,
                                scale=config.get("robot_scale", 1.0),
                                is_discrete=config.get("is_discrete", False),
                                control="velocity")

    def set_up_continuous_action_space(self):
        self.action_space = gym.spaces.Box(shape=(self.action_dim,),
                                           low=-1.0,
                                           high=1.0,
                                           dtype=np.float32)
        self.action_high = np.full(shape=self.action_dim, fill_value=self.velocity)
        self.action_low = -self.action_high

    def set_up_discrete_action_space(self):
        self.action_list = [[self.velocity, self.velocity], [-self.velocity, -self.velocity],
                            [self.velocity * 0.5, -self.velocity * 0.5],
                            [-self.velocity * 0.5, self.velocity * 0.5], [0, 0]]
        self.action_space = gym.spaces.Discrete(len(self.action_list))
        self.setup_keys_to_action()

    def setup_keys_to_action(self):
        self.keys_to_action = {
            (ord('w'),): 0,  # forward
            (ord('s'),): 1,  # backward
            (ord('d'),): 2,  # turn right
            (ord('a'),): 3,  # turn left
            (): 4  # stay still
        }


class Freight(LocomotorRobot):
    def __init__(self, config):
        self.config = config
        self.velocity = config.get("velocity", 1.0)
        LocomotorRobot.__init__(self,
                                "fetch/freight.urdf",
                                action_dim=2,
                                scale=config.get("robot_scale", 1.0),
                                is_discrete=config.get("is_discrete", False),
                                control="velocity")

    def set_up_continuous_action_space(self):
        self.action_space = gym.spaces.Box(shape=(self.action_dim,),
                                           low=-1.0,
                                           high=1.0,
                                           dtype=np.float32)
        self.action_high = self.velocity * np.ones([self.action_dim])
        self.action_low = -self.action_high

    def set_up_discrete_action_space(self):
        self.action_list = [[self.velocity, self.velocity], [-self.velocity, -self.velocity],
                            [self.velocity * 0.5, -self.velocity * 0.5],
                            [-self.velocity * 0.5, self.velocity * 0.5], [0, 0]]
        self.action_space = gym.spaces.Discrete(len(self.action_list))
        self.setup_keys_to_action()

    def setup_keys_to_action(self):
        self.keys_to_action = {
            (ord('w'),): 0,  # forward
            (ord('s'),): 1,  # backward
            (ord('d'),): 2,  # turn right
            (ord('a'),): 3,  # turn left
            (): 4  # stay still
        }

class Fetch(LocomotorRobot):
    def __init__(self, config, vr_mode=False):
        self.config = config
        self.wheel_velocity = config.get('wheel_velocity', 1.0)
        self.torso_lift_velocity = config.get('torso_lift_velocity', 1.0)
        self.arm_velocity = config.get('arm_velocity', 1.0)
        self.wheel_dim = 2
        self.torso_lift_dim = 1
        self.arm_dim = 7
        self.urdf_filename = "fetch/fetch.urdf" if not vr_mode else "fetch/fetch_vr.urdf"
        LocomotorRobot.__init__(self,
                                self.urdf_filename,
                                action_dim=self.wheel_dim + self.torso_lift_dim + self.arm_dim,
                                scale=config.get("robot_scale", 1.0),
                                is_discrete=config.get("is_discrete", False),
                                control="velocity",
                                self_collision=True)

    def set_up_continuous_action_space(self):
        self.action_high = np.array([self.wheel_velocity] * self.wheel_dim +
                                    [self.torso_lift_velocity] * self.torso_lift_dim +
                                    [self.arm_velocity] * self.arm_dim)
        self.action_low = -self.action_high
        self.action_space = gym.spaces.Box(shape=(self.action_dim,),
                                           low=-1.0,
                                           high=1.0,
                                           dtype=np.float32)

    def set_up_discrete_action_space(self):
        assert False, "Fetch does not support discrete actions"

    def robot_specific_reset(self):
        super(Fetch, self).robot_specific_reset()

        # roll the arm to its body
        robot_id = self.robot_ids[0]
        arm_joints = joints_from_names(robot_id,
                                       [
                                           'torso_lift_joint',
                                           'shoulder_pan_joint',
                                           'shoulder_lift_joint',
                                           'upperarm_roll_joint',
                                           'elbow_flex_joint',
                                           'forearm_roll_joint',
                                           'wrist_flex_joint',
                                           'wrist_roll_joint'
                                       ])

        rest_position = (0.02, np.pi / 2.0 - 0.4, np.pi / 2.0 - 0.1, -0.4, np.pi / 2.0 + 0.1, 0.0, np.pi / 2.0, 0.0)
        # might be a better pose to initiate manipulation
        # rest_position = (0.30322468280792236, -1.414019864768982,
        #                  1.5178184935241699, 0.8189625336474915,
        #                  2.200358942909668, 2.9631312579803466,
        #                  -1.2862852996643066, 0.0008453550418615341)

        set_joint_positions(robot_id, arm_joints, rest_position)

    def get_end_effector_position(self):
        return self.parts['gripper_link'].get_position()

    # Return body id of fetch robot
    def get_fetch_body_id(self):
        return self.robot_body.bodies[self.robot_body.body_index]

    # Set open/close fraction of the end grippers
    def set_fetch_gripper_fraction(self, frac, maxForce=500):
        min_joint = 0.0
        max_joint = 0.05
        right_finger_joint_idx = 20
        left_finger_joint_idx = 21
        # TODO: Set more friction on grippers using p.changeDynamics?
        # min_joint + frac * (max_joint - min_joint)
        target_pos = 0.05
        p.setJointMotorControl2(self.get_fetch_body_id(),
                                right_finger_joint_idx,
                                p.POSITION_CONTROL,
                                targetPosition=target_pos,
                                force=maxForce)

        p.setJointMotorControl2(self.get_fetch_body_id(),
                                left_finger_joint_idx,
                                p.POSITION_CONTROL,
                                targetPosition=target_pos,
                                force=maxForce)

    def load(self):
        ids = super(Fetch, self).load()
        robot_id = self.robot_ids[0]

        # disable collision between torso_lift_joint and shoulder_lift_joint
        # between torso_lift_joint and torso_fixed_joint
        # between caster_wheel_joint and estop_joint
        # between caster_wheel_joint and laser_joint
        # between caster_wheel_joint and torso_fixed_joint
        # between caster_wheel_joint and l_wheel_joint
        # between caster_wheel_joint and r_wheel_joint
        p.setCollisionFilterPair(robot_id, robot_id, 3, 13, 0)
        p.setCollisionFilterPair(robot_id, robot_id, 3, 22, 0)
        p.setCollisionFilterPair(robot_id, robot_id, 0, 20, 0)
        p.setCollisionFilterPair(robot_id, robot_id, 0, 21, 0)
        p.setCollisionFilterPair(robot_id, robot_id, 0, 22, 0)
        p.setCollisionFilterPair(robot_id, robot_id, 0, 1, 0)
        p.setCollisionFilterPair(robot_id, robot_id, 0, 2, 0)

        return ids

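# --- Editor's note (illustrative, not part of this commit) ---
# The commented-out expression in set_fetch_gripper_fraction() shows the
# intended mapping from open/close fraction to finger joint target; once
# wired up it would read:
#
#   min_joint, max_joint = 0.0, 0.05      # finger joint limits used above
#   frac = 0.5                            # hypothetical half-open command
#   target_pos = min_joint + frac * (max_joint - min_joint)   # -> 0.025 m
#
# As committed, target_pos is pinned to 0.05 (fully open) regardless of frac.
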

class JR2(LocomotorRobot):
    def __init__(self, config):
        self.config = config
        self.velocity = config.get('velocity', 1.0)
        LocomotorRobot.__init__(self,
                                "jr2_urdf/jr2.urdf",
                                action_dim=4,
                                scale=config.get("robot_scale", 1.0),
                                is_discrete=config.get("is_discrete", True),
                                control='velocity')

    def set_up_continuous_action_space(self):
        self.action_space = gym.spaces.Box(shape=(self.action_dim,),
                                           low=-1.0,
                                           high=1.0,
                                           dtype=np.float32)
        self.action_high = self.velocity * np.ones([self.action_dim])
        self.action_low = -self.action_high

    def set_up_discrete_action_space(self):
        self.action_list = [[self.velocity, self.velocity, 0, self.velocity],
                            [-self.velocity, -self.velocity, 0, -self.velocity],
                            [self.velocity, -self.velocity, -self.velocity, 0],
                            [-self.velocity, self.velocity, self.velocity, 0], [0, 0, 0, 0]]
        self.action_space = gym.spaces.Discrete(len(self.action_list))
        self.setup_keys_to_action()

    def setup_keys_to_action(self):
        self.keys_to_action = {
            (ord('w'),): 0,  ## forward
            (ord('s'),): 1,  ## backward
            (ord('d'),): 2,  ## turn right
            (ord('a'),): 3,  ## turn left
            (): 4
        }


class JR2_Kinova(LocomotorRobot):
    def __init__(self, config):
        self.config = config
        self.wheel_velocity = config.get('wheel_velocity', 0.3)
        self.wheel_dim = 2
        self.arm_velocity = config.get('arm_velocity', 1.0)
        self.arm_dim = 5

        LocomotorRobot.__init__(self,
                                "jr2_urdf/jr2_kinova.urdf",
                                action_dim=self.wheel_dim + self.arm_dim,
                                scale=config.get("robot_scale", 1.0),
                                is_discrete=config.get("is_discrete", False),
                                control='velocity',
                                self_collision=True)

    def set_up_continuous_action_space(self):
        self.action_high = np.array([self.wheel_velocity] * self.wheel_dim + [self.arm_velocity] * self.arm_dim)
        self.action_low = -self.action_high
        self.action_space = gym.spaces.Box(shape=(self.wheel_dim + self.arm_dim,),
                                           low=-1.0,
                                           high=1.0,
                                           dtype=np.float32)

    def set_up_discrete_action_space(self):
        assert False, "JR2_Kinova does not support discrete actions"

    def get_end_effector_position(self):
        return self.parts['m1n6s200_end_effector'].get_position()

    # initialize JR's arm to almost the same height as the door handle to ease exploration
    def robot_specific_reset(self):
        super(JR2_Kinova, self).robot_specific_reset()
        self.ordered_joints[2].reset_joint_state(-np.pi / 2.0, 0.0)
        self.ordered_joints[3].reset_joint_state(np.pi / 2.0, 0.0)
        self.ordered_joints[4].reset_joint_state(np.pi / 2.0, 0.0)
        self.ordered_joints[5].reset_joint_state(np.pi / 2.0, 0.0)
        self.ordered_joints[6].reset_joint_state(0.0, 0.0)

    def load(self):
        ids = super(JR2_Kinova, self).load()
        robot_id = self.robot_ids[0]

        # disable collision between base_chassis_joint and pan_joint
        # between base_chassis_joint and tilt_joint
        # between base_chassis_joint and camera_joint
        # between jr2_fixed_body_joint and pan_joint
        # between jr2_fixed_body_joint and tilt_joint
        # between jr2_fixed_body_joint and camera_joint
        p.setCollisionFilterPair(robot_id, robot_id, 0, 17, 0)
        p.setCollisionFilterPair(robot_id, robot_id, 0, 18, 0)
        p.setCollisionFilterPair(robot_id, robot_id, 0, 19, 0)
        p.setCollisionFilterPair(robot_id, robot_id, 1, 17, 0)
        p.setCollisionFilterPair(robot_id, robot_id, 1, 18, 0)
        p.setCollisionFilterPair(robot_id, robot_id, 1, 19, 0)

        return ids

class Locobot(LocomotorRobot):
    def __init__(self, config):
        self.config = config
        # https://www.trossenrobotics.com/locobot-pyrobot-ros-rover.aspx
        # Maximum translational velocity: 70 cm/s
        # Maximum rotational velocity: 180 deg/s (above 110 deg/s, gyro performance will degrade)
        self.linear_velocity = config.get('linear_velocity', 0.5)
        self.angular_velocity = config.get('angular_velocity', np.pi / 2.0)
        self.wheel_dim = 2
        self.wheel_axle_half = 0.115  # half of the distance between the wheels
        self.wheel_radius = 0.038  # radius of the wheels
        LocomotorRobot.__init__(self,
                                "locobot/locobot.urdf",
                                base_name="base_link",
                                action_dim=self.wheel_dim,
                                scale=config.get("robot_scale", 1.0),
                                is_discrete=config.get("is_discrete", False),
                                control="differential_drive")

    def set_up_continuous_action_space(self):
        self.action_high = np.zeros(self.wheel_dim)
        self.action_high[0] = self.linear_velocity
        self.action_high[1] = self.angular_velocity
        self.action_low = -self.action_high
        self.action_space = gym.spaces.Box(shape=(self.action_dim,),
                                           low=-1.0,
                                           high=1.0,
                                           dtype=np.float32)

    def set_up_discrete_action_space(self):
        assert False, "Locobot does not support discrete actions"

    def get_end_effector_position(self):
        return self.parts['gripper_link'].get_position()
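# --- Illustrative sketch (editor addition, not part of this commit) ---
# With control="differential_drive", a (linear, angular) velocity command
# can be converted to wheel angular velocities with the usual two-wheel
# kinematics and the geometry constants above:
#
#   lin_vel, ang_vel = 0.5, np.pi / 4     # hypothetical command (m/s, rad/s)
#   wheel_axle_half = 0.115               # m, from Locobot above
#   wheel_radius = 0.038                  # m
#   left_wheel_ang_vel = (lin_vel - ang_vel * wheel_axle_half) / wheel_radius
#   right_wheel_ang_vel = (lin_vel + ang_vel * wheel_axle_half) / wheel_radius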
|
|
@ -0,0 +1,616 @@
|
|||
import time
import math
import gibson2
import xml.etree.ElementTree as ET
import logging
import pickle
import networkx as nx
import cv2
from PIL import Image
import numpy as np
from gibson2.core.physics.interactive_objects import InteractiveObj, URDFObject
from gibson2.utils.utils import l2_distance, get_transform_from_xyz_rpy, quatXYZWFromRotMat, get_rpy_from_transform
from gibson2.utils.assets_utils import get_model_path, get_texture_file, get_ig_scene_path
import pybullet_data
import pybullet as p
import os
import inspect
from gibson2.utils.urdf_utils import save_urdfs_without_floating_joints
from gibson2.utils.utils import rotate_vector_2d
from gibson2.external.pybullet_tools.utils import get_center_extent
from IPython import embed
import json

currentdir = os.path.dirname(os.path.abspath(
    inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
os.sys.path.insert(0, parentdir)


class Scene:
    def __init__(self):
        self.is_interactive = False

    def load(self):
        raise NotImplementedError()


class EmptyScene(Scene):
    """
    An empty scene for debugging
    """

    def __init__(self):
        super().__init__()

    def load(self):
        self.build_graph = False
        self.is_interactive = False
        planeName = os.path.join(
            pybullet_data.getDataPath(), "mjcf/ground_plane.xml")
        self.ground = p.loadMJCF(planeName)[0]
        p.changeDynamics(self.ground, -1, lateralFriction=1)
        # white floor plane for visualization purposes if needed
        p.changeVisualShape(self.ground, -1, rgbaColor=[1, 1, 1, 1])
        return [self.ground]


class StadiumScene(Scene):
    """
    A simple stadium scene for debugging
    """

    def __init__(self):
        super().__init__()

    def load(self):
        self.build_graph = False
        self.is_interactive = False
        filename = os.path.join(
            pybullet_data.getDataPath(), "stadium_no_collision.sdf")
        self.stadium = p.loadSDF(filename)
        planeName = os.path.join(
            pybullet_data.getDataPath(), "mjcf/ground_plane.xml")
        self.ground = p.loadMJCF(planeName)[0]
        pos, orn = p.getBasePositionAndOrientation(self.ground)
        p.resetBasePositionAndOrientation(
            self.ground, [pos[0], pos[1], pos[2] - 0.005], orn)
        p.changeVisualShape(self.ground, -1, rgbaColor=[1, 1, 1, 0.5])
        return list(self.stadium) + [self.ground]

    def get_random_floor(self):
        return 0

    def get_random_point(self, random_height=False):
        return self.get_random_point_floor(0, random_height)

    def get_random_point_floor(self, floor, random_height=False):
        del floor
        return 0, np.array([
            np.random.uniform(-5, 5),
            np.random.uniform(-5, 5),
            np.random.uniform(0.4, 0.8) if random_height else 0.0
        ])

    def get_floor_height(self, floor):
        del floor
        return 0.0

    def get_shortest_path(self, floor, source_world, target_world, entire_path=False):
        logging.warning(
            'WARNING: trying to compute the shortest path in StadiumScene (assuming empty space)')
        shortest_path = np.stack((source_world, target_world))
        geodesic_distance = l2_distance(source_world, target_world)
        return shortest_path, geodesic_distance

    def reset_floor(self, floor=0, additional_elevation=0.05, height=None):
        return


class BuildingScene(Scene):
    """
    Gibson Environment building scenes
    """

    def __init__(self,
                 model_id,
                 trav_map_resolution=0.1,
                 trav_map_erosion=2,
                 build_graph=True,
                 is_interactive=False,
                 num_waypoints=10,
                 waypoint_resolution=0.2,
                 pybullet_load_texture=False,
                 ):
        """
        Load a building scene and compute traversability

        :param model_id: Scene id
        :param trav_map_resolution: traversability map resolution
        :param trav_map_erosion: erosion radius of traversability areas, should be robot footprint radius
        :param build_graph: build connectivity graph
        :param is_interactive: whether the scene is interactive. If so, we will replace the annotated objects with the corresponding CAD models and add floor planes with the original floor texture.
        :param num_waypoints: number of waypoints returned
        :param waypoint_resolution: resolution of adjacent waypoints
        :param pybullet_load_texture: whether to load texture into pybullet. This is for debugging purposes only and does not affect what the robots see
        """
        super().__init__()
        logging.info("Building scene: {}".format(model_id))
        self.model_id = model_id
        self.trav_map_default_resolution = 0.01  # each pixel represents 0.01m
        self.trav_map_resolution = trav_map_resolution
        self.trav_map_original_size = None
        self.trav_map_size = None
        self.trav_map_erosion = trav_map_erosion
        self.build_graph = build_graph
        self.is_interactive = is_interactive
        self.num_waypoints = num_waypoints
        self.waypoint_interval = int(waypoint_resolution / trav_map_resolution)
        self.mesh_body_id = None
        self.floor_body_ids = []
        self.pybullet_load_texture = pybullet_load_texture
        self.sleep = False

    def load_floor_metadata(self):
        """
        Load floor metadata
        """
        floor_height_path = os.path.join(
            get_model_path(self.model_id), 'floors.txt')
        if not os.path.isfile(floor_height_path):
            raise Exception(
                'floors.txt cannot be found in model: {}'.format(self.model_id))
        with open(floor_height_path, 'r') as f:
            self.floors = sorted(list(map(float, f.readlines())))
            logging.debug('Floors {}'.format(self.floors))

    def load_scene_mesh(self):
        """
        Load scene mesh
        """
        if self.is_interactive:
            filename = os.path.join(get_model_path(
                self.model_id), "mesh_z_up_cleaned.obj")
        else:
            filename = os.path.join(get_model_path(
                self.model_id), "mesh_z_up_downsampled.obj")
            if not os.path.isfile(filename):
                filename = os.path.join(get_model_path(
                    self.model_id), "mesh_z_up.obj")

        collision_id = p.createCollisionShape(p.GEOM_MESH,
                                              fileName=filename,
                                              flags=p.GEOM_FORCE_CONCAVE_TRIMESH)
        if self.pybullet_load_texture:
            visual_id = p.createVisualShape(p.GEOM_MESH,
                                            fileName=filename)
            texture_filename = get_texture_file(filename)
            if texture_filename is not None:
                texture_id = p.loadTexture(texture_filename)
            else:
                texture_id = -1
        else:
            visual_id = -1
            texture_id = -1

        self.mesh_body_id = p.createMultiBody(baseCollisionShapeIndex=collision_id,
                                              baseVisualShapeIndex=visual_id,
                                              useMaximalCoordinates=True)
        p.changeDynamics(self.mesh_body_id, -1, lateralFriction=1)

        if self.pybullet_load_texture:
            if texture_id != -1:
                p.changeVisualShape(self.mesh_body_id,
                                    -1,
                                    textureUniqueId=texture_id)

    def load_floor_planes(self):
        if self.is_interactive:
            for f in range(len(self.floors)):
                # load the floor plane with the original floor texture for each floor
                plane_name = os.path.join(get_model_path(
                    self.model_id), "plane_z_up_{}.obj".format(f))
                collision_id = p.createCollisionShape(p.GEOM_MESH,
                                                      fileName=plane_name)
                visual_id = p.createVisualShape(p.GEOM_MESH,
                                                fileName=plane_name)
                texture_filename = get_texture_file(plane_name)
                if texture_filename is not None:
                    texture_id = p.loadTexture(texture_filename)
                else:
                    texture_id = -1
                floor_body_id = p.createMultiBody(baseCollisionShapeIndex=collision_id,
                                                  baseVisualShapeIndex=visual_id,
                                                  useMaximalCoordinates=True)
                if texture_id != -1:
                    p.changeVisualShape(floor_body_id,
                                        -1,
                                        textureUniqueId=texture_id)
                floor_height = self.floors[f]
                p.resetBasePositionAndOrientation(floor_body_id,
                                                  posObj=[0, 0, floor_height],
                                                  ornObj=[0, 0, 0, 1])

                # Since both the floor plane and the scene mesh have mass 0 (static),
                # PyBullet seems to have already disabled collision between them.
                # Just to be safe, explicitly disable collision between them.
                p.setCollisionFilterPair(
                    self.mesh_body_id, floor_body_id, -1, -1, enableCollision=0)

                self.floor_body_ids.append(floor_body_id)
        else:
            # load the default floor plane (only once) and later reset it to different floor heights
            plane_name = os.path.join(
                pybullet_data.getDataPath(), "mjcf/ground_plane.xml")
            floor_body_id = p.loadMJCF(plane_name)[0]
            p.resetBasePositionAndOrientation(floor_body_id,
                                              posObj=[0, 0, 0],
                                              ornObj=[0, 0, 0, 1])
            p.setCollisionFilterPair(
                self.mesh_body_id, floor_body_id, -1, -1, enableCollision=0)
            self.floor_body_ids.append(floor_body_id)

    def load_trav_map(self):
        self.floor_map = []
        self.floor_graph = []
        for f in range(len(self.floors)):
            trav_map = np.array(Image.open(
                os.path.join(get_model_path(self.model_id),
                             'floor_trav_{}.png'.format(f))
            ))
            obstacle_map = np.array(Image.open(
                os.path.join(get_model_path(self.model_id),
                             'floor_{}.png'.format(f))
            ))
            if self.trav_map_original_size is None:
                height, width = trav_map.shape
                assert height == width, 'trav map is not a square'
                self.trav_map_original_size = height
                self.trav_map_size = int(self.trav_map_original_size *
                                         self.trav_map_default_resolution /
                                         self.trav_map_resolution)
            trav_map[obstacle_map == 0] = 0
            trav_map = cv2.resize(
                trav_map, (self.trav_map_size, self.trav_map_size))
            trav_map = cv2.erode(trav_map, np.ones(
                (self.trav_map_erosion, self.trav_map_erosion)))
            trav_map[trav_map < 255] = 0

            if self.build_graph:
                graph_file = os.path.join(get_model_path(
                    self.model_id), 'floor_trav_{}.p'.format(f))
                if os.path.isfile(graph_file):
                    logging.info("Loading traversable graph")
                    with open(graph_file, 'rb') as pfile:
                        g = pickle.load(pfile)
                else:
                    logging.info("Building traversable graph")
                    g = nx.Graph()
                    for i in range(self.trav_map_size):
                        for j in range(self.trav_map_size):
                            if trav_map[i, j] > 0:
                                g.add_node((i, j))
                                # 8-connected graph
                                neighbors = [
                                    (i - 1, j - 1), (i, j - 1), (i + 1, j - 1), (i - 1, j)]
                                for n in neighbors:
                                    if 0 <= n[0] < self.trav_map_size and 0 <= n[1] < self.trav_map_size and trav_map[n[0], n[1]] > 0:
                                        g.add_edge(
                                            n, (i, j), weight=l2_distance(n, (i, j)))

                    # only take the largest connected component
                    largest_cc = max(nx.connected_components(g), key=len)
                    g = g.subgraph(largest_cc).copy()
                    with open(graph_file, 'wb') as pfile:
                        pickle.dump(g, pfile, protocol=pickle.HIGHEST_PROTOCOL)

                self.floor_graph.append(g)
                # update trav_map accordingly
                trav_map[:, :] = 0
                for node in g.nodes:
                    trav_map[node[0], node[1]] = 255

            self.floor_map.append(trav_map)

    def load_scene_objects(self):
        if not self.is_interactive:
            return

        self.scene_objects = []
        self.scene_objects_pos = []
        scene_path = get_model_path(self.model_id)
        urdf_files = [item for item in os.listdir(
            scene_path) if item[-4:] == 'urdf' and item != 'scene.urdf']
        position_files = [
            item[:-4].replace('alignment_centered', 'pos') + 'txt' for item in urdf_files]
        for urdf_file, position_file in zip(urdf_files, position_files):
            logging.info('Loading urdf file {}'.format(urdf_file))
            with open(os.path.join(scene_path, position_file)) as f:
                pos = np.array([float(item)
                                for item in f.readlines()[0].strip().split()])
                obj = InteractiveObj(os.path.join(scene_path, urdf_file))
                obj.load()
                if self.sleep:
                    activationState = p.ACTIVATION_STATE_ENABLE_SLEEPING  # + p.ACTIVATION_STATE_SLEEP
                    p.changeDynamics(obj.body_id, -1, activationState=activationState)

                self.scene_objects.append(obj)
                self.scene_objects_pos.append(pos)

    def load_scene_urdf(self):
        self.mesh_body_id = p.loadURDF(os.path.join(
            get_model_path(self.model_id), 'scene.urdf'))

    def has_scene_urdf(self):
        return os.path.exists(os.path.join(get_model_path(self.model_id), 'scene.urdf'))

    def load(self):
        """
        Initialize scene
        """
        self.load_floor_metadata()
        if self.has_scene_urdf():
            self.load_scene_urdf()
        else:
            self.load_scene_mesh()
            # self.load_floor_planes()

        self.load_trav_map()
        self.load_scene_objects()
        self.reset_scene_objects()

        return [self.mesh_body_id] + self.floor_body_ids

    def get_random_floor(self):
        return np.random.randint(0, high=len(self.floors))

    def get_random_point(self, random_height=False):
        floor = self.get_random_floor()
        return self.get_random_point_floor(floor, random_height)

    def get_random_point_floor(self, floor, random_height=False):
        trav = self.floor_map[floor]
        trav_space = np.where(trav == 255)
        idx = np.random.randint(0, high=trav_space[0].shape[0])
        xy_map = np.array([trav_space[0][idx], trav_space[1][idx]])
        x, y = self.map_to_world(xy_map)
        z = self.floors[floor]
        if random_height:
            z += np.random.uniform(0.4, 0.8)
        return floor, np.array([x, y, z])

    def map_to_world(self, xy):
        axis = 0 if len(xy.shape) == 1 else 1
        return np.flip((xy - self.trav_map_size / 2.0) * self.trav_map_resolution, axis=axis)

    def world_to_map(self, xy):
        return np.flip((xy / self.trav_map_resolution + self.trav_map_size / 2.0)).astype(np.int)
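    # --- Worked example (editor addition, not part of this commit) ---
    # map_to_world()/world_to_map() shift by half the map size, scale by the
    # map resolution, and flip between (row, col) and (x, y). For instance,
    # with trav_map_size = 1000 and trav_map_resolution = 0.1:
    #
    #   xy_map = np.array([600, 450])               # (row, col) pixel
    #   xy_world = np.flip((xy_map - 500) * 0.1)    # -> [-5.0, 10.0], i.e. (x, y) in meters
    #   back = np.flip(xy_world / 0.1 + 500)        # -> [600, 450] again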
    def has_node(self, floor, world_xy):
        map_xy = tuple(self.world_to_map(world_xy))
        g = self.floor_graph[floor]
        return g.has_node(map_xy)

    def get_shortest_path(self, floor, source_world, target_world, entire_path=False):
        assert self.build_graph, 'cannot get shortest path without building the graph'
        source_map = tuple(self.world_to_map(source_world))
        target_map = tuple(self.world_to_map(target_world))

        g = self.floor_graph[floor]

        if not g.has_node(target_map):
            nodes = np.array(g.nodes)
            closest_node = tuple(
                nodes[np.argmin(np.linalg.norm(nodes - target_map, axis=1))])
            g.add_edge(closest_node, target_map,
                       weight=l2_distance(closest_node, target_map))

        if not g.has_node(source_map):
            nodes = np.array(g.nodes)
            closest_node = tuple(
                nodes[np.argmin(np.linalg.norm(nodes - source_map, axis=1))])
            g.add_edge(closest_node, source_map,
                       weight=l2_distance(closest_node, source_map))

        path_map = np.array(nx.astar_path(
            g, source_map, target_map, heuristic=l2_distance))

        path_world = self.map_to_world(path_map)
        geodesic_distance = np.sum(np.linalg.norm(
            path_world[1:] - path_world[:-1], axis=1))
        path_world = path_world[::self.waypoint_interval]

        if not entire_path:
            path_world = path_world[:self.num_waypoints]
            num_remaining_waypoints = self.num_waypoints - path_world.shape[0]
            if num_remaining_waypoints > 0:
                remaining_waypoints = np.tile(
                    target_world, (num_remaining_waypoints, 1))
                path_world = np.concatenate(
                    (path_world, remaining_waypoints), axis=0)

        return path_world, geodesic_distance
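    # --- Usage sketch (editor addition, not part of this commit) ---
    #
    #   scene = BuildingScene('Rs', build_graph=True)   # 'Rs' is a hypothetical model id
    #   scene.load()
    #   path, dist = scene.get_shortest_path(
    #       floor=0,
    #       source_world=np.array([0.0, 0.0]),
    #       target_world=np.array([2.0, 1.0]))
    #   # path: (num_waypoints, 2) array of (x, y) waypoints, padded with the
    #   # target if the path is short; dist: geodesic length in meters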
    def reset_floor(self, floor=0, additional_elevation=0.02, height=None):
        if self.is_interactive:
            # a floor plane with the original floor texture is loaded for each floor; no need to reset_floor
            return

        height = height if height is not None else self.floors[floor] + \
            additional_elevation
        p.resetBasePositionAndOrientation(self.floor_body_ids[0],
                                          posObj=[0, 0, height],
                                          ornObj=[0, 0, 0, 1])

    def reset_scene_objects(self):
        if not self.is_interactive:
            # does not have objects in the scene
            return

        for obj, pos in zip(self.scene_objects, self.scene_objects_pos):
            obj.set_position_orientation(pos,
                                         [0, 0, 0, 1])

    def get_floor_height(self, floor):
        return self.floors[floor]


class iGSDFScene(Scene):
    """
    Create a scene defined with the iGibson Scene Description Format (iGSDF).

    iGSDF is an extension of URDF that we use to define an interactive scene. It has support for URDF scaling,
    URDF nesting and randomization.
    """

    def __init__(self, scene_name):
        super().__init__()
        self.scene_file = get_ig_scene_path(
            scene_name) + "/" + scene_name + ".urdf"
        self.scene_tree = ET.parse(self.scene_file)
        self.links = []
        self.joints = []
        self.links_by_name = {}
        self.joints_by_name = {}
        self.nested_urdfs = []

        # If this flag is true, we merge fixed joints into unique bodies
        self.merge_fj = False

        self.random_groups = {}

        # Current time string, used to save the temporary URDFs
        timestr = time.strftime("%Y%m%d-%H%M%S")
        # Create the subfolder
        os.mkdir(gibson2.ig_dataset_path + "/scene_instances/" + timestr)

        self.avg_obj_dims = self.load_avg_obj_dims()
        # Parse all the special link entries in the root URDF that defines the scene
        for link in self.scene_tree.findall('link'):

            if 'category' in link.attrib:
                embedded_urdf = URDFObject(
                    link, self.random_groups,
                    self.avg_obj_dims.get(link.attrib['category']))
                base_link_name = link.attrib['name']

                # The joint location is given wrt the bounding box center, but we need it wrt the base_link frame
                joint_connecting_embedded_link = \
                    [joint for joint in self.scene_tree.findall("joint")
                     if joint.find("child").attrib["link"]
                     == base_link_name][0]

                joint_xyz = np.array([float(val) for val in joint_connecting_embedded_link.find(
                    "origin").attrib["xyz"].split(" ")])
                # scaled_bbxc_in_blf is in the object's local frame; rotate it to the global (scene) frame
                x, y, z = embedded_urdf.scaled_bbxc_in_blf
                yaw = np.array([float(val) for val in joint_connecting_embedded_link.find(
                    "origin").attrib["rpy"].split(" ")])[2]
                x, y = rotate_vector_2d(np.array([x, y]), -yaw)

                joint_new_xyz = joint_xyz + np.array([x, y, z])
                joint_connecting_embedded_link.find(
                    "origin").attrib["xyz"] = "{0:f} {1:f} {2:f}".format(*joint_new_xyz)

                for link_emb in embedded_urdf.object_tree.iter('link'):
                    if link_emb.attrib['name'] == "base_link":
                        # The base_link gets renamed as the link tag indicates
                        # Just change the name of the base link in the embedded urdf
                        link_emb.attrib['name'] = base_link_name
                    else:
                        # The other links are also renamed, with the name of the link tag added as a prefix
                        # This allows us to load several instances of the same object
                        link_emb.attrib['name'] = base_link_name + \
                            "_" + link_emb.attrib['name']

                for joint_emb in embedded_urdf.object_tree.iter('joint'):
                    # We change the joint name
                    joint_emb.attrib["name"] = base_link_name + \
                        "_" + joint_emb.attrib["name"]
                    # We change the child link names
                    for child_emb in joint_emb.findall('child'):
                        if child_emb.attrib['link'] == "base_link":
                            child_emb.attrib['link'] = base_link_name
                        else:
                            child_emb.attrib['link'] = base_link_name + \
                                "_" + child_emb.attrib['link']
                    # and the parent link names
                    for parent_emb in joint_emb.findall('parent'):
                        if parent_emb.attrib['link'] == "base_link":
                            parent_emb.attrib['link'] = base_link_name
                        else:
                            parent_emb.attrib['link'] = base_link_name + \
                                "_" + parent_emb.attrib['link']

                # Deal with the joint connecting the embedded urdf to the main link (world or building)
                urdf_file_name_prefix = gibson2.ig_dataset_path + \
                    "/scene_instances/" + timestr + "/" + base_link_name  # + ".urdf"

                # Find the joint in the main urdf that defines the connection to the embedded urdf
                for joint in self.scene_tree.iter('joint'):
                    if joint.find('child').attrib['link'] == base_link_name:
                        joint_frame = np.eye(4)

                        # if the joint is not floating, we add the joint and a link to the embedded urdf
                        if joint.attrib['type'] != "floating":
                            embedded_urdf.object_tree.getroot().append(joint)
                            parent_link = ET.SubElement(embedded_urdf.object_tree.getroot(), "link",
                                                        dict([("name", joint.find('parent').attrib['link'])]))  # "world")]))

                        # if the joint is floating, we save the transformation in the floating joint to be used when we load the
                        # embedded urdf
                        else:
                            joint_xyz = np.array(
                                [float(val) for val in joint.find("origin").attrib["xyz"].split(" ")])

                            if 'rpy' in joint.find("origin").attrib:
                                joint_rpy = np.array(
                                    [float(val) for val in joint.find("origin").attrib["rpy"].split(" ")])
                            else:
                                joint_rpy = np.array([0., 0., 0.])
                            joint_frame = get_transform_from_xyz_rpy(
                                joint_xyz, joint_rpy)

                        # Deal with floating joints inside the embedded urdf
                        urdfs_no_floating = save_urdfs_without_floating_joints(embedded_urdf.object_tree,
                                                                               gibson2.ig_dataset_path + "/scene_instances/" + timestr + "/" + base_link_name, self.merge_fj)

                        # append a new tuple of file name of the instantiated embedded urdf
                        # and the transformation (!= None if its connection was floating)
                        for urdf in urdfs_no_floating:
                            transformation = np.dot(
                                joint_frame, urdfs_no_floating[urdf][1])
                            self.nested_urdfs += [
                                (urdfs_no_floating[urdf][0], transformation)]

    def load_avg_obj_dims(self):
        avg_obj_dim_file = os.path.join(
            gibson2.ig_dataset_path, 'metadata/avg_obj_dims.json')
        if os.path.isfile(avg_obj_dim_file):
            with open(avg_obj_dim_file) as f:
                return json.load(f)
        else:
            return {}

    def load(self):
        body_ids = []
        for urdf in self.nested_urdfs:
            logging.info("Loading " + urdf[0])
            body_id = p.loadURDF(urdf[0])
            # flags=p.URDF_USE_MATERIAL_COLORS_FROM_MTL)
            logging.info("Moving URDF to " + np.array_str(urdf[1]))
            transformation = urdf[1]
            pos = transformation[0:3, 3]
            orn = np.array(quatXYZWFromRotMat(transformation[0:3, 0:3]))

            _, _, _, inertial_pos, inertial_orn, _, _, _, _, _, _, _ = \
                p.getDynamicsInfo(body_id, -1)
            pos, orn = p.multiplyTransforms(
                pos, orn, inertial_pos, inertial_orn)
            p.resetBasePositionAndOrientation(body_id, pos, orn)
            p.changeDynamics(
                body_id, -1,
                activationState=p.ACTIVATION_STATE_ENABLE_SLEEPING)
            body_ids += [body_id]
        return body_ids
|
|
@ -0,0 +1,142 @@
|
|||
import cv2
import numpy as np
from gibson2.core.render.mesh_renderer.mesh_renderer_cpu import MeshRenderer
from gibson2.core.render.mesh_renderer.mesh_renderer_vr import MeshRendererVR


class ViewerVR:
    def __init__(self):
        self.renderer = None

    def update(self, should_reset_vr_camera=False, vr_camera_pos=None):
        if should_reset_vr_camera:
            self.renderer.reset_vr_camera()
        elif vr_camera_pos is not None:
            self.renderer.set_vr_camera(vr_camera_pos)

        self.renderer.render()
        # Viewer is responsible for calling the companion window rendering function
        self.renderer.render_companion_window()

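# --- Usage sketch (editor addition, not part of this commit) ---
# ViewerVR expects its renderer to be assigned externally before update()
# is called; a minimal loop, assuming a constructed MeshRendererVR:
#
#   viewer = ViewerVR()
#   viewer.renderer = vr_renderer    # hypothetical MeshRendererVR instance
#   while True:
#       viewer.update()
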

class Viewer:
    def __init__(self,
                 initial_pos=[0, 0, 1.2],
                 initial_view_direction=[1, 0, 0],
                 initial_up=[0, 0, 1],
                 ):
        self.px = initial_pos[0]
        self.py = initial_pos[1]
        self.pz = initial_pos[2]
        self.theta = np.arctan2(initial_view_direction[1], initial_view_direction[0])
        self.phi = np.arctan2(initial_view_direction[2], np.sqrt(initial_view_direction[0] ** 2 +
                                                                 initial_view_direction[1] ** 2))

        self._mouse_ix, self._mouse_iy = -1, -1
        self.left_down = False
        self.middle_down = False
        self.right_down = False
        self.view_direction = np.array(initial_view_direction)
        self.up = initial_up
        self.renderer = None

        cv2.namedWindow('ExternalView')
        cv2.moveWindow("ExternalView", 0, 0)
        cv2.namedWindow('RobotView')
        cv2.setMouseCallback('ExternalView', self.change_dir)

    def change_dir(self, event, x, y, flags, param):
        if flags == cv2.EVENT_FLAG_LBUTTON + cv2.EVENT_FLAG_CTRLKEY and not self.right_down:
            # Only once, when pressing left mouse while the ctrl key is pressed
            self._mouse_ix, self._mouse_iy = x, y
            self.right_down = True
        elif (event == cv2.EVENT_MBUTTONDOWN) or (flags == cv2.EVENT_FLAG_LBUTTON + cv2.EVENT_FLAG_SHIFTKEY and not self.middle_down):
            # middle mouse button press, or only once when pressing left mouse while the shift key is pressed (Mac compatibility)
            self._mouse_ix, self._mouse_iy = x, y
            self.middle_down = True
        elif event == cv2.EVENT_LBUTTONDOWN:  # left mouse button press
            self._mouse_ix, self._mouse_iy = x, y
            self.left_down = True
        elif event == cv2.EVENT_LBUTTONUP:  # left mouse button released
            self.left_down = False
            self.right_down = False
            self.middle_down = False
        elif event == cv2.EVENT_MBUTTONUP:  # middle mouse button released
            self.middle_down = False

        if event == cv2.EVENT_MOUSEMOVE:  # moving mouse location on the window
            if self.left_down:  # if left button was pressed we change the orientation of the camera
                dx = (x - self._mouse_ix) / 100.0
                dy = (y - self._mouse_iy) / 100.0
                self._mouse_ix = x
                self._mouse_iy = y

                self.phi += dy
                self.phi = np.clip(self.phi, -np.pi / 2 + 1e-5, np.pi / 2 - 1e-5)
                self.theta += dx

                self.view_direction = np.array([np.cos(self.theta) * np.cos(self.phi),
                                                np.sin(self.theta) * np.cos(self.phi),
                                                np.sin(self.phi)])
            elif self.middle_down:  # if middle button was pressed we move closer/further away along the viewing direction
                d_vd = (y - self._mouse_iy) / 100.0
                self._mouse_iy = y

                motion_along_vd = d_vd * self.view_direction
                self.px += motion_along_vd[0]
                self.py += motion_along_vd[1]
                self.pz += motion_along_vd[2]
            elif self.right_down:  # if right button was pressed we change the translation of the camera

                zz = self.view_direction / np.linalg.norm(self.view_direction)
                xx = np.cross(zz, np.array([0, 0, 1]))
                xx = xx / np.linalg.norm(xx)
                yy = np.cross(xx, zz)
                motion_along_vx = -((x - self._mouse_ix) / 100.0) * xx
                motion_along_vy = ((y - self._mouse_iy) / 100.0) * yy
                self._mouse_ix = x
                self._mouse_iy = y

                self.px += (motion_along_vx[0] + motion_along_vy[0])
                self.py += (motion_along_vx[1] + motion_along_vy[1])
                self.pz += (motion_along_vx[2] + motion_along_vy[2])
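    # --- Editor's note (illustrative, not part of this commit) ---
    # theta (yaw) and phi (pitch) parameterize the unit view direction in
    # spherical coordinates, matching the arctan2 initialization in __init__:
    #
    #   view_direction = [cos(theta) * cos(phi),
    #                     sin(theta) * cos(phi),
    #                     sin(phi)]
    #
    # phi is clipped to (-pi/2, pi/2) so the camera never flips over the pole.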
    def update(self):
        camera_pose = np.array([self.px, self.py, self.pz])
        if self.renderer is not None:
            self.renderer.set_camera(camera_pose, camera_pose + self.view_direction, self.up)

        if self.renderer is not None:
            frame = cv2.cvtColor(np.concatenate(self.renderer.render(modes=('rgb')), axis=1),
                                 cv2.COLOR_RGB2BGR)
        else:
            frame = np.zeros((300, 300, 3)).astype(np.uint8)

        # Text with the position and viewing direction of the camera of the external viewer
        cv2.putText(frame, "px {:1.1f} py {:1.1f} pz {:1.1f}".format(self.px, self.py, self.pz), (10, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
        cv2.putText(frame, "[{:1.1f} {:1.1f} {:1.1f}]".format(*self.view_direction), (10, 40),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
        cv2.imshow('ExternalView', frame)

        # We keep some duplicate functionality for backward compatibility
        q = cv2.waitKey(1)
        if q == ord('w'):
            self.px += 0.05
        elif q == ord('s'):
            self.px -= 0.05
        elif q == ord('a'):
            self.py += 0.05
        elif q == ord('d'):
            self.py -= 0.05
        elif q == ord('q'):
            exit()

        if self.renderer is not None:
            frames = self.renderer.render_robot_cameras(modes=('rgb'))
            if len(frames) > 0:
                frame = cv2.cvtColor(np.concatenate(frames, axis=1), cv2.COLOR_RGB2BGR)
                cv2.imshow('RobotView', frame)


if __name__ == '__main__':
    viewer = Viewer()
    while True:
        viewer.update()
|
|
@ -1,15 +1,21 @@
|
|||
cmake_minimum_required(VERSION 2.8.12)
project(CppMeshRenderer)


set(USE_GLAD TRUE)

if (MAC_PLATFORM)
    set(USE_CUDA FALSE)
    set(USE_GLFW TRUE)
    set(USE_VR FALSE)
elseif (WIN32)
    set(USE_CUDA FALSE)
    set(USE_GLFW TRUE)
    # iGibson on Windows is always in VR mode
    set(USE_VR TRUE)
else ()
    set(USE_CUDA TRUE)
    set(USE_GLFW FALSE)
    set(USE_VR FALSE)
endif ()

include_directories(glad)
|
@ -20,6 +26,11 @@ else ()
|
|||
    add_definitions(-DUSE_GLAD)
endif ()

if (USE_VR)
    find_package(OpenGL)
    # OpenGL is needed for vr-gl interoperation
endif()

if (USE_CUDA)
    add_definitions(-DUSE_CUDA)
    find_package(OpenGL)
|
@ -31,7 +42,6 @@ add_subdirectory(pybind11)
|
|||
include_directories("glm")
|
||||
|
||||
if (USE_GLFW)
|
||||
# Find GLFW and OpenVR
|
||||
set(GLFW_DIR glfw)
|
||||
set(GLFW_BUILD_EXAMPLES OFF CACHE INTERNAL "Build the GLFW example programs")
|
||||
set(GLFW_BUILD_TESTS OFF CACHE INTERNAL "Build the GLFW test programs")
|
||||
|
@ -41,52 +51,94 @@ if (USE_GLFW)
|
|||
include_directories("${GLFW_DIR}/include")
|
||||
endif ()
|
||||
|
||||
if (USE_CUDA)
|
||||
find_package(CUDA REQUIRED)
|
||||
set(CUDA_LIBRARIES PUBLIC ${CUDA_LIBRARIES})
|
||||
cuda_add_library(EGLRendererContext MODULE glad/egl.cpp glad/gl.cpp cpp/mesh_renderer.cpp cpp/egl_mesh_renderer.cpp)
|
||||
else ()
|
||||
add_library(EGLRendererContext MODULE glad/egl.cpp glad/gl.cpp cpp/mesh_renderer.cpp cpp/egl_mesh_renderer.cpp)
|
||||
endif ()
|
||||
if (USE_VR)
|
||||
# Find OpenVR
|
||||
set(WINDOWS_PATH_SUFFIXES win64 Win64 x64)
|
||||
set(OPENVR_DIR openvr)
|
||||
find_library(OPENVR_LIBRARIES
|
||||
NAMES
|
||||
openvr_api
|
||||
PATHS
|
||||
"${OPENVR_DIR}/bin"
|
||||
"${OPENVR_DIR}/lib"
|
||||
PATH_SUFFIXES
|
||||
${WINDOWS_PATH_SUFFIXES}
|
||||
NO_DEFAULT_PATH
|
||||
NO_CMAKE_FIND_ROOT_PATH
|
||||
)
|
||||
set(OPENVR_INCLUDE_DIR "${OPENVR_DIR}/headers")
|
||||
include_directories("${OPENVR_INCLUDE_DIR}")
|
||||
|
||||
# Find SRAnipal
|
||||
set(SRANI_DIR sranipal)
|
||||
find_library(SRANI_LIBRARIES
|
||||
NAMES
|
||||
SRAnipal
|
||||
PATHS
|
||||
"${SRANI_DIR}/lib"
|
||||
NO_DEFAULT_PATH
|
||||
NO_CMAKE_FIND_ROOT_PATH
|
||||
)
|
||||
include_directories("${SRANI_DIR}/include")
|
||||
endif()
|
||||
|
||||
add_library(tinyobjloader MODULE cpp/tinyobjloader/tiny_obj_loader.cc cpp/tinyobjloader/bindings.cc)
|
||||
if (USE_GLFW)
|
||||
add_library(GLFWRendererContext MODULE glad/gl.cpp cpp/glfw_mesh_renderer.cpp cpp/mesh_renderer.cpp)
|
||||
endif ()
|
||||
|
||||
|
||||
if (USE_GLAD)
|
||||
target_link_libraries(EGLRendererContext PRIVATE pybind11::module dl pthread)
|
||||
if (USE_GLFW)
|
||||
target_link_libraries(GLFWRendererContext PRIVATE pybind11::module dl glfw ${GLFW_LIBRARIES})
|
||||
if (USE_VR)
|
||||
add_library(VRRendererContext MODULE glad/gl.cpp cpp/vr_mesh_renderer.cpp cpp/glfw_mesh_renderer.cpp cpp/mesh_renderer.cpp)
|
||||
else()
|
||||
if (USE_CUDA)
|
||||
find_package(CUDA REQUIRED)
|
||||
set(CUDA_LIBRARIES PUBLIC ${CUDA_LIBRARIES})
|
||||
cuda_add_library(EGLRendererContext MODULE glad/egl.cpp glad/gl.cpp cpp/mesh_renderer.cpp cpp/egl_mesh_renderer.cpp)
|
||||
else ()
|
||||
add_library(EGLRendererContext MODULE glad/egl.cpp glad/gl.cpp cpp/mesh_renderer.cpp cpp/egl_mesh_renderer.cpp)
|
||||
endif ()
|
||||
if (USE_GLFW)
|
||||
add_library(GLFWRendererContext MODULE glad/gl.cpp cpp/glfw_mesh_renderer.cpp cpp/mesh_renderer.cpp)
|
||||
endif ()
|
||||
endif()
|
||||
|
||||
if (USE_VR)
|
||||
target_link_libraries(VRRendererContext PRIVATE pybind11::module ${CMAKE_DL_LIBS} glfw ${GLFW_LIBRARIES} ${OPENGL_LIBRARIES} ${OPENVR_LIBRARIES} ${SRANI_LIBRARIES})
|
||||
else ()
|
||||
target_link_libraries(EGLRendererContext PRIVATE pybind11::module dl pthread EGL ${OPENGL_LIBRARIES})
|
||||
if (USE_GLFW)
|
||||
target_link_libraries(GLFWRendererContext PRIVATE pybind11::module dl glfw ${GLFW_LIBRARIES} ${OPENGL_LIBRARIES})
|
||||
endif ()
|
||||
if (USE_GLAD)
|
||||
target_link_libraries(EGLRendererContext PRIVATE pybind11::module dl pthread)
|
||||
if (USE_GLFW)
|
||||
target_link_libraries(GLFWRendererContext PRIVATE pybind11::module dl glfw ${GLFW_LIBRARIES})
|
||||
endif ()
|
||||
else ()
|
||||
target_link_libraries(EGLRendererContext PRIVATE pybind11::module dl pthread EGL ${OPENGL_LIBRARIES})
|
||||
if (USE_GLFW)
|
||||
target_link_libraries(GLFWRendererContext PRIVATE pybind11::module dl glfw ${GLFW_LIBRARIES} ${OPENGL_LIBRARIES})
|
||||
endif ()
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
target_link_libraries(tinyobjloader PRIVATE pybind11::module)
|
||||
|
||||
set_target_properties(EGLRendererContext PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}"
|
||||
if (USE_VR)
|
||||
set_target_properties(VRRendererContext PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}"
|
||||
SUFFIX "${PYTHON_MODULE_EXTENSION}")
|
||||
|
||||
if (USE_GLFW)
|
||||
set_target_properties(GLFWRendererContext PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}"
|
||||
else()
|
||||
set_target_properties(EGLRendererContext PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}"
|
||||
SUFFIX "${PYTHON_MODULE_EXTENSION}")
|
||||
if (USE_GLFW)
|
||||
set_target_properties(GLFWRendererContext PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}"
|
||||
SUFFIX "${PYTHON_MODULE_EXTENSION}")
|
||||
endif ()
|
||||
endif ()
|
||||
endif()
|
||||
|
||||
set_target_properties(tinyobjloader PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}"
|
||||
SUFFIX "${PYTHON_MODULE_EXTENSION}")
|
||||
|
||||
add_executable(query_devices glad/egl.cpp glad/gl.cpp cpp/query_devices.cpp)
|
||||
add_executable(test_device glad/egl.cpp glad/gl.cpp cpp/test_device.cpp)
|
||||
|
||||
if (USE_GLAD)
|
||||
target_link_libraries(query_devices dl pthread)
|
||||
target_link_libraries(test_device dl pthread)
|
||||
else ()
|
||||
target_link_libraries(query_devices dl pthread EGL ${OPENGL_LIBRARIES})
|
||||
target_link_libraries(test_device dl pthread EGL ${OPENGL_LIBRARIES})
|
||||
endif ()
|
||||
if (NOT USE_VR)
|
||||
add_executable(query_devices glad/egl.cpp glad/gl.cpp cpp/query_devices.cpp)
|
||||
add_executable(test_device glad/egl.cpp glad/gl.cpp cpp/test_device.cpp)
|
||||
if (USE_GLAD)
|
||||
target_link_libraries(query_devices dl pthread)
|
||||
target_link_libraries(test_device dl pthread)
|
||||
else ()
|
||||
target_link_libraries(query_devices dl pthread EGL ${OPENGL_LIBRARIES})
|
||||
target_link_libraries(test_device dl pthread EGL ${OPENGL_LIBRARIES})
|
||||
endif ()
|
||||
endif()
|
||||
|
|
|
@ -33,7 +33,7 @@
|
|||

namespace py = pybind11;

int GLFWRendererContext::init() {
int GLFWRendererContext::init(bool render_window, bool fullscreen) {
	verbosity = 20;

	// Initialize GLFW context and window
|
@ -49,17 +49,26 @@ int GLFWRendererContext::init() {
|
|||
	glfwWindowHint(GLFW_DEPTH_BITS, 0);
	glfwWindowHint(GLFW_STENCIL_BITS, 0);
	glfwWindowHint(GLFW_SAMPLES, 0);
	glfwWindowHint(GLFW_VISIBLE, GLFW_FALSE);
	// Hide GLFW window by default

	this->window = glfwCreateWindow(m_windowHeight, m_windowHeight, "Gibson GLFW Renderer", NULL, NULL);
	// Hide GLFW window if user requests
	if (!render_window) {
		glfwWindowHint(GLFW_VISIBLE, GLFW_FALSE);
	}

	if (fullscreen) {
		this->window = glfwCreateWindow(m_windowWidth, m_windowHeight, "Gibson Renderer Output", glfwGetPrimaryMonitor(), NULL);
	}
	else {
		this->window = glfwCreateWindow(m_windowWidth, m_windowHeight, "Gibson Renderer Output", NULL, NULL);
	}

	if (this->window == NULL) {
		fprintf(stderr, "ERROR: Failed to create GLFW window.\n");
		exit(EXIT_FAILURE);
	}

	glfwMakeContextCurrent(this->window);
	glfwSwapInterval(0);
|
@ -79,6 +88,21 @@ void GLFWRendererContext::release() {
|
|||
	glfwTerminate();
}

void GLFWRendererContext::render_companion_window_from_buffer(GLuint readBuffer) {
	glBindFramebuffer(GL_READ_FRAMEBUFFER, readBuffer);
	glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
	glReadBuffer(GL_COLOR_ATTACHMENT0);
	glDrawBuffer(GL_BACK);
	glBlitFramebuffer(0, 0, m_windowWidth, m_windowHeight, 0, 0, m_windowWidth, m_windowHeight, GL_COLOR_BUFFER_BIT, GL_NEAREST);
	glFlush();
	glfwSwapBuffers(this->window);
	glfwPollEvents();

	if (glfwGetKey(this->window, GLFW_KEY_ESCAPE)) {
		glfwTerminate();
	}
}

PYBIND11_MODULE(GLFWRendererContext, m) {

	py::class_<GLFWRendererContext> pymodule = py::class_<GLFWRendererContext>(m, "GLFWRendererContext");
|
@ -86,6 +110,7 @@ PYBIND11_MODULE(GLFWRendererContext, m) {
|
|||
	pymodule.def(py::init<int, int>());
	pymodule.def("init", &GLFWRendererContext::init);
	pymodule.def("release", &GLFWRendererContext::release);
	pymodule.def("render_companion_window_from_buffer", &GLFWRendererContext::render_companion_window_from_buffer);

	// class MeshRenderer
	pymodule.def("render_meshrenderer_pre", &GLFWRendererContext::render_meshrenderer_pre,
|
|
|
@ -2,13 +2,19 @@
|
|||
#define GLFW_MESH_RENDERER_HEADER

#include "mesh_renderer.h"
#include <GLFW\glfw3.h>

class GLFWRendererContext: public MeshRendererContext {
public:
	GLFWRendererContext(int w, int h): MeshRendererContext(w, h) {};
	int width;
	int height;

	GLFWRendererContext(int w, int h): MeshRendererContext(w, h), width(w), height(h) {};
	GLFWwindow *window = NULL;
	int init();
	// By default don't render window and don't use fullscreen
	int init(bool render_window = false, bool fullscreen = false);
	void release();
	void render_companion_window_from_buffer(GLuint readBuffer);
};

#endif
|
@ -3,7 +3,9 @@
|
|||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <stdbool.h>
|
||||
#ifndef WIN32
|
||||
#include <unistd.h>
|
||||
#endif
|
||||
#include <fstream>
|
||||
|
||||
#ifdef USE_GLAD
|
||||
|
|
|
@ -0,0 +1,781 @@
|
|||
#include "vr_mesh_renderer.h"
|
||||
#include "SRanipal.h"
|
||||
#include "SRanipal_Eye.h"
|
||||
#include "SRanipal_Enums.h"
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <stdbool.h>
|
||||
#include <stdint.h>
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
#include <sstream>
|
||||
#include <thread>
|
||||
|
||||
#include <glad/egl.h>
|
||||
#include <glm/glm.hpp>
|
||||
#include <glm/gtc/matrix_transform.hpp>
|
||||
#include <glm/gtx/euler_angles.hpp>
|
||||
#include <pybind11/pybind11.h>
|
||||
#include <pybind11/numpy.h>
|
||||
#include <pybind11/stl.h>
|
||||
|
||||
namespace py = pybind11;
|
||||
|
||||
// Public methods
|
||||
|
||||
// Get button data for a specific controller - either left_controller or right_controller
|
||||
// Returns in order: trigger fraction, analog touch position x, analog touch position y
|
||||
// TIMELINE: Call directly after getDataForVRDevice (relies on isValid to determine data integrity)
|
||||
py::list VRRendererContext::getButtonDataForController(char* controllerType) {
|
||||
float trigger_fraction, touch_x, touch_y;
|
||||
bool isValid;
|
||||
|
||||
if (!strcmp(controllerType, "left_controller")) {
|
||||
trigger_fraction = leftControllerData.trig_frac;
|
||||
touch_x = leftControllerData.touchpad_analog_vec.x;
|
||||
touch_y = leftControllerData.touchpad_analog_vec.y;
|
||||
isValid = leftControllerData.isValidData;
|
||||
}
|
||||
else if (!strcmp(controllerType, "right_controller")) {
|
||||
trigger_fraction = rightControllerData.trig_frac;
|
||||
touch_x = rightControllerData.touchpad_analog_vec.x;
|
||||
touch_y = rightControllerData.touchpad_analog_vec.y;
|
||||
isValid = rightControllerData.isValidData;
|
||||
}
|
||||
|
||||
py::list buttonData;
|
||||
buttonData.append(trigger_fraction);
|
||||
buttonData.append(touch_x);
|
||||
buttonData.append(touch_y);
|
||||
|
||||
return buttonData;
|
||||
}
|
||||
|
||||
// Returns device data in order: isValidData, position, rotation, hmdActualPos (valid only if hmd)
|
||||
// Device type can be either hmd, left_controller or right_controller
|
||||
// TIMELINE: Call at any time after postRenderVR to poll the VR system for device data
|
||||
py::list VRRendererContext::getDataForVRDevice(char* deviceType) {
|
||||
bool isValid = false;
|
||||
|
||||
py::array_t<float> positionData;
|
||||
py::array_t<float> rotationData;
|
||||
py::array_t<float> hmdActualPosData;
|
||||
|
||||
// TODO: Extend this to work with multiple headsets in future
|
||||
if (!strcmp(deviceType, "hmd")) {
|
||||
glm::vec3 transformedPos(vrToGib * glm::vec4(hmdData.devicePos, 1.0));
|
||||
positionData = py::array_t<float>({ 3, }, glm::value_ptr(transformedPos));
|
||||
rotationData = py::array_t<float>({ 4, }, glm::value_ptr(vrToGib * hmdData.deviceRot));
|
||||
glm::vec3 transformedHmdPos(vrToGib * glm::vec4(hmdActualPos, 1.0));
|
||||
hmdActualPosData = py::array_t<float>({ 3, }, glm::value_ptr(transformedHmdPos));
|
||||
isValid = hmdData.isValidData;
|
||||
}
|
||||
else if (!strcmp(deviceType, "left_controller")) {
|
||||
glm::vec3 transformedPos(vrToGib * glm::vec4(leftControllerData.devicePos, 1.0));
|
||||
positionData = py::array_t<float>({ 3, }, glm::value_ptr(transformedPos));
|
||||
rotationData = py::array_t<float>({ 4, }, glm::value_ptr(vrToGib * leftControllerData.deviceRot));
|
||||
isValid = leftControllerData.isValidData;
|
||||
}
|
||||
else if (!strcmp(deviceType, "right_controller")) {
|
||||
glm::vec3 transformedPos(vrToGib * glm::vec4(rightControllerData.devicePos, 1.0));
|
||||
positionData = py::array_t<float>({ 3, }, glm::value_ptr(transformedPos));
|
||||
rotationData = py::array_t<float>({ 4, }, glm::value_ptr(vrToGib * rightControllerData.deviceRot));
|
||||
isValid = rightControllerData.isValidData;
|
||||
}
|
||||
|
||||
py::list deviceData;
|
||||
deviceData.append(isValid);
|
||||
deviceData.append(positionData);
|
||||
deviceData.append(rotationData);
|
||||
deviceData.append(hmdActualPosData);
|
||||
|
||||
return deviceData;
|
||||
}

// Gets normalized vectors representing the device coordinate system
// Returns the transformed x, y and z axes
// These represent "right", "up" and "forward" relative to the headset, in iGibson coordinates
// TIMELINE: Call any time after postRenderVR
py::list VRRendererContext::getDeviceCoordinateSystem(char* device) {
    py::list vecList;
    glm::mat4 deviceTransform;

    if (!strcmp(device, "hmd")) {
        deviceTransform = hmdData.deviceTransform;
    }
    else if (!strcmp(device, "left_controller")) {
        deviceTransform = leftControllerData.deviceTransform;
    }
    else if (!strcmp(device, "right_controller")) {
        deviceTransform = rightControllerData.deviceTransform;
    }

    for (int i = 0; i < 3; i++) {
        glm::vec3 transformedVrDir = getVec3ColFromMat4(i, deviceTransform);
        if (i == 2) {
            transformedVrDir = transformedVrDir * -1.0f;
        }
        glm::vec3 transformedGibDir = glm::normalize(glm::vec3(vrToGib * glm::vec4(transformedVrDir, 1.0)));

        py::list vec;
        vec.append(transformedGibDir.x);
        vec.append(transformedGibDir.y);
        vec.append(transformedGibDir.z);
        vecList.append(vec);
    }

    return vecList;
}

// Queries eye tracking data and returns it to the user
// Returns in order: is_data_valid, gaze origin, gaze direction, left pupil diameter (in mm), right pupil diameter (in mm)
// TIMELINE: Call after getDataForVRDevice, since this relies on knowing the latest HMD transform
py::list VRRendererContext::getEyeTrackingData() {
    py::list eyeData;

    // Transform data into the Gibson coordinate system before returning it to the user
    glm::vec3 gibOrigin(vrToGib * glm::vec4(eyeTrackingData.origin, 1.0));
    glm::vec3 gibDir(vrToGib * glm::vec4(eyeTrackingData.dir, 1.0));

    py::list origin;
    origin.append(gibOrigin.x);
    origin.append(gibOrigin.y);
    origin.append(gibOrigin.z);

    py::list dir;
    dir.append(gibDir.x);
    dir.append(gibDir.y);
    dir.append(gibDir.z);

    eyeData.append(eyeTrackingData.isValid);
    eyeData.append(origin);
    eyeData.append(dir);
    eyeData.append(eyeTrackingData.leftPupilDiameter);
    eyeData.append(eyeTrackingData.rightPupilDiameter);

    return eyeData;
}
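A usage sketch for the gaze ray (illustration only, assuming the renderer was constructed with useEyeTracking=True and that `renderer.vrsys` is this context as above):

```python
# The world-space gaze ray can be intersected with the scene, e.g. through
# pybullet's p.rayTest; the 2.0 here is an arbitrary ray length in meters.
is_valid, origin, direction, left_pupil_mm, right_pupil_mm = \
    renderer.vrsys.getEyeTrackingData()
if is_valid:
    target = [origin[i] + 2.0 * direction[i] for i in range(3)]
```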

// Gets the VR offset vector in the form x, y, z
// TIMELINE: Can call at any time
py::list VRRendererContext::getVROffset() {
    glm::vec3 transformedOffsetVec(vrToGib * glm::vec4(this->vrOffsetVec, 1.0));

    py::list offset;
    offset.append(transformedOffsetVec.x);
    offset.append(transformedOffsetVec.y);
    offset.append(transformedOffsetVec.z);

    return offset;
}

// Initializes the VR system and compositor
// TIMELINE: Call during init of the renderer, before height/width are set
void VRRendererContext::initVR(bool useEyeTracking) {
    // Initialize VR systems
    if (!vr::VR_IsRuntimeInstalled()) {
        fprintf(stderr, "VR runtime not installed.\n");
        exit(EXIT_FAILURE);
    }

    vr::EVRInitError eError = vr::VRInitError_None;
    m_pHMD = vr::VR_Init(&eError, vr::VRApplication_Scene);

    if (eError != vr::VRInitError_None) {
        fprintf(stderr, "Unable to initialize VR runtime.\n");
        exit(EXIT_FAILURE);
    }

    if (!vr::VRCompositor()) {
        fprintf(stderr, "Unable to initialize VR compositor.\n");
    }

    leftEyeProj = getHMDEyeProjection(vr::Eye_Left);
    leftEyePos = getHMDEyePose(vr::Eye_Left);
    rightEyeProj = getHMDEyeProjection(vr::Eye_Right);
    rightEyePos = getHMDEyePose(vr::Eye_Right);

    // Set gibToVR and vrToGib matrices
    setCoordinateTransformMatrices();
    // No VR system offset by default
    vrOffsetVec = glm::vec3(0, 0, 0);

    // Set eye tracking boolean
    this->useEyeTracking = useEyeTracking;
    if (useEyeTracking) {
        // Clear the shutdown flag before initAnipal launches the polling thread that reads it
        shouldShutDownEyeTracking = false;
        initAnipal();
    }
}

// Polls for VR events, such as button presses
// TIMELINE: Ideally call before rendering (e.g. before the simulator step function)
py::list VRRendererContext::pollVREvents() {
    vr::VREvent_t vrEvent;
    py::list eventData;

    while (m_pHMD->PollNextEvent(&vrEvent, sizeof(vrEvent))) {
        std::string deviceType, eventType;
        processVREvent(vrEvent, deviceType, eventType);

        if (deviceType == "invalid" || eventType == "invalid") {
            continue;
        }

        py::list singleEventData;
        singleEventData.append(deviceType);
        singleEventData.append(eventType);

        eventData.append(singleEventData);
    }

    return eventData;
}
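A sketch of consuming these events from Python (illustration only; the callbacks are hypothetical application code):

```python
# Drain the event queue once per frame, before stepping the simulator.
# Each entry is a [device, event] pair of strings, e.g.
# ["right_controller", "trigger_press"].
for device, event in renderer.vrsys.pollVREvents():
    if (device, event) == ("right_controller", "trigger_press"):
        on_grasp_start()  # hypothetical application callback
    elif (device, event) == ("right_controller", "trigger_unpress"):
        on_grasp_end()    # hypothetical application callback
```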

// Called after the renderer has finished rendering a single eye
// TIMELINE: Call immediately after rendering for the current eye is finished
void VRRendererContext::postRenderVRForEye(char* eye, GLuint texID) {
    if (!strcmp(eye, "left")) {
        vr::Texture_t leftEyeTexture = { (void*)(uintptr_t)texID, vr::TextureType_OpenGL, vr::ColorSpace_Gamma };
        vr::EVRCompositorError err = vr::VRCompositor()->Submit(vr::Eye_Left, &leftEyeTexture);
        // 0 is no error, 101 is no focus (happens at start of rendering)
        if (err != 0 && err != 101) {
            fprintf(stderr, "Compositor error: %d\n", err);
        }
    }
    else if (!strcmp(eye, "right")) {
        vr::Texture_t rightEyeTexture = { (void*)(uintptr_t)texID, vr::TextureType_OpenGL, vr::ColorSpace_Gamma };
        vr::EVRCompositorError err = vr::VRCompositor()->Submit(vr::Eye_Right, &rightEyeTexture);
        if (err != 0 && err != 101) {
            fprintf(stderr, "Compositor error: %d\n", err);
        }
    }
}

// Called after both eyes have been rendered
// Tells the compositor to begin work immediately instead of waiting for the next WaitGetPoses() call, if the user wants
// Then updates the VR data
// TIMELINE: Call immediately after calling postRenderVRForEye on both the left and right eyes
void VRRendererContext::postRenderVRUpdate(bool shouldHandoff) {
    if (shouldHandoff) {
        vr::VRCompositor()->PostPresentHandoff();
    }

    updateVRData();
}

// Returns the projection and view matrices for the left and right eyes, to be used in rendering
// Returns in order: left P, left V, right P, right V
// Note: GLM is column-major, whereas numpy is row-major, so we need to transpose view matrices before conversion
// Note 2: Projection matrices are passed into OpenGL assuming they are column-major, so we don't need to transpose them
// TIMELINE: Call before rendering so the camera is set properly
py::list VRRendererContext::preRenderVR() {
    py::array_t<float> leftEyeProjNp = py::array_t<float>({ 4,4 }, glm::value_ptr(leftEyeProj));
    py::array_t<float> rightEyeProjNp = py::array_t<float>({ 4,4 }, glm::value_ptr(rightEyeProj));

    glm::mat4 worldToHead = glm::inverse(hmdData.deviceTransform);

    leftEyeView = leftEyePos * worldToHead * gibToVR;
    rightEyeView = rightEyePos * worldToHead * gibToVR;

    py::array_t<float> leftEyeViewNp = py::array_t<float>({ 4,4 }, glm::value_ptr(glm::transpose(leftEyeView)));
    py::array_t<float> rightEyeViewNp = py::array_t<float>({ 4,4 }, glm::value_ptr(glm::transpose(rightEyeView)));

    py::list eyeMats;
    eyeMats.append(leftEyeProjNp);
    eyeMats.append(leftEyeViewNp);
    eyeMats.append(rightEyeProjNp);
    eyeMats.append(rightEyeViewNp);

    return eyeMats;
}
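The transpose of the view matrices above is purely a storage-order issue; a small numpy sketch (illustration only) of why it is needed:

```python
import numpy as np

# glm::value_ptr exposes column-major storage, while numpy reshapes row-major,
# so a flat GLM buffer read as a (4, 4) numpy array comes out transposed.
flat = np.arange(16, dtype=np.float32)    # what value_ptr hands over
as_numpy = flat.reshape(4, 4)             # row-major interpretation
as_glm = flat.reshape(4, 4, order='F')    # the matrix GLM actually means
assert np.allclose(as_numpy, as_glm.T)
```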

// Releases and cleans up the VR system
// TIMELINE: Call when the renderer shuts down
void VRRendererContext::releaseVR() {
    vr::VR_Shutdown();
    m_pHMD = NULL;

    if (this->useEyeTracking) {
        this->shouldShutDownEyeTracking = true;
        eyeTrackingThread->join();
    }
}

// Sets the offset of the VR headset
// TIMELINE: Can call at any time
void VRRendererContext::setVROffset(float x, float y, float z) {
    this->vrOffsetVec = glm::vec3(x, y, z);
}

// Causes a haptic pulse in the specified controller, for a user-specified duration
// Note: Haptic pulses can only trigger every 5ms, regardless of duration
// TIMELINE: Call after physics/rendering have been stepped in the simulator
void VRRendererContext::triggerHapticPulseForDevice(char* device, unsigned short microSecondDuration) {
    DeviceData ddata;
    if (!strcmp(device, "hmd")) {
        ddata = hmdData;
    }
    else if (!strcmp(device, "left_controller")) {
        ddata = leftControllerData;
    }
    else if (!strcmp(device, "right_controller")) {
        ddata = rightControllerData;
    }

    if (ddata.index == -1) {
        std::cerr << "HAPTIC ERROR: Device " << device << " does not have a valid index." << std::endl;
        // Bail out rather than triggering a pulse on an invalid device index
        return;
    }

    // Currently haptics are only supported on one axis (the touchpad axis)
    uint32_t hapticAxis = 0;
    m_pHMD->TriggerHapticPulse(ddata.index, hapticAxis, microSecondDuration);
}
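A usage sketch (illustration only): the Simulator changes later in this commit cap durations via max_haptic_duration = 4000.

```python
# A short buzz on the right controller, e.g. after a collision.
# The duration is in microseconds; OpenVR only accepts one pulse per 5 ms.
renderer.vrsys.triggerHapticPulseForDevice("right_controller", 3000)
```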

// Private methods

// Converts a SteamVR matrix to a glm mat4
glm::mat4 VRRendererContext::convertSteamVRMatrixToGlmMat4(const vr::HmdMatrix34_t& matPose) {
    glm::mat4 mat(
        matPose.m[0][0], matPose.m[1][0], matPose.m[2][0], 0.0,
        matPose.m[0][1], matPose.m[1][1], matPose.m[2][1], 0.0,
        matPose.m[0][2], matPose.m[1][2], matPose.m[2][2], 0.0,
        matPose.m[0][3], matPose.m[1][3], matPose.m[2][3], 1.0f
    );
    return mat;
}

// Generates a pose matrix for the specified eye (left or right)
glm::mat4 VRRendererContext::getHMDEyePose(vr::Hmd_Eye eye) {
    vr::HmdMatrix34_t mat = m_pHMD->GetEyeToHeadTransform(eye);

    glm::mat4 eyeToHead(
        mat.m[0][0], mat.m[1][0], mat.m[2][0], 0.0,
        mat.m[0][1], mat.m[1][1], mat.m[2][1], 0.0,
        mat.m[0][2], mat.m[1][2], mat.m[2][2], 0.0,
        mat.m[0][3], mat.m[1][3], mat.m[2][3], 1.0f
    );

    // Return the head-to-eye transform
    return glm::inverse(eyeToHead);
}

// Generates a projection matrix for the specified eye (left or right)
glm::mat4 VRRendererContext::getHMDEyeProjection(vr::Hmd_Eye eye) {
    vr::HmdMatrix44_t mat = m_pHMD->GetProjectionMatrix(eye, nearClip, farClip);

    glm::mat4 eyeProjMat(
        mat.m[0][0], mat.m[1][0], mat.m[2][0], mat.m[3][0],
        mat.m[0][1], mat.m[1][1], mat.m[2][1], mat.m[3][1],
        mat.m[0][2], mat.m[1][2], mat.m[2][2], mat.m[3][2],
        mat.m[0][3], mat.m[1][3], mat.m[2][3], mat.m[3][3]
    );

    return eyeProjMat;
}

// Extracts the position component from a SteamVR matrix
glm::vec3 VRRendererContext::getPositionFromSteamVRMatrix(vr::HmdMatrix34_t& matrix) {
    return glm::vec3(matrix.m[0][3], matrix.m[1][3], matrix.m[2][3]);
}

// Extracts the rotation component from a SteamVR matrix, as a (w, x, y, z) quaternion in vec4 form
glm::vec4 VRRendererContext::getRotationFromSteamVRMatrix(vr::HmdMatrix34_t& matrix) {
    glm::vec4 q;

    q.w = (float)sqrt(fmax(0, 1 + matrix.m[0][0] + matrix.m[1][1] + matrix.m[2][2])) / 2;
    q.x = (float)sqrt(fmax(0, 1 + matrix.m[0][0] - matrix.m[1][1] - matrix.m[2][2])) / 2;
    q.y = (float)sqrt(fmax(0, 1 - matrix.m[0][0] + matrix.m[1][1] - matrix.m[2][2])) / 2;
    q.z = (float)sqrt(fmax(0, 1 - matrix.m[0][0] - matrix.m[1][1] + matrix.m[2][2])) / 2;
    q.x = copysign(q.x, matrix.m[2][1] - matrix.m[1][2]);
    q.y = copysign(q.y, matrix.m[0][2] - matrix.m[2][0]);
    q.z = copysign(q.z, matrix.m[1][0] - matrix.m[0][1]);

    return q;
}
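This is the standard extraction that takes the quaternion magnitudes from the matrix diagonal and recovers the signs from off-diagonal differences; a numpy sanity check of the same formula (illustration only, not part of the build):

```python
import numpy as np

def quat_from_rotmat(m):
    # Magnitudes from the diagonal, signs from the off-diagonal differences,
    # mirroring getRotationFromSteamVRMatrix above.
    w = np.sqrt(max(0.0, 1 + m[0, 0] + m[1, 1] + m[2, 2])) / 2
    x = np.sqrt(max(0.0, 1 + m[0, 0] - m[1, 1] - m[2, 2])) / 2
    y = np.sqrt(max(0.0, 1 - m[0, 0] + m[1, 1] - m[2, 2])) / 2
    z = np.sqrt(max(0.0, 1 - m[0, 0] - m[1, 1] + m[2, 2])) / 2
    x = np.copysign(x, m[2, 1] - m[1, 2])
    y = np.copysign(y, m[0, 2] - m[2, 0])
    z = np.copysign(z, m[1, 0] - m[0, 1])
    return np.array([w, x, y, z])

# A 90-degree rotation about z should give roughly (0.7071, 0, 0, 0.7071).
rz90 = np.array([[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
print(quat_from_rotmat(rz90))
```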

// Gets a vec3 representation of a column from a glm mat4
// Useful for extracting the rotation component from a matrix
glm::vec3 VRRendererContext::getVec3ColFromMat4(int col_index, glm::mat4& mat) {
    glm::vec3 v;
    v.x = mat[col_index][0];
    v.y = mat[col_index][1];
    v.z = mat[col_index][2];

    return v;
}

// Initializes the SRAnipal runtime, if the user selects this option
void VRRendererContext::initAnipal() {
    if (!ViveSR::anipal::Eye::IsViveProEye()) {
        fprintf(stderr, "This HMD does not support eye-tracking!\n");
    }

    int anipalError = ViveSR::anipal::Initial(ViveSR::anipal::Eye::ANIPAL_TYPE_EYE, NULL);
    switch (anipalError) {
    case ViveSR::Error::WORK:
        break;
    case ViveSR::Error::RUNTIME_NOT_FOUND:
        fprintf(stderr, "SRAnipal runtime not found!\n");
        break;
    default:
        fprintf(stderr, "Failed to initialize SRAnipal!\n");
    }

    // Launch a thread to poll data from the SRAnipal SDK
    // We poll data asynchronously so as to not slow down the VR rendering loop
    eyeTrackingThread = new std::thread(&VRRendererContext::pollAnipal, this);
}

// Polls SRAnipal to get updated eye tracking information
// See this forum discussion to learn how the coordinate systems of OpenVR and SRAnipal are related:
// https://forum.vive.com/topic/5888-vive-pro-eye-finding-a-single-eye-origin-in-world-space/?ct=1593593815
// SRAnipal uses a right-handed coordinate system with +x left, +z forward and +y up
// We need to convert that to the +x right, +z backward and +y up (OpenVR) system at the end of the function
void VRRendererContext::pollAnipal() {
    while (!this->shouldShutDownEyeTracking) {
        this->result = ViveSR::anipal::Eye::GetEyeData(&this->eyeData);
        if (result == ViveSR::Error::WORK) {
            int isOriginValid = ViveSR::anipal::Eye::DecodeBitMask(this->eyeData.verbose_data.combined.eye_data.eye_data_validata_bit_mask,
                ViveSR::anipal::Eye::SINGLE_EYE_DATA_GAZE_DIRECTION_VALIDITY);
            int isDirValid = ViveSR::anipal::Eye::DecodeBitMask(this->eyeData.verbose_data.combined.eye_data.eye_data_validata_bit_mask,
                ViveSR::anipal::Eye::SINGLE_EYE_DATA_GAZE_ORIGIN_VALIDITY);
            if (!isOriginValid || !isDirValid) {
                eyeTrackingData.isValid = false;
                continue;
            }

            eyeTrackingData.isValid = true;

            // Both origin and dir are relative to the HMD coordinate system, so we need the latest HMD transform to move them into world space
            if (!hmdData.isValidData) {
                eyeTrackingData.isValid = false;
                continue;
            }

            // SRAnipal returns the origin in mm, so we need to divide by 1000 to get meters (Gibson uses meters)
            auto gazeOrigin = this->eyeData.verbose_data.combined.eye_data.gaze_origin_mm;
            if (gazeOrigin.x == -1.0f && gazeOrigin.y == -1.0f && gazeOrigin.z == -1.0f) {
                eyeTrackingData.isValid = false;
                continue;
            }
            glm::vec3 eyeSpaceOrigin(-1 * gazeOrigin.x / 1000.0f, gazeOrigin.y / 1000.0f, -1 * gazeOrigin.z / 1000.0f);
            eyeTrackingData.origin = glm::vec3(hmdData.deviceTransform * glm::vec4(eyeSpaceOrigin, 1.0));

            auto gazeDirection = this->eyeData.verbose_data.combined.eye_data.gaze_direction_normalized;
            if (gazeDirection.x == -1.0f && gazeDirection.y == -1.0f && gazeDirection.z == -1.0f) {
                eyeTrackingData.isValid = false;
                continue;
            }

            // Convert to OpenVR coordinates by flipping x and z, since the SRAnipal convention differs from OpenGL
            glm::vec3 eyeSpaceDir(-1 * gazeDirection.x, gazeDirection.y, -1 * gazeDirection.z);

            // Transform into world space, then subtract off the HMD position so that only the
            // rotation component of the HMD transform is applied to the direction
            glm::vec3 hmdSpaceDir(hmdData.deviceTransform * glm::vec4(eyeSpaceDir, 1.0));
            // Make sure to normalize the resulting direction
            eyeTrackingData.dir = glm::normalize(glm::vec3(hmdSpaceDir.x - hmdData.devicePos.x, hmdSpaceDir.y - hmdData.devicePos.y, hmdSpaceDir.z - hmdData.devicePos.z));

            // Record pupil measurements
            eyeTrackingData.leftPupilDiameter = this->eyeData.verbose_data.left.pupil_diameter_mm;
            eyeTrackingData.rightPupilDiameter = this->eyeData.verbose_data.right.pupil_diameter_mm;
        }
    }
}

// Prints the string version of a mat4 for debugging purposes
void VRRendererContext::printMat4(glm::mat4& m) {
    printf("%s\n", glm::to_string(m).c_str());
}

// Prints the string version of a vec3 for debugging purposes
void VRRendererContext::printVec3(glm::vec3& v) {
    printf("%s\n", glm::to_string(v).c_str());
}

// Processes a single VR event
void VRRendererContext::processVREvent(vr::VREvent_t& vrEvent, std::string& deviceType, std::string& eventType) {
    vr::ETrackedDeviceClass trackedDeviceClass = m_pHMD->GetTrackedDeviceClass(vrEvent.trackedDeviceIndex);

    // Exit if we found a non-controller event
    if (trackedDeviceClass != vr::ETrackedDeviceClass::TrackedDeviceClass_Controller) {
        deviceType = "invalid";
        return;
    }

    vr::ETrackedControllerRole role = m_pHMD->GetControllerRoleForTrackedDeviceIndex(vrEvent.trackedDeviceIndex);
    if (role == vr::TrackedControllerRole_Invalid) {
        deviceType = "invalid";
    }
    else if (role == vr::TrackedControllerRole_LeftHand) {
        deviceType = "left_controller";
    }
    else if (role == vr::TrackedControllerRole_RightHand) {
        deviceType = "right_controller";
    }

    switch (vrEvent.data.controller.button) {
    case vr::k_EButton_Grip:
        switch (vrEvent.eventType) {
        case vr::VREvent_ButtonPress:
            eventType = "grip_press";
            break;
        case vr::VREvent_ButtonUnpress:
            eventType = "grip_unpress";
            break;
        default:
            eventType = "invalid";
            break;
        }
        break;

    case vr::k_EButton_SteamVR_Trigger:
        switch (vrEvent.eventType) {
        case vr::VREvent_ButtonPress:
            eventType = "trigger_press";
            break;
        case vr::VREvent_ButtonUnpress:
            eventType = "trigger_unpress";
            break;
        default:
            eventType = "invalid";
            break;
        }
        break;

    case vr::k_EButton_SteamVR_Touchpad:
        switch (vrEvent.eventType) {
        case vr::VREvent_ButtonPress:
            eventType = "touchpad_press";
            break;
        case vr::VREvent_ButtonUnpress:
            eventType = "touchpad_unpress";
            break;
        case vr::VREvent_ButtonTouch:
            eventType = "touchpad_touch";
            break;
        case vr::VREvent_ButtonUntouch:
            eventType = "touchpad_untouch";
            break;
        default:
            eventType = "invalid";
            break;
        }
        break;

    case vr::k_EButton_ApplicationMenu:
        switch (vrEvent.eventType) {
        case vr::VREvent_ButtonPress:
            eventType = "menu_press";
            break;
        case vr::VREvent_ButtonUnpress:
            eventType = "menu_unpress";
            break;
        default:
            eventType = "invalid";
            break;
        }
        break;

    default:
        eventType = "invalid";
        break;
    }
}

// Sets the coordinate transform matrices between the iGibson and OpenVR coordinate systems
void VRRendererContext::setCoordinateTransformMatrices() {
    gibToVR[0] = glm::vec4(0.0, 0.0, -1.0, 0.0);
    gibToVR[1] = glm::vec4(-1.0, 0.0, 0.0, 0.0);
    gibToVR[2] = glm::vec4(0.0, 1.0, 0.0, 0.0);
    gibToVR[3] = glm::vec4(0.0, 0.0, 0.0, 1.0);

    vrToGib[0] = glm::vec4(0.0, -1.0, 0.0, 0.0);
    vrToGib[1] = glm::vec4(0.0, 0.0, 1.0, 0.0);
    vrToGib[2] = glm::vec4(-1.0, 0.0, 0.0, 0.0);
    vrToGib[3] = glm::vec4(0.0, 0.0, 0.0, 1.0);
}
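Both matrices are pure axis permutations with sign flips, so each is simultaneously the transpose and the inverse of the other; a numpy check of that property and of the up-axis mapping (illustration only):

```python
import numpy as np

# numpy columns below match the GLM columns set in setCoordinateTransformMatrices.
gib_to_vr = np.array([[ 0., -1., 0., 0.],
                      [ 0.,  0., 1., 0.],
                      [-1.,  0., 0., 0.],
                      [ 0.,  0., 0., 1.]])
vr_to_gib = np.array([[ 0.,  0., -1., 0.],
                      [-1.,  0.,  0., 0.],
                      [ 0.,  1.,  0., 0.],
                      [ 0.,  0.,  0., 1.]])

assert np.allclose(vr_to_gib, gib_to_vr.T)            # orthonormal: inverse == transpose
assert np.allclose(gib_to_vr @ vr_to_gib, np.eye(4))  # they undo each other
print(gib_to_vr @ np.array([0., 0., 1., 0.]))         # iGibson +z (up) -> OpenVR +y (up)
```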

// Sets the position component of a SteamVR matrix
void VRRendererContext::setSteamVRMatrixPos(glm::vec3& pos, vr::HmdMatrix34_t& mat) {
    mat.m[0][3] = pos[0];
    mat.m[1][3] = pos[1];
    mat.m[2][3] = pos[2];
}

// Calls WaitGetPoses and updates all HMD and controller transformations
void VRRendererContext::updateVRData() {
    hmdData.isValidData = false;
    leftControllerData.isValidData = false;
    rightControllerData.isValidData = false;
    // Stores controller information - see github.com/ValveSoftware/openvr/wiki/IVRSystem::GetControllerState for more info
    vr::VRControllerState_t controllerState;

    vr::TrackedDevicePose_t trackedDevices[vr::k_unMaxTrackedDeviceCount];
    vr::VRCompositor()->WaitGetPoses(trackedDevices, vr::k_unMaxTrackedDeviceCount, NULL, 0);

    for (unsigned int idx = 0; idx < vr::k_unMaxTrackedDeviceCount; idx++) {
        if (!trackedDevices[idx].bPoseIsValid || !m_pHMD->IsTrackedDeviceConnected(idx)) continue;

        vr::HmdMatrix34_t transformMat = trackedDevices[idx].mDeviceToAbsoluteTracking;
        vr::ETrackedDeviceClass trackedDeviceClass = m_pHMD->GetTrackedDeviceClass(idx);

        if (trackedDeviceClass == vr::ETrackedDeviceClass::TrackedDeviceClass_HMD) {
            hmdData.index = idx;
            hmdData.isValidData = true;
            hmdActualPos = getPositionFromSteamVRMatrix(transformMat);

            setSteamVRMatrixPos(hmdActualPos + vrOffsetVec, transformMat);

            hmdData.deviceTransform = convertSteamVRMatrixToGlmMat4(transformMat);
            hmdData.devicePos = getPositionFromSteamVRMatrix(transformMat);
            hmdData.deviceRot = getRotationFromSteamVRMatrix(transformMat);
        }
        else if (trackedDeviceClass == vr::ETrackedDeviceClass::TrackedDeviceClass_Controller) {
            vr::ETrackedControllerRole role = m_pHMD->GetControllerRoleForTrackedDeviceIndex(idx);
            if (role == vr::TrackedControllerRole_Invalid) {
                continue;
            }

            int trigger_index = 0, touchpad_index = 0;

            // Figure out which indices correspond to the trigger and trackpad axes; the index is used to read into the VRControllerState_t struct's array of axes
            for (int i = 0; i < vr::k_unControllerStateAxisCount; i++) {
                int axisType = m_pHMD->GetInt32TrackedDeviceProperty(idx, (vr::ETrackedDeviceProperty)(vr::Prop_Axis0Type_Int32 + i));
                if (axisType == vr::EVRControllerAxisType::k_eControllerAxis_Trigger) {
                    trigger_index = i;
                }
                else if (axisType == vr::EVRControllerAxisType::k_eControllerAxis_TrackPad) {
                    touchpad_index = i;
                }
            }

            // If this returns false, the controller data is marked invalid, as the data is not usable if we can't read the analog touch coordinates and trigger close fraction
            bool getControllerDataResult = m_pHMD->GetControllerState(idx, &controllerState, sizeof(controllerState));

            if (role == vr::TrackedControllerRole_LeftHand) {
                leftControllerData.index = idx;
                leftControllerData.trigger_axis_index = trigger_index;
                leftControllerData.touchpad_axis_index = touchpad_index;
                leftControllerData.isValidData = getControllerDataResult;

                glm::vec3 leftControllerPos = getPositionFromSteamVRMatrix(transformMat);
                setSteamVRMatrixPos(leftControllerPos + vrOffsetVec, transformMat);

                leftControllerData.deviceTransform = convertSteamVRMatrixToGlmMat4(transformMat);
                leftControllerData.devicePos = getPositionFromSteamVRMatrix(transformMat);
                leftControllerData.deviceRot = getRotationFromSteamVRMatrix(transformMat);

                leftControllerData.trig_frac = controllerState.rAxis[leftControllerData.trigger_axis_index].x;
                leftControllerData.touchpad_analog_vec = glm::vec2(controllerState.rAxis[leftControllerData.touchpad_axis_index].x, controllerState.rAxis[leftControllerData.touchpad_axis_index].y);
            }
            else if (role == vr::TrackedControllerRole_RightHand) {
                rightControllerData.index = idx;
                rightControllerData.trigger_axis_index = trigger_index;
                rightControllerData.touchpad_axis_index = touchpad_index;
                rightControllerData.isValidData = getControllerDataResult;

                glm::vec3 rightControllerPos = getPositionFromSteamVRMatrix(transformMat);
                setSteamVRMatrixPos(rightControllerPos + vrOffsetVec, transformMat);

                rightControllerData.deviceTransform = convertSteamVRMatrixToGlmMat4(transformMat);
                rightControllerData.devicePos = getPositionFromSteamVRMatrix(transformMat);
                rightControllerData.deviceRot = getRotationFromSteamVRMatrix(transformMat);

                rightControllerData.trig_frac = controllerState.rAxis[rightControllerData.trigger_axis_index].x;
                rightControllerData.touchpad_analog_vec = glm::vec2(controllerState.rAxis[rightControllerData.touchpad_axis_index].x, controllerState.rAxis[rightControllerData.touchpad_axis_index].y);
            }
        }
    }
}

PYBIND11_MODULE(VRRendererContext, m) {

    py::class_<VRRendererContext> pymodule = py::class_<VRRendererContext>(m, "VRRendererContext");

    pymodule.def(py::init<int, int>());
    pymodule.def("init", &VRRendererContext::init);
    pymodule.def("release", &VRRendererContext::release);
    pymodule.def("render_companion_window_from_buffer", &VRRendererContext::render_companion_window_from_buffer);

    // class MeshRenderer
    pymodule.def("render_meshrenderer_pre", &VRRendererContext::render_meshrenderer_pre,
                 "pre-executed functions in MeshRenderer.render");
    pymodule.def("render_meshrenderer_post", &VRRendererContext::render_meshrenderer_post,
                 "post-executed functions in MeshRenderer.render");
    pymodule.def("getstring_meshrenderer", &VRRendererContext::getstring_meshrenderer, "return GL version string");
    pymodule.def("readbuffer_meshrenderer", &VRRendererContext::readbuffer_meshrenderer, "read pixel buffer");
    pymodule.def("readbuffer_meshrenderer_shadow_depth", &VRRendererContext::readbuffer_meshrenderer_shadow_depth,
                 "read shadow depth pixel buffer");
    pymodule.def("clean_meshrenderer", &VRRendererContext::clean_meshrenderer, "clean meshrenderer");
    pymodule.def("setup_framebuffer_meshrenderer", &VRRendererContext::setup_framebuffer_meshrenderer,
                 "setup framebuffer in meshrenderer");
    pymodule.def("setup_pbr", &VRRendererContext::setup_pbr, "setup pbr");

    pymodule.def("setup_framebuffer_meshrenderer_ms", &VRRendererContext::setup_framebuffer_meshrenderer_ms,
                 "setup framebuffer in meshrenderer with MSAA");
    pymodule.def("blit_buffer", &VRRendererContext::blit_buffer, "blit buffer");

    pymodule.def("compile_shader_meshrenderer", &VRRendererContext::compile_shader_meshrenderer,
                 "compile vertex and fragment shader");
    pymodule.def("load_object_meshrenderer", &VRRendererContext::load_object_meshrenderer,
                 "load object into VAO and VBO");
    pymodule.def("loadTexture", &VRRendererContext::loadTexture, "load texture function");
    pymodule.def("allocateTexture", &VRRendererContext::allocateTexture, "allocate texture function");

    // class Instance
    pymodule.def("render_softbody_instance", &VRRendererContext::render_softbody_instance,
                 "render softbody in instance.render");
    pymodule.def("initvar_instance", &VRRendererContext::initvar_instance, "init uniforms in instance.render");
    pymodule.def("init_material_instance", &VRRendererContext::init_material_instance,
                 "init materials in instance.render");
    pymodule.def("draw_elements_instance", &VRRendererContext::draw_elements_instance,
                 "draw elements in instance.render and instancegroup.render");

    // class InstanceGroup
    pymodule.def("initvar_instance_group", &VRRendererContext::initvar_instance_group,
                 "init uniforms in instancegroup.render");
    pymodule.def("init_material_pos_instance", &VRRendererContext::init_material_pos_instance,
                 "init materials and position in instancegroup.render");

    // misc
    pymodule.def("cglBindVertexArray", &VRRendererContext::cglBindVertexArray, "binding function");
    pymodule.def("cglUseProgram", &VRRendererContext::cglUseProgram, "binding function");

    // for optimized renderer
    pymodule.def("generateArrayTextures", &VRRendererContext::generateArrayTextures, "generate array textures");
    pymodule.def("renderSetup", &VRRendererContext::renderSetup, "set up the optimized render pass");
    pymodule.def("updateDynamicData", &VRRendererContext::updateDynamicData, "update dynamic pose data");
    pymodule.def("renderOptimized", &VRRendererContext::renderOptimized, "run the optimized render pass");
    pymodule.def("clean_meshrenderer_optimized", &VRRendererContext::clean_meshrenderer_optimized, "clean optimized meshrenderer");

    // VR functions
    pymodule.def("getButtonDataForController", &VRRendererContext::getButtonDataForController);
    pymodule.def("getDataForVRDevice", &VRRendererContext::getDataForVRDevice);
    pymodule.def("getDeviceCoordinateSystem", &VRRendererContext::getDeviceCoordinateSystem);
    pymodule.def("getEyeTrackingData", &VRRendererContext::getEyeTrackingData);
    pymodule.def("getVROffset", &VRRendererContext::getVROffset);
    pymodule.def("initVR", &VRRendererContext::initVR);
    pymodule.def("pollVREvents", &VRRendererContext::pollVREvents);
    pymodule.def("postRenderVRForEye", &VRRendererContext::postRenderVRForEye);
    pymodule.def("postRenderVRUpdate", &VRRendererContext::postRenderVRUpdate);
    pymodule.def("preRenderVR", &VRRendererContext::preRenderVR);
    pymodule.def("releaseVR", &VRRendererContext::releaseVR);
    pymodule.def("setVROffset", &VRRendererContext::setVROffset);
    pymodule.def("triggerHapticPulseForDevice", &VRRendererContext::triggerHapticPulseForDevice);

#ifdef VERSION_INFO
    m.attr("__version__") = VERSION_INFO;
#else
    m.attr("__version__") = "dev";
#endif
}

@@ -0,0 +1,151 @@
#ifndef VR_MESH_RENDERER_HEADER
#define VR_MESH_RENDERER_HEADER

#include "glfw_mesh_renderer.h"
#include "SRanipal.h"
#include "SRanipal_Eye.h"
#include "SRanipal_Enums.h"

#include <thread>

#include <glad/gl.h>
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>
#include <glm/gtx/string_cast.hpp>
#include <openvr.h>
#include <pybind11/pybind11.h>
#include <pybind11/numpy.h>
#include <pybind11/stl.h>

namespace py = pybind11;

// Note: VR can only be used with iGibson on Windows, so we need to use a GLFW context for rendering
class VRRendererContext : public GLFWRendererContext {
public:
    // Pointer used to reference the VR system
    vr::IVRSystem* m_pHMD;
    float nearClip;
    float farClip;

    // Vector indicating the user-defined offset for the VR system (may be used if implementing a teleportation movement scheme, for example)
    glm::vec3 vrOffsetVec;

    // Device data stored in VR coordinates
    struct DeviceData {
        // Standard 4x4 transform
        glm::mat4 deviceTransform;
        // x, y, z
        glm::vec3 devicePos;
        // w, x, y, z (quaternion)
        glm::vec4 deviceRot;
        // Is the device valid and being tracked
        bool isValidData = false;
        // Index of the current device in the device array
        int index = -1;
        // Trigger pressed fraction (0 min, 1 max) - controllers only!
        float trig_frac;
        // Analog touch vector - controllers only!
        glm::vec2 touchpad_analog_vec;
        // Both indices are used to obtain analog data for the trigger and touchpad - controllers only!
        int trigger_axis_index;
        int touchpad_axis_index;
    };

    DeviceData hmdData;
    DeviceData leftControllerData;
    DeviceData rightControllerData;

    // Indicates where the headset actually is in the room
    glm::vec3 hmdActualPos;

    // Projection, pose and view matrices for both the left and right eyes (only proj and view are actually returned to the user)
    glm::mat4 leftEyeProj;
    glm::mat4 leftEyePos;
    glm::mat4 leftEyeView;
    glm::mat4 rightEyeProj;
    glm::mat4 rightEyePos;
    glm::mat4 rightEyeView;

    glm::mat4 gibToVR;
    glm::mat4 vrToGib;

    // SRAnipal variables
    bool useEyeTracking;
    std::thread* eyeTrackingThread;
    ViveSR::anipal::Eye::EyeData eyeData;
    int result;
    bool shouldShutDownEyeTracking;

    // Struct storing eye data for SRAnipal - we only return the origin and direction in world space,
    // as most users will want to use this ray to query intersections or something similar
    struct EyeTrackingData {
        bool isValid;
        glm::vec3 origin;
        glm::vec3 dir;
        // Both in mm
        float leftPupilDiameter;
        float rightPupilDiameter;
    };

    EyeTrackingData eyeTrackingData;

    VRRendererContext(int w, int h) : GLFWRendererContext(w, h), m_pHMD(NULL), nearClip(0.1f), farClip(30.0f) {};

    py::list getButtonDataForController(char* controllerType);

    py::list getDataForVRDevice(char* deviceType);

    py::list getDeviceCoordinateSystem(char* device);

    py::list getEyeTrackingData();

    py::list getVROffset();

    void initVR(bool useEyeTracking);

    py::list pollVREvents();

    void postRenderVRForEye(char* eye, GLuint texID);

    void postRenderVRUpdate(bool shouldHandoff);

    py::list preRenderVR();

    void releaseVR();

    void setVROffset(float x, float y, float z);

    void triggerHapticPulseForDevice(char* device, unsigned short microSecondDuration);

private:
    glm::mat4 convertSteamVRMatrixToGlmMat4(const vr::HmdMatrix34_t& matPose);

    glm::mat4 getHMDEyePose(vr::Hmd_Eye eye);

    glm::mat4 getHMDEyeProjection(vr::Hmd_Eye eye);

    glm::vec3 getPositionFromSteamVRMatrix(vr::HmdMatrix34_t& matrix);

    glm::vec4 getRotationFromSteamVRMatrix(vr::HmdMatrix34_t& matrix);

    glm::vec3 getVec3ColFromMat4(int col_index, glm::mat4& mat);

    void initAnipal();

    void pollAnipal();

    void printMat4(glm::mat4& m);

    void printVec3(glm::vec3& v);

    void processVREvent(vr::VREvent_t& vrEvent, std::string& deviceType, std::string& eventType);

    void setCoordinateTransformMatrices();

    void setSteamVRMatrixPos(glm::vec3& pos, vr::HmdMatrix34_t& mat);

    void updateVRData();
};

#endif

@@ -1 +0,0 @@
Subproject commit 96f9f5c4b970140ab5380fb9886ba787c8698937

@@ -1 +0,0 @@
Subproject commit b3f87720261d623986f164b2a7f6a0a938430271

@@ -1,13 +1,15 @@
import logging
import platform
from gibson2.render.mesh_renderer import tinyobjloader
# TODO: Need to add a post-install command so we don't need to use the Release folder
from gibson2.core.render.mesh_renderer.Release import tinyobjloader
import gibson2
import pybullet as p
import gibson2.render.mesh_renderer as mesh_renderer
from gibson2.render.mesh_renderer.get_available_devices import get_available_devices
from gibson2.render.mesh_renderer import EGLRendererContext
from gibson2.utils.mesh_util import perspective, lookat, xyz2mat, quat2rotmat, mat2xyz, \
    safemat2quat, xyzw2wxyz, ortho
import gibson2.core.render.mesh_renderer as mesh_renderer
from gibson2.core.render.mesh_renderer.get_available_devices import get_available_devices
from transforms3d.euler import quat2euler, mat2euler
from transforms3d.quaternions import axangle2quat, mat2quat
from gibson2.core.render.mesh_renderer.glutils.meshutil import perspective, lookat, xyz2mat, quat2rotmat, mat2xyz, \
    safemat2quat, xyzw2wxyz
import numpy as np
import os
import sys
@@ -20,7 +22,6 @@ Image.MAX_IMAGE_PIXELS = None

# from pyassimp import load, release


class VisualObject(object):
    """
    A visual object manages a set of VAOs and textures, one wavefront obj file loads into openGL, and managed

@@ -152,7 +153,8 @@ class InstanceGroup(object):
        else:
            buffer = self.renderer.fbo

        self.renderer.r.draw_elements_instance(self.renderer.materials_mapping[self.renderer.mesh_materials[object_idx]].is_texture(),
        self.renderer.r.draw_elements_instance(self.renderer.shaderProgram,
                                               self.renderer.materials_mapping[self.renderer.mesh_materials[object_idx]].is_texture(),
                                               texture_id,
                                               metallic_texture_id,
                                               roughness_texture_id,

@@ -310,7 +312,8 @@ class Instance(object):
        else:
            buffer = self.renderer.fbo

        self.renderer.r.draw_elements_instance(self.renderer.materials_mapping[self.renderer.mesh_materials[object_idx]].is_texture(),
        self.renderer.r.draw_elements_instance(self.renderer.shaderProgram,
                                               self.renderer.materials_mapping[self.renderer.mesh_materials[object_idx]].is_texture(),
                                               texture_id,
                                               metallic_texture_id,
                                               roughness_texture_id,

@@ -511,7 +514,7 @@ class MeshRenderer(object):

    def __init__(self, width=512, height=512, vertical_fov=90, device_idx=0, use_fisheye=False, msaa=False,
                 enable_shadow=False, env_texture_filename=os.path.join(gibson2.assets_path, 'test', 'Rs.hdr'),
                 optimized=False, skybox_size=20.):
                 optimized=False, fullscreen=False, skybox_size=20.):
        """
        :param width: width of the renderer output
        :param height: height of the renderer output

@@ -522,6 +525,7 @@ class MeshRenderer(object):
        :param env_texture_filename: texture filename for PBR lighting
        """
        self.shaderProgram = None
        self.windowShaderProgram = None
        self.fbo = None
        self.color_tex_rgb, self.color_tex_normal, self.color_tex_semantics, self.color_tex_3d = None, None, None, None
        self.depth_tex = None

@@ -538,37 +542,50 @@ class MeshRenderer(object):
        self.instances = []
        self.fisheye = use_fisheye
        self.optimized = optimized
        self.fullscreen = fullscreen
        self.texture_files = {}
        self.texture_load_counter = 0
        self.enable_shadow = enable_shadow
        self.platform = platform.system()

        device_idx = None
        device = None
        if os.environ.get('GIBSON_DEVICE_ID', None):
            device = int(os.environ.get('GIBSON_DEVICE_ID'))
            logging.info("GIBSON_DEVICE_ID environment variable has been manually set. "
                         "Using device {} for rendering".format(device))
        else:
            available_devices = get_available_devices()
            if device_idx < len(available_devices):
                device = available_devices[device_idx]
                logging.info("Using device {} for rendering".format(device))
            else:
                logging.info(
                    "Device index is larger than number of devices, falling back to use 0")
                device = 0
            if self.platform != 'Windows':
                available_devices = get_available_devices()
                if device_idx < len(available_devices):
                    device = available_devices[device_idx]
                    logging.info("Using device {} for rendering".format(device))
                else:
                    logging.info(
                        "Device index is larger than number of devices, falling back to use 0")
                    device = 0

        self.device_idx = device_idx
        self.device_minor = device
        self.msaa = msaa
        self.platform = platform.system()
        if self.platform == 'Darwin' and self.optimized:
            logging.error('Optimized renderer is not supported on Mac')
            exit()
        if self.platform == 'Darwin':
            from gibson2.core.render.mesh_renderer import GLFWRendererContext
            self.r = GLFWRendererContext.GLFWRendererContext(width, height)
        elif self.platform == 'Windows':
            from gibson2.core.render.mesh_renderer.Release import VRRendererContext
            self.r = VRRendererContext.VRRendererContext(width, height)
        else:
            from gibson2.core.render.mesh_renderer import EGLRendererContext
            self.r = EGLRendererContext.EGLRendererContext(
                width, height, device)
        self.r.init()

        if self.platform == 'Windows':
            self.r.init(True, self.fullscreen)
        else:
            self.r.init()

        self.glstring = self.r.getstring_meshrenderer()

@@ -635,6 +652,8 @@ class MeshRenderer(object):
        self.P = np.ascontiguousarray(P, np.float32)
        self.materials_mapping = {}
        self.mesh_materials = []
        self.texture_files = []
        self.texture_load_counter = 0

        self.env_texture_filename = env_texture_filename
        self.skybox_size = skybox_size

@@ -785,10 +804,10 @@ class MeshRenderer(object):

        if input_kd is not None:  # append the default material in the end, in case material loading fails
            self.materials_mapping[len(
                materials) + material_count] = Material('color', kd=input_kd)
                materials) + material_count] = Material('color', kd=input_kd, texture_id=-1)
        else:
            self.materials_mapping[len(
                materials) + material_count] = Material('color', kd=[0.5, 0.5, 0.5])
                materials) + material_count] = Material('color', kd=[0.5, 0.5, 0.5], texture_id=-1)

        VAO_ids = []

@@ -869,8 +888,13 @@ class MeshRenderer(object):
            self.vertex_data.append(vertexData)
            self.shapes.append(shape)
            if material_id == -1:  # if material loading fails, use the default material
                mapping_idx = len(materials) + material_count
                mat = self.materials_mapping[mapping_idx]
                self.mesh_materials.append(len(materials) + material_count)
            else:
                mapping_idx = material_id + material_count
                mat = self.materials_mapping[mapping_idx]
                tex_id = mat.texture_id
                self.mesh_materials.append(material_id + material_count)

            logging.debug('mesh_materials: {}'.format(self.mesh_materials))

@@ -881,6 +905,24 @@ class MeshRenderer(object):
        self.visual_objects.append(new_obj)
        return VAO_ids

    def update_dynamic_positions(self):
        """
        A function to update all dynamic positions.
        """
        trans_data = []
        rot_data = []

        for instance in self.instances:
            if isinstance(instance, Instance):
                trans_data.append(instance.pose_trans)
                rot_data.append(instance.pose_rot)
            elif isinstance(instance, InstanceGroup) or isinstance(instance, Robot):
                trans_data.extend(instance.poses_trans)
                rot_data.extend(instance.poses_rot)

        self.pose_trans_array = np.ascontiguousarray(np.concatenate(trans_data, axis=0))
        self.pose_rot_array = np.ascontiguousarray(np.concatenate(rot_data, axis=0))

    def add_instance(self,
                     object_id,
                     pybullet_uuid=None,

@@ -1008,15 +1050,15 @@ class MeshRenderer(object):
            results.append(frame)
        return results

    def render(self, modes=('rgb', 'normal', 'seg', '3d'), hidden=()):
    def render(self, modes=('rgb', 'normal', 'seg', '3d'), hidden=(), display_companion_window=False):
        """
        A function to render all the instances in the renderer and read the output from framebuffer.

        :param modes: it should be a tuple consisting of a subset of ('rgb', 'normal', 'seg', '3d').
        :param hidden: Hidden instances to skip. When rendering from a robot's perspective, its own body can be
            hidden
        :param display_companion_window: bool indicating whether we should render the companion window. If set to True,
            render the window and don't return the frame buffers as numpy arrays (to increase speed)
        :return: a list of float32 numpy arrays of shape (H, W, 4) corresponding to `modes`, where the last channel is alpha
        """

@@ -1072,10 +1114,24 @@ class MeshRenderer(object):
                instance.render(shadow_pass=0)

        self.r.render_meshrenderer_post()

        if self.msaa:
            self.r.blit_buffer(self.width, self.height, self.fbo_ms, self.fbo)

        return self.readbuffer(modes)
        if display_companion_window:
            self.render_companion_window()
        else:
            return self.readbuffer(modes)

    # The viewer is responsible for calling this function to update the window, if cv2 is not being used for window display
    def render_companion_window(self):
        self.r.render_companion_window_from_buffer(self.fbo)

    def get_visual_objects(self):
        return self.visual_objects

    def get_instances(self):
        return self.instances

    def set_light_pos(self, light):
        self.lightpos = light

@@ -1116,7 +1172,6 @@ class MeshRenderer(object):
        if self.optimized:
            self.r.clean_meshrenderer_optimized(clean_list, [self.tex_id_1, self.tex_id_2], fbo_list,
                                                [self.optimized_VAO], [self.optimized_VBO], [self.optimized_EBO])
            # TODO: self.VAOs, self.VBOs might also need to be cleaned
        else:
            self.r.clean_meshrenderer(
                clean_list, self.textures, fbo_list, self.VAOs, self.VBOs)

@@ -1180,7 +1235,8 @@ class MeshRenderer(object):
    def optimize_vertex_and_texture(self):
        for tex_file in self.texture_files:
            print("Texture: ", tex_file)
        cutoff = 4000 * 4000
        # Set the cutoff at roughly 4096x4096, otherwise we end up filling VRAM very quickly
        cutoff = 5000 * 5000
        shouldShrinkSmallTextures = True
        smallTexSize = 512
        texture_files = sorted(self.texture_files.items(), key=lambda x: x[1])

@@ -0,0 +1,66 @@
import os

import gibson2
from gibson2.core.render.mesh_renderer.mesh_renderer_cpu import MeshRenderer

class MeshRendererVR(MeshRenderer):
    """
    MeshRendererVR is iGibson's VR rendering class. It handles rendering to the VR headset and provides
    a link to the underlying VRRendererContext, on which various functions can be called.
    """

    def __init__(self, width=512, height=512, vertical_fov=90, device_idx=0, use_fisheye=False, msaa=False,
                 enable_shadow=False, env_texture_filename=os.path.join(gibson2.assets_path, 'test', 'Rs.hdr'),
                 optimized=False, fullscreen=False, useEyeTracking=False, vrMode=True):
        self.fullscreen = fullscreen
        self.useEyeTracking = useEyeTracking
        self.vrMode = vrMode
        self.baseWidth = 1080
        self.baseHeight = 1200
        self.scaleFactor = 1.4
        self.width = int(self.baseWidth * self.scaleFactor)
        self.height = int(self.baseHeight * self.scaleFactor)
        super().__init__(width=self.width, height=self.height, optimized=True, fullscreen=self.fullscreen)

        # Rename self.r to self.vrsys to make it easier to understand and use
        self.vrsys = self.r
        # Default recommended render target size is 2016 x 2240
        if self.vrMode:
            self.vrsys.initVR(self.useEyeTracking)

    # Renders VR scenes and returns the left eye frame
    def render(self):
        if self.vrMode:
            leftProj, leftView, rightProj, rightView = self.vrsys.preRenderVR()

            # Render and submit left eye
            self.V = leftView
            self.P = leftProj

            super().render(modes=('rgb',), display_companion_window=True)
            self.vrsys.postRenderVRForEye("left", self.color_tex_rgb)
            # Render and submit right eye
            self.V = rightView
            self.P = rightProj

            super().render(modes=('rgb',), display_companion_window=True)
            self.vrsys.postRenderVRForEye("right", self.color_tex_rgb)

            # TODO: Experiment with this boolean for handoff
            self.vrsys.postRenderVRUpdate(False)
        else:
            super().render(modes=('rgb',), display_companion_window=True)

    # Get view and projection matrices from the renderer
    def get_view_proj(self):
        return [self.V, self.P]

    # Set view and projection matrices in the renderer
    def set_view_proj(self, v_to_set, p_to_set):
        self.V = v_to_set
        self.P = p_to_set

    # Releases the VR system and renderer
    def release(self):
        super().release()
        self.vrsys.releaseVR()
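A minimal end-to-end sketch of driving this class through the Simulator changes later in this commit (illustration only; assumes a Windows build with the VR extension, SteamVR running, iGibson assets installed, and the Simulator import path is an assumption):

```python
from gibson2.simulator import Simulator  # assumed import path

# mode='vr' selects MeshRendererVR and ViewerVR inside the Simulator
s = Simulator(mode='vr', vrFullscreen=False, vrEyeTracking=False, vrMode=True)

# ... import a scene and robots here, as in the demos ...

for _ in range(1000):
    s.step()  # steps physics, then renders and submits both eyes to the HMD

s.disconnect()
```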

@@ -0,0 +1,50 @@
#version 410

#define MAX_ARRAY_SIZE 512

uniform sampler2DArray bigTex;
uniform sampler2DArray smallTex;

uniform TexColorData {
    vec4 tex_data[MAX_ARRAY_SIZE];
    vec4 diffuse_colors[MAX_ARRAY_SIZE];
};

in vec2 theCoords;
in vec3 Normal;
in vec3 Normal_cam;
in vec3 FragPos;
in vec3 Pos_cam;
flat in int Draw_id;

layout (location = 0) out vec4 outputColour;
layout (location = 1) out vec4 NormalColour;
layout (location = 2) out vec4 InstanceColour;
layout (location = 3) out vec4 PCColour;

uniform vec3 light_position;  // in world coordinates
uniform vec3 light_color;     // light color

void main() {
    float ambientStrength = 0.2;
    vec3 ambient = ambientStrength * light_color;
    vec3 lightDir = normalize(light_position - FragPos);
    float diff = 0.5 + 0.5 * max(dot(Normal, lightDir), 0.0);
    vec3 diffuse = diff * light_color;
    vec4 curr_tex_data = tex_data[Draw_id];
    int tex_num = int(curr_tex_data.x);
    int tex_layer = int(curr_tex_data.y);
    float instance_color = curr_tex_data.z;

    if (tex_num == -1) {
        outputColour = diffuse_colors[Draw_id] * diff;  // diffuse color
    } else if (tex_num == 0) {
        outputColour = texture(bigTex, vec3(theCoords.x, theCoords.y, tex_layer));
    } else if (tex_num == 1) {
        outputColour = texture(smallTex, vec3(theCoords.x, theCoords.y, tex_layer));
    }

    NormalColour = vec4((Normal_cam + 1) / 2, 1);
    InstanceColour = vec4(instance_color, 0, 0, 1);
    PCColour = vec4(Pos_cam, 1);
}
@@ -0,0 +1,40 @@
#version 410

#define MAX_ARRAY_SIZE 512

layout (std140) uniform TransformData {
    mat4 pose_trans_array[MAX_ARRAY_SIZE];
    mat4 pose_rot_array[MAX_ARRAY_SIZE];
};

in int gl_DrawID;

uniform mat4 V;
uniform mat4 P;

uniform vec3 instance_color;
uniform vec3 diffuse_color;

layout (location = 0) in vec3 position;
layout (location = 1) in vec3 normal;
layout (location = 2) in vec2 texCoords;
out vec2 theCoords;
out vec3 Normal;
out vec3 FragPos;
out vec3 Normal_cam;
out vec3 Pos_cam;
flat out int Draw_id;

void main() {
    mat4 pose_trans = pose_trans_array[gl_DrawID];
    mat4 pose_rot = transpose(pose_rot_array[gl_DrawID]);
    gl_Position = P * V * pose_trans * pose_rot * vec4(position, 1);
    vec4 world_position4 = pose_trans * pose_rot * vec4(position, 1);
    FragPos = vec3(world_position4.xyz / world_position4.w);  // in world coordinates
    Normal = normalize(mat3(pose_rot) * normal);  // in world coordinates
    Normal_cam = normalize(mat3(V) * mat3(pose_rot) * normal);  // in camera coordinates
    vec4 pos_cam4 = V * pose_trans * pose_rot * vec4(position, 1);
    Pos_cam = pos_cam4.xyz / pos_cam4.w;
    theCoords = texCoords;
    Draw_id = gl_DrawID;
}

@@ -1 +0,0 @@
Subproject commit 38370a87f417001f3984cfa929860dabce6b69e7
@@ -2,6 +2,21 @@ import cv2
import numpy as np
import pybullet as p
from gibson2.objects.visual_marker import VisualMarker
from gibson2.render.mesh_renderer.mesh_renderer_vr import MeshRendererVR

class ViewerVR:
    def __init__(self):
        self.renderer = None

    def update(self, should_reset_vr_camera=False, vr_camera_pos=None):
        if should_reset_vr_camera:
            self.renderer.reset_vr_camera()
        elif vr_camera_pos is not None:
            self.renderer.set_vr_camera(vr_camera_pos)

        self.renderer.render()
        # The viewer is responsible for calling the companion window rendering function
        self.renderer.render_companion_window()

class Viewer:
    def __init__(self,

@@ -118,6 +118,7 @@ class BaseRobot:
                             force=0)
            _, joint_name, joint_type, _, _, _, _, _, _, _, _, _, part_name, _, _, _, _ = \
                p.getJointInfo(bodies[0], j)
            print("Joint name and index: ", joint_name, j)
            logging.debug('Robot joint: {}'.format(p.getJointInfo(bodies[0], j)))
            joint_name = joint_name.decode("utf8")
            part_name = part_name.decode("utf8")

@@ -165,6 +166,7 @@ class BodyPart:
        self.body_part_index = body_part_index
        self.initialPosition = self.get_position()
        self.initialOrientation = self.get_orientation()
        self.movement_cid = -1

    def get_name(self):
        return self.body_name

@@ -244,7 +246,6 @@ class BodyPart:
    def contact_list(self):
        return p.getContactPoints(self.bodies[self.body_index], -1, self.body_part_index, -1)


class Joint:
    def __init__(self, joint_name, bodies, body_index, joint_index):
        self.bodies = bodies

@@ -5,6 +5,8 @@ from gibson2.utils.utils import parse_config
import os
import gibson2
from gibson2.utils.assets_utils import download_assets, download_demo_data
import time
import matplotlib.pyplot as plt


class DemoInteractive(object):

@@ -17,15 +19,24 @@ class DemoInteractive(object):
        s = Simulator(mode='gui', image_width=700, image_height=700)
        scene = StaticIndoorScene('Rs_interactive')
        s.import_scene(scene)
        turtlebot = Turtlebot(config)
        s.import_robot(turtlebot)
        if optimize:
            s.renderer.optimize_vertex_and_texture()

        for i in range(10000):
            turtlebot.apply_action([0.1,0.5])
        fps = []
        for i in range(5000):
            #turtlebot.apply_action([0.1,0.5])
            start = time.time()
            s.step()

            elapsed = time.time() - start
            print(1/elapsed)
            fps.append(1/elapsed)
        s.disconnect()
        return fps


if __name__ == "__main__":
    DemoInteractive().run_demo()
    res1 = DemoInteractive().run_demo(True)
    res2 = DemoInteractive().run_demo(False)

    from IPython import embed
    embed()
@ -1,7 +1,8 @@
|
|||
from gibson2.utils.mesh_util import quat2rotmat, xyzw2wxyz, xyz2mat
|
||||
from gibson2.render.mesh_renderer.mesh_renderer_vr import MeshRendererVR
|
||||
from gibson2.render.mesh_renderer.mesh_renderer_cpu import MeshRenderer, InstanceGroup, Instance
|
||||
from gibson2.render.mesh_renderer.mesh_renderer_tensor import MeshRendererG2G
|
||||
from gibson2.render.viewer import Viewer
|
||||
from gibson2.render.viewer import Viewer, ViewerVR
|
||||
import pybullet as p
|
||||
import gibson2
|
||||
import os
|
||||
|
@ -9,7 +10,7 @@ import numpy as np
|
|||
import platform
|
||||
import logging
|
||||
from IPython import embed
|
||||
|
||||
import time
|
||||
|
||||
class Simulator:
|
||||
def __init__(self,
|
||||
|
@ -27,7 +28,11 @@ class Simulator:
|
|||
auto_sync=True,
|
||||
optimized_renderer=False,
|
||||
env_texture_filename=None,
|
||||
skybox_size=20.):
|
||||
skybox_size=20.,
|
||||
use_dynamic_timestep=False,
|
||||
vrFullscreen=True,
|
||||
vrEyeTracking=False,
|
||||
vrMode=True):
|
||||
"""
|
||||
Simulator class is a wrapper of physics simulator (pybullet) and MeshRenderer, it loads objects into
|
||||
both pybullet and also MeshRenderer and syncs the pose of objects and robot parts.
|
||||
|
@ -58,14 +63,32 @@ class Simulator:
|
|||
|
||||
self.use_pb_renderer = False
|
||||
self.use_ig_renderer = False
|
||||
|
||||
self.use_vr_renderer = False
|
||||
|
||||
if self.mode in ['gui', 'iggui']:
|
||||
self.use_ig_renderer = True
|
||||
|
||||
if self.mode in ['gui', 'pbgui']:
|
||||
self.use_pb_renderer = True
|
||||
|
||||
if self.mode in ['vr']:
|
||||
self.use_vr_renderer = True
|
||||
|
||||
# This will only be set once (to 20, 45 or 90) after initial measurements
|
||||
self.use_dynamic_timestep = use_dynamic_timestep
|
||||
# Number of frames to average over to figure out fps value
|
||||
self.frame_measurement_num = 45
|
||||
self.current_frame_count = 0
|
||||
self.frame_time_sum = 0.0
|
||||
self.should_set_timestep = True
|
||||
# Low pass-filtered average frame time, set to 0 to start
|
||||
self.avg_frame_time = 0
|
||||
|
||||
# renderer
|
||||
self.vrFullscreen = vrFullscreen
|
||||
self.vrEyeTracking = vrEyeTracking
|
||||
self.vrMode = vrMode
|
||||
self.max_haptic_duration = 4000
|
||||
self.image_width = image_width
|
||||
self.image_height = image_height
|
||||
self.vertical_fov = vertical_fov
|
||||
|
@ -92,7 +115,11 @@ class Simulator:
|
|||
Attach a debugging viewer to the renderer. This will make the step much slower so should be avoided when
|
||||
training agents
|
||||
"""
|
||||
self.viewer = Viewer(simulator=self, renderer=self.renderer)
|
||||
if self.use_vr_renderer:
|
||||
self.viewer = ViewerVR()
|
||||
else:
|
||||
self.viewer = Viewer()
|
||||
self.viewer.renderer = self.renderer
|
||||
|
||||
def reload(self):
|
||||
"""
|
||||
|
@ -113,6 +140,18 @@ class Simulator:
                use_fisheye=self.use_fisheye,
                enable_shadow=self.enable_shadow,
                msaa=self.enable_msaa)
        elif self.use_vr_renderer:
            self.renderer = MeshRendererVR(width=self.image_width,
                                           height=self.image_height,
                                           vertical_fov=self.vertical_fov,
                                           device_idx=self.device_idx,
                                           use_fisheye=self.use_fisheye,
                                           enable_shadow=self.enable_shadow,
                                           msaa=self.enable_msaa,
                                           optimized=self.optimized_renderer,
                                           fullscreen=self.vrFullscreen,
                                           useEyeTracking=self.vrEyeTracking,
                                           vrMode=self.vrMode)
        else:
            if self.env_texture_filename is not None:
                self.renderer = MeshRenderer(width=self.image_width,
@ -145,6 +184,9 @@ class Simulator:
        p.setPhysicsEngineParameter(enableFileCaching=0)
        print("PyBullet Logging Information******************")

        if (self.use_ig_renderer or self.use_vr_renderer) and not self.render_to_tensor:
            self.add_viewer()

        self.visual_objects = {}
        self.robots = []
        self.scene = None
@ -287,7 +329,7 @@ class Simulator:
                transform_pos=rel_pos,
                input_kd=color[:3],
                scale=[dimensions[0] / 0.5, dimensions[0] / 0.5, dimensions[0] / 0.5])
            visual_object = len(self.renderer.visual_objects) - 1
            visual_object = len(self.renderer.get_visual_objects()) - 1
        elif type == p.GEOM_CAPSULE or type == p.GEOM_CYLINDER:
            filename = os.path.join(
                gibson2.assets_path, 'models/mjcf_primitives/cube.obj')

@ -297,7 +339,7 @@ class Simulator:
                transform_pos=rel_pos,
                input_kd=color[:3],
                scale=[dimensions[1] / 0.5, dimensions[1] / 0.5, dimensions[0]])
            visual_object = len(self.renderer.visual_objects) - 1
            visual_object = len(self.renderer.get_visual_objects()) - 1
        elif type == p.GEOM_BOX:
            filename = os.path.join(
                gibson2.assets_path, 'models/mjcf_primitives/cube.obj')

@ -306,7 +348,7 @@ class Simulator:
                transform_pos=rel_pos,
                input_kd=color[:3],
                scale=np.array(dimensions))
            visual_object = len(self.renderer.visual_objects) - 1
            visual_object = len(self.renderer.get_visual_objects()) - 1

        if visual_object is not None:
            self.renderer.add_instance(visual_object,

@ -361,7 +403,7 @@ class Simulator:
                transform_pos=rel_pos,
                input_kd=color[:3],
                scale=[dimensions[0] / 0.5, dimensions[0] / 0.5, dimensions[0] / 0.5])
            visual_objects.append(len(self.renderer.visual_objects) - 1)
            visual_objects.append(len(self.renderer.get_visual_objects()) - 1)
            link_ids.append(link_id)
        elif type == p.GEOM_CAPSULE or type == p.GEOM_CYLINDER:
            filename = os.path.join(

@ -372,7 +414,7 @@ class Simulator:
                transform_pos=rel_pos,
                input_kd=color[:3],
                scale=[dimensions[1] / 0.5, dimensions[1] / 0.5, dimensions[0]])
            visual_objects.append(len(self.renderer.visual_objects) - 1)
            visual_objects.append(len(self.renderer.get_visual_objects()) - 1)
            link_ids.append(link_id)
        elif type == p.GEOM_BOX:
            filename = os.path.join(

@ -382,7 +424,7 @@ class Simulator:
                transform_pos=rel_pos,
                input_kd=color[:3],
                scale=np.array(dimensions))
            visual_objects.append(len(self.renderer.visual_objects) - 1)
            visual_objects.append(len(self.renderer.get_visual_objects()) - 1)
            link_ids.append(link_id)

        if link_id == -1:

@ -458,7 +500,7 @@ class Simulator:
                transform_pos=rel_pos,
                input_kd=color[:3],
                scale=[dimensions[0] / 0.5, dimensions[0] / 0.5, dimensions[0] / 0.5])
            visual_objects.append(len(self.renderer.visual_objects) - 1)
            visual_objects.append(len(self.renderer.get_visual_objects()) - 1)
            link_ids.append(link_id)
        elif type == p.GEOM_CAPSULE or type == p.GEOM_CYLINDER:
            filename = os.path.join(

@ -469,7 +511,7 @@ class Simulator:
                transform_pos=rel_pos,
                input_kd=color[:3],
                scale=[dimensions[1] / 0.5, dimensions[1] / 0.5, dimensions[0]])
            visual_objects.append(len(self.renderer.visual_objects) - 1)
            visual_objects.append(len(self.renderer.get_visual_objects()) - 1)
            link_ids.append(link_id)
        elif type == p.GEOM_BOX:
            filename = os.path.join(

@ -479,7 +521,7 @@ class Simulator:
                transform_pos=rel_pos,
                input_kd=color[:3],
                scale=np.array(dimensions))
            visual_objects.append(len(self.renderer.visual_objects) - 1)
            visual_objects.append(len(self.renderer.get_visual_objects()) - 1)
            link_ids.append(link_id)

        if link_id == -1:
@ -499,25 +541,158 @@ class Simulator:
                     robot=None)

        return id

    def optimize_data(self):
        """Optimizes data for optimized rendering.
        This should be called once before starting/stepping the simulation.
        """
        self.renderer.optimize_vertex_and_texture()

    def step(self):
    def step(self, shouldPrintTime=False):
        """
        Step the simulation and update positions in renderer
        """
        start_time = time.time()

        p.stepSimulation()

        physics_time = time.time() - start_time
        curr_time = time.time()

        if self.auto_sync:
            self.sync()

        render_time = time.time() - curr_time
        curr_frame_time = physics_time + render_time
        self.frame_time_sum += curr_frame_time
        self.current_frame_count += 1
        if self.current_frame_count >= self.frame_measurement_num and self.should_set_timestep and self.use_dynamic_timestep:
            unrounded_timestep = self.frame_time_sum / float(self.frame_measurement_num)
            rounded_timestep = 0
            if unrounded_timestep > 1.0 / 32.5:
                rounded_timestep = 1.0 / 20.0
            elif unrounded_timestep > 1.0 / 67.5:
                rounded_timestep = 1.0 / 45.0
            else:
                rounded_timestep = 1.0 / 90.0

            print("Final rounded timestep is: ", rounded_timestep)
            print("Final fps is: ", int(1 / rounded_timestep))
            self.set_timestep(rounded_timestep)
            self.should_set_timestep = False

        if shouldPrintTime:
            print("Physics time: %f" % float(physics_time / 0.001))
            print("Render time: %f" % float(render_time / 0.001))
            print("Total frame time: %f" % float(curr_frame_time / 0.001))
            if curr_frame_time <= 0:
                print("Curr fps: 1000")
            else:
                print("Curr fps: %f" % float(1 / curr_frame_time))
            print("___________________________________")
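For reference, a minimal usage sketch of the dynamic timestep path (hedged: the import path is assumed from the new directory structure, and scene loading is elided):

```
from gibson2.simulator import Simulator

s = Simulator(mode='vr', use_dynamic_timestep=True)
# ... load scene and objects here ...
for _ in range(1000):
    # After the first 45 measured frames, the timestep is fixed once
    # to 1/20, 1/45 or 1/90 s, matching the measured VR frame rate.
    s.step(shouldPrintTime=False)
```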
    def sync(self):
        """
        Update positions in renderer without stepping the simulation. Usually used in the reset() function
        """
        for instance in self.renderer.instances:
        for instance in self.renderer.get_instances():
            if instance.dynamic:
                self.update_position(instance)
        if self.use_ig_renderer and self.viewer is not None:
        if (self.use_ig_renderer or self.use_vr_renderer) and self.viewer is not None:
            self.viewer.update()
    # Returns event data as a list of lists. Each sub-list contains deviceType and eventType. The list is empty if all
    # events are invalid.
    # deviceType: left_controller, right_controller
    # eventType: grip_press, grip_unpress, trigger_press, trigger_unpress, touchpad_press, touchpad_unpress,
    # touchpad_touch, touchpad_untouch, menu_press, menu_unpress (menu is the application button)
    def pollVREvents(self):
        if not self.use_vr_renderer:
            return []

        eventData = self.renderer.vrsys.pollVREvents()
        return eventData
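A sketch of how these events might be consumed each frame (the print statements are placeholders; `s` is assumed to be a Simulator in 'vr' mode):

```
for deviceType, eventType in s.pollVREvents():
    if deviceType == 'right_controller' and eventType == 'trigger_press':
        print('right trigger pressed')
    elif eventType == 'menu_press':
        print('%s application button pressed' % deviceType)
```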
    # Call this after step - returns all VR device data for a specific device
    # Device can be hmd, left_controller or right_controller
    # Returns isValid (indicating validity of data), translation and rotation in Gibson world space
    def getDataForVRDevice(self, deviceName):
        if not self.use_vr_renderer:
            return [None, None, None]

        # Use the fourth variable in the list to get the actual hmd position in space
        isValid, translation, rotation, _ = self.renderer.vrsys.getDataForVRDevice(deviceName)
        return [isValid, translation, rotation]

    # Get the world position of the HMD without offset
    def getHmdWorldPos(self):
        if not self.use_vr_renderer:
            return None

        _, _, _, hmd_world_pos = self.renderer.vrsys.getDataForVRDevice('hmd')
        return hmd_world_pos
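Reading a device pose after stepping might look like this (a sketch, again assuming `s` is a Simulator in 'vr' mode):

```
s.step()
isValid, trans, rot = s.getDataForVRDevice('right_controller')
if isValid:
    # trans is a position in Gibson world space, rot a quaternion
    print(trans, rot)
hmd_pos = s.getHmdWorldPos()
```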
    # Call this after getDataForVRDevice - returns analog data for a specific controller
    # Controller can be left_controller or right_controller
    # Returns trigger_fraction, touchpad finger position x, touchpad finger position y
    # Data is only valid if isValid is true from the previous call to getDataForVRDevice
    # Trigger data: 1 (closed) <------> 0 (open)
    # Analog data: X: -1 (left) <-----> 1 (right) and Y: -1 (bottom) <------> 1 (top)
    def getButtonDataForController(self, controllerName):
        if not self.use_vr_renderer:
            return [None, None, None]

        trigger_fraction, touch_x, touch_y = self.renderer.vrsys.getButtonDataForController(controllerName)
        return [trigger_fraction, touch_x, touch_y]
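A hedged sketch of reading the analog values; the 0.9 threshold and the helper function are illustrative, not part of this commit:

```
trig_frac, touch_x, touch_y = s.getButtonDataForController('left_controller')
if trig_frac is not None and trig_frac > 0.9:
    # Trigger nearly closed - treat as a grab (hypothetical helper)
    grab_nearest_object()
```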
    # Returns eye tracking data as a list. Order: is_valid, gaze origin, gaze direction, left pupil diameter, right pupil diameter (both diameters in millimeters)
    # Call after getDataForVRDevice, to guarantee that the latest HMD transform has been acquired
    def getEyeTrackingData(self):
        if not self.use_vr_renderer or not self.vrEyeTracking:
            return [None, None, None, None, None]

        is_valid, origin, dir, left_pupil_diameter, right_pupil_diameter = self.renderer.vrsys.getEyeTrackingData()
        return [is_valid, origin, dir, left_pupil_diameter, right_pupil_diameter]
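A sketch of consuming the gaze data (assumes the simulator was created with vrEyeTracking=True and SRAnipal is running; `p.rayTest` is standard pybullet):

```
is_valid, origin, direction, left_d, right_d = s.getEyeTrackingData()
if is_valid:
    # Cast a ray along the gaze to find what the user is looking at
    far_point = [origin[i] + 10.0 * direction[i] for i in range(3)]
    hits = p.rayTest(origin, far_point)
```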
    # Sets the translational offset of the VR system (HMD, left controller, right controller)
    # Can be used for many things, including adjusting height and teleportation-based movement
    # Input must be a list of three floats, corresponding to x, y, z in Gibson coordinate space
    def setVROffset(self, pos=None):
        if not self.use_vr_renderer:
            return

        self.renderer.vrsys.setVROffset(-pos[1], pos[2], -pos[0])

    # Gets the current VR offset vector in list form: x, y, z (in Gibson coordinates)
    def getVROffset(self):
        if not self.use_vr_renderer:
            return [None, None, None]

        x, y, z = self.renderer.vrsys.getVROffset()
        return [x, y, z]
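For example, a teleport-style move built on these two calls (the step size is illustrative):

```
x, y, z = s.getVROffset()
# Shift the whole VR system half a meter along Gibson's x axis
s.setVROffset([x + 0.5, y, z])
```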
    # Gets the direction vectors representing the device's coordinate system in list form: x, y, z (in Gibson coordinates)
    # The list contains the "right", "up" and "forward" vectors in that order
    # Device can be one of "hmd", "left_controller" or "right_controller"
    def getDeviceCoordinateSystem(self, device):
        if not self.use_vr_renderer:
            return [None, None, None]

        vec_list = []

        coordinate_sys = self.renderer.vrsys.getDeviceCoordinateSystem(device)
        for dir_vec in coordinate_sys:
            vec_list.append(dir_vec)

        return vec_list
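These vectors pair naturally with the vr_utils helpers added later in this commit; a hedged locomotion sketch (the import path and the 0.01 movement speed are assumptions):

```
from gibson2.utils.vr_utils import translate_vr_position_by_vecs

right, up, forward = s.getDeviceCoordinateSystem('left_controller')
_, touch_x, touch_y = s.getButtonDataForController('left_controller')
if touch_x is not None:
    new_offset = translate_vr_position_by_vecs(touch_x, touch_y, right, forward,
                                               s.getVROffset(), 0.01)
    s.setVROffset(new_offset)
```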
    # Triggers a haptic pulse of the specified strength (0 is weakest, 1 is strongest)
    # Device can be one of "hmd", "left_controller" or "right_controller"
    def triggerHapticPulse(self, device, strength):
        if not self.use_vr_renderer:
            print("Error: can't use haptics without VR system!")
        else:
            self.renderer.vrsys.triggerHapticPulseForDevice(device, int(self.max_haptic_duration * strength))

    @staticmethod
    def update_position(instance):
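Events and haptics combine into a simple feedback loop, e.g. (the strength value is illustrative):

```
for deviceType, eventType in s.pollVREvents():
    if eventType == 'grip_press':
        # Buzz the controller that was gripped, at half strength
        s.triggerHapticPulse(deviceType, 0.5)
```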
@ -1,5 +1,5 @@
from __future__ import print_function
import torch.utils.data as data
#import torch.utils.data as data
from PIL import Image
import os
import os.path

@ -10,7 +10,7 @@ import cv2
import numpy as np
import ctypes as ct
from tqdm import tqdm
import torchvision.transforms as transforms
#import torchvision.transforms as transforms
import argparse
import json
from numpy.linalg import inv

@ -186,8 +186,7 @@ def get_item_fn(inds, select, root, loader, transform, off_3d, target_transform,
        render, target_depth)
    return (out_i, out)


class ViewDataSet3D(data.Dataset):
""" class ViewDataSet3D(data.Dataset):
    def __init__(self,
                 root=None,
                 train=False,

@ -652,4 +651,4 @@ if __name__ == '__main__':
        transforms.ToTensor()]))
    print(len(d))
    sample = d[0]
    print(sample[0].size(), sample[1].size())
    print(sample[0].size(), sample[1].size()) """
@ -23,7 +23,9 @@ def frustum(left, right, bottom, top, znear, zfar):
    M[0, 0] = +2.0 * znear / (right - left)
    M[2, 0] = (right + left) / (right - left)
    M[1, 1] = +2.0 * znear / (top - bottom)
    M[3, 1] = (top + bottom) / (top - bottom)
    # TODO: Put this back to 3,1
    #M[3, 1] = (top + bottom) / (top - bottom)
    M[2, 1] = (top + bottom) / (top - bottom)
    M[2, 2] = -(zfar + znear) / (zfar - znear)
    M[3, 2] = -2.0 * znear * zfar / (zfar - znear)
    M[2, 3] = -1.0
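The TODO above is a matrix-layout question. As a hedged reference, here is the standard column-major (OpenGL-style) frustum matrix; the frustum() being patched builds the transpose of this (row-vector convention), which is why the (top + bottom) / (top - bottom) term lands at M[2, 1] rather than M[3, 1]:

```
import numpy as np

def frustum_column_major(left, right, bottom, top, znear, zfar):
    # Standard OpenGL perspective frustum; transpose to get the layout above.
    M = np.zeros((4, 4), dtype=np.float32)
    M[0, 0] = 2.0 * znear / (right - left)
    M[0, 2] = (right + left) / (right - left)
    M[1, 1] = 2.0 * znear / (top - bottom)
    M[1, 2] = (top + bottom) / (top - bottom)
    M[2, 2] = -(zfar + znear) / (zfar - znear)
    M[2, 3] = -2.0 * znear * zfar / (zfar - znear)
    M[3, 2] = -1.0
    return M
```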
@ -91,4 +91,23 @@ def quatXYZWFromRotMat(rot_mat):
    quatWXYZ = quaternions.mat2quat(rot_mat)
    quatXYZW = quatToXYZW(quatWXYZ, 'wxyz')
    return quatXYZW

# Represents a rotation by q1, followed by q0
def multQuatLists(q0, q1):
    """Multiply two quaternions that are represented as lists."""
    x0, y0, z0, w0 = q0
    x1, y1, z1, w1 = q1

    return [w0 * x1 + x0 * w1 + y0 * z1 - z0 * y1,
            w0 * y1 - x0 * z1 + y0 * w1 + z0 * x1,
            w0 * z1 + x0 * y1 - y0 * x1 + z0 * w1,
            w0 * w1 - x0 * x1 - y0 * y1 - z0 * z1]

def normalizeListVec(v):
    """Normalizes a vector represented as a list."""
    length = v[0] ** 2 + v[1] ** 2 + v[2] ** 2
    if length <= 0:
        length = 1
    v = [val / np.sqrt(length) for val in v]
    return v
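A quick, self-contained sanity check of the multiplication convention (rotation by q1 followed by q0):

```
import math

half = math.sqrt(2) / 2
quat_z_90 = [0, 0, half, half]  # 90 degrees about z, in xyzw order
q = multQuatLists(quat_z_90, quat_z_90)
# Two 90-degree z rotations compose to a 180-degree z rotation: [0, 0, 1, 0]
print([round(val, 6) for val in q])
```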
@ -0,0 +1,326 @@
"""
VRLog classes that write/read iGibson VR data to/from HDF5.

TODO: Save velocity/torque for algorithmic training? Not necessary for replay, but might be helpful.
Can easily save velocity for joints, but might have to use link states for normal pybullet objects.

HDF5 hierarchy:
/ (root)

--- physics_data (group)

------ body_id_n (dataset, n is a positive integer)
--------- DATA: [pos, orn, joint values] (len 7 + M, where M is the number of joints)

--- vr (group)

------ vr_camera (group)

Note: we only need one eye to render in VR - we choose the right eye, since that is what is used to create
the computer's display when the VR is running
--------- right_eye_view (dataset)
------------ DATA: 4x4 mat
--------- right_eye_proj (dataset)
------------ DATA: 4x4 mat

------ vr_device_data (group)

--------- hmd (dataset)
------------ DATA: [is_valid, trans, rot] (len 8)
--------- left_controller (dataset)
------------ DATA: [is_valid, trans, rot] (len 8)
--------- right_controller (dataset)
------------ DATA: [is_valid, trans, rot] (len 8)

------ vr_button_data (group)

--------- left_controller (dataset)
------------ DATA: [trig_frac, touch_x, touch_y] (len 3)
--------- right_controller (dataset)
------------ DATA: [trig_frac, touch_x, touch_y] (len 3)

------ vr_eye_tracking_data (dataset)
--------- DATA: [is_valid, origin, dir, left_pupil_diameter, right_pupil_diameter] (len 9)
"""

import h5py
import numpy as np
import pybullet as p
import time

class VRLogWriter():
    # TIMELINE: Initialize the VRLogWriter just before the simulation starts, once all bodies have been loaded
    def __init__(self, frames_before_write, log_filepath, profiling_mode=False):
        # The number of frames to store data on the stack before writing to HDF5.
        # We buffer and flush data like this to have as small an impact as possible
        # on the VR frame-rate.
        self.frames_before_write = frames_before_write
        # File path to write the log to (path relative to script location)
        self.log_filepath = log_filepath
        # If true, will print out the time it takes to save to hd5
        self.profiling_mode = profiling_mode
        # PyBullet body ids to be saved
        # TODO: Make sure this is the correct way to get the body ids!
        self.pb_ids = [p.getBodyUniqueId(i) for i in range(p.getNumBodies())]
        self.pb_id_data_len_map = dict()
        self.data_map = None
        # Sentinel that indicates a certain value was not set in the HDF5
        self.default_fill_sentinel = -1.0
        # Counts the number of frames (reset to 0 every self.frames_before_write)
        self.frame_counter = 0
        # Counts the number of frames and does not reset
        self.persistent_frame_count = 0

        # Refresh the data map after initialization
        self.refresh_data_map()
        # VR data is all invalid before the first frame, but
        # PyBullet objects have starting positions that must be loaded
        self.write_pybullet_data_to_map()

        # Name path data - used to extract data from the data map and save it to hd5
        self.name_path_data = []
        self.generate_name_path_data()
        self.hf = None
        self.set_up_data_storage()

    def generate_name_path_data(self):
        """Generates lists of name paths for resolution in hd5 saving.
        Eg. ['vr', 'vr_camera', 'right_eye_view']."""
        for n in self.pb_ids:
            self.name_path_data.append(['physics_data', 'body_id_{0}'.format(n)])

        self.name_path_data.extend([
            ['vr', 'vr_camera', 'right_eye_view'],
            ['vr', 'vr_camera', 'right_eye_proj'],
            ['vr', 'vr_device_data', 'hmd'],
            ['vr', 'vr_device_data', 'left_controller'],
            ['vr', 'vr_device_data', 'right_controller'],
            ['vr', 'vr_button_data', 'left_controller'],
            ['vr', 'vr_button_data', 'right_controller'],
            ['vr', 'vr_eye_tracking_data'],
        ])

    def refresh_data_map(self):
        """Creates a map of the data that will go into the HDF5 file.
        The map is flushed after every self.frames_before_write frames,
        and is populated using the pybullet body ids in self.pb_ids.
        """
        self.data_map = dict()
        self.data_map['physics_data'] = dict()
        for pb_id in self.pb_ids:
            # pos + orn + number of joints
            array_len = 7 + p.getNumJoints(pb_id)
            self.pb_id_data_len_map[pb_id] = array_len
            self.data_map['physics_data']['body_id_{0}'.format(pb_id)] = np.full((self.frames_before_write, array_len), self.default_fill_sentinel)

        self.data_map['vr'] = {
            'vr_camera': {
                'right_eye_view': np.full((self.frames_before_write, 4, 4), self.default_fill_sentinel),
                'right_eye_proj': np.full((self.frames_before_write, 4, 4), self.default_fill_sentinel)
            },
            'vr_device_data': {
                'hmd': np.full((self.frames_before_write, 8), self.default_fill_sentinel),
                'left_controller': np.full((self.frames_before_write, 8), self.default_fill_sentinel),
                'right_controller': np.full((self.frames_before_write, 8), self.default_fill_sentinel)
            },
            'vr_button_data': {
                'left_controller': np.full((self.frames_before_write, 3), self.default_fill_sentinel),
                'right_controller': np.full((self.frames_before_write, 3), self.default_fill_sentinel)
            },
            'vr_eye_tracking_data': np.full((self.frames_before_write, 9), self.default_fill_sentinel)
        }

    def get_data_for_name_path(self, name_path):
        """Resolves a list of names (group/dataset) into a numpy array.
        Eg. ['vr', 'vr_camera', 'right_eye_view'] -> self.data_map['vr']['vr_camera']['right_eye_view']"""
        next_data = self.data_map
        for name in name_path:
            next_data = next_data[name]

        return next_data

    def set_up_data_storage(self):
        """Sets up the hd5 data structure to write to."""
        # Note: this erases the file contents previously stored at self.log_filepath
        hf = h5py.File(self.log_filepath, 'w')
        for name_path in self.name_path_data:
            joined_path = '/'.join(name_path)
            curr_data_shape = (0,) + self.get_data_for_name_path(name_path).shape[1:]
            # None as the first shape value allows the dataset to grow without bound through time
            max_shape = (None,) + curr_data_shape[1:]
            # create_dataset with a '/'-joined path automatically creates the required groups
            hf.create_dataset(joined_path, curr_data_shape, maxshape=max_shape)

        hf.close()
        # Now open in r+ mode to append to the file
        self.hf = h5py.File(self.log_filepath, 'r+')

    def write_vr_data_to_map(self, s):
        """Writes all VR data to the map. This will write data
        that the user has not even processed in their demos.
        For example, we will store eye tracking data if it is
        valid, even if they do not explicitly use that data
        in their code. This helps us store all the necessary
        data without having to remember to call the simulator's data
        extraction functions every time we want to save data.

        Args:
            s (simulator): used to extract information about the VR system
        """
        # At the end of each frame, the renderer has the camera information for the VR right eye
        right_eye_view, right_eye_proj = s.renderer.get_view_proj()
        self.data_map['vr']['vr_camera']['right_eye_view'][self.frame_counter, ...] = right_eye_view
        self.data_map['vr']['vr_camera']['right_eye_proj'][self.frame_counter, ...] = right_eye_proj

        for device in ['hmd', 'left_controller', 'right_controller']:
            is_valid, trans, rot = s.getDataForVRDevice(device)
            if is_valid is not None:
                data_list = [is_valid]
                data_list.extend(trans)
                data_list.extend(rot)
                self.data_map['vr']['vr_device_data'][device][self.frame_counter, ...] = np.array(data_list)

            if device == 'left_controller' or device == 'right_controller':
                button_data_list = s.getButtonDataForController(device)
                if button_data_list[0] is not None:
                    self.data_map['vr']['vr_button_data'][device][self.frame_counter, ...] = np.array(button_data_list)

        is_valid, origin, dir, left_pupil_diameter, right_pupil_diameter = s.getEyeTrackingData()
        if is_valid is not None:
            eye_data_list = [is_valid]
            eye_data_list.extend(origin)
            eye_data_list.extend(dir)
            eye_data_list.append(left_pupil_diameter)
            eye_data_list.append(right_pupil_diameter)
            self.data_map['vr']['vr_eye_tracking_data'][self.frame_counter, ...] = np.array(eye_data_list)

    def write_pybullet_data_to_map(self):
        """Writes all pybullet data to the class' internal map."""
        for pb_id in self.pb_ids:
            data_list = []
            pos, orn = p.getBasePositionAndOrientation(pb_id)
            data_list.extend(pos)
            data_list.extend(orn)
            data_list.extend([p.getJointState(pb_id, n)[0] for n in range(p.getNumJoints(pb_id))])
            self.data_map['physics_data']['body_id_{0}'.format(pb_id)][self.frame_counter] = np.array(data_list)

    # TIMELINE: Call this at the end of each frame (eg. at the end of the while loop)
    def process_frame(self, s):
        """Asks the VRLogger to process frame data. This includes:
        -- updating the VR and pybullet data in the map
        -- incrementing the frame counter by 1

        Args:
            s (simulator): used to extract information about the VR system
        """
        self.write_vr_data_to_map(s)
        self.write_pybullet_data_to_map()
        self.frame_counter += 1
        self.persistent_frame_count += 1
        if (self.frame_counter >= self.frames_before_write):
            self.frame_counter = 0
            # We have accumulated enough data, which we will write to hd5
            # TODO: Make this multi-threaded to increase speed?
            self.write_to_hd5()

    def write_to_hd5(self):
        """Writes the data stored in self.data_map to hd5.
        The data is saved each time this function is called, so data
        will be saved even if a Ctrl+C event interrupts the program."""
        print('----- Writing log data to hd5 on frame: {0} -----'.format(self.persistent_frame_count))
        start_time = time.time()
        for name_path in self.name_path_data:
            curr_dset = self.hf['/'.join(name_path)]
            # Resize to accommodate the new data
            curr_dset.resize(curr_dset.shape[0] + self.frames_before_write, axis=0)
            # Set the last self.frames_before_write rows to the numpy data from the data map
            curr_dset[-self.frames_before_write:, ...] = self.get_data_for_name_path(name_path)

        self.refresh_data_map()
        delta = time.time() - start_time
        if self.profiling_mode:
            print('Time to write: {0}'.format(delta))

    def end_log_session(self):
        """Closes the hdf5 log file at the end of the logging session."""
        self.hf.close()

class VRLogReader():
    # TIMELINE: Initialize the VRLogReader before reading any frames
    def __init__(self, log_filepath, playback_fps):
        self.log_filepath = log_filepath
        # The fps to play back data at (approximately)
        self.playback_fps = playback_fps
        self.playback_sleep_time = 1.0 / float(self.playback_fps)
        # Frame counter keeping track of how many frames have been reproduced
        self.frame_counter = 0
        self.hf = h5py.File(self.log_filepath, 'r')
        self.pb_ids = self.extract_pb_ids()
        # Get the total frame num (dataset row length) from an arbitrary dataset
        self.total_frame_num = self.hf['vr/vr_device_data/hmd'].shape[0]
        # Boolean indicating whether we still have data left to read
        self.data_left_to_read = True
        print('----- VRLogReader initialized -----')
        print('Preparing to read {0} frames'.format(self.total_frame_num))

    def extract_pb_ids(self):
        """Extracts the pybullet body ids from the saved data."""
        return [int(metadata[0].split('_')[-1]) for metadata in self.hf['physics_data'].items()]

    def read_frame(self, s):
        """Reads a frame from the VR logger and steps the simulation with the stored data.
        This includes the following two steps:
        -- update camera data
        -- update pybullet physics data

        Args:
            s (simulator): used to set camera view and projection matrices
        """
        # Note: Currently returns the hmd position, as a test
        # TODO: Add more functionality - eg. return part of the dataset?
        # Catch the error where the user tries to keep reading a frame when all frames have been read
        if self.frame_counter >= self.total_frame_num:
            return

        # Each frame we first set the camera data
        cam_view = self.hf['vr/vr_camera/right_eye_view'][self.frame_counter]
        cam_proj = self.hf['vr/vr_camera/right_eye_proj'][self.frame_counter]
        s.renderer.set_view_proj(cam_view, cam_proj)

        # Then we update the physics
        for pb_id in self.pb_ids:
            id_name = 'body_id_{0}'.format(pb_id)
            id_data = self.hf['physics_data/' + id_name][self.frame_counter]
            pos = id_data[:3]
            orn = id_data[3:7]
            joint_data = id_data[7:]
            p.resetBasePositionAndOrientation(pb_id, pos, orn)
            for i in range(len(joint_data)):
                p.resetJointState(pb_id, i, joint_data[i])

        # TODO: Change this to return something else/potentially all data in a map?
        ret_data = self.hf['vr/vr_device_data/hmd'][self.frame_counter, 1:4]
        print(ret_data)
        print(self.frame_counter)

        self.frame_counter += 1
        if self.frame_counter >= self.total_frame_num:
            self.data_left_to_read = False
            self.end_log_session()

        time.sleep(self.playback_sleep_time)

    # TIMELINE: Use this as the while loop condition to keep reading frames!
    def get_data_left_to_read(self):
        """Returns whether there is still data left to read."""
        return self.data_left_to_read

    def end_log_session(self):
        """This is called once reading has finished to clean up the resources used."""
        print('Ending frame reading session after reading {0} frames'.format(self.total_frame_num))
        self.hf.close()
        print('----- VRLogReader shutdown -----')
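Following the TIMELINE comments, a hedged end-to-end sketch of recording and replaying a session (the file name, buffer size and loop condition are illustrative):

```
# Recording: construct the writer once all bodies are loaded
writer = VRLogWriter(frames_before_write=200, log_filepath='vr_session.h5')
while demo_is_running():  # hypothetical loop condition
    s.step()
    writer.process_frame(s)
writer.end_log_session()

# Playback: read frames until the log is exhausted
reader = VRLogReader(log_filepath='vr_session.h5', playback_fps=90)
while reader.get_data_left_to_read():
    reader.read_frame(s)
```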
@ -0,0 +1,14 @@
import numpy as np
from gibson2.utils.utils import normalizeListVec

def get_normalized_translation_vec(right_frac, forward_frac, right, forward):
    """Generates a normalized translation vector that is a linear combination of forward and right."""
    vr_offset_vec = [right[i] * right_frac + forward[i] * forward_frac for i in range(3)]
    vr_offset_vec[2] = 0
    return normalizeListVec(vr_offset_vec)

def translate_vr_position_by_vecs(right_frac, forward_frac, right, forward, curr_offset, movement_speed):
    """Generates a normalized translation vector that is a linear combination of forward and right, the
    direction vectors of the chosen VR device (HMD/controller), and adds this vector, scaled by
    movement_speed, to the current offset."""
    vr_offset_vec = get_normalized_translation_vec(right_frac, forward_frac, right, forward)
    return [curr_offset[i] + vr_offset_vec[i] * movement_speed for i in range(3)]
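A plain-number check of the helper (unit axes, full forward deflection on the touchpad):

```
right = [1, 0, 0]
forward = [0, 1, 0]
# A full forward push moves the offset 0.01 along +y; height is unchanged
new_offset = translate_vr_position_by_vecs(0.0, 1.0, right, forward, [0, 0, 1.2], 0.01)
print(new_offset)  # [0.0, 0.01, 1.2]
```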
setup.py
@ -19,7 +19,6 @@ use_clang = False

here = os.path.abspath(os.path.dirname(__file__))


def read(*parts):
    with codecs.open(os.path.join(here, *parts), 'r') as fp:
        return fp.read()
@ -80,7 +79,7 @@ class CMakeBuild(build_ext):
        build_args = ['--config', cfg]

        if platform.system() == "Windows":
            cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]
            cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY{}={}'.format(cfg.upper(), extdir)]
            if sys.maxsize > 2**32:
                cmake_args += ['-A', 'x64']
            build_args += ['--', '/m']
@ -0,0 +1,24 @@
from gibson2.core.render.mesh_renderer.mesh_renderer_cpu import MeshRenderer
import cv2
import sys
import numpy as np
from gibson2.core.render.mesh_renderer.mesh_renderer_cpu import VisualObject, InstanceGroup, MeshRenderer
import time

renderer = MeshRenderer(width=512, height=512)
renderer.load_object("C:\\Users\\shen\\Desktop\\GibsonVRStuff\\vr_branch\\gibsonv2\\gibson2\\assets\\datasets\\Ohoopee\\Ohoopee_mesh_texture.obj")
renderer.add_instance(0)

print(renderer.visual_objects, renderer.instances)
print(renderer.materials_mapping, renderer.mesh_materials)
camera_pose = np.array([0, 0, 1.2])
view_direction = np.array([1, 0, 0])
renderer.set_camera(camera_pose, camera_pose + view_direction, [1, 0, 0])
renderer.set_fov(90)

while True:
    frame = renderer.render(modes=('rgb'))
    cv2.imshow('test', cv2.cvtColor(np.concatenate(frame, axis=1), cv2.COLOR_RGB2BGR))
    q = cv2.waitKey(1)

renderer.release()
@ -0,0 +1,24 @@
from gibson2.core.render.mesh_renderer.mesh_renderer_vr import MeshRendererVR
import cv2
import sys
import numpy as np
from gibson2.core.render.mesh_renderer.mesh_renderer_cpu import VisualObject, InstanceGroup, MeshRenderer
from gibson2 import assets_path, dataset_path
import time

renderer = MeshRendererVR(MeshRenderer, msaa=True)
# Note that it is necessary to load the full path of an object!
renderer.load_object(dataset_path + '\\Ohoopee\\Ohoopee_mesh_texture.obj')
renderer.add_instance(0)

while True:
    startFrame = time.time()
    renderer.render()

    endFrame = time.time()
    deltaT = endFrame - startFrame
    fps = 1 / float(deltaT)

    print("Current fps: %f" % fps)

renderer.release()
@ -0,0 +1,8 @@
from gibson2.core.render.mesh_renderer.Release import MeshRendererContext

vrsys = MeshRendererContext.VRSystem()
recommendedWidth, recommendedHeight = vrsys.initVR()

print("Initialized VR with width: %d and height: %d" % (recommendedWidth, recommendedHeight))

vrsys.releaseVR()