commit 4c11eafad2
Author: fxia22
Date: 2019-05-06 11:11:01 -07:00

23 changed files with 284 additions and 225 deletions

View File

@ -48,7 +48,7 @@ class InteractiveObj:
class RBOObject(InteractiveObj):
def __init__(self, name, scale=1):
filename = os.path.join(os.path.dirname(os.path.abspath(assets.__file__)), 'models', 'rbo', name,
filename = os.path.join(gibson2.assets_path, 'models', 'rbo', name,
'configuration', '{}.urdf'.format(name))
super(RBOObject, self).__init__(filename, scale)
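
Note: the RBOObject URDF path is now built from gibson2.assets_path rather than resolving assets.__file__ by hand. A minimal sketch of the equivalent lookup (the 'book' model name is only an example):

    import os
    import gibson2

    # Illustrative model name; any model under assets/models/rbo resolves the same way.
    name = 'book'
    filename = os.path.join(gibson2.assets_path, 'models', 'rbo', name,
                            'configuration', '{}.urdf'.format(name))
    print(filename)
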

View File

@ -37,11 +37,11 @@ class WalkerBase(BaseRobot):
assert type(action_dim) == int, "Action dimension must be int, got {}".format(type(action_dim))
action_high = np.ones([action_dim])
self.action_space = gym.spaces.Box(-action_high, action_high)
self.action_space = gym.spaces.Box(-action_high, action_high, dtype=np.float32)
obs_high = np.inf * np.ones(self.obs_dim)
self.observation_space = gym.spaces.Box(-obs_high, obs_high)
self.observation_space = gym.spaces.Box(-obs_high, obs_high, dtype=np.float32)
sensor_high = np.inf * np.ones([sensor_dim])
self.sensor_space = gym.spaces.Box(-sensor_high, sensor_high)
self.sensor_space = gym.spaces.Box(-sensor_high, sensor_high, dtype=np.float32)
self.power = power
self.camera_x = 0
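
Note: passing dtype=np.float32 pins each space's precision explicitly; with the gym versions in use at the time, omitting it makes Box infer a dtype from the bounds and log an "autodetected dtype" warning. A standalone sketch, assuming only gym and numpy:

    import gym
    import numpy as np

    action_dim = 4  # illustrative dimension
    action_high = np.ones([action_dim])
    # An explicit dtype silences the autodetection warning and keeps action,
    # observation and sensor spaces consistently float32.
    action_space = gym.spaces.Box(-action_high, action_high, dtype=np.float32)
    print(action_space.dtype)
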
@ -96,6 +96,9 @@ class WalkerBase(BaseRobot):
return self.robot_body.get_rpy()
def apply_action(self, a):
if isinstance(a, list):
a = np.array(a)
if self.control == 'torque':
for n, j in enumerate(self.ordered_joints):
j.set_motor_torque(self.power * j.power_coef * float(np.clip(a[n], -1, +1)))
@ -174,6 +177,9 @@ class Ant(WalkerBase):
self.setup_keys_to_action()
def apply_action(self, action):
if isinstance(action, list):
action = np.array(action)
if self.is_discrete:
real_action = self.action_list[action]
else:
@ -262,6 +268,10 @@ class Humanoid(WalkerBase):
self.motors = [self.jdict[n] for n in self.motor_names]
def apply_action(self, a):
if isinstance(a, list):
a = np.array(a)
if self.is_discrete:
real_action = self.action_list[a]
else:
@ -304,9 +314,12 @@ class Husky(WalkerBase):
self.setup_keys_to_action()
else:
action_high = 0.02 * np.ones([4])
self.action_space = gym.spaces.Box(-action_high, action_high)
self.action_space = gym.spaces.Box(-action_high, action_high, dtype=np.float32)
def apply_action(self, action):
if isinstance(action, list):
action = np.array(action)
if self.is_discrete:
real_action = self.action_list[action]
else:
@ -372,9 +385,12 @@ class Quadrotor(WalkerBase):
self.setup_keys_to_action()
else:
action_high = 0.02 * np.ones([6])
self.action_space = gym.spaces.Box(-action_high, action_high)
self.action_space = gym.spaces.Box(-action_high, action_high, dtype=np.float32)
def apply_action(self, action):
if isinstance(action, list):
action = np.array(action)
if self.is_discrete:
real_action = self.action_list[action]
else:
@ -424,11 +440,14 @@ class Turtlebot(WalkerBase):
[-self.velocity * 0.5, self.velocity * 0.5]]
self.setup_keys_to_action()
else:
self.action_space = gym.spaces.Box(shape=(self.action_dim,), low=0.0, high=1.0)
self.action_space = gym.spaces.Box(shape=(self.action_dim,), low=0.0, high=1.0, dtype=np.float32)
self.action_low = -self.velocity * np.ones([self.action_dim])
self.action_high = -self.action_low
def apply_action(self, action):
if isinstance(action, list):
action = np.array(action)
if self.is_discrete:
real_action = self.action_list[action]
else:
@ -470,6 +489,8 @@ class JR2(WalkerBase):
control=['velocity', 'velocity', 'position', 'position'],
)
self.is_discrete = config["is_discrete"]
self.velocity = config.get('velocity', 1.0)
print('velocity', self.velocity)
if self.is_discrete:
self.action_space = gym.spaces.Discrete(5)
@ -482,9 +503,12 @@ class JR2(WalkerBase):
self.setup_keys_to_action()
else:
action_high = self.velocity * np.ones([4])
self.action_space = gym.spaces.Box(-action_high, action_high)
self.action_space = gym.spaces.Box(-action_high, action_high, dtype=np.float32)
def apply_action(self, action):
if isinstance(action, list):
action = np.array(action)
if self.is_discrete:
real_action = self.action_list[action]
else:
@ -545,7 +569,7 @@ class JR2_Kinova(WalkerBase):
# assert self.action_dim == wheel_dim + camera_dim + arm_dim
self.action_low = np.array([-self.wheel_velocity] * 2 + [-self.arm_velocity] * 8)
self.action_high = -self.action_low
self.action_space = gym.spaces.Box(shape=(10,), low=-1.0, high=1.0)
self.action_space = gym.spaces.Box(shape=(10,), low=-1.0, high=1.0, dtype=np.float32)
# self.action_low = np.array([-self.wheel_velocity] * 2 + [-self.arm_velocity] * 1)
# self.action_high = -self.action_low
# self.action_space = gym.spaces.Box(shape=(3,), low=0.0, high=1.0)
@ -575,6 +599,8 @@ class JR2_Kinova(WalkerBase):
# self.action_space = gym.spaces.Box(shape=(8,), low=0.0, high=1.0)
def apply_action(self, action):
if isinstance(action, list):
action = np.array(action)
action = np.clip(action, self.action_space.low, self.action_space.high)
normalized_action = self.action_high * action
real_action = np.zeros(self.action_dim)
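
Note: each apply_action now coerces plain Python lists to numpy arrays before clipping and scaling, so callers can pass either type. The shared pattern, reduced to a standalone sketch (normalize_action and the 10-dim bounds are illustrative, not part of the commit):

    import numpy as np

    def normalize_action(action, low, high):
        # Accept plain lists as well as ndarrays, as the updated apply_action
        # methods do, then clip into the action-space bounds.
        if isinstance(action, list):
            action = np.array(action)
        return np.clip(action, low, high)

    low, high = -np.ones(10), np.ones(10)
    print(normalize_action([0.005, 0.005] + [0.0] * 8, low, high))
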

View File

@ -5,14 +5,14 @@ include_directories(glad)
find_package(OpenGL)
add_subdirectory(pybind11)
add_library(CppMeshRenderer MODULE glad/egl.c glad/gl.c cpp/Mesh_renderer.cpp)
add_library(CppMeshRenderer MODULE glad/egl.cpp glad/gl.cpp cpp/Mesh_renderer.cpp)
target_link_libraries(CppMeshRenderer PRIVATE pybind11::module dl pthread EGL ${OPENGL_LIBRARIES})
set_target_properties(CppMeshRenderer PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}"
SUFFIX "${PYTHON_MODULE_EXTENSION}")
add_executable(query_devices glad/egl.c glad/gl.c cpp/query_devices.cpp)
add_executable(test_device glad/egl.c glad/gl.c cpp/test_device.cpp)
add_executable(query_devices glad/egl.cpp glad/gl.cpp cpp/query_devices.cpp)
add_executable(test_device glad/egl.cpp glad/gl.cpp cpp/test_device.cpp)
target_link_libraries(query_devices dl pthread EGL ${OPENGL_LIBRARIES})
target_link_libraries(test_device dl pthread EGL ${OPENGL_LIBRARIES})

View File

@ -20,6 +20,8 @@ class Simulator:
# renderer
self.renderer = MeshRenderer(width=resolution, height=resolution, device_idx=device_idx)
self.resolution = resolution
self.device_idx = device_idx
self.renderer.set_fov(90)
self.visual_objects = {}
@ -38,6 +40,24 @@ class Simulator:
self.viewer = Viewer()
self.viewer.renderer = self.renderer
def reload(self):
self.renderer.release()
self.renderer = MeshRenderer(width=self.resolution, height=self.resolution, device_idx=self.device_idx)
p.disconnect(self.cid)
if self.mode == 'gui':
self.cid = p.connect(p.GUI)
else:
self.cid = p.connect(p.DIRECT)
p.setTimeStep(self.timestep)
p.setGravity(0, 0, -self.gravity)
self.visual_objects = {}
if self.mode == 'gui':
self.viewer.renderer = self.renderer
self.renderer.set_fov(90)
self.robots = []
self.scene = None
self.objects = []
def import_scene(self, scene):
new_objects = scene.load()
for item in new_objects:
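
Note: Simulator.reload() releases the renderer, drops the pybullet client, and rebuilds both from the stored resolution, device_idx, timestep and gravity; the scene and robot lists are cleared, so everything has to be re-imported afterwards. A hedged usage sketch (assumes the standard gibson2 assets are installed):

    from gibson2.core.simulator import Simulator
    from gibson2.core.physics.scene import StadiumScene

    s = Simulator(mode='headless')
    s.import_scene(StadiumScene())
    # Tear down the renderer and pybullet connection, then start fresh;
    # the scene must be imported again on the new connection.
    s.reload()
    s.import_scene(StadiumScene())
    s.disconnect()
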

View File

@ -48,7 +48,7 @@ def depth_loader(path):
def get_model_path(model_id):
data_path = os.path.join(os.path.dirname(os.path.abspath(assets.__file__)), 'dataset')
data_path = os.path.join(gibson2.assets_path, 'dataset')
assert (model_id in os.listdir(data_path)) or model_id == 'stadium', "Model {} does not exist".format(model_id)
return os.path.join(data_path, model_id)
@ -178,7 +178,7 @@ class ViewDataSet3D(data.Dataset):
semantic_transform=np.array, env = None, only_load = None):
print('Processing the data:')
if not root:
self.root = os.path.join(os.path.dirname(os.path.abspath(assets.__file__)), "dataset")
self.root = os.path.join(gibson2.assets_path, "dataset")
else:
self.root = root
self.train = train

View File

@ -39,6 +39,38 @@ class BaseEnv(gym.Env):
for robot in self.robots:
self.simulator.import_robot(robot)
def reload(self, config_file):
self.config = parse_config(config_file)
self.simulator.reload()
if self.config['scene'] == 'stadium':
scene = StadiumScene()
elif self.config['scene'] == 'building':
scene = BuildingScene(self.config['model_id'])
self.simulator.import_scene(scene)
if self.config['robot'] == 'Turtlebot':
robot = Turtlebot(self.config)
elif self.config['robot'] == 'Husky':
robot = Husky(self.config)
elif self.config['robot'] == 'Ant':
robot = Ant(self.config)
elif self.config['robot'] == 'Humanoid':
robot = Humanoid(self.config)
elif self.config['robot'] == 'JR2':
robot = JR2(self.config)
elif self.config['robot'] == 'JR2_Kinova':
robot = JR2_Kinova(self.config)
else:
raise Exception('unknown robot type: {}'.format(self.config['robot']))
self.scene = scene
self.robots = [robot]
for robot in self.robots:
self.simulator.import_robot(robot)
def clean(self):
if not self.simulator is None:
self.simulator.disconnect()
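
Note: BaseEnv.reload() mirrors the constructor: re-parse the config, reload the simulator, then re-import the scene and the robot selected by the config strings. A table-driven sketch equivalent to the elif chain above (the ROBOT_CLASSES dict and make_robot helper are illustrative, not part of the commit):

    from gibson2.core.physics.robot_locomotors import (
        Turtlebot, Husky, Ant, Humanoid, JR2, JR2_Kinova)

    # Hypothetical lookup table mirroring the elif chain in reload().
    ROBOT_CLASSES = {
        'Turtlebot': Turtlebot,
        'Husky': Husky,
        'Ant': Ant,
        'Humanoid': Humanoid,
        'JR2': JR2,
        'JR2_Kinova': JR2_Kinova,
    }

    def make_robot(config):
        if config['robot'] not in ROBOT_CLASSES:
            raise Exception('unknown robot type: {}'.format(config['robot']))
        return ROBOT_CLASSES[config['robot']](config)
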

View File

@ -96,6 +96,58 @@ class NavigateEnv(BaseEnv):
else:
self.target_pos_vis_obj.load()
def reload(self, config_file):
super().reload(config_file)
self.initial_pos = np.array(self.config.get('initial_pos', [0, 0, 0]))
self.initial_orn = np.array(self.config.get('initial_orn', [0, 0, 0]))
self.target_pos = np.array(self.config.get('target_pos', [5, 5, 0]))
self.target_orn = np.array(self.config.get('target_orn', [0, 0, 0]))
self.additional_states_dim = self.config['additional_states_dim']
# termination condition
self.dist_tol = self.config.get('dist_tol', 0.5)
self.max_step = self.config.get('max_step', float('inf'))
# reward
self.terminal_reward = self.config.get('terminal_reward', 0.0)
self.electricity_cost = self.config.get('electricity_cost', 0.0)
self.stall_torque_cost = self.config.get('stall_torque_cost', 0.0)
self.collision_cost = self.config.get('collision_cost', 0.0)
self.discount_factor = self.config.get('discount_factor', 1.0)
self.output = self.config['output']
self.sensor_dim = self.additional_states_dim
self.action_dim = self.robots[0].action_dim
# self.observation_space = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(self.sensor_dim,), dtype=np.float64)
observation_space = OrderedDict()
if 'sensor' in self.output:
self.sensor_space = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(self.sensor_dim,), dtype=np.float32)
observation_space['sensor'] = self.sensor_space
if 'rgb' in self.output:
self.rgb_space = gym.spaces.Box(low=0.0, high=1.0,
shape=(self.config['resolution'], self.config['resolution'], 3),
dtype=np.float32)
observation_space['rgb'] = self.rgb_space
if 'depth' in self.output:
self.depth_space = gym.spaces.Box(low=0.0, high=1.0,
shape=(self.config['resolution'], self.config['resolution'], 1),
dtype=np.float32)
observation_space['depth'] = self.depth_space
if 'rgb_filled' in self.output: # use filler
self.comp = CompletionNet(norm=nn.BatchNorm2d, nf=64)
self.comp = torch.nn.DataParallel(self.comp).cuda()
self.comp.load_state_dict(
torch.load(os.path.join(gibson2.assets_path, 'networks', 'model.pth')))
self.comp.eval()
if 'pointgoal' in self.output:
observation_space['pointgoal'] = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(2,), dtype=np.float32)
self.observation_space = gym.spaces.Dict(observation_space)
self.action_space = self.robots[0].action_space
def get_additional_states(self):
relative_position = self.target_pos - self.robots[0].get_position()
# rotate relative position back to body point of view
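
Note: NavigateEnv.reload() rebuilds the observation space as a gym.spaces.Dict keyed by the entries of the config's 'output' list (sensor, rgb, depth, pointgoal) and takes the action space straight from the robot. Usage sketch, mirroring test_env_reload later in this commit:

    import os
    import gibson2
    from gibson2.envs.locomotor_env import NavigateEnv

    config_filename = os.path.join(os.path.dirname(gibson2.__file__), '../test/test.yaml')
    nav_env = NavigateEnv(config_file=config_filename, mode='headless')
    try:
        nav_env.reload(config_filename)   # re-parses the config and rebuilds the spaces
        nav_env.reset()
        print(nav_env.observation_space)  # gym.spaces.Dict
    finally:
        nav_env.clean()
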

View File

@ -101,7 +101,7 @@ class CompletionNet(nn.Module):
)
def forward(self, x):
return F.tanh(self.convs(x))
return torch.tanh(self.convs(x))
def identity_init(m):
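
Note: torch.nn.functional.tanh is deprecated in favor of torch.tanh in the PyTorch versions targeted here, and calling it emits a deprecation warning; the two are numerically identical, so the forward pass is unchanged. A minimal check:

    import torch

    x = torch.randn(2, 3)
    # torch.tanh is the non-deprecated equivalent of torch.nn.functional.tanh.
    assert torch.allclose(torch.tanh(x), torch.nn.functional.tanh(x))
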

View File

@ -12,6 +12,8 @@ from distutils.version import LooseVersion
import subprocess
import platform
use_clang = False
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
@ -39,6 +41,9 @@ class CMakeBuild(build_ext):
'-DCMAKE_RUNTIME_OUTPUT_DIRECTORY=' + os.path.join(extdir, 'gibson2/core/render/mesh_renderer', 'build'),
'-DPYTHON_EXECUTABLE=' + sys.executable]
if use_clang:
cmake_args += ['-DCMAKE_C_COMPILER=/usr/bin/clang', '-DCMAKE_CXX_COMPILER=/usr/bin/clang++']
cfg = 'Debug' if self.debug else 'Release'
build_args = ['--config', cfg]
@ -90,7 +95,8 @@ setup(name='gibson2',
'aenum',
'pyopengl==3.1.0',
'pyopengl-accelerate==3.1.0',
'pyassimp==4.1.3'
'pyassimp==4.1.3',
'gputil'
],
ext_modules=[CMakeExtension('CppMeshRenderer', sourcedir='gibson2/core/render')],
cmdclass=dict(build_ext=CMakeBuild),
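
Note: use_clang is a module-level switch that forwards -DCMAKE_C_COMPILER/-DCMAKE_CXX_COMPILER to CMake when enabled, and gputil joins install_requires because the render tests below call GPUtil.showUtilization() to watch GPU memory across renderer create/release cycles. A minimal sketch of that usage:

    import GPUtil

    # Print current GPU load and memory usage; the render tests call this
    # before and after MeshRenderer.release() to spot leaks.
    GPUtil.showUtilization()
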

View File

@ -4,20 +4,18 @@ model_id: Ohoopee
# robot
robot: JR2
velocity: 0.01
# task, observation and action
task: pointgoal # pointgoal|objectgoal|areagoal|reaching
initial_orn: [0.0, 0.0, 0.0]
#initial_pos: [0.0, 0.0, 0.0]
initial_pos: [-2.5, -1.0, 0.0]
target_orn: [0.0, 0.0, 0.0]
target_pos: [-5.0, 3.2, 0.0]
#target_pos: [3.0, 5.0, 0.0]
is_discrete: false
additional_states_dim: 8
additional_states_dim: 3
# reward and termination condition
dist_tol: 0.5

View File

@ -1,6 +1,6 @@
# scene
scene: stadium
#scene: building
#scene: stadium
scene: building
model_id: Ohoopee
# robot
@ -18,7 +18,7 @@ target_orn: [0.0, 0.0, 0.0]
target_pos: [-5.0, 3.2, 0.0]
is_discrete: true
additional_states_dim: 8
additional_states_dim: 3
# reward
terminal_reward: 5000

View File

@ -10,10 +10,10 @@ import cv2
def test_env():
config_filename = os.path.join(os.path.dirname(gibson2.__file__), '../test/goggle.yaml')
nav_env = NavigateEnv(config_file=config_filename, mode='headless')
nav_env = NavigateEnv(config_file=config_filename, mode='gui')
for j in range(2):
nav_env.reset()
for i in range(500): # 300 steps, 30s world time
for i in range(10): # 10 steps, 1s world time
s = time()
action = nav_env.action_space.sample()
ts = nav_env.step(action)
@ -25,4 +25,5 @@ def test_env():
if ts[2]:
print("Episode finished after {} timesteps".format(i + 1))
break
test_env()
nav_env.clean()

View File

@ -10,36 +10,22 @@ config = parse_config('test.yaml')
def test_jr2():
s =Simulator(mode='gui')
scene = BuildingScene('Ohoopee')
s.import_scene(scene)
jr2 = JR2_Kinova(config)
s.import_robot(jr2)
try:
scene = BuildingScene('Ohoopee')
s.import_scene(scene)
jr2 = JR2_Kinova(config)
s.import_robot(jr2)
jr2.set_position([-6,0,0.1])
obj3 = InteractiveObj(os.path.join(gibson2.assets_path, 'models', 'scene_components', 'door.urdf'), scale=2)
s.import_interactive_object(obj3)
obj3.set_position_rotation([-5, -1, 0], [0, 0, np.sqrt(0.5), np.sqrt(0.5)])
jr2.apply_action([0.005, 0.005, 0, 0, 0,0,0, 0, 0, 0])
for _ in range(400):
s.step()
#for i in range(p.getNumJoints(jr2.robot_ids[0])):
# for j in range(p.getNumJoints(jr2.robot_ids[0])):
# if 'm1n6s200' in str(p.getJointInfo(jr2.robot_ids[0], i)[1]):
# link_id1 = p.getJointInfo(jr2.robot_ids[0], i)[0]
# link_id2 = p.getJointInfo(jr2.robot_ids[0], j)[0]
# print('set collision', i, j, link_id1, link_id2, p.getJointInfo(jr2.robot_ids[0], i))
# p.setCollisionFilterPair(jr2.robot_ids[0], jr2.robot_ids[0], link_id1, link_id2, 1)
# p.setCollisionFilterPair(jr2.robot_ids[0], jr2.robot_ids[0], link_id2, link_id1, 1)
jr2.apply_action([0,0, 0,0, 3.1408197119196117, -1.37402907967774, -0.8377005721485424, -1.9804208517373096, 0.09322135043256494, 2.62937740156038])
obj3 = InteractiveObj(os.path.join(gibson2.assets_path, 'models', 'scene_components', 'door.urdf'), scale=2)
s.import_interactive_object(obj3)
obj3.set_position_rotation([-5, -1, 0], [0, 0, np.sqrt(0.5), np.sqrt(0.5)])
jr2.set_position([-6,0,0.1])
#from IPython import embed; embed()
jr2.apply_action([0.005, 0.005, 0, 0, 0,0,0, 0, 0, 0, 0, 0])
for _ in range(400):
s.step()
jr2.apply_action([0,0, 0,0, 3.3607160552645428, 3.3310046132823998, 3.1408197119196117, -1.37402907967774, -0.8377005721485424, -1.9804208517373096, 0.09322135043256494, 2.62937740156038])
for _ in range(400):
s.step()
s.disconnect()
for _ in range(400):
s.step()
finally:
s.disconnect()
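
Note: wrapping the test body in try/finally guarantees s.disconnect() runs even if an assertion or pybullet call fails partway, so a failing test no longer leaks a GUI/EGL context into the next one. The skeleton, reduced to a standalone sketch:

    from gibson2.core.simulator import Simulator
    from gibson2.core.physics.scene import StadiumScene

    def run_sim_test():
        s = Simulator(mode='headless')
        try:
            s.import_scene(StadiumScene())
            for _ in range(10):
                s.step()
        finally:
            # Always release the renderer and the pybullet client.
            s.disconnect()
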

View File

@ -12,14 +12,15 @@ def test_jr2():
'../examples/configs/jr_interactive_nav.yaml')
nav_env = InteractiveNavigateEnv(config_file=config_filename, mode='gui',
action_timestep=1.0 / 10.0, physics_timestep=1 / 40.0)
nav_env.reset()
for i in range(1000): # 300 steps, 30s world time
action = nav_env.action_space.sample()
state, reward, done, _ = nav_env.step(action)
if done:
print('Episode finished after {} timesteps'.format(i + 1))
nav_env.clean()
try:
nav_env.reset()
for i in range(10): # 10 steps, 1s world time
action = nav_env.action_space.sample()
state, reward, done, _ = nav_env.step(action)
if done:
print('Episode finished after {} timesteps'.format(i + 1))
finally:
nav_env.clean()
"""
s =Simulator(mode='gui')

View File

@ -1,44 +1,41 @@
from gibson2.envs.locomotor_env import *
from time import time
from tf_agents.environments import utils, tf_py_environment, parallel_py_environment
import tensorflow as tf
from gibson2.utils.tf_utils import env_load_fn
import numpy as np
import time
from time import time
def test_env():
config_filename = os.path.join(os.path.dirname(gibson2.__file__), '../test/test.yaml')
nav_env = NavigateEnv(config_file=config_filename, mode='headless')
for j in range(2):
nav_env.reset()
for i in range(300): # 300 steps, 30s world time
s = time()
action = nav_env.action_space.sample()
ts = nav_env.step(action)
print(ts, 1/(time()-s))
if ts[2]:
print("Episode finished after {} timesteps".format(i + 1))
break
try:
for j in range(2):
nav_env.reset()
for i in range(300): # 300 steps, 30s world time
s = time()
action = nav_env.action_space.sample()
ts = nav_env.step(action)
print(ts, 1/(time()-s))
if ts[2]:
print("Episode finished after {} timesteps".format(i + 1))
break
finally:
nav_env.clean()
def test_env_reload():
config_filename = os.path.join(os.path.dirname(gibson2.__file__), '../test/test.yaml')
nav_env = NavigateEnv(config_file=config_filename, mode='headless')
try:
for i in range(3):
nav_env.reload(config_filename)
nav_env.reset()
for i in range(300): # 300 steps, 30s world time
s = time()
action = nav_env.action_space.sample()
ts = nav_env.step(action)
print(ts, 1 / (time() - s))
if ts[2]:
print("Episode finished after {} timesteps".format(i + 1))
break
def test_py_env():
py_env = env_load_fn()
# print("action spec", py_env.action_spec())
# print("observation spec", py_env.observation_spec())
utils.validate_py_environment(py_env, episodes=2)
def test_tf_py_env():
tf_py_env = [lambda: env_load_fn() for _ in range(2)]
tf_py_env = tf_py_environment.TFPyEnvironment(parallel_py_environment.ParallelPyEnvironment(tf_py_env))
# print("action spec", tf_py_env.action_spec())
# print("observation spec", tf_py_env.observation_spec())
action = tf.constant(np.zeros((2, 2)))
reset_op = tf_py_env.reset()
step_op = tf_py_env.step(action)
with tf.Session() as sess:
for j in range(10):
time_step = sess.run(reset_op)
for i in range(100):
time_step = sess.run(step_op)
finally:
nav_env.clean()

View File

@ -1,83 +0,0 @@
import tensorflow as tf
from tf_agents.specs import tensor_spec
from tf_agents.networks import network
from gibson2.utils.agents.networks import encoding_network
from gibson2.utils.agents.networks import value_network
from gibson2.utils.agents.networks import actor_distribution_network
from gibson2.utils.tf_utils import LayerParams
import collections
import numpy as np
def get_encoder():
fc_layer_params = (128, 64)
conv_layer_params = ((32, (8, 8), 4), (64, (4, 4), 2), (64, (3, 3), 1))
observation_spec = collections.OrderedDict([
('sensor', tensor_spec.BoundedTensorSpec(shape=(22),
dtype=tf.float32,
minimum=-np.inf,
maximum=np.inf)),
('rgb', tensor_spec.BoundedTensorSpec(shape=(128, 128, 3),
dtype=tf.float32,
minimum=0.0,
maximum=1.0)),
('depth', tensor_spec.BoundedTensorSpec(shape=(128, 128, 1),
dtype=tf.float32,
minimum=0.0,
maximum=1.0)),
])
preprocessing_layers_params = {
'sensor': LayerParams(conv=None, fc=fc_layer_params),
'rgb': LayerParams(conv=conv_layer_params, fc=None),
'depth': LayerParams(conv=conv_layer_params, fc=None),
}
preprocessing_combiner_type = 'concat'
encoder = encoding_network.EncodingNetwork(
observation_spec,
preprocessing_layers_params=preprocessing_layers_params,
preprocessing_combiner_type=preprocessing_combiner_type,
kernel_initializer=tf.compat.v1.keras.initializers.glorot_uniform()
)
return encoder
def get_states():
batch_size = 4
return {'sensor': tf.random.uniform([batch_size, 22]),
'rgb': tf.random.uniform([batch_size, 128, 128, 3]),
'depth': tf.random.uniform([batch_size, 128, 128, 1])}
def test_encoding_network():
encoder = get_encoder()
states = get_states()
output, _ = encoder(states)
assert output.shape == (4, 192)
def test_value_network():
encoder = get_encoder()
states = get_states()
value_net = value_network.ValueNetwork(
encoder.input_tensor_spec,
encoder=encoder,
fc_layer_params=(128, 64),
kernel_initializer=tf.compat.v1.keras.initializers.glorot_uniform()
)
value, _ = value_net(states)
assert value.shape == (4,)
def test_actor_distribution_network():
encoder = get_encoder()
states = get_states()
actor_net = actor_distribution_network.ActorDistributionNetwork(
encoder.input_tensor_spec,
tensor_spec.BoundedTensorSpec(shape=(2,), dtype=tf.float32, minimum=-0.25, maximum=0.25),
encoder=encoder,
fc_layer_params=(128, 64),
kernel_initializer=tf.compat.v1.keras.initializers.glorot_uniform(),
)
actions, _ = actor_net(states)
assert actions.batch_shape == (4, 2)

View File

@ -9,9 +9,9 @@ def test_import_object():
obj = YCBObject('003_cracker_box')
s.import_object(obj)
assert s.objects == list(range(5))
objs = s.objects
s.disconnect()
assert objs == list(range(5))
def test_import_many_object():
@ -25,27 +25,30 @@ def test_import_many_object():
for j in range(1000):
s.step()
assert (s.objects[-1] == 33)
last_obj = s.objects[-1]
s.disconnect()
assert (last_obj == 33)
def test_import_rbo_object():
s = Simulator(mode='gui')
scene = StadiumScene()
s.import_scene(scene)
try:
scene = StadiumScene()
s.import_scene(scene)
obj = RBOObject('book')
s.import_interactive_object(obj)
obj = RBOObject('book')
s.import_interactive_object(obj)
obj2 = RBOObject('microwave')
s.import_interactive_object(obj2)
obj2 = RBOObject('microwave')
s.import_interactive_object(obj2)
obj.set_position([0, 0, 2])
obj2.set_position([0, 1, 2])
obj.set_position([0, 0, 2])
obj2.set_position([0, 1, 2])
obj3 = InteractiveObj(os.path.join(gibson2.assets_path, 'models', 'scene_components', 'door.urdf'))
s.import_interactive_object(obj3)
obj3 = InteractiveObj(os.path.join(gibson2.assets_path, 'models', 'scene_components', 'door.urdf'))
s.import_interactive_object(obj3)
for i in range(100):
s.step()
s.disconnect()
for i in range(100):
s.step()
finally:
s.disconnect()

View File

@ -1,25 +1,19 @@
from gibson2.core.render.mesh_renderer.mesh_renderer_cpu import MeshRenderer
from gibson2.core.render.mesh_renderer.deprecated.mesh_renderer_tensor import MeshTensorRenderer
import numpy as np
import torch
import os
import matplotlib.pyplot as plt
import gibson2
import GPUtil
dir = os.path.join(gibson2.assets_path, 'test')
def test_render_loading_cleaning():
renderer = MeshRenderer(width=800, height=600)
renderer.release()
def test_tensor_render_loading_cleaning():
renderer = MeshTensorRenderer(width=800, height=600)
renderer.release()
def test_render_rendering():
renderer = MeshRenderer(width=800, height=600)
renderer.load_object(os.path.join(dir, 'mesh/bed1a77d92d64f5cbbaaae4feed64ec1_new.obj'))
@ -32,6 +26,20 @@ def test_render_rendering():
assert (np.allclose(np.mean(rgb, axis=(0, 1)), np.array([0.51661223, 0.5035339, 0.4777793, 1.]), rtol=1e-3))
renderer.release()
def test_render_rendering_cleaning():
for i in range(5):
renderer = MeshRenderer(width=800, height=600)
renderer.load_object(os.path.join(dir, 'mesh/bed1a77d92d64f5cbbaaae4feed64ec1_new.obj'))
renderer.add_instance(0)
renderer.set_camera([0, 0, 1.2], [0, 1, 1.2], [0, 1, 0])
renderer.set_fov(90)
rgb, _, seg, _ = renderer.render()
assert (np.allclose(np.mean(rgb, axis=(0, 1)), np.array([0.51661223, 0.5035339, 0.4777793, 1.]), rtol=1e-3))
GPUtil.showUtilization()
renderer.release()
GPUtil.showUtilization()
'''
def test_tensor_render_rendering():
w = 800
@ -57,4 +65,4 @@ def test_tensor_render_rendering():
# print(np.mean(img_np.astype(np.float32), axis = (0,1)))
# print(np.mean(img_np2.astype(np.float32), axis = (0,1)))
renderer.release()
'''
'''

View File

@ -3,20 +3,18 @@ from gibson2.core.physics.robot_locomotors import *
from gibson2.core.simulator import Simulator
from gibson2.core.physics.scene import *
from gibson2.utils.utils import parse_config
import pytest
config = parse_config('test.yaml')
def test_turtlebot():
s =Simulator(mode='headless')
scene = StadiumScene()
s.import_scene(scene)
turtlebot = Turtlebot(config)
s.import_robot(turtlebot)
assert p.getNumBodies() == 5
nbody = p.getNumBodies()
s.disconnect()
assert nbody == 5
def test_jr2():
s = Simulator(mode='headless')
@ -24,8 +22,9 @@ def test_jr2():
s.import_scene(scene)
jr2 = JR2(config)
s.import_robot(jr2)
assert p.getNumBodies() == 5
nbody = p.getNumBodies()
s.disconnect()
assert nbody == 5
def test_ant():
s = Simulator(mode='gui', timestep=1/40.0)
@ -36,13 +35,14 @@ def test_ant():
ant2 = Ant(config)
s.import_robot(ant2)
ant2.set_position([0,2,2])
assert p.getNumBodies() == 6
nbody = p.getNumBodies()
s.add_viewer()
for i in range(100):
s.step()
#ant.apply_action(np.random.randint(17))
#ant2.apply_action(np.random.randint(17))
s.disconnect()
assert nbody == 6
def test_husky():
s = Simulator(mode='headless')
@ -50,8 +50,9 @@ def test_husky():
s.import_scene(scene)
husky = Husky(config)
s.import_robot(husky)
assert p.getNumBodies() == 5
nbody = p.getNumBodies()
s.disconnect()
assert nbody == 5
def test_humanoid():
s = Simulator(mode='headless')
@ -59,8 +60,9 @@ def test_humanoid():
s.import_scene(scene)
humanoid = Humanoid(config)
s.import_robot(humanoid)
assert p.getNumBodies() == 5
nbody = p.getNumBodies()
s.disconnect()
assert nbody == 5
def test_quadrotor():
@ -69,8 +71,9 @@ def test_quadrotor():
s.import_scene(scene)
quadrotor = Quadrotor(config)
s.import_robot(quadrotor)
assert p.getNumBodies() == 5
nbody = p.getNumBodies()
s.disconnect()
assert nbody == 5
def test_turtlebot_position():
@ -79,11 +82,15 @@ def test_turtlebot_position():
s.import_scene(scene)
turtlebot = Turtlebot(config)
s.import_robot(turtlebot)
assert p.getNumBodies() == 5
turtlebot.set_position([0, 0, 5])
assert np.allclose(turtlebot.get_position(), np.array([0, 0, 5]))
nbody = p.getNumBodies()
pos = turtlebot.get_position()
s.disconnect()
assert nbody == 5
assert np.allclose(pos, np.array([0, 0, 5]))
def test_humanoid_position():
s = Simulator(mode='headless')
@ -91,10 +98,13 @@ def test_humanoid_position():
s.import_scene(scene)
humanoid = Humanoid(config)
s.import_robot(humanoid)
assert p.getNumBodies() == 5
humanoid.set_position([0, 0, 5])
assert np.allclose(humanoid.get_position(), np.array([0, 0, 5]))
nbody = p.getNumBodies()
pos = humanoid.get_position()
s.disconnect()
assert nbody == 5
assert np.allclose(pos, np.array([0, 0, 5]))
def test_multiagent():
s = Simulator(mode='headless')
@ -112,8 +122,7 @@ def test_multiagent():
turtlebot2.set_position([0, 0, 0.5])
turtlebot3.set_position([-1, 0, 0.5])
assert p.getNumBodies() == 7
nbody = p.getNumBodies()
for i in range(100):
#turtlebot1.apply_action(1)
#turtlebot2.apply_action(1)
@ -121,6 +130,7 @@ def test_multiagent():
s.step()
s.disconnect()
assert nbody == 7
def show_action_sensor_space():
s = Simulator(mode='gui')
@ -158,5 +168,7 @@ def show_action_sensor_space():
for robot in s.robots:
print(type(robot), len(robot.ordered_joints), robot.calc_state().shape)
while True:
for i in range(100):
s.step()
s.disconnect()
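
Note: the asserts now run after s.disconnect(): the value under test is captured first, the simulator is torn down, and only then is the assertion made, so a failed assert cannot leave a live simulator behind for the next test. A sketch mirroring test_turtlebot above (pybullet imported as p, as in the test module):

    import pybullet as p
    from gibson2.core.simulator import Simulator
    from gibson2.core.physics.scene import StadiumScene
    from gibson2.core.physics.robot_locomotors import Turtlebot
    from gibson2.utils.utils import parse_config

    def test_turtlebot_body_count():
        config = parse_config('test.yaml')
        s = Simulator(mode='headless')
        s.import_scene(StadiumScene())
        s.import_robot(Turtlebot(config))
        nbody = p.getNumBodies()  # capture before teardown
        s.disconnect()
        assert nbody == 5         # assert after teardown
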

View File

@ -13,7 +13,7 @@ config = parse_config('test.yaml')
def test_import_building():
s = Simulator(mode='headless')
scene = BuildingScene('space7')
scene = BuildingScene('Ohoopee')
s.import_scene(scene)
assert s.objects == list(range(2))
s.disconnect()
@ -44,7 +44,7 @@ def test_import_building_viewing():
turtlebot2.set_position([0, 0, 0.5])
turtlebot3.set_position([-0.5, 0, 0.5])
for i in range(1000):
for i in range(10):
s.step()
#turtlebot1.apply_action(np.random.randint(4))
#turtlebot2.apply_action(np.random.randint(4))