Merge branch 'master' into igdsl2

commit 2a67f14046
@@ -65,19 +65,19 @@ def main():
     p.loadMJCF(floor)

     robots = []
-    config = parse_config('../configs/fetch_p2p_nav.yaml')
+    config = parse_config('../configs/fetch_reaching.yaml')
     fetch = Fetch(config)
     robots.append(fetch)

-    config = parse_config('../configs/jr_p2p_nav.yaml')
+    config = parse_config('../configs/jr_reaching.yaml')
     jr = JR2_Kinova(config)
     robots.append(jr)

-    config = parse_config('../configs/locobot_p2p_nav.yaml')
+    config = parse_config('../configs/locobot_point_nav.yaml')
     locobot = Locobot(config)
     robots.append(locobot)

-    config = parse_config('../configs/turtlebot_p2p_nav.yaml')
+    config = parse_config('../configs/turtlebot_point_nav.yaml')
     turtlebot = Turtlebot(config)
     robots.append(turtlebot)

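For context, `parse_config` is used throughout these examples to turn a YAML file into a plain dictionary before it is handed to a robot class. A minimal sketch of what such a helper might look like, assuming it does nothing more than load YAML (the actual gibson2 implementation may differ):

    # Hypothetical minimal stand-in for parse_config; assumes the config is plain YAML.
    import yaml


    def parse_config(config_path):
        """Load a YAML config file into a dictionary."""
        with open(config_path) as f:
            return yaml.safe_load(f)


    # Example: read the robot name the way the demos above consume it.
    # config = parse_config('../configs/fetch_reaching.yaml')
    # print(config['robot'])
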
@@ -12,7 +12,7 @@ import numpy as np


 def main():
-    config = parse_config('../configs/fetch_p2p_nav.yaml')
+    config = parse_config('../configs/fetch_reaching.yaml')
     s = Simulator(mode='gui', physics_timestep=1 / 240.0)
     scene = EmptyScene()
     s.import_scene(scene)

@@ -21,41 +21,44 @@ def main():
     robot_id = fetch.robot_ids[0]

-    arm_joints = joints_from_names(robot_id, ['torso_lift_joint','shoulder_pan_joint', 'shoulder_lift_joint', 'upperarm_roll_joint',
-                                              'elbow_flex_joint', 'forearm_roll_joint', 'wrist_flex_joint', 'wrist_roll_joint'])
-    #finger_joints = joints_from_names(robot_id, ['l_gripper_finger_joint', 'r_gripper_finger_joint'])
+    arm_joints = joints_from_names(robot_id, [
+        'torso_lift_joint',
+        'shoulder_pan_joint',
+        'shoulder_lift_joint',
+        'upperarm_roll_joint',
+        'elbow_flex_joint',
+        'forearm_roll_joint',
+        'wrist_flex_joint',
+        'wrist_roll_joint'])

     fetch.robot_body.reset_position([0, 0, 0])
     fetch.robot_body.reset_orientation([0, 0, 1, 0])
-    x,y,z = fetch.get_end_effector_position()
-    #set_joint_positions(robot_id, finger_joints, [0.04,0.04])
-    print(x,y,z)
+    x, y, z = fetch.get_end_effector_position()

-    visual_marker = p.createVisualShape(p.GEOM_SPHERE, radius = 0.02)
-    marker = p.createMultiBody(baseVisualShapeIndex = visual_marker)
+    visual_marker = p.createVisualShape(p.GEOM_SPHERE, radius=0.02)
+    marker = p.createMultiBody(baseVisualShapeIndex=visual_marker)

-    #max_limits = [0,0] + get_max_limits(robot_id, arm_joints) + [0.05,0.05]
-    #min_limits = [0,0] + get_min_limits(robot_id, arm_joints) + [0,0]
-    #rest_position = [0,0] + list(get_joint_positions(robot_id, arm_joints)) + [0.04,0.04]
-    max_limits = [0,0] + get_max_limits(robot_id, arm_joints)
-    min_limits = [0,0] + get_min_limits(robot_id, arm_joints)
-    rest_position = [0,0] + list(get_joint_positions(robot_id, arm_joints))
+    max_limits = [0, 0] + get_max_limits(robot_id, arm_joints)
+    min_limits = [0, 0] + get_min_limits(robot_id, arm_joints)
+    rest_position = [0, 0] + list(get_joint_positions(robot_id, arm_joints))
     joint_range = list(np.array(max_limits) - np.array(min_limits))
     joint_range = [item + 1 for item in joint_range]
     jd = [0.1 for item in joint_range]
     print(max_limits)
     print(min_limits)

     def accurateCalculateInverseKinematics(robotid, endEffectorId, targetPos, threshold, maxIter):
         sample_fn = get_sample_fn(robotid, arm_joints)
         set_joint_positions(robotid, arm_joints, sample_fn())
         it = 0
         while it < maxIter:
-            jointPoses = p.calculateInverseKinematics(robotid, endEffectorId, targetPos,
-                                                      lowerLimits = min_limits,
-                                                      upperLimits = max_limits,
-                                                      jointRanges = joint_range,
-                                                      restPoses = rest_position,
-                                                      jointDamping = jd)
+            jointPoses = p.calculateInverseKinematics(
+                robotid,
+                endEffectorId,
+                targetPos,
+                lowerLimits=min_limits,
+                upperLimits=max_limits,
+                jointRanges=joint_range,
+                restPoses=rest_position,
+                jointDamping=jd)
             set_joint_positions(robotid, arm_joints, jointPoses[2:10])
             ls = p.getLinkState(robotid, endEffectorId)
             newPos = ls[4]

@@ -66,7 +69,7 @@ def main():
             it += 1

-        print ("Num iter: " + str(it) + ", threshold: " + str(dist))
+        print("Num iter: " + str(it) + ", residual: " + str(dist))
         return jointPoses

     while True:

@@ -75,10 +78,13 @@ def main():
         fetch.robot_body.reset_orientation([0, 0, 1, 0])
         threshold = 0.01
         maxIter = 100
-        joint_pos = accurateCalculateInverseKinematics(robot_id, fetch.parts['gripper_link'].body_part_index, [x, y, z],
-                                                       threshold, maxIter)[2:10]
+        joint_pos = accurateCalculateInverseKinematics(
+            robot_id,
+            fetch.parts['gripper_link'].body_part_index,
+            [x, y, z],
+            threshold,
+            maxIter)[2:10]

-        #set_joint_positions(robot_id, finger_joints, [0.04, 0.04])
         s.step()
         keys = p.getKeyboardEvents()
         for k, v in keys.items():

@@ -94,11 +100,10 @@ def main():
                z += 0.01
            if (k == ord('x') and (v & p.KEY_IS_DOWN)):
                z -= 0.01
-        p.resetBasePositionAndOrientation(marker, [x,y,z], [0,0,0,1])
+        p.resetBasePositionAndOrientation(marker, [x, y, z], [0, 0, 0, 1])

    s.disconnect()


if __name__ == '__main__':
    main()

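The `accurateCalculateInverseKinematics` helper in the hunks above follows a common pybullet pattern: call `calculateInverseKinematics` repeatedly, reset the joints to the returned solution, and stop once the end effector is close enough to the target. A self-contained sketch of the same idea on a stock pybullet model (the KUKA iiwa that ships with `pybullet_data`); the names and numbers below are chosen for illustration, not taken from the example:

    import pybullet as p
    import pybullet_data

    p.connect(p.DIRECT)
    p.setAdditionalSearchPath(pybullet_data.getDataPath())
    robot = p.loadURDF("kuka_iiwa/model.urdf", useFixedBase=True)

    end_effector_link = 6                                  # last link of the 7-DoF iiwa
    movable_joints = list(range(p.getNumJoints(robot)))    # all 7 joints are revolute


    def iterative_ik(target_pos, threshold=1e-3, max_iter=100):
        """Refine IK by resetting the joints to each solution until the error is small."""
        for _ in range(max_iter):
            joint_poses = p.calculateInverseKinematics(robot, end_effector_link, target_pos)
            for joint_index, value in zip(movable_joints, joint_poses):
                p.resetJointState(robot, joint_index, value)
            link_state = p.getLinkState(robot, end_effector_link)
            current_pos = link_state[4]  # world position of the link frame
            error = sum((c - t) ** 2 for c, t in zip(current_pos, target_pos)) ** 0.5
            if error < threshold:
                break
        return joint_poses, error


    poses, residual = iterative_ik([0.4, 0.2, 0.6])
    print("residual:", residual)
    p.disconnect()
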
@@ -19,19 +19,19 @@ def main():
     p.loadMJCF(floor)

     robots = []
-    config = parse_config('../configs/fetch_p2p_nav.yaml')
+    config = parse_config('../configs/fetch_reaching.yaml')
     fetch = Fetch(config)
     robots.append(fetch)

-    config = parse_config('../configs/jr_p2p_nav.yaml')
+    config = parse_config('../configs/jr_reaching.yaml')
     jr = JR2_Kinova(config)
     robots.append(jr)

-    config = parse_config('../configs/locobot_p2p_nav.yaml')
+    config = parse_config('../configs/locobot_point_nav.yaml')
     locobot = Locobot(config)
     robots.append(locobot)

-    config = parse_config('../configs/turtlebot_p2p_nav.yaml')
+    config = parse_config('../configs/turtlebot_point_nav.yaml')
     turtlebot = Turtlebot(config)
     robots.append(turtlebot)

@@ -1,4 +1,4 @@
-class Scene:
+class Scene(object):
     """
     Base class for all Scene objects
     Contains the base functionalities and the functions that all derived classes need to implement

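The only change here is making `Scene` an explicit new-style class, which matters under Python 2 and is a no-op under Python 3. Based on the docstring and on the `scene.load()` calls in the simulator hunks below, the base class is essentially an interface; a rough sketch of that shape, where only `load` is taken from this diff and the rest is assumed:

    class Scene(object):
        """Base class for all Scene objects.

        Derived classes are expected to implement the loading logic and
        return the pybullet body ids they create.
        """

        def load(self):
            """Load the scene into the physics engine and return pybullet body ids."""
            raise NotImplementedError()
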
@@ -7,6 +7,12 @@ from gibson2.render.mesh_renderer.instances import InstanceGroup, Instance, Robo
 from gibson2.render.mesh_renderer.mesh_renderer_tensor import MeshRendererG2G
 from gibson2.render.viewer import Viewer, ViewerVR, ViewerSimple
 from gibson2.objects.articulated_object import ArticulatedObject, URDFObject
+from gibson2.scenes.igibson_indoor_scene import InteractiveIndoorScene
+from gibson2.scenes.scene_base import Scene
+from gibson2.robots.robot_base import BaseRobot
+from gibson2.objects.object_base import Object
+

 import pybullet as p
 import gibson2
 import os

@@ -200,7 +206,8 @@ class Simulator:
         :param class_id: Class id for rendering semantic segmentation
         :return: pybullet body ids from scene.load function
         """
-
+        assert isinstance(scene, Scene) and not isinstance(scene, InteractiveIndoorScene), \
+            'import_scene can only be called with Scene that is not InteractiveIndoorScene'
         # Load the scene. Returns a list of pybullet ids of the objects loaded that we can use to
         # load them in the renderer
         new_object_pb_ids = scene.load()

@@ -223,6 +230,8 @@ class Simulator:
         :param scene: iGSDFScene instance
         :return: pybullet body ids from scene.load function
         """
+        assert isinstance(scene, InteractiveIndoorScene), \
+            'import_ig_scene can only be called with InteractiveIndoorScene'
         new_object_ids = scene.load()
         self.objects += new_object_ids
         if scene.texture_randomization:

@@ -279,7 +288,8 @@ class Simulator:
         :param use_pbr_mapping: Whether to use pbr mapping
         :param shadow_caster: Whether to cast shadow
         """
-
+        assert isinstance(obj, Object), \
+            'import_object can only be called with Object'
         # Load the object in pybullet. Returns a pybullet id that we can use to load it in the renderer
         new_object_pb_id = obj.load()
         self.objects += [new_object_pb_id]

@@ -506,6 +516,8 @@ class Simulator:
         :param class_id: Class id for rendering semantic segmentation
         :return: pybullet id
         """
+        assert isinstance(robot, BaseRobot), \
+            'import_robot can only be called with BaseRobot'
         ids = robot.load()
         visual_objects = []
         link_ids = []

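These four hunks add the same defensive pattern: each `import_*` method now asserts the type of its argument before calling `load()`, so a misuse (for example, passing an `InteractiveIndoorScene` to `import_scene` instead of `import_ig_scene`) fails with a clear message instead of an obscure error later. A minimal, self-contained illustration of that pattern with hypothetical stand-in classes, not the gibson2 API:

    class Scene(object):
        def load(self):
            return [1]              # pretend pybullet body id


    class InteractiveIndoorScene(Scene):
        def load(self):
            return [2, 3, 4]


    class Simulator(object):
        def __init__(self):
            self.objects = []

        def import_scene(self, scene):
            # Reject interactive scenes here; they must go through import_ig_scene.
            assert isinstance(scene, Scene) and not isinstance(scene, InteractiveIndoorScene), \
                'import_scene can only be called with Scene that is not InteractiveIndoorScene'
            self.objects += scene.load()

        def import_ig_scene(self, scene):
            assert isinstance(scene, InteractiveIndoorScene), \
                'import_ig_scene can only be called with InteractiveIndoorScene'
            self.objects += scene.load()


    sim = Simulator()
    sim.import_scene(Scene())                       # fine
    try:
        sim.import_scene(InteractiveIndoorScene())  # raises AssertionError
    except AssertionError as e:
        print(e)
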
@@ -96,7 +96,7 @@ At the same time, real-world free-moving objects can have overlapping bounding b
 - if two objects overlap with each other, we try to shrink the bounding boxes. We shrink by no more than 80%.
 - we randomize the objects with our object assets, and try to come up with no-collision object configurations from overlapping bounding boxes.

-We managed to provide no-collision URDFs for 2239 scenes. If the scene your are converting is among this set, the scene generation process will automatically retrieve the no-collision URDF. If not, **please use the scene at your own discretion**, since the objects might be penetrating with each other.
+We managed to provide no-collision URDFs for 6049 scenes. If the scene you are converting is among this set, the scene generation process will automatically retrieve the no-collision URDF. If not, **please use the scene at your own discretion**, since the objects might be penetrating each other.


 ### Examine generated scenes

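The documentation change itself only updates the scene count (2239 to 6049), but the surrounding bullets describe the de-collision procedure: shrink overlapping bounding boxes by at most 80%, then search for a non-colliding configuration. A rough sketch of the overlap test and the bounded shrink on axis-aligned 2D boxes, purely to illustrate the idea (the actual pipeline is not shown in this diff):

    def overlaps(a, b):
        """Axis-aligned overlap test; boxes are (xmin, ymin, xmax, ymax)."""
        return a[0] < b[2] and b[0] < a[2] and a[1] < b[3] and b[1] < a[3]


    def shrink(box, factor):
        """Shrink a box around its center by `factor` (0 = no shrink, 0.8 = 80%)."""
        cx, cy = (box[0] + box[2]) / 2.0, (box[1] + box[3]) / 2.0
        hw = (box[2] - box[0]) / 2.0 * (1 - factor)
        hh = (box[3] - box[1]) / 2.0 * (1 - factor)
        return (cx - hw, cy - hh, cx + hw, cy + hh)


    def try_decollide(a, b, step=0.05, max_factor=0.8):
        """Shrink both boxes in small steps, giving up past the 80% limit."""
        factor = 0.0
        while overlaps(shrink(a, factor), shrink(b, factor)):
            factor += step
            if factor > max_factor:
                return None  # still overlapping; caller must handle this case
        return shrink(a, factor), shrink(b, factor)


    print(try_decollide((0, 0, 2, 2), (1, 1, 3, 3)))
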
@@ -1,25 +1,34 @@
 # scene
 scene: stadium
 build_graph: false
-load_texture: true
+pybullet_load_texture: true
 trav_map_type: no_obj
 trav_map_resolution: 0.1
 trav_map_erosion: 2
+should_open_all_doors: true
+
+# domain randomization
+texture_randomization_freq: null
+object_randomization_freq: null

 # robot
 robot: Turtlebot
-velocity: 0.1
+is_discrete: false
+velocity: 1.0

-# task, observation and action
-task: pointgoal # pointgoal|objectgoal|areagoal|reaching
+# task
+task: point_nav_random
 target_dist_min: 1.0
 target_dist_max: 10.0
-initial_pos_z_offset: 0.1
-is_discrete: false
-additional_states_dim: 4
+goal_format: polar
+task_obs_dim: 4

 # reward
 reward_type: l2
 success_reward: 10.0
 slack_reward: -0.01
 potential_reward_weight: 1.0
 collision_reward_weight: -0.1
-collision_ignore_link_a_ids: [1, 2, 3, 4] # ignore collisions with these robot links

 # discount factor
 discount_factor: 0.99

@@ -28,16 +37,19 @@ discount_factor: 0.99
 dist_tol: 0.36 # body width
 max_step: 500
 max_collisions_allowed: 500
-goal_format: polar

+# misc config
+initial_pos_z_offset: 0.1
+collision_ignore_link_a_ids: [1, 2, 3, 4] # ignore collisions with these robot links
+
 # sensor spec
-output: [sensor, rgb, depth, scan]
+output: [task_obs, rgb, depth, scan]
 # image
 # ASUS Xtion PRO LIVE
 # https://www.asus.com/us/3D-Sensor/Xtion_PRO_LIVE
 fisheye: false
 image_width: 160
 image_height: 120
 vertical_fov: 45
 # depth
 depth_low: 0.8

@@ -53,11 +65,10 @@ laser_angular_range: 240.0
 min_laser_dist: 0.05
 laser_link_name: scan_link

 # sensor noise
 depth_noise_rate: 0.0
 scan_noise_rate: 0.0

 # visual objects
 visual_object_at_initial_target_pos: true
-target_visual_object_visible_to_agent: false
-
+target_visual_object_visible_to_agent: false

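Taken together, the config hunks rename several keys (`load_texture` to `pybullet_load_texture`, `additional_states_dim` to `task_obs_dim`, the `sensor` modality to `task_obs` in `output`) and switch `task` from `pointgoal` to `point_nav_random`. A small sketch of how an older YAML file could be migrated to the new names; the mapping below is read off this diff, while the function name and PyYAML usage are illustrative:

    import yaml

    # Old-key -> new-key renames observed in this diff.
    RENAMED_KEYS = {
        'load_texture': 'pybullet_load_texture',
        'additional_states_dim': 'task_obs_dim',
    }
    RENAMED_OUTPUT_MODALITIES = {'sensor': 'task_obs'}


    def migrate_config(path_in, path_out):
        """Rewrite an old-style config file using the renamed keys."""
        with open(path_in) as f:
            config = yaml.safe_load(f)
        for old, new in RENAMED_KEYS.items():
            if old in config:
                config[new] = config.pop(old)
        if 'output' in config:
            config['output'] = [RENAMED_OUTPUT_MODALITIES.get(m, m) for m in config['output']]
        with open(path_out, 'w') as f:
            yaml.safe_dump(config, f, default_flow_style=False)


    # migrate_config('turtlebot_p2p_nav.yaml', 'turtlebot_point_nav.yaml')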