Husky camera adjusted

commit  97ed60980b
parent  f42ef6d3e9
Author: hzyjerry
Date:   2017-10-26 13:03:32 -07:00
10 changed files with 112 additions and 191 deletions

View File

@@ -61,7 +61,8 @@ install() {
 	read -s password
 	## Core rendering functionality
-	conda install -c menpo opencv -y
+	#conda install -c menpo opencv -y
+	pip install opencv-python	## python3
 	conda install pytorch torchvision cuda80 -c soumith -y
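
The install change swaps the menpo conda package for the opencv-python wheel so that cv2 imports cleanly under Python 3. A quick sanity check of the new dependency (illustrative only, not part of the install script):

import cv2
import numpy as np

print(cv2.__version__)                                # wheel version pulled in by pip
frame = np.zeros((64, 64, 3), dtype=np.uint8)         # dummy BGR frame
print(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY).shape)  # (64, 64) if the binding works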

View File

@@ -35,29 +35,23 @@ if __name__ == '__main__':
     agent = RandomAgent(env.action_space)
     ob = None
-    try:
-        while 1:
-            frame = 0
-            score = 0
-            restart_delay = 0
-            obs = env.reset()
-            while True:
-                time.sleep(0.01)
-                a = agent.act(obs)
-                with Profiler("Agent step function"):
-                    obs, r, done, meta = env.step(a)
-                score += r
-                frame += 1
+    while 1:
+        frame = 0
+        score = 0
+        restart_delay = 0
+        obs = env.reset()
+        while True:
+            time.sleep(0.01)
+            a = agent.act(obs)
+            with Profiler("Agent step function"):
+                obs, r, done, meta = env.step(a)
+            score += r
+            frame += 1

-                if not done and frame < 60: continue
-                if restart_delay==0:
-                    print("score=%0.2f in %i frames" % (score, frame))
-                    restart_delay = 20 * 4 # 2 sec at 60 fps
-                else:
-                    restart_delay -= 1
-                    if restart_delay==0: break
-    except KeyboardInterrupt:
-        env._end()
-        print("Program finished")
+            if not done and frame < 60: continue
+            if restart_delay==0:
+                print("score=%0.2f in %i frames" % (score, frame))
+                restart_delay = 20 * 4 # 2 sec at 60 fps
+            else:
+                restart_delay -= 1
+                if restart_delay==0: break
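
With the try/except KeyboardInterrupt wrapper gone from these demo loops, env._end() is no longer called when the script is stopped with Ctrl-C. If that cleanup still matters, one option (an illustrative sketch reusing the env and agent objects set up earlier in the script, not code from this commit) is a try/finally around the flattened loop:

# Sketch only: keep the flat loop but still run cleanup on Ctrl-C or normal exit.
try:
    while 1:
        obs = env.reset()
        done = False
        while not done:
            obs, r, done, meta = env.step(agent.act(obs))
finally:
    env._end()   # runs on KeyboardInterrupt as well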

View File

@@ -42,29 +42,23 @@ if __name__ == '__main__':
             torsoId=i
     i = 0
-    try:
-        while 1:
-            frame = 0
-            score = 0
-            restart_delay = 0
-            obs = env.reset()
-            while True:
-                time.sleep(0.01)
-                a = agent.act(obs)
-                with Profiler("Agent step function"):
-                    obs, r, done, meta = env.step(a)
-                score += r
-                frame += 1
-                if not done and frame < 60: continue
-                if restart_delay==0:
-                    print("score=%0.2f in %i frames" % (score, frame))
-                    restart_delay = 20 * 4 # 2 sec at 60 fps
-                else:
-                    restart_delay -= 1
-                    if restart_delay==0: break
-    except KeyboardInterrupt:
-        env._end()
-        print("Program finished")
+    while 1:
+        frame = 0
+        score = 0
+        restart_delay = 0
+        obs = env.reset()
+        while True:
+            time.sleep(0.01)
+            a = agent.act(obs)
+            with Profiler("Agent step function"):
+                obs, r, done, meta = env.step(a)
+            score += r
+            frame += 1
+            if not done and frame < 60: continue
+            if restart_delay==0:
+                print("score=%0.2f in %i frames" % (score, frame))
+                restart_delay = 20 * 4 # 2 sec at 60 fps
+            else:
+                restart_delay -= 1
+                if restart_delay==0: break

View File

@@ -1,79 +0,0 @@
-from __future__ import print_function
-import time
-import numpy as np
-import sys
-import gym
-from PIL import Image
-from realenv.core.render.profiler import Profiler
-import pybullet as p
-
-
-class RandomAgent(object):
-    """The world's simplest agent"""
-    def __init__(self, action_space):
-        self.action_space = action_space
-        self.time = 0
-        self.repeat = 1
-        self.action_last = np.zeros(self.action_space.shape[0])
-
-    def act(self, observation, reward=None):
-        if self.time < self.repeat:
-            self.time = self.time + 1
-            return self.action_last
-        else:
-            self.time = 0
-            action = np.zeros(self.action_space.shape[0])
-            #action[np.random.randint(0, len(action))] = 1
-            action = [1] * self.action_space.shape[0]
-            self.action_last = action
-            return action
-
-
-if __name__ == '__main__':
-    env = gym.make('HuskyWalkingEnv-v0')
-    env.configure(timestep=1.0/(4 * 9), frame_skip=4)
-    env.reset()
-    agent = RandomAgent(env.action_space)
-    ob = None
-    torsoId = -1
-    print("HuskyWalkingEnv", p.getNumBodies())
-    for i in range (p.getNumBodies()):
-        print(p.getBodyInfo(i)[0].decode())
-        if (p.getBodyInfo(i)[0].decode() == "base_footprint"):
-            torsoId=i
-    i = 0
-    try:
-        while 1:
-            frame = 0
-            score = 0
-            restart_delay = 0
-            obs = env.reset()
-            while True:
-                time.sleep(0.01)
-                a = agent.act(obs)
-                with Profiler("Agent step function"):
-                    obs, r, done, meta = env.step(a)
-                score += r
-                frame += 1
-
-                distance=2.5 ## demo: living room ,kitchen
-                #distance=1.7 ## demo: stairs
-                #yaw = 0 ## demo: living room
-                yaw = 30 ## demo: kitchen
-                #yaw = 90 ## demo: stairs
-                humanPos, humanOrn = p.getBasePositionAndOrientation(torsoId)
-                p.resetDebugVisualizerCamera(distance,yaw,-35,humanPos); ## demo: kitchen, living room
-                #p.resetDebugVisualizerCamera(distance,yaw,-42,humanPos); ## demo: stairs
-
-                if not done and frame < 60: continue
-                if restart_delay==0:
-                    print("score=%0.2f in %i frames" % (score, frame))
-                    restart_delay = 20 * 4 # 2 sec at 60 fps
-                else:
-                    restart_delay -= 1
-                    if restart_delay==0: break
-    except KeyboardInterrupt:
-        env._end()
-        print("Program finished")

View File

@@ -24,7 +24,7 @@ class RandomAgent(object):
         else:
             self.time = 0
             action = np.zeros(self.action_space.shape[0])
-            action[np.random.choice(action.shape[0], 1)] = np.random.choice(2, 1) - 1
+            #action[np.random.choice(action.shape[0], 1)] = np.random.choice(2, 1) - 1
             self.action_last = action
             return action
@@ -34,29 +34,23 @@ if __name__ == '__main__':
     agent = RandomAgent(env.action_space)
     ob = None
-    try:
-        while 1:
-            frame = 0
-            score = 0
-            restart_delay = 0
-            obs = env.reset()
-            while True:
-                time.sleep(0.01)
-                a = agent.act(obs)
-                with Profiler("Agent step function"):
-                    obs, r, done, meta = env.step(a)
-                score += r
-                frame += 1
+    while 1:
+        frame = 0
+        score = 0
+        restart_delay = 0
+        obs = env.reset()
+        while True:
+            time.sleep(0.01)
+            a = agent.act(obs)
+            with Profiler("Agent step function"):
+                obs, r, done, meta = env.step(a)
+            score += r
+            frame += 1

-                if not done and frame < 60: continue
-                if restart_delay==0:
-                    print("score=%0.2f in %i frames" % (score, frame))
-                    restart_delay = 20 * 4 # 2 sec at 60 fps
-                else:
-                    restart_delay -= 1
-                    if restart_delay==0: break
-    except KeyboardInterrupt:
-        env._end()
-        print("Program finished")
+            if not done and frame < 60: continue
+            if restart_delay==0:
+                print("score=%0.2f in %i frames" % (score, frame))
+                restart_delay = 20 * 4 # 2 sec at 60 fps
+            else:
+                restart_delay -= 1
+                if restart_delay==0: break

View File

@@ -35,29 +35,23 @@ if __name__ == '__main__':
     agent = RandomAgent(env.action_space)
     ob = None
-    try:
-        while 1:
-            frame = 0
-            score = 0
-            restart_delay = 0
-            obs = env.reset()
-            while True:
-                time.sleep(0.01)
-                a = agent.act(obs)
-                with Profiler("Agent step function"):
-                    obs, r, done, meta = env.step(a)
-                score += r
-                frame += 1
+    while 1:
+        frame = 0
+        score = 0
+        restart_delay = 0
+        obs = env.reset()
+        while True:
+            time.sleep(0.01)
+            a = agent.act(obs)
+            with Profiler("Agent step function"):
+                obs, r, done, meta = env.step(a)
+            score += r
+            frame += 1

-                if not done and frame < 60: continue
-                if restart_delay==0:
-                    print("score=%0.2f in %i frames" % (score, frame))
-                    restart_delay = 20 * 4 # 2 sec at 60 fps
-                else:
-                    restart_delay -= 1
-                    if restart_delay==0: break
-    except KeyboardInterrupt:
-        env._end()
-        print("Program finished")
+            if not done and frame < 60: continue
+            if restart_delay==0:
+                print("score=%0.2f in %i frames" % (score, frame))
+                restart_delay = 20 * 4 # 2 sec at 60 fps
+            else:
+                restart_delay -= 1
+                if restart_delay==0: break

View File

@@ -17,6 +17,7 @@ class WalkerBase(MJCFBasedRobot):
         self.walk_target_x = 1e3 # kilometer away
         self.walk_target_y = 0
         self.body_xyz=[0,0,0]
+        self.eye_offset_orn = euler2quat(0, 0, 0)

     def robot_specific_reset(self):
@@ -191,9 +192,9 @@ class Humanoid(WalkerBase):
             orientation = [roll, pitch, yaw]
         else:
             position = [0, 0, 1.4]
-            orientation = [0, 0, yaw] # just face random direction, but stay straight otherwise
         self.robot_body.reset_position(position)
-        self.robot_body.reset_orientation(quatWXYZ2quatXYZW(euler2quat(orientation)))
+        # just face random direction, but stay straight otherwise
+        self.robot_body.reset_orientation(quatWXYZ2quatXYZW(euler2quat(0, 0, yaw)))
         self.initial_z = 0.8

         orientation, position = get_model_initial_pose("humanoid")
@@ -229,6 +230,8 @@ class Husky(WalkerBase):
         WalkerBase.__init__(self, "husky.urdf", "husky_robot", action_dim=4, obs_dim=20, power=2.5)
         if self.is_discrete:
             self.action_space = gym.spaces.Discrete(2 ** action_dim)
+        ## specific offset for husky.urdf
+        self.eye_offset_orn = euler2quat(np.pi/2, 0, -np.pi/2, axes='sxyz')

     def _step(self, action):
         if self.is_discrete:
@@ -241,5 +244,14 @@ class Husky(WalkerBase):
             realaction = action
         WalkerBase.step(self, action)

+    def robot_specific_reset(self):
+        WalkerBase.robot_specific_reset(self)
+        orientation, position = get_model_initial_pose("husky")
+        roll = orientation[0]
+        pitch = orientation[1]
+        yaw = orientation[2]
+        self.robot_body.reset_orientation(quatWXYZ2quatXYZW(euler2quat(roll, pitch, yaw)))
+        self.robot_body.reset_position(position)
+
     def alive_bonus(self, z, pitch):
         return +1 if z > 0.26 else -1 # 0.25 is central sphere rad, die if it scrapes the ground
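
These hunks are the camera adjustment named in the commit title: every WalkerBase robot now carries an eye_offset_orn quaternion (identity by default), and Husky overrides it with a roll = pi/2, yaw = -pi/2 rotation so the camera mounted on husky.urdf lines up with the renderer's eye frame; SensorRobotEnv composes the offset into eye_quat in a later file. A minimal sketch of that construction and composition, assuming the transforms3d package (variable names here are illustrative):

import numpy as np
from transforms3d.euler import euler2quat
from transforms3d.quaternions import qmult

# Static offset for husky.urdf: pi/2 about x then -pi/2 about z ('sxyz' = static
# axes); transforms3d returns quaternions in [w, x, y, z] order.
eye_offset_orn = euler2quat(np.pi/2, 0, -np.pi/2, axes='sxyz')

# Hypothetical current orientation of the robot's eye link, also [w, x, y, z].
eye_orn = euler2quat(0, 0, np.pi/4)

# Same composition order as the env code: eye orientation first, offset second.
eye_quat = qmult(eye_orn, eye_offset_orn)
print(eye_quat)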

View File

@@ -56,6 +56,8 @@ def get_model_initial_pose(robot):
         #return [0, 0, 3.14], [-0.603, -1.24, 2.35]
     if MODEL_ID == "BbxejD15Etk":
         return [0, 0, 3 * 3.14/2], [-6.76, -12, 1.4] ## Gates Huang
+    elif robot=="husky":
+        return [0, 0, 3.14], [-2, 3.5, 0.4]
     else:
         return [0, 0, 0], [0, 0, 1.4]
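
For reference, get_model_initial_pose returns a ([roll, pitch, yaw], [x, y, z]) pair in radians and metres, so the new branch spawns the Husky at (-2, 3.5, 0.4) facing a yaw of about 3.14 rad. An illustrative snippet (not repo code) showing how such a pose maps onto the XYZW quaternion ordering that pybullet expects, which is the reordering quatWXYZ2quatXYZW performs in Husky.robot_specific_reset above:

from transforms3d.euler import euler2quat

orientation, position = [0, 0, 3.14], [-2, 3.5, 0.4]   # the new "husky" return value
roll, pitch, yaw = orientation

w, x, y, z = euler2quat(roll, pitch, yaw)    # transforms3d gives [w, x, y, z]
quat_xyzw = [x, y, z, w]                     # pybullet-style [x, y, z, w] ordering
print(position, quat_xyzw)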

View File

@@ -5,6 +5,7 @@ from realenv.envs.env_bases import MJCFBaseEnv
 import realenv
 from gym import error
 from gym.utils import seeding
+from transforms3d import quaternions
 import pybullet as p
 from tqdm import *
 import subprocess, os, signal
@@ -19,9 +20,20 @@ import cv2
 DEFAULT_TIMESTEP = 1.0/(4 * 9)
 DEFAULT_FRAMESKIP = 4
+DEFAULT_DEBUG_CAMERA = {
+    'yaw': 30,
+    'distance': 2.5
+}
+
+#distance=2.5 ## demo: living room ,kitchen
+#distance=1.7 ## demo: stairs
+#yaw = 0 ## demo: living room
+#yaw = 30 ## demo: kitchen
+#yaw = 90 ## demo: stairs

 class SensorRobotEnv(MJCFBaseEnv):
     def __init__(self, human, timestep=DEFAULT_TIMESTEP, frame_skip=DEFAULT_FRAMESKIP):
         MJCFBaseEnv.__init__(self, human)
         self.camera_x = 0
@@ -43,6 +55,7 @@ class SensorRobotEnv(MJCFBaseEnv):
             train = False,
             overwrite_fofn=True)
         self.ground_ids = None
+        self.tracking_camera = DEFAULT_DEBUG_CAMERA

     def _reset(self):
         MJCFBaseEnv._reset(self)
@@ -131,18 +144,13 @@ class SensorRobotEnv(MJCFBaseEnv):
         self.reward += sum(self.rewards)

         if self.isRender:
-            distance=2.5 ## demo: living room ,kitchen
-            #distance=1.7 ## demo: stairs
-            #yaw = 0 ## demo: living room
-            yaw = 30 ## demo: kitchen
-            #yaw = 90 ## demo: stairs
             humanPos, humanOrn = p.getBasePositionAndOrientation(self.robot_tracking_id)
-            p.resetDebugVisualizerCamera(distance,yaw,-35,humanPos); ## demo: kitchen, living room
+            p.resetDebugVisualizerCamera(self.tracking_camera['distance'],self.tracking_camera['yaw'],-35,humanPos); ## demo: kitchen, living room
             #p.resetDebugVisualizerCamera(distance,yaw,-42,humanPos); ## demo: stairs

         eye_pos = self.robot.eyes.current_position()
         x, y, z ,w = self.robot.eyes.current_orientation()
-        eye_quat = [w, x, y, z]
+        eye_quat = quaternions.qmult([w, x, y, z], self.robot.eye_offset_orn)

         return state, sum(self.rewards), bool(done), {"eye_pos":eye_pos, "eye_quat":eye_quat}
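
The hard-coded demo camera values move into a DEFAULT_DEBUG_CAMERA dict exposed as self.tracking_camera, so a subclass can override individual entries (HuskyCameraEnv sets yaw to 60 in the next file). A minimal sketch of the pattern; the Sketch class names are illustrative, and the dict(...) copy is a suggested variation (the diff itself assigns the shared module-level dict, so an override also mutates the default):

import pybullet as p

DEFAULT_DEBUG_CAMERA = {'yaw': 30, 'distance': 2.5}

class SensorEnvSketch:
    def __init__(self):
        # Copying gives each env instance its own camera settings.
        self.tracking_camera = dict(DEFAULT_DEBUG_CAMERA)

    def update_debug_camera(self, robot_pos):
        # Same call the env makes each step when rendering is on.
        p.resetDebugVisualizerCamera(self.tracking_camera['distance'],
                                     self.tracking_camera['yaw'],
                                     -35,        # pitch used by the demo scenes
                                     robot_pos)

class HuskyCameraSketch(SensorEnvSketch):
    def __init__(self):
        super().__init__()
        self.tracking_camera['yaw'] = 60   # per-robot override, as in HuskyCameraEnv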

View File

@@ -20,6 +20,7 @@ class HuskyCameraEnv(HuskyEnv, CameraRobotEnv):
         HuskyEnv.__init__(self, is_discrete)
         CameraRobotEnv.__init__(self, human, timestep=timestep,
             frame_skip=frame_skip, enable_sensors=enable_sensors)
+        self.tracking_camera['yaw'] = 60

 class HuskySensorEnv(HuskyEnv, SensorRobotEnv):
     def __init__(self, human=True, timestep=HUMANOID_TIMESTEP,