Benchmark seems to be working in a first version. The planner needs further work.
parent d9624f6616
commit 85d693032e
@@ -21,21 +21,21 @@ from carla.carla_server_pb2 import Control


 class Agent(object, ):

-    def __init__(self, **kwargs):
+    def __init__(self, city_name, **kwargs):
         import os
         dir_path = os.path.dirname(__file__)

-        planner = Planner(dir_path + '/../planner/' + city_name + '.txt',
-                          dir_path + '/../planner/' + city_name + '.png')
+        self._planner = Planner(dir_path + '/../carla/planner/' + city_name + '.txt',
+                                dir_path + '/../carla/planner/' + city_name + '.png')


-    def get_initial_distance(self):
+    def get_distance(self, start_point, end_point):

-        _, path_distance = planner.get_next_command(
-            [positions[start_point].location.x, positions[start_point].location.y, 22],
-            [positions[start_point].orientation.x, positions[start_point].orientation.y, 22],
-            [positions[end_point].location.x, positions[end_point].location.y, 22],
-            (1, 0, 0))
+        _, path_distance = self._planner.get_next_command(
+            [start_point.location.x, start_point.location.y, 22],
+            [start_point.orientation.x, start_point.orientation.y, 22],
+            [end_point.location.x, end_point.location.y, 22],
+            (1, 0, 0))
         # We calculate the timeout based on the distance

         return path_distance
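Put together, the base class now owns the planner and exposes one distance helper that works for anything carrying location and orientation fields (both the start spots returned by load_settings and player_measurements.transform). A rough assembly of what the class amounts to after this change; the Planner import path is assumed, since only the class name appears in the hunk:

import os

from carla.planner.planner import Planner  # import path assumed, not shown in the hunk


class Agent(object):

    def __init__(self, city_name, **kwargs):
        dir_path = os.path.dirname(__file__)
        # City-specific map files, e.g. Town01.txt and Town01.png, next to the planner module.
        self._planner = Planner(dir_path + '/../carla/planner/' + city_name + '.txt',
                                dir_path + '/../carla/planner/' + city_name + '.png')

    def get_distance(self, start_point, end_point):
        # 22 is the fixed z value used in the hunk; (1, 0, 0) is the final argument used there as well.
        _, path_distance = self._planner.get_next_command(
            [start_point.location.x, start_point.location.y, 22],
            [start_point.orientation.x, start_point.orientation.y, 22],
            [end_point.location.x, end_point.location.y, 22],
            (1, 0, 0))
        return path_distance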
@@ -1,10 +1,19 @@

 from .metrics import plot_summary
+try:
+    from carla import carla_server_pb2 as carla_protocol
+except ImportError:
+    raise RuntimeError('cannot import "carla_server_pb2.py", run the protobuf compiler to generate this file')


 import json, csv, time


 class Benchmark(object,):

     # Param @name to be used for saving purposes
-    def __init__(self, name):
+    def __init__(self, city_name, name):
+        self._city_name = city_name  # The name of the city that is going to be used.
         self._base_name = name  # Base name; the rest is saved according to what the episode was about
         self._dict_stats = {'exp_id': -1,
                             'rep': -1,
@@ -19,10 +28,6 @@ class Benchmark(object,):

                             }
-        with open(self._base_name + self._suffix_name, 'wb') as ofd:
-
-            w = csv.DictWriter(ofd, self._dict_stats.keys())
-            w.writeheader()


         self._dict_rewards = {
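The header-writing block removed from __init__ here reappears inside benchmark_agent further down; the underlying pattern is plain csv.DictWriter: write the header once when the file is created, then append one row per episode. A standalone sketch of that pattern with an illustrative file name and a trimmed field set (the 'wb' mode in the diff is the Python 2 csv idiom; 'w'/'a' are the Python 3 equivalents used here):

import csv

stats = {'exp_id': -1, 'rep': -1}   # trimmed stand-in for self._dict_stats

# Header once, when the benchmark starts.
with open('corl_example.csv', 'w') as ofd:      # file name is illustrative only
    writer = csv.DictWriter(ofd, stats.keys())
    writer.writeheader()

# One row per finished episode, appended later.
stats['exp_id'] = 0
stats['rep'] = 1
with open('corl_example.csv', 'a') as ofd:
    writer = csv.DictWriter(ofd, stats.keys())
    writer.writerow(stats)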
@@ -47,7 +52,7 @@ class Benchmark(object,):
         prev_x = -1
         prev_y = -1

         measurements, sensor_data = carla.read_data()
-        carla.send_control(Control())
+        carla.send_control(carla_protocol.Control())
         t0 = measurements.game_timestamp
         t1 = t0
         success = False
@@ -59,9 +64,7 @@ class Benchmark(object,):
         while((t1 - t0) < (time_out * 1000) and not success):
             capture_time = time.time()
             measurements, sensor_data = carla.read_data()
-            print (sensor_data)
-            print (measurements)
             print (time.time() - capture_time)

             control = agent.run_step(measurements, sensor_data, target)
             print ('STEER ', control.steer, 'GAS ', control.throttle, 'Brake ', control.brake)
             carla.send_control(control)
@@ -78,17 +81,12 @@ class Benchmark(object,):
             measurement_vec.append(measurements.player_measurements)

             t1 = measurements.game_timestamp
             print (t1 - t0)

-            # accumulate layout related signal
-            # accum_lane_intersect += reward.road_intersect
-            # accum_sidewalk_intersect += reward.sidewalk_intersect

             step += 1
             # This distance is wrong
             # The distance is based on the graph and is not quite exact.
             distance = agent.get_distance(measurements.player_measurements.transform, target)
-            agent.compute_distance()
-            # distance = self.compute_distance([curr_x, curr_y], [prev_x, prev_y], [target.location.x, target.location.y])
             # debug
             print('[d=%f] c_x = %f, c_y = %f ---> t_x = %f, t_y = %f' % (float(distance), curr_x, curr_y, target.location.x, target.location.y))

             if(distance < 200.0):
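Between this hunk and the previous one, the shape of the per-episode loop is: read the measurements, ask the agent for a control, send it, and stop once the planner distance to the target drops below the threshold or the time budget (compared in game milliseconds) runs out. A stripped-down sketch of that control flow, assuming a connected client object with the read_data/send_control interface used above:

def run_episode(agent, carla, target, time_out):
    measurements, _ = carla.read_data()
    t0 = measurements.game_timestamp
    t1 = t0
    success = False

    while (t1 - t0) < (time_out * 1000) and not success:
        measurements, sensor_data = carla.read_data()
        control = agent.run_step(measurements, sensor_data, target)
        carla.send_control(control)

        t1 = measurements.game_timestamp
        distance = agent.get_distance(measurements.player_measurements.transform, target)
        success = distance < 200.0   # same threshold as in the diff; units follow the planner output

    return success, (t1 - t0) / 1000.0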
@@ -107,51 +105,46 @@ class Benchmark(object,):


         experiments = self._build_experiments()  # Returns the experiment set built by the benchmark subclass
-        self._suffix_name = self._get_names(experiments[starting_position:])  # The fixed name considering all the experiments being run
+        self._suffix_name = self._get_experiments_names(experiments[starting_position:])  # The fixed name considering all the experiments being run
+        with open(self._base_name + self._suffix_name, 'wb') as ofd:
+
+            w = csv.DictWriter(ofd, self._dict_stats.keys())
+            w.writeheader()
+
+        with open('rewards_' + self._base_name + self._suffix_name, 'wb') as rfd:
+
+            rw = csv.DictWriter(rfd, self._dict_rewards.keys())
+            rw.writeheader()


-        self.write_experiment()  # write the experiment being run

         for experiment in experiments[starting_position:]:

+            self.write_experiment(experiment)  # write the experiment being run

-            #poses_exp = start_goal_poses[experiment_id]
-            #repetitions = repetitions_per_experiment[experiment_id]
-            #pedestrians_exp = pedestrians[experiment_id]
-            #ehicles_exp = vehicles[experiment_id]
-
-            #for rep in range(repetitions): # CONTROL REPETITION INSIDE EXPERIMENT ???
-
-            # for the different weathers
-            #for weather_cond in weathers:
-            # let's go through all the starting-goal positions of the experiment

             positions = carla.load_settings(experiment.conditions).player_start_spots

-            for rep in experiment.repetitions:
+            for pose in experiment.poses:
+                for rep in range(experiment.repetitions):

-                    trajectory = experiment.poses
-
-                    #ped = pedestrians_exp[i]
-                    #vehic = vehicles_exp[i]
-
-                    start_point = trajectory[0]
-                    end_point = trajectory[1]
+                    start_point = pose[0]
+                    end_point = pose[1]

                     carla.start_episode(start_point)

                     print('======== !!!! ==========')

-                    path_distance = agent.get_initial_distance(positions[start_point], positions[end_point])
+                    path_distance = agent.get_distance(positions[start_point], positions[end_point])

                     time_out = self._calculate_time_out(path_distance)
                     # running the agent
-                    (result, reward_vec, final_time, remaining_distance) = self.run_until(agent, carla, time_out, positions[end_point])
+                    (result, reward_vec, final_time, remaining_distance) = self.run_navigation_episode(agent, carla, time_out, positions[end_point])
@@ -160,18 +153,11 @@ class Benchmark(object,):
                     self.write_summary_results(experiment, pose, rep, path_distance, remaining_distance, final_time, time_out, result)


-                    #rw.writerow(dict_rewards)
-
-                    self.write_reward_results(reward_vec)
+                    self.write_reward_results(experiment, rep, reward_vec)


-                    # save results of the experiment
-                    #list_stats.append(dict_stats)
-                    #print (dict_stats)
-                    #w.writerow(dict_stats)


                     if(result > 0):
                         print('+++++ Target achieved in %f seconds! +++++' % final_time)
                     else:
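Taken together, these hunks give benchmark_agent a straightforward nested structure: for each experiment, load its conditions, then for every start/goal pose and repetition run one navigation episode and log its summary and per-frame rewards. A compressed sketch of that skeleton; the starting_position default and the omission of the CSV header setup are simplifications here, not part of the diff:

def benchmark_agent(self, agent, carla, starting_position=0):
    experiments = self._build_experiments()
    self._suffix_name = self._get_experiments_names(experiments[starting_position:])

    for experiment in experiments[starting_position:]:
        self.write_experiment(experiment)
        positions = carla.load_settings(experiment.conditions).player_start_spots

        for pose in experiment.poses:
            for rep in range(experiment.repetitions):
                start_point, end_point = pose
                carla.start_episode(start_point)

                path_distance = agent.get_distance(positions[start_point], positions[end_point])
                time_out = self._calculate_time_out(path_distance)

                result, reward_vec, final_time, remaining_distance = \
                    self.run_navigation_episode(agent, carla, time_out, positions[end_point])

                self.write_summary_results(experiment, pose, rep, path_distance,
                                           remaining_distance, final_time, time_out, result)
                self.write_reward_results(experiment, rep, reward_vec)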
@@ -182,9 +168,12 @@ class Benchmark(object,):



-    def write_details(self):
+    def write_experiment(self):
+
+        with open(self._get_details(), 'wb') as ofd:
+            pass


     def write_summary_results(self, experiment, pose, rep, path_distance, remaining_distance, final_time, time_out, result):

         self._dict_stats['exp_id'] = experiment.id
@@ -205,31 +194,31 @@ class Benchmark(object,):
             w = csv.DictWriter(ofd, self._dict_stats.keys())


-            w.writerow(dict_stats)
+            w.writerow(self._dict_stats)



     def write_reward_results(self, experiment, rep, reward_vec):

-        with open('rewards_' + self._base_name + self._suffix_name, 'a+') as ofd:
+        with open('rewards_' + self._base_name + self._suffix_name, 'a+') as rfd:

-            rw = csv.DictWriter(rfd, dict_rewards.keys())
+            rw = csv.DictWriter(rfd, self._dict_rewards.keys())
             rw.writeheader()


             for i in range(len(reward_vec)):
-                dict_rewards['exp_id'] = experiment.id
-                dict_rewards['rep'] = rep
-                dict_rewards['weather'] = experiment.Conditions.WeatherId
-                dict_rewards['collision_gen'] = reward_vec[i].collision_other
-                dict_rewards['collision_ped'] = reward_vec[i].collision_pedestrians
-                dict_rewards['collision_car'] = reward_vec[i].collision_vehicles
-                dict_rewards['lane_intersect'] = reward_vec[i].intersection_otherlane
-                dict_rewards['sidewalk_intersect'] = reward_vec[i].intersection_offroad
-                dict_rewards['pos_x'] = reward_vec[i].transform.location.x
-                dict_rewards['pos_y'] = reward_vec[i].transform.location.y
+                self._dict_rewards['exp_id'] = experiment.id
+                self._dict_rewards['rep'] = rep
+                self._dict_rewards['weather'] = experiment.Conditions.WeatherId
+                self._dict_rewards['collision_gen'] = reward_vec[i].collision_other
+                self._dict_rewards['collision_ped'] = reward_vec[i].collision_pedestrians
+                self._dict_rewards['collision_car'] = reward_vec[i].collision_vehicles
+                self._dict_rewards['lane_intersect'] = reward_vec[i].intersection_otherlane
+                self._dict_rewards['sidewalk_intersect'] = reward_vec[i].intersection_offroad
+                self._dict_rewards['pos_x'] = reward_vec[i].transform.location.x
+                self._dict_rewards['pos_y'] = reward_vec[i].transform.location.y

-                rw.writerow(dict_rewards)
+                rw.writerow(self._dict_rewards)


@@ -240,7 +229,7 @@ class Benchmark(object,):

         summary_weathers = {'train_weather': [1, 3, 6, 8]}

-        summary = plot_summary(self._base_name + self._suffix_name, summary_weathers, )
+        summary = plot_summary(self._base_name + self._suffix_name, summary_weathers)


     def plot_summary_train(self):
@@ -257,11 +246,11 @@ class Benchmark(object,):
         return 0

     # To be redefined in subclasses
-    def build_experiments(self):
+    def _build_experiments(self):
         pass


-    def _get_experiments(self, experiments):
+    def _get_experiments_names(self, experiments):

         name_cat = '_t'
         for experiment in experiments:
@@ -1,7 +1,8 @@

 from .benchmark import Benchmark
 from .experiment import Experiment

 from carla.sensor import Camera
 from carla.settings import CarlaSettings

 # Function to return the timeout (in milliseconds), calculated based on the distance to the goal.
 # This is the same timeout as used in the CoRL paper.
@@ -10,7 +11,7 @@ from .experiment import Experiment

 class CoRL(Benchmark):

-    def _calculate_time_out(self, distance):
+    def _calculate_time_out(self, path_distance):
         return ((path_distance / 100000.0) / 10.0) * 3600.0 + 10.0

     def _poses_town01(self):
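As a sanity check of the formula, a small worked example, assuming (as the 100000 divisor suggests) that the planner reports the path distance in centimetres; the run loop compares the result times 1000 against game_timestamp differences, so the value behaves as a budget in seconds:

def calculate_time_out(path_distance):
    # distance / 100000 -> km (assuming cm input); / 10 -> hours at 10 km/h; * 3600 -> seconds; + 10 buffer
    return ((path_distance / 100000.0) / 10.0) * 3600.0 + 10.0

print(calculate_time_out(200000.0))  # a 2 km route gives 720 + 10 = 730.0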
@@ -72,7 +73,7 @@ class CoRL(Benchmark):
         ]


-    def build_experiments(self, town):
+    def _build_experiments(self):


         # We set the camera that is going to be used for all experiments
@@ -86,7 +87,7 @@ class CoRL(Benchmark):
         camera.set_rotation(-15.0, 0, 0)

         weathers = [1, 3, 6, 8, 4, 14]
-        if town == 'Town01':
+        if self._city_name == 'Town01':
             poses_tasks = self._poses_town01()
             vehicles_tasks = [0, 0, 0, 20]
             pedestrians_tasks = [0, 0, 0, 50]
@@ -130,8 +131,10 @@ class CoRL(Benchmark):
                                        Conditions=conditions,
                                        Poses=poses,
                                        Id=iteration,
-                                       Repetions=1
+                                       Repetitions=1
                                        )
             experiments_vector.append(experiment)

         return experiments_vector
+
+    def _get_details(self):  # Function to get automatic information from the experiment for writing purposes
+        return 'corl_' + self._city_name
@@ -20,7 +20,7 @@ class Experiment(object):
             setattr(self, key, value)

     @property
-    def name(self):
+    def id(self):
         return self.Id

     @property
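Since _build_experiments fills each experiment with capitalised keyword arguments (Conditions, Poses, Id, Repetitions), the lowercase properties such as id are what the rest of the benchmark reads back, e.g. experiment.id in write_summary_results. A minimal, self-contained sketch of that pattern; the class body and defaults below are an illustrative stand-in, not the file's exact contents:

class Experiment(object):
    def __init__(self, **kwargs):
        self.Id = -1           # illustrative defaults
        self.Repetitions = 1
        for key, value in kwargs.items():
            setattr(self, key, value)

    @property
    def id(self):
        return self.Id


experiment = Experiment(Id=3, Repetitions=1)
print(experiment.id)  # 3, which is what gets stored as 'exp_id' in the summary CSV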
(Two binary image files added, 27 KiB and 10 KiB; contents not shown.)
@@ -59,7 +59,7 @@ class Planner(object):

         import os
         dir_path = os.path.dirname(__file__)
-        self.central_path_map_image = Image.open(map_file[:-4] + 'c.png')
+        self.central_path_map_image = Image.open(map_file[:-4] + 'Central.png')
         self.central_path_map_image.load()
         self.central_path_map_image = np.asarray(self.central_path_map_image, dtype="int32")
         self.central_path_map_image = self.central_path_map_image[:, :, 0]  # Just take the red channel
|
@ -9,11 +9,16 @@ from benchmarks.agent import Agent
|
|||
|
||||
from carla.tcp import TCPConnectionError
|
||||
from carla.client import make_carla_client
|
||||
import time
|
||||
|
||||
try:
|
||||
from carla import carla_server_pb2 as carla_protocol
|
||||
except ImportError:
|
||||
raise RuntimeError('cannot import "carla_server_pb2.py", run the protobuf compiler to generate this file')
|
||||
|
||||
class Manual(Agent):
|
||||
def run_step(self, data,target):
|
||||
control = Control()
|
||||
def run_step(self,measurements,sensor_data,target):
|
||||
control = carla_protocol.Control()
|
||||
control.steer = 0.0
|
||||
control.throttle = 0.9
|
||||
control.brake = 0.0
|
||||
|
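Any other agent plugs in the same way: it is constructed with the city name, so the Agent base class can build its planner, and it implements run_step(measurements, sensor_data, target) returning a carla_protocol.Control. A hypothetical constant-throttle agent under those assumptions; the class name and throttle value are made up for illustration:

from benchmarks.agent import Agent
from carla import carla_server_pb2 as carla_protocol


class ConstantThrottle(Agent):  # hypothetical example, not part of this commit
    def run_step(self, measurements, sensor_data, target):
        control = carla_protocol.Control()
        control.steer = 0.0
        control.throttle = 0.5
        control.brake = 0.0
        return control

# Used exactly like Manual in the __main__ block below:
# agent = ConstantThrottle('Town02')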
@@ -55,10 +60,12 @@ if(__name__ == '__main__'):

     while True:
         try:
-            with make_carla_client(host, port) as client:
-                corl = CoRL('test')
-                agent = Manual()
-                results = corl.benchmark_agent(agent, carla)
+            with make_carla_client(args.host, args.port) as client:
+                corl = CoRL('Town02', 'test')
+                agent = Manual('Town02')
+                results = corl.benchmark_agent(agent, client)
+                corl.plot_summary_test()
+                corl.plot_summary_train()

                 break
