Style changes, changing useless files
commit e85f37fae9
parent edd4f20839

@@ -1 +1 @@
-from .driving_benchmark import run_driving_benchmark
+from .driving_benchmark import run_driving_benchmark

@@ -179,7 +179,7 @@ class DrivingBenchmark(object):
             start_point.location.x, start_point.location.y, 0.22], [
             start_point.orientation.x, start_point.orientation.y, 0.22], [
             end_point.location.x, end_point.location.y, end_point.location.z], [
-            end_point.orientation.x, end_point.orientation.y, end_point.orientation.z])
+            end_point.orientation.x, end_point.orientation.y, end_point.orientation.z])
 
     def _run_navigation_episode(
             self,

@@ -271,7 +271,6 @@ def run_driving_benchmark(agent,
                           host='127.0.0.1',
                           port=2000
                           ):
-
     while True:
         try:
 

@@ -286,8 +285,8 @@ def run_driving_benchmark(agent,
 
     benchmark = DrivingBenchmark(city_name=city_name,
                                  name_to_save=log_name + '_'
-                                 + type(experiment_suite).__name__
-                                 + '_' + city_name,
+                                 + type(experiment_suite).__name__
+                                 + '_' + city_name,
                                  continue_experiment=continue_experiment)
     # This function performs the benchmark. It returns a dictionary summarizing
     # the entire execution.
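
For orientation, a minimal sketch of how run_driving_benchmark is typically invoked with the pieces this commit touches (the agent and suite imports appear in the example script further down; host, port, and continue_experiment follow the signature shown above):

    from carla.agent import ForwardAgent
    from carla.driving_benchmark import run_driving_benchmark
    from carla.driving_benchmark.experiment_suites import BasicExperimentSuite

    # Sketch only; assumes a CARLA server is already listening on port 2000.
    agent = ForwardAgent()
    experiment_suite = BasicExperimentSuite('Town01')
    # Returns a dictionary summarizing the entire execution (see the comment above).
    run_driving_benchmark(agent, experiment_suite,
                          city_name='Town01',
                          log_name='test',
                          continue_experiment=False,
                          host='127.0.0.1',
                          port=2000)
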
@@ -19,6 +19,7 @@ class BasicExperimentSuite(ExperimentSuite):
     @property
     def train_weathers(self):
         return [1]
+
     @property
     def test_weathers(self):
         return [1]
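
These two properties are the hooks an ExperimentSuite subclass fills in; a hypothetical custom suite would follow the same shape (class name, weather IDs, and the empty build_experiments body are illustrative, not from this commit):

    from carla.driving_benchmark.experiment_suites.experiment_suite import ExperimentSuite

    class NightRainSuite(ExperimentSuite):  # hypothetical example
        @property
        def train_weathers(self):
            return [1, 3]  # arbitrary weather preset IDs

        @property
        def test_weathers(self):
            return [14]

        def build_experiments(self):
            # A real suite builds and returns a list of Experiment objects here.
            return []
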
@@ -8,19 +8,18 @@
 
 from __future__ import print_function
 
 from carla.driving_benchmark.experiment import Experiment
 from carla.sensor import Camera
 from carla.settings import CarlaSettings
 from carla.driving_benchmark.experiment_suites.experiment_suite import ExperimentSuite
 
-
 
 class CoRL2017(ExperimentSuite):
 
     @property
     def train_weathers(self):
         return [1, 3, 6, 8]
 
     @property
     def test_weathers(self):
         return [4, 14]

@@ -143,5 +142,3 @@ class CoRL2017(ExperimentSuite):
             experiments_vector.append(experiment)
 
         return experiments_vector
-
-
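
The experiments_vector returned above is filled with Experiment objects built from the imports at the top of this file; a condensed sketch of that pattern (camera parameters, pose pair, and task id are illustrative values, not the suite's real ones):

    from carla.driving_benchmark.experiment import Experiment
    from carla.sensor import Camera
    from carla.settings import CarlaSettings

    camera = Camera('CameraRGB')
    camera.set_image_size(800, 600)
    camera.set_position(2.0, 0.0, 1.4)

    conditions = CarlaSettings()
    conditions.set(
        SendNonPlayerAgentsInfo=True,
        NumberOfVehicles=20,
        NumberOfPedestrians=30,
        WeatherId=1)
    conditions.add_sensor(camera)

    experiment = Experiment()
    experiment.set(
        Conditions=conditions,
        Poses=[[7, 3]],  # illustrative start/end pose indices
        Task=0,
        Repetitions=1)
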
@@ -24,7 +24,6 @@ class ExperimentSuite(object):
 
         # Warning: assumes that all tasks have the same size
-
 
         return len(self._experiments[0].poses)
 
     def get_experiments(self):

@@ -45,8 +44,6 @@ class ExperimentSuite(object):
 
         return list(dynamic_tasks)
 
-
-
     @property
     def metrics_parameters(self):
         """

@@ -76,7 +73,7 @@ class ExperimentSuite(object):
                 'threshold': 300
             },
 
-        }
+        }
 
     @property
     def weathers(self):

@@ -91,6 +88,7 @@ class ExperimentSuite(object):
         Must be redefined in an inherited class.
+
         """
 
     @abc.abstractproperty
     def train_weathers(self):
         """

@@ -9,7 +9,6 @@ import numpy as np
 import math
 import os
 
-
 sldist = lambda c1, c2: math.sqrt((c2[0] - c1[0]) ** 2 + (c2[1] - c1[1]) ** 2)
 flatten = lambda l: [item for sublist in l for item in sublist]
 
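
As a quick illustration of the two helpers above (standalone):

    import math

    sldist = lambda c1, c2: math.sqrt((c2[0] - c1[0]) ** 2 + (c2[1] - c1[1]) ** 2)
    flatten = lambda l: [item for sublist in l for item in sublist]

    print(sldist((0.0, 0.0), (3.0, 4.0)))  # 5.0, planar Euclidean distance
    print(flatten([[1, 2], [3], [4, 5]]))  # [1, 2, 3, 4, 5]
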
@@ -60,8 +59,7 @@ class Metrics(object):
 
         # If there is a change in the position it means it is a new episode for sure.
         if (current_start != prev_start and current_end != prev_end) \
-                or current_exp_id != prev_exp_id:
-
+                or current_exp_id != prev_exp_id:
             episode_matrix_metrics.append(measurements_matrix[prev_i_position:i, :])
             prev_i_position = i
 

@@ -156,7 +154,6 @@ class Metrics(object):
 
     def _get_out_of_road_lane(self, selected_matrix, header):
 
-
         """
         Check for the situations were the agent goes out of the road.
         Args:

@@ -232,7 +229,6 @@ class Metrics(object):
         header_metrics = header_metrics.split(',')
         header_metrics[-1] = header_metrics[-1][:-1]
 
-
         result_matrix = np.loadtxt(os.path.join(path, 'summary.csv'), delimiter=",", skiprows=1)
 
         # Corner Case: The presented test just had one episode

@@ -256,9 +252,9 @@ class Metrics(object):
             'collision_vehicles': {w: [[] for i in range(len(tasks))] for w in
                                    all_weathers},
             'collision_other': {w: [[] for i in range(len(tasks))] for w in
-                                all_weathers},
+                                all_weathers},
             'episodes_fully_completed': {w: [0] * len(tasks) for w in
-                                         all_weathers},
+                                         all_weathers},
             'average_speed': {w: [0] * len(tasks) for w in all_weathers},
             'driven_kilometers': {w: [0] * len(tasks) for w in all_weathers}
         }

@@ -286,7 +282,7 @@ class Metrics(object):
 
                 metrics_dictionary['episodes_completion'][w][t] = \
                     ((experiment_results_matrix[:, header.index('initial_distance')]
-                      - experiment_results_matrix[:, header.index('final_distance')])
+                      - experiment_results_matrix[:, header.index('final_distance')])
                      / experiment_results_matrix[:, header.index('initial_distance')]).tolist()
 
                 # Now we divide the experiment metrics matrix
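
The expression above computes per-episode completion as (initial_distance - final_distance) / initial_distance; a worked standalone example with made-up distances:

    import numpy as np

    initial_distance = np.array([1000.0, 800.0])  # metres, made-up values
    final_distance = np.array([0.0, 200.0])

    completion = ((initial_distance - final_distance) / initial_distance).tolist()
    print(completion)  # [1.0, 0.75]: one episode finished, one got 75% of the way
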
@@ -302,8 +298,9 @@ class Metrics(object):
                     episode_experiment_metrics, header_metrics)
                 metrics_dictionary['driven_kilometers'][w][t] += km_run_episodes
                 metrics_dictionary['average_speed'][w][t] = \
-                    km_run_episodes/(experiment_results_matrix[count,
-                                     header.index('final_time')] / 3600.0)
+                    km_run_episodes / (experiment_results_matrix[count,
+                                       header.index(
+                                           'final_time')] / 3600.0)
                 count += 1
 
                 lane_road = self._get_out_of_road_lane(
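
The reformatted expression keeps the same arithmetic: kilometres driven divided by hours, with final_time in seconds (hence the / 3600.0). For example:

    km_run_episodes = 2.5  # km driven over the episodes (made-up)
    final_time = 300.0     # seconds

    average_speed = km_run_episodes / (final_time / 3600.0)
    print(average_speed)   # 30.0 km/h
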
@@ -316,7 +313,8 @@ class Metrics(object):
 
                 if tasks[t] in set(self._parameters['dynamic_tasks']):
 
-                    collisions = self._get_collisions(episode_experiment_metrics, header_metrics)
+                    collisions = self._get_collisions(episode_experiment_metrics,
+                                                      header_metrics)
 
                     metrics_dictionary['collision_pedestrians'][
                         w][t].append(collisions[2])

@@ -1,5 +1,3 @@
-
-
 import csv
 import datetime
 import os

@@ -148,14 +146,12 @@ class Recording(object):
 
             rw.writerow(self._dict_measurements)
 
-
     def _create_log_files(self):
         """
         Just create the log files and add the necessary header for it.
         """
 
         if not self._experiment_exist():
-
             os.mkdir(self._path)
 
             with open(os.path.join(self._path, 'summary.csv'), 'w') as ofd:

@@ -172,6 +168,7 @@ class Recording(object):
         If continue_experiment is false and experiment exist, generates a new file path
+
         """
 
         def get_non_existent_path(f_name_path):
             """
             Get the path to a filename which does not exist by incrementing path.

@@ -2,6 +2,7 @@ import os
 import numpy as np
 import json
 
+
 def print_summary(metrics_summary, weathers, path):
     """
     We plot the summary of the testing for the set selected weathers.

@@ -113,7 +114,7 @@ def print_summary(metrics_summary, weathers, path):
             count += 1
     print (' Average Between Weathers')
     for i in range(len(metric_sum_values)):
-        if metric_sum_values[i] == 0:
+        if metric_sum_values[i] == 0:
             print(' Task ', i, ' -> more than ', summed_driven_kilometers[i])
         else:
             print(' Task ', i, ' -> ', summed_driven_kilometers[i] / metric_sum_values[i])
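
This branch prints kilometres per infraction and falls back to 'more than <km driven>' when a task logged zero infractions, where the ratio would be undefined; the same logic in isolation, with made-up counts:

    metric_sum_values = [0, 4]               # infractions per task (made-up)
    summed_driven_kilometers = [12.0, 20.0]  # km driven per task (made-up)

    for i in range(len(metric_sum_values)):
        if metric_sum_values[i] == 0:
            print(' Task ', i, ' -> more than ', summed_driven_kilometers[i])
        else:
            print(' Task ', i, ' -> ', summed_driven_kilometers[i] / metric_sum_values[i])
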
@@ -14,7 +14,6 @@ from carla.driving_benchmark.experiment_suites import CoRL2017
 from carla.driving_benchmark.experiment_suites import BasicExperimentSuite
 from carla.agent import ForwardAgent
 
-
 if __name__ == '__main__':
 
     argparser = argparse.ArgumentParser(description=__doc__)

@@ -44,13 +43,13 @@ if __name__ == '__main__':
         metavar='C',
         default='Town01',
         help='The town that is going to be used on benchmark'
-             + '(needs to match active town in server, options: Town01 or Town02)')
+             + '(needs to match active town in server, options: Town01 or Town02)')
     argparser.add_argument(
         '-n', '--log_name',
         metavar='T',
         default='test',
         help='The name of the log file to be created by the benchmark'
-        )
+        )
     argparser.add_argument(
         '--corl-2017',
         action='store_true',

@@ -62,7 +61,6 @@ if __name__ == '__main__':
         help='If you want to continue the experiment with the same name'
         )
 
-
     args = argparser.parse_args()
     if args.debug:
         log_level = logging.DEBUG

@@ -84,8 +82,8 @@ if __name__ == '__main__':
         experiment_suite = CoRL2017(args.city_name)
     else:
         print (' WARNING: running the basic driving benchmark, to run for CoRL 2017'
-               ' experiment suites, you should run'
-               ' python driving_benchmark_example.py --corl-2017')
+               ' experiment suites, you should run'
+               ' python driving_benchmark_example.py --corl-2017')
         experiment_suite = BasicExperimentSuite(args.city_name)
 
     # Now actually run the driving_benchmark
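
Per the warning text above, the two ways to launch the example script are roughly (the log-name flag -n is shown earlier in this diff; the city-name flag is not visible here):

    python driving_benchmark_example.py                          # basic suite
    python driving_benchmark_example.py --corl-2017 -n corl_run  # CoRL 2017 suite
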
@@ -1,11 +0,0 @@
-
-
-# For here we are going to test all this restarting and going on stuff.
-
-import unittest
-
-class testBenchmark(unittest.TestCase):
-
-
-    def __init__(self):
-        pass