From b0cecc99d928b872819792bcfc39ab6f14c53cd7 Mon Sep 17 00:00:00 2001
From: felipecode
Date: Tue, 10 Apr 2018 14:55:03 +0200
Subject: [PATCH] refactor of experiment suite name

---
 Docs/benchmark_creating.md                    | 37 +++++----
 Docs/benchmark_metrics.md                     | 75 ++++++++++++++-----
 Docs/benchmark_start.md                       |  3 +-
 .../experiment_suite/__init__.py              |  2 +-
 .../{basic.py => basic_experiment_suite.py}   |  2 +-
 PythonClient/driving_benchmark_example.py     |  4 +-
 .../acceptance_tests/test_benchmark_module.py |  2 +-
 .../test/unit_tests/test_experiment_suite.py  |  2 +-
 8 files changed, 85 insertions(+), 42 deletions(-)
 rename PythonClient/carla/driving_benchmark/experiment_suite/{basic.py => basic_experiment_suite.py} (98%)

diff --git a/Docs/benchmark_creating.md b/Docs/benchmark_creating.md
index 7a647b5aa..91b4edb7c 100644
--- a/Docs/benchmark_creating.md
+++ b/Docs/benchmark_creating.md
@@ -4,25 +4,29 @@ Benchmarking your Agent
 ![Benchmark_structure](img/benchmark_diagram_small.png )
 
 The driving benchmark is associated with other two modules.
-The *agent* module, a controller which performs in a
+The *agent* module, a controller which performs in another
 module, the *experiment suite*.
 Both modules are abstract classes that must be
 redefined by the user.
 The following code excerpt is
-an example of how to apply a driving benchmark
+an example of how to apply a driving benchmark:
 
     agent = ForwardAgent()
-    experiment_suite = Basic()
+    experiment_suite = BasicExperimentSuite()
     benchmark = DrivingBenchmark()
 
-    benchmark_summary = benchmark.benchmark_agent(experiment_suite, agent, client)
+    performance_metrics_summary = benchmark.benchmark_agent(experiment_suite, agent, client)
 
 
 Following this excerpt, there are two classes to be defined.
-The ForwardAgent() and the BasicSuite().
-In this tutorial we are going to show how to create
-a basic experiment suite and a trivial forward going agent.
+The ForwardAgent() and the BasicExperimentSuite().
+After that, we instantiate the driving benchmark with default parameters
+and execute it. As a result of the execution, the driving benchmark
+returns a summary of the calculated [performance metrics](benchmark_metrics.md).
+In this tutorial we are going to show how to define
+a basic experiment suite and a trivial agent that simply
+goes forward.
 
 
 #### Defining the Agent
 
@@ -36,8 +40,8 @@ Lets start by deriving a simple Forward agent.
 
     class ForwardAgent(Agent):
 
-To have its performance evaluated, the Forward derived class _must_ redefine the *run_step*
-function as it is done in the following excerpt:
+To have its performance evaluated, the ForwardAgent derived class _must_
+redefine the *run_step* function as it is done in the following excerpt:
 
     def run_step(self, measurements, sensor_data, directions, target):
         """
@@ -53,13 +57,14 @@ This function receives the following parameters:
 
  * [Measurements](measurements.md): the entire state of the world received by the client
 from the CARLA Simulator. These measurements contains agent position, orientation,
 dynamic objects information, etc.
- * [Sensor Data](cameras_and_sensors.md): The measured data from defined sensors, such as Lidars or RGB cameras.
+ * [Sensor Data](cameras_and_sensors.md): The measured data from defined sensors,
+ such as Lidars or RGB cameras.
  * Directions: Information from the high level planner. Currently the planner
 sends a high level command from the set: STRAIGHT, RIGHT, LEFT, NOTHING.
  * Target Position: The position and orientation of the target.
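+
+As a sketch of how these inputs can be used (assuming the player
+measurements layout described in [Measurements](measurements.md)),
+an agent could read its own state inside *run_step*:
+
+    # A minimal sketch: read the agent state from the measurements
+    # message; field names assume the layout in Docs/measurements.md.
+    player = measurements.player_measurements
+    speed = player.forward_speed          # current forward speed
+    location = player.transform.location  # world position of the agent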
 With all this information, the *run_step* function is expected
- to return a [control message]() containing,
+ to return a [vehicle control message](measurements.md) containing
 steering value, throttle value, brake value, etc.
 
@@ -95,7 +100,7 @@ as in the following code.
 
 The user must select the weathers to be used. One should select the set of
 test weathers and the set of train weathers. This is defined as a
-class property as in the following example.
+class property as in the following example:
 
     @property
     def train_weathers(self):
@@ -108,13 +113,13 @@ class property as in the following example.
 
 ##### Building Experiments
 
 The [experiments are composed by a *task* that is defined by a set of *poses*](benchmark_structure.md).
-Lets start by selecting poses for one of the cities, Town01.
+Let's start by selecting poses for one of the cities, Town01 for instance.
 First of all, we need to see all the possible positions, for that, with
 a CARLA simulator running in a terminal, run:
 
     python view_start_positions.py
 
-    ![town01_positions](img/welcome.png)
+    ![town01_positions](img/town01_positions.png)
 
 
@@ -132,7 +137,7 @@ Figure 3, shows these defined poses for both carla towns.
 
     ![town01_positions](img/initial_positions.png)
 
-    >Figure 3: The poses used on this basic *Experimental Suite* example. Poses are
+    >Figure 3: The poses used in this basic *Experiment Suite* example. Poses are
     a tuple of start and end position of a goal-directed episode. Start positions are
     shown in Blue, end positions in Red. Left: Straight poses,
     where the goal is just straight away from the start position. Middle: One turn
@@ -157,7 +162,7 @@ vector as we show in the following code excerpt:
 
 ```
     experiments_vector = []
-    for weather in self.weathers:
+    for weather in used_weathers:
 
         for iteration in range(len(poses_tasks)):
             poses = poses_tasks[iteration]

diff --git a/Docs/benchmark_metrics.md b/Docs/benchmark_metrics.md
index 06a382835..78d75ed02 100644
--- a/Docs/benchmark_metrics.md
+++ b/Docs/benchmark_metrics.md
@@ -2,11 +2,20 @@ Driving Benchmark Performance Metrics
 ------------------------------
 
+This page explains the performance metrics module.
+It is used to compute a summary of results based on the agent's
+actions when completing the experiments.
 
-The benchmark module provides the following performance metrics, which
-are related to infraction:
+### Provided performance metrics
 
+The driving benchmark metrics module provides the following performance metrics:
+
+* Percentage of Success: The percentage of episodes (poses from tasks)
+that the agent successfully completed.
+
+* Average Completion: The average distance towards the goal that the
+agent was able to travel.
 
 * Off Road Intersection: The number of times the agent goes out of the road.
  The intersection is only counted if the area of the vehicle outside
@@ -26,16 +35,21 @@ are related to infraction:
 objects with an impact bigger than a *threshold*.
 
 
-These results can be computed with the metrics module, by using the following
-code excerpt:
+### Executing and Setting Parameters
+
+The metrics are computed as the final step of the benchmark
+and are returned by the [benchmark_agent()](benchmark_creating.md) function.
+Internally, the computation is executed as follows:
 
     metrics_object = Metrics(metrics_parameters)
     summary_dictionary = metrics_object.compute(path_to_execution_log)
 
-The class is instanced with a dictionary of parameters.
-These parameters could be changed by changing
-The function receives the full path to the execution log and a dictionary with
-parameters.
It returns a dictionary with the metrics.
+The performance metrics *compute* function
+receives the full path to the execution log
+and returns a dictionary with the computed performance metrics.
+
+Also, the metrics class is instantiated with the metric parameters.
+
 
 The parameters are:
 
 * Threshold: The threshold used by the metrics.
 
@@ -44,20 +58,43 @@ The parameters are:
 of frames that the agent needs to keep doing the infraction for
 it to be counted as another infraction. 
 
-*Frames Skip: It is related to the number of frames that are
+* Frames Skip: It is related to the number of frames that are
 skipped after a collision or a intersection starts.
 
+These parameters are defined as properties of the *Experiment Suite*
+base class and can be redefined in your
+[custom *Experiment Suite*](benchmark_creating.md/#defining-the-experiment-suite).
+
+The default parameters are:
 
-On your experiment suite class definition you could also
-redefine the metrics experiment.
+    @property
+    def metrics_parameters(self):
+        """
+        Property to return the parameters for the metric module
+        Could be redefined depending on the needs of the user.
+        """
+        return {
+            'intersection_offroad': {'frames_skip': 10,
+                                     'frames_recount': 20,
+                                     'threshold': 0.3
+                                     },
+            'intersection_otherlane': {'frames_skip': 10,
+                                       'frames_recount': 20,
+                                       'threshold': 0.4
+                                       },
+            'collision_other': {'frames_skip': 10,
+                                'frames_recount': 20,
+                                'threshold': 400
+                                },
+            'collision_vehicles': {'frames_skip': 10,
+                                   'frames_recount': 30,
+                                   'threshold': 400
+                                   },
+            'collision_pedestrians': {'frames_skip': 5,
+                                      'frames_recount': 100,
+                                      'threshold': 300
+                                      },
 
-####Benchmark Execution
-
-
-During the execution the benchmark module stores
-the [measurements](measurements.md) and
- [controls](measurements.md) for every single step.
- These results are stored on the *_benchmarks_results*
- folder.
+
+    }

diff --git a/Docs/benchmark_start.md b/Docs/benchmark_start.md
index 5b1fde76b..7d5d20180 100644
--- a/Docs/benchmark_start.md
+++ b/Docs/benchmark_start.md
@@ -51,7 +51,8 @@ Run the help command to see options available.
 
 When running the driving benchmark for the basic configuration
-you should [expect the following results](benchmark_creating.md/#expected-results).
+you should [expect the following results](benchmark_creating.md/#expected-results)
+to be printed on the terminal.
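+
+For instance, a run of the basic configuration on Town01 could look
+as follows (a sketch: the exact flags may vary between versions, so
+check the help command above for the options available):
+
+    python driving_benchmark_example.py --city-name Town01 --verbose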
diff --git a/PythonClient/carla/driving_benchmark/experiment_suite/__init__.py b/PythonClient/carla/driving_benchmark/experiment_suite/__init__.py index a6d64bff8..dbbe633c1 100644 --- a/PythonClient/carla/driving_benchmark/experiment_suite/__init__.py +++ b/PythonClient/carla/driving_benchmark/experiment_suite/__init__.py @@ -1,2 +1,2 @@ -from .basic import Basic +from .basic_experiment_suite import BasicExperimentSuite from .corl_2017 import CoRL2017 diff --git a/PythonClient/carla/driving_benchmark/experiment_suite/basic.py b/PythonClient/carla/driving_benchmark/experiment_suite/basic_experiment_suite.py similarity index 98% rename from PythonClient/carla/driving_benchmark/experiment_suite/basic.py rename to PythonClient/carla/driving_benchmark/experiment_suite/basic_experiment_suite.py index 379dbdedd..4ffeb060f 100644 --- a/PythonClient/carla/driving_benchmark/experiment_suite/basic.py +++ b/PythonClient/carla/driving_benchmark/experiment_suite/basic_experiment_suite.py @@ -14,7 +14,7 @@ from carla.settings import CarlaSettings from .experiment_suite import ExperimentSuite -class Basic(ExperimentSuite): +class BasicExperimentSuite(ExperimentSuite): @property def train_weathers(self): diff --git a/PythonClient/driving_benchmark_example.py b/PythonClient/driving_benchmark_example.py index d922cea33..e8b935f8d 100755 --- a/PythonClient/driving_benchmark_example.py +++ b/PythonClient/driving_benchmark_example.py @@ -11,7 +11,7 @@ import logging from carla.driving_benchmark import run_driving_benchmark from carla.driving_benchmark.experiment_suite import CoRL2017 -from carla.driving_benchmark.experiment_suite import Basic +from carla.driving_benchmark.experiment_suite import BasicExperimentSuite from carla.agent import ForwardAgent @@ -85,7 +85,7 @@ if __name__ == '__main__': else: print ' WARNING: running the basic driving benchmark, to run the CORL 2017, you should run' \ ' python driving_benchmark_example.py --corld-2017' - experiment_suite = Basic(args.city_name) + experiment_suite = BasicExperimentSuite(args.city_name) # Now actually run the driving_benchmark run_driving_benchmark(agent, experiment_suite, args.city_name, diff --git a/PythonClient/test/acceptance_tests/test_benchmark_module.py b/PythonClient/test/acceptance_tests/test_benchmark_module.py index 4641d3756..0dec56f5b 100644 --- a/PythonClient/test/acceptance_tests/test_benchmark_module.py +++ b/PythonClient/test/acceptance_tests/test_benchmark_module.py @@ -18,7 +18,7 @@ import unittest from carla.agent.agent import Agent -from carla.driving_benchmark.experiment_suite.basic import Basic +from carla.driving_benchmark.experiment_suite.basic_experiment_suite import BasicExperimentSuite from carla.client import make_carla_client, VehicleControl from carla.tcp import TCPConnectionError diff --git a/PythonClient/test/unit_tests/test_experiment_suite.py b/PythonClient/test/unit_tests/test_experiment_suite.py index 57cbf9762..9295277ad 100644 --- a/PythonClient/test/unit_tests/test_experiment_suite.py +++ b/PythonClient/test/unit_tests/test_experiment_suite.py @@ -1,7 +1,7 @@ import unittest from carla.driving_benchmark.experiment_suite.experiment_suite import ExperimentSuite -from carla.driving_benchmark.experiment_suite.basic import Basic +from carla.driving_benchmark.experiment_suite.basic_experiment_suite import BasicExperimentSuite from carla.driving_benchmark.experiment_suite.corl_2017 import CoRL2017