refactoring, names and functions

felipecode 2018-04-09 18:52:25 +02:00
commit ee4b9d44b7
23 changed files with 267 additions and 262 deletions

View File

@@ -1,4 +1,4 @@
----- Printing results for training weathers (Seen in Training) -----
Shows the results for test and train weathers; in this case they are the same.
Percentage of Successful Episodes
@@ -87,95 +87,3 @@
Task 2 -> 0.008890562810079442
Task 3 -> 0.19999741925565595
----- Printing results for test weathers (Unseen in Training) -----
Percentage of Successful Episodes
Weather: Clear Noon
Task: 0 -> 1.0
Task: 1 -> 0.0
Task: 2 -> 0.0
Task: 3 -> 0.0
Average Between Weathers
Task 0 -> 1.0
Task 1 -> 0.0
Task 2 -> 0.0
Task 3 -> 0.0
Average Percentage of Distance to Goal Travelled
Weather: Clear Noon
Task: 0 -> 0.9642745353103807
Task: 1 -> 0.6793899335407552
Task: 2 -> 0.6593335145871936
Task: 3 -> 0.6696988482193544
Average Between Weathers
Task 0 -> 0.9642745353103807
Task 1 -> 0.6793899335407552
Task 2 -> 0.6593335145871936
Task 3 -> 0.6696988482193544
Avg. Kilometers driven before a collision to a PEDESTRIAN
Weather: Clear Noon
Task 0 -> more than 0.043162963265066225
Task 1 -> more than 0.1235917529074286
Task 2 -> more than 0.24004519587214496
Task 3 -> more than 0.19999741925565595
Average Between Weathers
More than 0 -> 0.043162963265066225
More than 1 -> 0.1235917529074286
More than 2 -> 0.24004519587214496
More than 3 -> 0.19999741925565595
Avg. Kilometers driven before a collision to a VEHICLE
Weather: Clear Noon
Task 0 -> more than 0.043162963265066225
Task 1 -> more than 0.1235917529074286
Task 2 -> more than 0.24004519587214496
Task 3 -> 0.03999948385113119
Average Between Weathers
More than 0 -> 0.043162963265066225
More than 1 -> 0.1235917529074286
More than 2 -> 0.24004519587214496
Task 3 -> 0.03999948385113119
Avg. Kilometers driven before a collision to a STATIC OBSTACLE
Weather: Clear Noon
Task 0 -> more than 0.043162963265066225
Task 1 -> more than 0.1235917529074286
Task 2 -> more than 0.24004519587214496
Task 3 -> 0.19999741925565595
Average Between Weathers
More than 0 -> 0.043162963265066225
More than 1 -> 0.1235917529074286
More than 2 -> 0.24004519587214496
Task 3 -> 0.19999741925565595
Avg. Kilometers driven before going OUTSIDE OF THE ROAD
Weather: Clear Noon
Task 0 -> more than 0.043162963265066225
Task 1 -> more than 0.1235917529074286
Task 2 -> more than 0.24004519587214496
Task 3 -> more than 0.19999741925565595
Average Between Weathers
More than 0 -> 0.043162963265066225
More than 1 -> 0.1235917529074286
More than 2 -> 0.24004519587214496
More than 3 -> 0.19999741925565595
Avg. Kilometers driven before invading the OPPOSITE LANE
Weather: Clear Noon
Task 0 -> more than 0.043162963265066225
Task 1 -> 0.1235917529074286
Task 2 -> 0.008890562810079442
Task 3 -> 0.19999741925565595
Average Between Weathers
More than 0 -> 0.043162963265066225
Task 1 -> 0.1235917529074286
Task 2 -> 0.008890562810079442
Task 3 -> 0.19999741925565595

View File

@@ -0,0 +1,89 @@
Shows the results for test and train weathers; in this case they are the same.
Percentage of Successful Episodes
Weather: Clear Noon
Task: 0 -> 1.0
Task: 1 -> 0.0
Task: 2 -> 0.0
Task: 3 -> 0.0
Average Between Weathers
Task 0 -> 1.0
Task 1 -> 0.0
Task 2 -> 0.0
Task 3 -> 0.0
Average Percentage of Distance to Goal Travelled
Weather: Clear Noon
Task: 0 -> 0.8118712628185436
Task: 1 -> 0.10663609976621982
Task: 2 -> -0.20449201041073187
Task: 3 -> -0.20447574648836767
Average Between Weathers
Task 0 -> 0.8118712628185436
Task 1 -> 0.10663609976621982
Task 2 -> -0.20449201041073187
Task 3 -> -0.20447574648836767
Avg. Kilometers driven before a collision to a PEDESTRIAN
Weather: Clear Noon
Task 0 -> more than 0.007091656691537298
Task 1 -> more than 0.038524663402911156
Task 2 -> more than 0.039421759075344356
Task 3 -> more than 0.03941413920457633
Average Between Weathers
More than 0 -> 0.007091656691537298
More than 1 -> 0.038524663402911156
More than 2 -> 0.039421759075344356
More than 3 -> 0.03941413920457633
Avg. Kilometers driven before a collision to a VEHICLE
Weather: Clear Noon
Task 0 -> more than 0.007091656691537298
Task 1 -> more than 0.038524663402911156
Task 2 -> more than 0.039421759075344356
Task 3 -> more than 0.03941413920457633
Average Between Weathers
More than 0 -> 0.007091656691537298
More than 1 -> 0.038524663402911156
More than 2 -> 0.039421759075344356
More than 3 -> 0.03941413920457633
Avg. Kilometers driven before a collision to a STATIC OBSTACLE
Weather: Clear Noon
Task 0 -> more than 0.007091656691537298
Task 1 -> more than 0.038524663402911156
Task 2 -> more than 0.039421759075344356
Task 3 -> 0.019707069602288164
Average Between Weathers
More than 0 -> 0.007091656691537298
More than 1 -> 0.038524663402911156
More than 2 -> 0.039421759075344356
Task 3 -> 0.019707069602288164
Avg. Kilometers driven before going OUTSIDE OF THE ROAD
Weather: Clear Noon
Task 0 -> more than 0.007091656691537298
Task 1 -> 0.038524663402911156
Task 2 -> 0.039421759075344356
Task 3 -> 0.03941413920457633
Average Between Weathers
More than 0 -> 0.007091656691537298
Task 1 -> 0.038524663402911156
Task 2 -> 0.039421759075344356
Task 3 -> 0.03941413920457633
Avg. Kilometers driven before invading the OPPOSITE LANE
Weather: Clear Noon
Task 0 -> more than 0.007091656691537298
Task 1 -> 0.038524663402911156
Task 2 -> 0.039421759075344356
Task 3 -> 0.03941413920457633
Average Between Weathers
More than 0 -> 0.007091656691537298
Task 1 -> 0.038524663402911156
Task 2 -> 0.039421759075344356
Task 3 -> 0.03941413920457633

View File

@@ -1,146 +0,0 @@
#!/usr/bin/env python3
# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
import argparse
import logging
import time
from carla.client import make_carla_client
from carla.tcp import TCPConnectionError
from carla.settings import CarlaSettings
from carla.driving_benchmark.driving_benchmark import DrivingBenchmark
from carla.agent.forward_agent import ForwardAgent
from carla.driving_benchmark.experiment_suite.corl_2017 import CoRL2017
from carla.driving_benchmark.experiment_suite.basic import Basic
import carla.driving_benchmark.results_printer as results_printer
def run_benchmark(full_benchmark, city_name, log_name, continue_experiment):
while True:
try:
with make_carla_client(args.host, args.port) as client:
# Hack to fix issue #310: we force a reset so it does not get
# the positions from the first server reset.
client.load_settings(CarlaSettings())
client.start_episode(0)
# We instantiate a forward agent, a simple policy that just sets
# the acceleration to 0.9 and the steering to zero
agent = ForwardAgent()
# We instantiate an experiment suite: the set of experiments
# that are going to be evaluated on this benchmark.
if full_benchmark:
experiment_suite = CoRL2017(city_name)
else:
experiment_suite = Basic(city_name)
# We instantiate the driving benchmark, the engine used to
# benchmark an agent. The instantiation starts the logging process and sets
# the city and log name.
benchmark = DrivingBenchmark(city_name=city_name,
name_to_save=log_name
+ type(experiment_suite).__name__
+ '_' + city_name,
continue_experiment=continue_experiment)
# This function performs the benchmark. It returns a dictionary summarizing
# the entire execution.
benchmark_summary = benchmark.benchmark_agent(experiment_suite, agent, client)
print("")
print("")
print("----- Printing results for training weathers (Seen in Training) -----")
print("")
print("")
results_printer.print_summary(benchmark_summary, experiment_suite.train_weathers,
benchmark.get_path())
print("")
print("")
print("----- Printing results for test weathers (Unseen in Training) -----")
print("")
print("")
results_printer.print_summary(benchmark_summary, experiment_suite.test_weathers,
benchmark.get_path())
break
except TCPConnectionError as error:
logging.error(error)
time.sleep(1)
if __name__ == '__main__':
argparser = argparse.ArgumentParser(description=__doc__)
argparser.add_argument(
'-v', '--verbose',
action='store_true',
dest='verbose',
help='print some extra status information')
argparser.add_argument(
'-db', '--debug',
action='store_true',
dest='debug',
help='print debug information')
argparser.add_argument(
'--host',
metavar='H',
default='localhost',
help='IP of the host server (default: localhost)')
argparser.add_argument(
'-p', '--port',
metavar='P',
default=2000,
type=int,
help='TCP port to connect to (default: 2000)')
argparser.add_argument(
'-c', '--city-name',
metavar='C',
default='Town01',
help='The town that is going to be used for the benchmark '
+ '(needs to match the active town on the server; options: Town01 or Town02)')
argparser.add_argument(
'-n', '--log_name',
metavar='T',
default='test',
help='The name of the log file to be created by the benchmark'
)
argparser.add_argument(
'--corl-2017',
action='store_true',
help='If you want to benchmark the CoRL 2017 suite instead of the Basic one'
)
argparser.add_argument(
'--continue-experiment',
action='store_true',
help='If you want to continue the experiment with the same name'
)
args = argparser.parse_args()
if args.debug:
log_level = logging.DEBUG
elif args.verbose:
log_level = logging.INFO
else:
log_level = logging.WARNING
logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
logging.info('listening to server %s:%s', args.host, args.port)
run_benchmark(args.corl_2017, args.city_name, args.log_name, args.continue_experiment)

View File

@@ -0,0 +1 @@
from .forward_agent import ForwardAgent

View File

@@ -3,7 +3,7 @@ from carla.agent.agent import Agent
from carla.client import VehicleControl
class Forward(Agent):
class ForwardAgent(Agent):
"""
Simple derivation of the Agent class.
A trivial agent that just goes straight
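For orientation, a minimal sketch of the renamed agent, assuming the base Agent class exposes a run_step(measurements, sensor_data, directions, target) hook (the signature is not shown in this hunk); it mirrors the "acceleration 0.9, steering zero" policy described in the comments elsewhere in this diff:

from carla.agent.agent import Agent
from carla.client import VehicleControl


class ForwardAgent(Agent):
    """Trivial agent that always drives straight ahead."""

    def run_step(self, measurements, sensor_data, directions, target):
        # Ignore all inputs and command a fixed throttle with zero steering.
        control = VehicleControl()
        control.throttle = 0.9
        control.steer = 0.0
        return control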

View File

@@ -0,0 +1 @@
from .driving_benchmark import run_driving_benchmark

View File

@@ -11,19 +11,24 @@ import math
import abc
import logging
from .recording import Recording
from carla.client import make_carla_client
from carla.tcp import TCPConnectionError
from carla.settings import CarlaSettings
from carla.planner.planner import Planner
from carla.client import VehicleControl
from carla.agent_benchmark.metrics import Metrics
from carla.driving_benchmark.metrics import Metrics
from .recording import Recording
from . import results_printer
def sldist(c1, c2):
return math.sqrt((c2[0] - c1[0]) ** 2 + (c2[1] - c1[1]) ** 2)
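(sldist is just the planar Euclidean distance between two (x, y) coordinates; for example, sldist((0, 0), (3, 4)) returns 5.0.)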
class AgentBenchmark(object):
class DrivingBenchmark(object):
"""
The Benchmark class controls the execution of the benchmark, interfacing
an Agent class with an experiment suite.
@@ -265,3 +270,57 @@ class AgentBenchmark(object):
return 0, measurement_vec, control_vec, time_out, distance
def run_driving_benchmark(agent,
experiment_suite,
city_name='Town01',
log_name='Test',
continue_experiment=False,
host='127.0.0.1',
port=2000
):
while True:
try:
with make_carla_client(host, port) as client:
# Hack to fix issue #310: we force a reset so it does not get
# the positions from the first server reset.
client.load_settings(CarlaSettings())
client.start_episode(0)
# We instantiate the driving benchmark, the engine used to
# benchmark an agent. The instantiation starts the logging process and sets
# the city and log name.
benchmark = DrivingBenchmark(city_name=city_name,
name_to_save=log_name
+ type(experiment_suite).__name__
+ '_' + city_name,
continue_experiment=continue_experiment)
# This function performs the benchmark. It returns a dictionary summarizing
# the entire execution.
benchmark_summary = benchmark.benchmark_agent(experiment_suite, agent, client)
print("")
print("")
print("----- Printing results for training weathers (Seen in Training) -----")
print("")
print("")
results_printer.print_summary(benchmark_summary, experiment_suite.train_weathers,
benchmark.get_path())
print("")
print("")
print("----- Printing results for test weathers (Unseen in Training) -----")
print("")
print("")
results_printer.print_summary(benchmark_summary, experiment_suite.test_weathers,
benchmark.get_path())
break
except TCPConnectionError as error:
logging.error(error)
time.sleep(1)
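A minimal usage sketch for the new run_driving_benchmark entry point, assuming a CARLA server is already running on the default host and port; the agent, suite, and parameter names all come from this diff:

from carla.agent import ForwardAgent
from carla.driving_benchmark import run_driving_benchmark
from carla.driving_benchmark.experiment_suite import Basic

# Benchmark the trivial forward-driving policy on the Basic suite in Town01.
agent = ForwardAgent()
experiment_suite = Basic('Town01')
run_driving_benchmark(agent, experiment_suite, city_name='Town01',
                      log_name='test', continue_experiment=False,
                      host='127.0.0.1', port=2000)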

View File

@@ -0,0 +1,2 @@
from .basic import Basic
from .corl_2017 import CoRL2017
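Together with the other two one-line re-export files above, these lines let callers drop the full module paths; after this commit both of the following resolve to the same class (the first form is still used in the test files further down, the second in the new example script below):

from carla.driving_benchmark.experiment_suite.corl_2017 import CoRL2017   # full module path
from carla.driving_benchmark.experiment_suite import CoRL2017             # re-exported form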

View File

@@ -7,7 +7,7 @@
from __future__ import print_function
from carla.agent_benchmark.experiment import Experiment
from carla.driving_benchmark.experiment import Experiment
from carla.sensor import Camera
from carla.settings import CarlaSettings

View File

@@ -9,10 +9,10 @@
from __future__ import print_function
from carla.agent_benchmark.experiment import Experiment
from carla.driving_benchmark.experiment import Experiment
from carla.sensor import Camera
from carla.settings import CarlaSettings
from carla.agent_benchmark.experiment_suite.experiment_suite import ExperimentSuite
from carla.driving_benchmark.experiment_suite.experiment_suite import ExperimentSuite

View File

@@ -0,0 +1,91 @@
#!/usr/bin/env python3
# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
import argparse
import logging
from carla.driving_benchmark import run_driving_benchmark
from carla.driving_benchmark.experiment_suite import CoRL2017
from carla.driving_benchmark.experiment_suite import Basic
from carla.agent import ForwardAgent
if __name__ == '__main__':
argparser = argparse.ArgumentParser(description=__doc__)
argparser.add_argument(
'-v', '--verbose',
action='store_true',
dest='verbose',
help='print some extra status information')
argparser.add_argument(
'-db', '--debug',
action='store_true',
dest='debug',
help='print debug information')
argparser.add_argument(
'--host',
metavar='H',
default='localhost',
help='IP of the host server (default: localhost)')
argparser.add_argument(
'-p', '--port',
metavar='P',
default=2000,
type=int,
help='TCP port to connect to (default: 2000)')
argparser.add_argument(
'-c', '--city-name',
metavar='C',
default='Town01',
help='The town that is going to be used for the benchmark '
+ '(needs to match the active town on the server; options: Town01 or Town02)')
argparser.add_argument(
'-n', '--log_name',
metavar='T',
default='test',
help='The name of the log file to be created by the benchmark'
)
argparser.add_argument(
'--corl-2017',
action='store_true',
help='If you want to benchmark the CoRL 2017 suite instead of the Basic one'
)
argparser.add_argument(
'--continue-experiment',
action='store_true',
help='If you want to continue the experiment with the same name'
)
args = argparser.parse_args()
if args.debug:
log_level = logging.DEBUG
elif args.verbose:
log_level = logging.INFO
else:
log_level = logging.WARNING
logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
logging.info('listening to server %s:%s', args.host, args.port)
# We instantiate a forward agent, a simple policy that just sets
# the acceleration to 0.9 and the steering to zero
agent = ForwardAgent()
# We instantiate an experiment suite: the set of experiments
# that are going to be evaluated on this benchmark.
if args.corl_2017:
experiment_suite = CoRL2017(args.city_name)
else:
experiment_suite = Basic(args.city_name)
# Now actually run the driving benchmark
run_driving_benchmark(agent, experiment_suite, args.city_name,
args.log_name, args.continue_experiment,
args.host, args.port)

View File

@@ -5,7 +5,7 @@ from setuptools import setup
setup(
name='carla_client',
version='0.8.1',
packages=['carla', 'carla.agent_benchmark', 'carla.planner'],
packages=['carla', 'carla.driving_benchmark', 'carla.planner'],
license='MIT License',
description='Python API for communicating with the CARLA server.',
url='https://github.com/carla-simulator/carla',

View File

@@ -18,7 +18,7 @@ import unittest
from carla.agent.agent import Agent
from carla.agent_benchmark.experiment_suite.basic import Basic
from carla.driving_benchmark.experiment_suite.basic import Basic
from carla.client import make_carla_client, VehicleControl
from carla.tcp import TCPConnectionError

View File

@@ -1,9 +1,9 @@
import unittest
from carla.agent_benchmark.experiment_suite.experiment_suite import ExperimentSuite
from carla.driving_benchmark.experiment_suite.experiment_suite import ExperimentSuite
from carla.agent_benchmark.experiment_suite.basic import Basic
from carla.driving_benchmark.experiment_suite.basic import Basic
from carla.agent_benchmark.experiment_suite.corl_2017 import CoRL2017
from carla.driving_benchmark.experiment_suite.corl_2017 import CoRL2017
class testExperimentSuite(unittest.TestCase):

View File

@@ -1,8 +1,8 @@
import os
import numpy as np
import unittest
from carla.agent_benchmark.metrics import Metrics
from carla.agent_benchmark.recording import Recording
from carla.driving_benchmark.metrics import Metrics
from carla.driving_benchmark.recording import Recording
@@ -54,7 +54,7 @@ class testMetrics(unittest.TestCase):
from carla.agent_benchmark.experiment import Experiment
from carla.driving_benchmark.experiment import Experiment
from carla.carla_server_pb2 import Measurements
from carla.carla_server_pb2 import Control

View File

@@ -1,6 +1,6 @@
import unittest
from carla.agent_benchmark.recording import Recording
from carla.driving_benchmark.recording import Recording
class testRecording(unittest.TestCase):
@@ -29,7 +29,7 @@ class testRecording(unittest.TestCase):
def test_write_summary_results(self):
import os
from carla.agent_benchmark.experiment import Experiment
from carla.driving_benchmark.experiment import Experiment
recording = Recording(name_to_save='Test1'
, continue_experiment=False, save_images=True
@@ -58,7 +58,7 @@ class testRecording(unittest.TestCase):
def teste_write_measurements_results(self):
import os
from carla.agent_benchmark.experiment import Experiment
from carla.driving_benchmark.experiment import Experiment
from carla.carla_server_pb2 import Measurements
from carla.carla_server_pb2 import Control
@@ -101,7 +101,7 @@ class testRecording(unittest.TestCase):
# If you don't want to continue, it should also return one
self.assertEqual(recording._continue_experiment(False)[1], 1)
from carla.agent_benchmark.experiment import Experiment
from carla.driving_benchmark.experiment import Experiment
recording.write_summary_results(experiment=Experiment(), pose=[24, 32], rep=1,
path_distance=200, remaining_distance=0,
@@ -126,7 +126,7 @@ class testRecording(unittest.TestCase):
from carla.agent_benchmark.experiment import Experiment
from carla.driving_benchmark.experiment import Experiment
pose, experiment = recording.get_pose_and_experiment(25)
@@ -169,7 +169,7 @@ class testRecording(unittest.TestCase):
def test_get_pose_and_experiment_corner(self):
from carla.agent_benchmark.experiment import Experiment
from carla.driving_benchmark.experiment import Experiment
recording = Recording( name_to_save='Test1'
, continue_experiment=False, save_images=True