From 664b2113f24a0be726dd50f1dbed7bbe22e24887 Mon Sep 17 00:00:00 2001 From: glopezdiest Date: Thu, 31 Oct 2024 10:12:14 +0100 Subject: [PATCH] Improved old scripts and added new ones --- PythonAPI/examples/automatic_control.py | 62 +- PythonAPI/examples/client_bounding_boxes.py | 399 ----- PythonAPI/examples/draw_skeleton.py | 24 +- PythonAPI/examples/generate_traffic.py | 94 +- PythonAPI/examples/interpolate_camera.py | 208 +++ PythonAPI/examples/interpolation.xml | 16 + PythonAPI/{util => examples}/lane_explorer.py | 55 +- PythonAPI/examples/lidar_to_camera.py | 354 ---- PythonAPI/examples/manual_control.py | 42 +- .../examples/manual_control_with_traffic.py | 1590 +++++++++++++++++ PythonAPI/examples/no_rendering_mode.py | 54 +- PythonAPI/examples/open3d_lidar.py | 176 +- PythonAPI/examples/recorder_replay.py | 163 ++ PythonAPI/examples/sensor_synchronization.py | 11 - .../examples/show_recorder_collisions.py | 76 - PythonAPI/examples/start_recording.py | 158 -- PythonAPI/examples/synchronous_mode.py | 207 --- .../vehicle_doors_demo.py} | 24 +- PythonAPI/examples/vehicle_gallery.py | 27 +- .../vehicle_lights_demo.py} | 24 +- PythonAPI/examples/vehicle_physics.py | 142 -- .../examples/visualize_multiple_sensors.py | 65 +- PythonAPI/examples/visualize_radar.py | 133 ++ PythonAPI/{util => test}/test_connection.py | 24 +- .../test_raycast_sensor.py} | 82 +- .../test_vehicle_physics.py} | 11 - PythonAPI/util/apply_texture.py | 63 +- PythonAPI/util/change_map_layer.py | 26 +- PythonAPI/util/change_textures.py | 54 - PythonAPI/util/config.py | 264 +++ PythonAPI/util/extract_spawn_points.py | 105 +- PythonAPI/util/manage_environment_objects.py | 110 +- PythonAPI/util/manage_lights.py | 62 - PythonAPI/util/manage_traffic_light.py | 110 ++ PythonAPI/util/osm_to_xodr.py | 37 +- PythonAPI/util/performance_benchmark.py | 595 ------ PythonAPI/util/recorder_comparer.py | 133 ++ PythonAPI/util/show_crosswalks.py | 19 +- PythonAPI/util/show_junctions.py | 18 +- 
.../show_recorder_file_info.py | 31 +- PythonAPI/util/show_spawn_points.py | 76 - PythonAPI/util/show_topology.py | 14 +- PythonAPI/util/show_traffic_lights.py | 48 +- PythonAPI/util/spectator_transform.py | 83 + .../{examples => util}/start_replaying.py | 54 +- 45 files changed, 3151 insertions(+), 2972 deletions(-) delete mode 100755 PythonAPI/examples/client_bounding_boxes.py create mode 100755 PythonAPI/examples/interpolate_camera.py create mode 100755 PythonAPI/examples/interpolation.xml rename PythonAPI/{util => examples}/lane_explorer.py (86%) delete mode 100644 PythonAPI/examples/lidar_to_camera.py create mode 100644 PythonAPI/examples/manual_control_with_traffic.py create mode 100755 PythonAPI/examples/recorder_replay.py delete mode 100755 PythonAPI/examples/show_recorder_collisions.py delete mode 100755 PythonAPI/examples/start_recording.py delete mode 100755 PythonAPI/examples/synchronous_mode.py rename PythonAPI/{util/showcase_doors.py => examples/vehicle_doors_demo.py} (81%) rename PythonAPI/{util/showcase_vehicle_lights.py => examples/vehicle_lights_demo.py} (86%) delete mode 100644 PythonAPI/examples/vehicle_physics.py create mode 100644 PythonAPI/examples/visualize_radar.py rename PythonAPI/{util => test}/test_connection.py (71%) rename PythonAPI/{util/raycast_sensor_testing.py => test/test_raycast_sensor.py} (92%) rename PythonAPI/{util/vehicle_physics_tester.py => test/test_vehicle_physics.py} (98%) delete mode 100644 PythonAPI/util/change_textures.py create mode 100644 PythonAPI/util/config.py delete mode 100644 PythonAPI/util/manage_lights.py create mode 100644 PythonAPI/util/manage_traffic_light.py delete mode 100755 PythonAPI/util/performance_benchmark.py create mode 100755 PythonAPI/util/recorder_comparer.py rename PythonAPI/{examples => util}/show_recorder_file_info.py (69%) delete mode 100644 PythonAPI/util/show_spawn_points.py create mode 100755 PythonAPI/util/spectator_transform.py rename PythonAPI/{examples => util}/start_replaying.py 
(64%) diff --git a/PythonAPI/examples/automatic_control.py b/PythonAPI/examples/automatic_control.py index f177e0dc1..2dab0350a 100755 --- a/PythonAPI/examples/automatic_control.py +++ b/PythonAPI/examples/automatic_control.py @@ -13,7 +13,6 @@ from __future__ import print_function import argparse import collections import datetime -import glob import logging import math import os @@ -36,17 +35,6 @@ except ImportError: raise RuntimeError( 'cannot import numpy, make sure numpy package is installed') -# ============================================================================== -# -- Find CARLA module --------------------------------------------------------- -# ============================================================================== -try: - sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % ( - sys.version_info.major, - sys.version_info.minor, - 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0]) -except IndexError: - pass - # ============================================================================== # -- Add PythonAPI for release mode -------------------------------------------- # ============================================================================== @@ -807,60 +795,38 @@ def main(): argparser = argparse.ArgumentParser( description='CARLA Automatic Control Client') argparser.add_argument( - '-v', '--verbose', - action='store_true', - dest='debug', + '-v', '--verbose', action='store_true', dest='debug', help='Print debug information') argparser.add_argument( - '--host', - metavar='H', - default='127.0.0.1', + '--host', metavar='H', default='127.0.0.1', help='IP of the host server (default: 127.0.0.1)') argparser.add_argument( - '-p', '--port', - metavar='P', - default=2000, - type=int, + '-p', '--port', metavar='P', default=2000, type=int, help='TCP port to listen to (default: 2000)') argparser.add_argument( - '--res', - metavar='WIDTHxHEIGHT', - default='1280x720', + '--res', metavar='WIDTHxHEIGHT', default='1280x720', 
help='Window resolution (default: 1280x720)') argparser.add_argument( - '--sync', - action='store_true', + '--sync', action='store_true', help='Synchronous mode execution') argparser.add_argument( - '--filter', - metavar='PATTERN', - default='vehicle.*', + '--filter', metavar='PATTERN', default='vehicle.*', help='Actor filter (default: "vehicle.*")') argparser.add_argument( - '--generation', - metavar='G', - default='All', + '--generation', metavar='G', default='All', help='restrict to certain actor generation (values: "2","3","All" - default: "All")') argparser.add_argument( - '-l', '--loop', - action='store_true', - dest='loop', + '-l', '--loop', action='store_true', dest='loop', help='Sets a new random destination upon reaching the previous one (default: False)') argparser.add_argument( - "-a", "--agent", type=str, - choices=["Behavior", "Basic", "Constant"], - help="select which agent to run", - default="Behavior") + "-a", "--agent", type=str, choices=["Behavior", "Basic", "Constant"], default="Behavior", + help="select which agent to run") argparser.add_argument( - '-b', '--behavior', type=str, - choices=["cautious", "normal", "aggressive"], - help='Choose one of the possible agent behaviors (default: normal) ', - default='normal') + '-b', '--behavior', type=str, choices=["cautious", "normal", "aggressive"], default='normal', + help='Choose one of the possible agent behaviors (default: normal)') argparser.add_argument( - '-s', '--seed', - help='Set seed for repeating executions (default: None)', - default=None, - type=int) + '-s', '--seed', default=None, type=int, + help='Set seed for repeating executions (default: None)') args = argparser.parse_args() diff --git a/PythonAPI/examples/client_bounding_boxes.py b/PythonAPI/examples/client_bounding_boxes.py deleted file mode 100755 index 793d53c12..000000000 --- a/PythonAPI/examples/client_bounding_boxes.py +++ /dev/null @@ -1,399 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2019 Aptiv -# -# This work is 
licensed under the terms of the MIT license. -# For a copy, see . - -""" -An example of client-side bounding boxes with basic car controls. - -Controls: - - W : throttle - S : brake - AD : steer - Space : hand-brake - - ESC : quit -""" - -# ============================================================================== -# -- find carla module --------------------------------------------------------- -# ============================================================================== - - -import glob -import os -import sys - -try: - sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % ( - sys.version_info.major, - sys.version_info.minor, - 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0]) -except IndexError: - pass - - -# ============================================================================== -# -- imports ------------------------------------------------------------------- -# ============================================================================== - -import carla - -import weakref -import random - -try: - import pygame - from pygame.locals import K_ESCAPE - from pygame.locals import K_SPACE - from pygame.locals import K_a - from pygame.locals import K_d - from pygame.locals import K_s - from pygame.locals import K_w -except ImportError: - raise RuntimeError('cannot import pygame, make sure pygame package is installed') - -try: - import numpy as np -except ImportError: - raise RuntimeError('cannot import numpy, make sure numpy package is installed') - -VIEW_WIDTH = 1920//2 -VIEW_HEIGHT = 1080//2 -VIEW_FOV = 90 - -BB_COLOR = (248, 64, 24) - -# ============================================================================== -# -- ClientSideBoundingBoxes --------------------------------------------------- -# ============================================================================== - - -class ClientSideBoundingBoxes(object): - """ - This is a module responsible for creating 3D bounding boxes and drawing them - client-side on pygame surface. 
- """ - - @staticmethod - def get_bounding_boxes(vehicles, camera): - """ - Creates 3D bounding boxes based on carla vehicle list and camera. - """ - - bounding_boxes = [ClientSideBoundingBoxes.get_bounding_box(vehicle, camera) for vehicle in vehicles] - # filter objects behind camera - bounding_boxes = [bb for bb in bounding_boxes if all(bb[:, 2] > 0)] - return bounding_boxes - - @staticmethod - def draw_bounding_boxes(display, bounding_boxes): - """ - Draws bounding boxes on pygame display. - """ - - bb_surface = pygame.Surface((VIEW_WIDTH, VIEW_HEIGHT)) - bb_surface.set_colorkey((0, 0, 0)) - for bbox in bounding_boxes: - points = [(int(bbox[i, 0]), int(bbox[i, 1])) for i in range(8)] - # draw lines - # base - pygame.draw.line(bb_surface, BB_COLOR, points[0], points[1]) - pygame.draw.line(bb_surface, BB_COLOR, points[0], points[1]) - pygame.draw.line(bb_surface, BB_COLOR, points[1], points[2]) - pygame.draw.line(bb_surface, BB_COLOR, points[2], points[3]) - pygame.draw.line(bb_surface, BB_COLOR, points[3], points[0]) - # top - pygame.draw.line(bb_surface, BB_COLOR, points[4], points[5]) - pygame.draw.line(bb_surface, BB_COLOR, points[5], points[6]) - pygame.draw.line(bb_surface, BB_COLOR, points[6], points[7]) - pygame.draw.line(bb_surface, BB_COLOR, points[7], points[4]) - # base-top - pygame.draw.line(bb_surface, BB_COLOR, points[0], points[4]) - pygame.draw.line(bb_surface, BB_COLOR, points[1], points[5]) - pygame.draw.line(bb_surface, BB_COLOR, points[2], points[6]) - pygame.draw.line(bb_surface, BB_COLOR, points[3], points[7]) - display.blit(bb_surface, (0, 0)) - - @staticmethod - def get_bounding_box(vehicle, camera): - """ - Returns 3D bounding box for a vehicle based on camera view. 
- """ - - bb_cords = ClientSideBoundingBoxes._create_bb_points(vehicle) - cords_x_y_z = ClientSideBoundingBoxes._vehicle_to_sensor(bb_cords, vehicle, camera)[:3, :] - cords_y_minus_z_x = np.concatenate([cords_x_y_z[1, :], -cords_x_y_z[2, :], cords_x_y_z[0, :]]) - bbox = np.transpose(np.dot(camera.calibration, cords_y_minus_z_x)) - camera_bbox = np.concatenate([bbox[:, 0] / bbox[:, 2], bbox[:, 1] / bbox[:, 2], bbox[:, 2]], axis=1) - return camera_bbox - - @staticmethod - def _create_bb_points(vehicle): - """ - Returns 3D bounding box for a vehicle. - """ - - cords = np.zeros((8, 4)) - extent = vehicle.bounding_box.extent - cords[0, :] = np.array([extent.x, extent.y, -extent.z, 1]) - cords[1, :] = np.array([-extent.x, extent.y, -extent.z, 1]) - cords[2, :] = np.array([-extent.x, -extent.y, -extent.z, 1]) - cords[3, :] = np.array([extent.x, -extent.y, -extent.z, 1]) - cords[4, :] = np.array([extent.x, extent.y, extent.z, 1]) - cords[5, :] = np.array([-extent.x, extent.y, extent.z, 1]) - cords[6, :] = np.array([-extent.x, -extent.y, extent.z, 1]) - cords[7, :] = np.array([extent.x, -extent.y, extent.z, 1]) - return cords - - @staticmethod - def _vehicle_to_sensor(cords, vehicle, sensor): - """ - Transforms coordinates of a vehicle bounding box to sensor. - """ - - world_cord = ClientSideBoundingBoxes._vehicle_to_world(cords, vehicle) - sensor_cord = ClientSideBoundingBoxes._world_to_sensor(world_cord, sensor) - return sensor_cord - - @staticmethod - def _vehicle_to_world(cords, vehicle): - """ - Transforms coordinates of a vehicle bounding box to world. 
- """ - - bb_transform = carla.Transform(vehicle.bounding_box.location) - bb_vehicle_matrix = ClientSideBoundingBoxes.get_matrix(bb_transform) - vehicle_world_matrix = ClientSideBoundingBoxes.get_matrix(vehicle.get_transform()) - bb_world_matrix = np.dot(vehicle_world_matrix, bb_vehicle_matrix) - world_cords = np.dot(bb_world_matrix, np.transpose(cords)) - return world_cords - - @staticmethod - def _world_to_sensor(cords, sensor): - """ - Transforms world coordinates to sensor. - """ - - sensor_world_matrix = ClientSideBoundingBoxes.get_matrix(sensor.get_transform()) - world_sensor_matrix = np.linalg.inv(sensor_world_matrix) - sensor_cords = np.dot(world_sensor_matrix, cords) - return sensor_cords - - @staticmethod - def get_matrix(transform): - """ - Creates matrix from carla transform. - """ - - rotation = transform.rotation - location = transform.location - c_y = np.cos(np.radians(rotation.yaw)) - s_y = np.sin(np.radians(rotation.yaw)) - c_r = np.cos(np.radians(rotation.roll)) - s_r = np.sin(np.radians(rotation.roll)) - c_p = np.cos(np.radians(rotation.pitch)) - s_p = np.sin(np.radians(rotation.pitch)) - matrix = np.matrix(np.identity(4)) - matrix[0, 3] = location.x - matrix[1, 3] = location.y - matrix[2, 3] = location.z - matrix[0, 0] = c_p * c_y - matrix[0, 1] = c_y * s_p * s_r - s_y * c_r - matrix[0, 2] = -c_y * s_p * c_r - s_y * s_r - matrix[1, 0] = s_y * c_p - matrix[1, 1] = s_y * s_p * s_r + c_y * c_r - matrix[1, 2] = -s_y * s_p * c_r + c_y * s_r - matrix[2, 0] = s_p - matrix[2, 1] = -c_p * s_r - matrix[2, 2] = c_p * c_r - return matrix - - -# ============================================================================== -# -- BasicSynchronousClient ---------------------------------------------------- -# ============================================================================== - - -class BasicSynchronousClient(object): - """ - Basic implementation of a synchronous client. 
- """ - - def __init__(self): - self.client = None - self.world = None - self.camera = None - self.car = None - - self.display = None - self.image = None - self.capture = True - - def camera_blueprint(self): - """ - Returns camera blueprint. - """ - - camera_bp = self.world.get_blueprint_library().find('sensor.camera.rgb') - camera_bp.set_attribute('image_size_x', str(VIEW_WIDTH)) - camera_bp.set_attribute('image_size_y', str(VIEW_HEIGHT)) - camera_bp.set_attribute('fov', str(VIEW_FOV)) - return camera_bp - - def set_synchronous_mode(self, synchronous_mode): - """ - Sets synchronous mode. - """ - - settings = self.world.get_settings() - settings.synchronous_mode = synchronous_mode - self.world.apply_settings(settings) - - def setup_car(self): - """ - Spawns actor-vehicle to be controled. - """ - - car_bp = self.world.get_blueprint_library().filter('vehicle.*')[0] - location = random.choice(self.world.get_map().get_spawn_points()) - self.car = self.world.spawn_actor(car_bp, location) - - def setup_camera(self): - """ - Spawns actor-camera to be used to render view. - Sets calibration for client-side boxes rendering. - """ - - camera_transform = carla.Transform(carla.Location(x=-5.5, z=2.8), carla.Rotation(pitch=-15)) - self.camera = self.world.spawn_actor(self.camera_blueprint(), camera_transform, attach_to=self.car) - weak_self = weakref.ref(self) - self.camera.listen(lambda image: weak_self().set_image(weak_self, image)) - - calibration = np.identity(3) - calibration[0, 2] = VIEW_WIDTH / 2.0 - calibration[1, 2] = VIEW_HEIGHT / 2.0 - calibration[0, 0] = calibration[1, 1] = VIEW_WIDTH / (2.0 * np.tan(VIEW_FOV * np.pi / 360.0)) - self.camera.calibration = calibration - - def control(self, car): - """ - Applies control to main car based on pygame pressed keys. - Will return True If ESCAPE is hit, otherwise False to end main loop. 
- """ - - keys = pygame.key.get_pressed() - if keys[K_ESCAPE]: - return True - - control = car.get_control() - control.throttle = 0 - if keys[K_w]: - control.throttle = 1 - control.reverse = False - elif keys[K_s]: - control.throttle = 1 - control.reverse = True - if keys[K_a]: - control.steer = max(-1., min(control.steer - 0.05, 0)) - elif keys[K_d]: - control.steer = min(1., max(control.steer + 0.05, 0)) - else: - control.steer = 0 - control.hand_brake = keys[K_SPACE] - - car.apply_control(control) - return False - - @staticmethod - def set_image(weak_self, img): - """ - Sets image coming from camera sensor. - The self.capture flag is a mean of synchronization - once the flag is - set, next coming image will be stored. - """ - - self = weak_self() - if self.capture: - self.image = img - self.capture = False - - def render(self, display): - """ - Transforms image from camera sensor and blits it to main pygame display. - """ - - if self.image is not None: - array = np.frombuffer(self.image.raw_data, dtype=np.dtype("uint8")) - array = np.reshape(array, (self.image.height, self.image.width, 4)) - array = array[:, :, :3] - array = array[:, :, ::-1] - surface = pygame.surfarray.make_surface(array.swapaxes(0, 1)) - display.blit(surface, (0, 0)) - - def game_loop(self): - """ - Main program loop. 
- """ - - try: - pygame.init() - - self.client = carla.Client('127.0.0.1', 2000) - self.client.set_timeout(2.0) - self.world = self.client.get_world() - - self.setup_car() - self.setup_camera() - - self.display = pygame.display.set_mode((VIEW_WIDTH, VIEW_HEIGHT), pygame.HWSURFACE | pygame.DOUBLEBUF) - pygame_clock = pygame.time.Clock() - - self.set_synchronous_mode(True) - vehicles = self.world.get_actors().filter('vehicle.*') - - while True: - self.world.tick() - - self.capture = True - pygame_clock.tick_busy_loop(20) - - self.render(self.display) - bounding_boxes = ClientSideBoundingBoxes.get_bounding_boxes(vehicles, self.camera) - ClientSideBoundingBoxes.draw_bounding_boxes(self.display, bounding_boxes) - - pygame.display.flip() - - pygame.event.pump() - if self.control(self.car): - return - - finally: - self.set_synchronous_mode(False) - self.camera.destroy() - self.car.destroy() - pygame.quit() - - -# ============================================================================== -# -- main() -------------------------------------------------------------------- -# ============================================================================== - - -def main(): - """ - Initializes the client-side bounding box demo. - """ - - try: - client = BasicSynchronousClient() - client.game_loop() - finally: - print('EXIT') - - -if __name__ == '__main__': - main() diff --git a/PythonAPI/examples/draw_skeleton.py b/PythonAPI/examples/draw_skeleton.py index 1eb2302bb..1bdf18976 100644 --- a/PythonAPI/examples/draw_skeleton.py +++ b/PythonAPI/examples/draw_skeleton.py @@ -5,24 +5,12 @@ # This work is licensed under the terms of the MIT license. # For a copy, see . 
-import glob -import os -import sys import math import argparse import copy -import time from multiprocessing import Pool from PIL import Image -try: - sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % ( - sys.version_info.major, - sys.version_info.minor, - 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0]) -except IndexError: - pass - import carla import random @@ -284,18 +272,12 @@ def write_image(frame, id, buffer): img.save('_out/%s_%06d.png' % (id, frame)) def main(): - argparser = argparse.ArgumentParser( - description='CARLA Manual Control Client') + argparser = argparse.ArgumentParser(description='Show a pedestrian skeleton') argparser.add_argument( - '--fov', - default=60, - type=int, + '--fov', default=60, type=int, help='FOV for camera') argparser.add_argument( - '--res', - metavar='WIDTHxHEIGHT', - # default='1920x1080', - default='800x600', + '--res', metavar='WIDTHxHEIGHT', default='800x600', help='window resolution (default: 800x600)') args = argparser.parse_args() diff --git a/PythonAPI/examples/generate_traffic.py b/PythonAPI/examples/generate_traffic.py index 551f412f1..44cdfe444 100755 --- a/PythonAPI/examples/generate_traffic.py +++ b/PythonAPI/examples/generate_traffic.py @@ -8,23 +8,10 @@ """Example script to generate traffic in the simulation""" -import glob -import os -import sys import time -try: - sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % ( - sys.version_info.major, - sys.version_info.minor, - 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0]) -except IndexError: - pass - import carla -from carla import VehicleLightState as vls - import argparse import logging from numpy import random @@ -54,99 +41,60 @@ def get_actor_blueprints(world, filter, generation): return [] def main(): - argparser = argparse.ArgumentParser( - description=__doc__) + argparser = argparse.ArgumentParser(description=__doc__) argparser.add_argument( - '--host', - metavar='H', - default='127.0.0.1', + '--host', 
metavar='H', default='127.0.0.1', help='IP of the host server (default: 127.0.0.1)') argparser.add_argument( - '-p', '--port', - metavar='P', - default=2000, - type=int, + '-p', '--port', metavar='P', default=2000, type=int, help='TCP port to listen to (default: 2000)') argparser.add_argument( - '-n', '--number-of-vehicles', - metavar='N', - default=30, - type=int, + '-n', '--number-of-vehicles', metavar='N', default=30, type=int, help='Number of vehicles (default: 30)') argparser.add_argument( - '-w', '--number-of-walkers', - metavar='W', - default=10, - type=int, + '-w', '--number-of-walkers', metavar='W', default=10, type=int, help='Number of walkers (default: 10)') argparser.add_argument( - '--safe', - action='store_true', + '--safe', action='store_true', help='Avoid spawning vehicles prone to accidents') argparser.add_argument( - '--filterv', - metavar='PATTERN', - default='vehicle.*', + '--filterv', metavar='PATTERN', default='vehicle.*', help='Filter vehicle model (default: "vehicle.*")') argparser.add_argument( - '--generationv', - metavar='G', - default='All', + '--generationv', metavar='G', default='All', help='restrict to certain vehicle generation (values: "2","3","All" - default: "All")') argparser.add_argument( - '--filterw', - metavar='PATTERN', - default='walker.pedestrian.*', + '--filterw', metavar='PATTERN', default='walker.pedestrian.*', help='Filter pedestrian type (default: "walker.pedestrian.*")') argparser.add_argument( - '--generationw', - metavar='G', - default='All', + '--generationw', metavar='G', default='All', help='restrict to certain pedestrian generation (values: "2","3","All" - default: "All")') argparser.add_argument( - '--tm-port', - metavar='P', - default=8000, - type=int, + '--tm-port', metavar='P', default=8000, type=int, help='Port to communicate with TM (default: 8000)') argparser.add_argument( - '--asynch', - action='store_true', + '--asynch', action='store_true', help='Activate asynchronous mode execution') 
argparser.add_argument( - '--hybrid', - action='store_true', + '--hybrid', action='store_true', help='Activate hybrid mode for Traffic Manager') argparser.add_argument( - '-s', '--seed', - metavar='S', - type=int, + '-s', '--seed', metavar='S', type=int, help='Set random device seed and deterministic mode for Traffic Manager') argparser.add_argument( - '--seedw', - metavar='S', - default=0, - type=int, + '--seedw', metavar='S', default=0, type=int, help='Set the seed for pedestrians module') argparser.add_argument( - '--car-lights-on', - action='store_true', - default=False, + '--car-lights-on', action='store_true', default=False, help='Enable automatic car light management') argparser.add_argument( - '--hero', - action='store_true', - default=False, + '--hero', action='store_true', default=False, help='Set one of the vehicles as hero') argparser.add_argument( - '--respawn', - action='store_true', - default=False, + '--respawn', action='store_true', default=False, help='Automatically respawn dormant vehicles (only in large maps)') argparser.add_argument( - '--no-rendering', - action='store_true', - default=False, + '--no-rendering', action='store_true', default=False, help='Activate no rendering mode') args = argparser.parse_args() @@ -184,9 +132,7 @@ def main(): else: synchronous_master = False else: - print("You are currently in asynchronous mode. If this is a traffic simulation, \ - you could experience some issues. 
If it's not working correctly, switch to synchronous \ - mode by using traffic_manager.set_synchronous_mode(True)") + print("You are currently in asynchronous mode, and traffic might experience some issues") if args.no_rendering: settings.no_rendering_mode = True diff --git a/PythonAPI/examples/interpolate_camera.py b/PythonAPI/examples/interpolate_camera.py new file mode 100755 index 000000000..263b64e1d --- /dev/null +++ b/PythonAPI/examples/interpolate_camera.py @@ -0,0 +1,208 @@ +#!/usr/bin/env python + +# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de +# Barcelona (UAB). +# +# This work is licensed under the terms of the MIT license. +# For a copy, see . + +import argparse + +import numpy as np +import matplotlib.pyplot as plt +from scipy.interpolate import interp1d + +import xml.etree.ElementTree as ET +import carla + +FPS = 30 +DELTA_SECONDS = 1.0 / FPS +LIFETIME = 50 + +def parse_key_points(file, id): + base_tree = ET.parse(file) + interpolation_key_points = [] + max_order = 0 + for real_tree in base_tree.iter("interpolations"): + for interpolation in real_tree.iter("interpolation"): + if interpolation.attrib.get('id', None) != id: + continue + + for point in interpolation.iter('point'): + time = float(point.attrib.get('time', 0.0)) + + x = float(point.attrib.get('x', 0.0)) + y = float(point.attrib.get('y', 0.0)) + z = float(point.attrib.get('z', 0.0)) + + pitch = float(point.attrib.get('pitch', 0.0)) + yaw = float(point.attrib.get('yaw', 0.0)) + roll = float(point.attrib.get('roll', 0.0)) + + interpolation_key_points.append([time, carla.Location(x=x, y=y, z=z), carla.Rotation(pitch=pitch, yaw=yaw, roll=roll)]) + max_order = int(interpolation.attrib.get('order', 3)) + + if not interpolation_key_points: + raise ValueError("Couldn't find the interpolation id") + return interpolation_key_points, max_order + +def interpolate_data(points, max_order): + def interpolate_element(time_points, elem_points, time_disc): + if 
len(time_points) > 3 and max_order >= 3: + inter_kind = 'cubic' + elif len(time_points) > 2 and max_order >= 2: + inter_kind = 'quadratic' + elif len(time_points) > 1 and max_order >= 1: + inter_kind = 'linear' + else: + inter_kind = 'zero' + + f_elem = interp1d(time_points, elem_points, kind=inter_kind) + elems = list(f_elem(time_disc)) + return elems + + def normalize_angles(angle_list): + """Interpolation will fail between angle jumps, so add / subtract 360 as needed""" + for i in range(1, len(angle_list)): + while abs(angle_list[i-1] - angle_list[i]) > 180: + if angle_list[i-1] > angle_list[i]: + angle_list[i] += 360 + else: + angle_list[i] -= 360 + return angle_list + + def get_time_data(points): + """The given 'time' is the duration, so add it all together to get the time""" + time_list = [] + for item in points: + time_list.append(item[0] + (time_list[-1] if time_list else 0)) + return time_list + + # Separate the data + time_list = get_time_data(points) + x_list = [item[1].x for item in points] + y_list = [item[1].y for item in points] + z_list = [item[1].z for item in points] + roll_list = normalize_angles([item[2].roll for item in points]) + pitch_list = normalize_angles([item[2].pitch for item in points]) + yaw_list = normalize_angles([item[2].yaw for item in points]) + + # Discretize the time + time_disc = [] + for i in range(len(time_list) - 1): + current_time = time_list[i] + next_time = time_list[i+1] + time_disc.extend(list(np.arange(current_time, next_time, step=1/FPS))) + + # Interpolate the data + x_disc = interpolate_element(time_list, x_list, time_disc) + y_disc = interpolate_element(time_list, y_list, time_disc) + z_disc = interpolate_element(time_list, z_list, time_disc) + roll_disc = interpolate_element(time_list, roll_list, time_disc) + pitch_disc = interpolate_element(time_list, pitch_list, time_disc) + yaw_disc = interpolate_element(time_list, yaw_list, time_disc) + + # Join the data + data = [] + for i in range(len(time_disc)): +
data.append(carla.Transform( + carla.Location(x=x_disc[i], y=y_disc[i], z=z_disc[i]), + carla.Rotation(roll=roll_disc[i], pitch=pitch_disc[i], yaw=yaw_disc[i]) + )) + + return data + +def main(): + + argparser = argparse.ArgumentParser( + description=__doc__) + argparser.add_argument( + '--host', metavar='H', default='127.0.0.1', + help='IP of the host server (default: 127.0.0.1)') + argparser.add_argument( + '-p', '--port', metavar='P', default=2000, type=int, + help='TCP port to listen to (default: 2000)') + argparser.add_argument( + '-f', '--file', required=True, + help='Interpolation file') + argparser.add_argument( + '--id', required=True, + help='Id to be run part of the interpolation file.') + argparser.add_argument( + '--debug', action='store_true', + help='Debug interpolation data.') + argparser.add_argument( + '-st', '--start-time', + default=0, type=int, help='Starting keypoint of the interpolation.') + argparser.add_argument( + '--sync', action='store_true', + help='Activates synchronous mode.') + args = argparser.parse_args() + + client = carla.Client('localhost', 2000) + world = client.get_world() + + try: + if args.sync: + original_settings = world.get_settings() + settings = world.get_settings() + settings.synchronous_mode = True + settings.fixed_delta_seconds = DELTA_SECONDS + world.apply_settings(settings) + + key_points, max_order = parse_key_points(args.file, args.id) + data = interpolate_data(key_points, max_order) + + try: + for _ in range(int(args.start_time * FPS)): + _ = data.pop(0) + except Exception: + pass + + if args.debug: + + # Draw the keypoints (and their data) + for point in key_points: + world.debug.draw_point(point[1], size=0.2, color=carla.Color(0, 0, 128), life_time=LIFETIME) + text = f"Keypoint: {point[0]}, " \ + f"{str(round(point[1].x, 2))}, " \ + f"{str(round(point[1].y, 2))}, " \ + f"{str(round(point[1].z, 2))}, " \ + f"{str(round(point[2].roll, 2))}, " \ + f"{str(round(point[2].yaw, 2))}, " \ + 
f"{str(round(point[2].pitch, 2))}" + world.debug.draw_string(point[1] + carla.Location(z=1), text, color=carla.Color(0, 128, 0), life_time=LIFETIME) + + # Calculate and draw the speed + for i in range(1, len(key_points)): + kp = key_points[i][1] + prev_kp = key_points[i-1][1] + middle_kp = carla.Location((kp.x + prev_kp.x)/2, (kp.y + prev_kp.y)/2, (kp.z + prev_kp.z)/2) + speed = kp.distance(prev_kp) / float(key_points[i][0]) + world.debug.draw_string(middle_kp + carla.Location(z=1), "Speed: " + str(speed), color=carla.Color(128, 0, 0), life_time=LIFETIME) + + # Draw all points + arrows showing their rotation + for i, transform in enumerate(data): + world.debug.draw_point(transform.location, size=0.05, life_time=LIFETIME, color=carla.Color(128, 0, 128)) + forward_vector = transform.get_forward_vector() + world.debug.draw_arrow(transform.location, transform.location + 2 * forward_vector, + thickness=0.03, arrow_size=0.03, life_time=LIFETIME, color=carla.Color(128, 0, 128)) + if i % 200 == 0: + world.tick() + + else: + spectator = world.get_spectator() + while len(data) > 0: + new_transform = data.pop(0) + spectator.set_transform(new_transform) + world.tick() + + except KeyboardInterrupt: + pass + + finally: + if args.sync: + world.apply_settings(original_settings) + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/PythonAPI/examples/interpolation.xml b/PythonAPI/examples/interpolation.xml new file mode 100755 index 000000000..fb0a9f525 --- /dev/null +++ b/PythonAPI/examples/interpolation.xml @@ -0,0 +1,16 @@ + + + + + + + + + + + + + + + + diff --git a/PythonAPI/util/lane_explorer.py b/PythonAPI/examples/lane_explorer.py similarity index 86% rename from PythonAPI/util/lane_explorer.py rename to PythonAPI/examples/lane_explorer.py index 055b27151..3ba0e48fc 100755 --- a/PythonAPI/util/lane_explorer.py +++ b/PythonAPI/examples/lane_explorer.py @@ -6,18 +6,6 @@ # This work is licensed under the terms of the MIT license. # For a copy, see . 
-import glob -import os -import sys - -try: - sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % ( - sys.version_info.major, - sys.version_info.minor, - 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0]) -except IndexError: - pass - import carla import argparse @@ -92,46 +80,28 @@ def draw_junction(debug, junction, l_time=10): def main(): argparser = argparse.ArgumentParser() argparser.add_argument( - '--host', - metavar='H', - default='127.0.0.1', + '--host', metavar='H', default='127.0.0.1', help='IP of the host server (default: 127.0.0.1)') argparser.add_argument( - '-p', '--port', - metavar='P', - default=2000, - type=int, + '-p', '--port', metavar='P', default=2000, type=int, help='TCP port to listen to (default: 2000)') argparser.add_argument( - '-i', '--info', - action='store_true', + '-i', '--info', action='store_true', help='Show text information') argparser.add_argument( - '-x', - default=0.0, - type=float, + '-x', default=0.0, type=float, help='X start position (default: 0.0)') argparser.add_argument( - '-y', - default=0.0, - type=float, + '-y', default=0.0, type=float, help='Y start position (default: 0.0)') argparser.add_argument( - '-z', - default=0.0, - type=float, + '-z', default=0.0, type=float, help='Z start position (default: 0.0)') argparser.add_argument( - '-s', '--seed', - metavar='S', - default=os.getpid(), - type=int, - help='Seed for the random path (default: program pid)') + '-s', '--seed', metavar='S', default=None, type=int, + help='Seed for the random path (default: None)') argparser.add_argument( - '-t', '--tick-time', - metavar='T', - default=0.2, - type=float, + '-t', '--tick-time', metavar='T', default=0.2, type=float, help='Tick time between updates (forward velocity) (default: 0.2)') args = argparser.parse_args() @@ -143,15 +113,12 @@ def main(): m = world.get_map() debug = world.debug - random.seed(args.seed) - print("Seed: ", args.seed) + if args.seed: + random.seed(args.seed) loc = carla.Location(args.x, 
args.y, args.z) - print("Initial location: ", loc) - current_w = m.get_waypoint(loc) - # main loop while True: # list of potential next waypoints potential_w = list(current_w.next(waypoint_separation)) diff --git a/PythonAPI/examples/lidar_to_camera.py b/PythonAPI/examples/lidar_to_camera.py deleted file mode 100644 index 468cf1766..000000000 --- a/PythonAPI/examples/lidar_to_camera.py +++ /dev/null @@ -1,354 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2020 Computer Vision Center (CVC) at the Universitat Autonoma de -# Barcelona (UAB). -# -# This work is licensed under the terms of the MIT license. -# For a copy, see . - -""" -Lidar projection on RGB camera example -""" - -import glob -import os -import sys - -try: - sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % ( - sys.version_info.major, - sys.version_info.minor, - 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0]) -except IndexError: - pass - -import carla - -import argparse -from queue import Queue -from queue import Empty -from matplotlib import cm - -try: - import numpy as np -except ImportError: - raise RuntimeError('cannot import numpy, make sure numpy package is installed') - -try: - from PIL import Image -except ImportError: - raise RuntimeError('cannot import PIL, make sure "Pillow" package is installed') - -VIRIDIS = np.array(cm.get_cmap('viridis').colors) -VID_RANGE = np.linspace(0.0, 1.0, VIRIDIS.shape[0]) - -def sensor_callback(data, queue): - """ - This simple callback just stores the data on a thread safe Python Queue - to be retrieved from the "main thread". - """ - queue.put(data) - - -def tutorial(args): - """ - This function is intended to be a tutorial on how to retrieve data in a - synchronous way, and project 3D points from a lidar to a 2D camera. 
- """ - # Connect to the server - client = carla.Client(args.host, args.port) - client.set_timeout(2.0) - world = client.get_world() - bp_lib = world.get_blueprint_library() - - traffic_manager = client.get_trafficmanager(8000) - traffic_manager.set_synchronous_mode(True) - - original_settings = world.get_settings() - settings = world.get_settings() - settings.synchronous_mode = True - settings.fixed_delta_seconds = 3.0 - world.apply_settings(settings) - - vehicle = None - camera = None - lidar = None - - try: - if not os.path.isdir('_out'): - os.mkdir('_out') - # Search the desired blueprints - vehicle_bp = bp_lib.filter("vehicle.lincoln.mkz_2017")[0] - camera_bp = bp_lib.filter("sensor.camera.rgb")[0] - lidar_bp = bp_lib.filter("sensor.lidar.ray_cast")[0] - - # Configure the blueprints - camera_bp.set_attribute("image_size_x", str(args.width)) - camera_bp.set_attribute("image_size_y", str(args.height)) - - if args.no_noise: - lidar_bp.set_attribute('dropoff_general_rate', '0.0') - lidar_bp.set_attribute('dropoff_intensity_limit', '1.0') - lidar_bp.set_attribute('dropoff_zero_intensity', '0.0') - lidar_bp.set_attribute('upper_fov', str(args.upper_fov)) - lidar_bp.set_attribute('lower_fov', str(args.lower_fov)) - lidar_bp.set_attribute('channels', str(args.channels)) - lidar_bp.set_attribute('range', str(args.range)) - lidar_bp.set_attribute('points_per_second', str(args.points_per_second)) - - # Spawn the blueprints - vehicle = world.spawn_actor( - blueprint=vehicle_bp, - transform=world.get_map().get_spawn_points()[0]) - vehicle.set_autopilot(True) - camera = world.spawn_actor( - blueprint=camera_bp, - transform=carla.Transform(carla.Location(x=1.6, z=1.6)), - attach_to=vehicle) - lidar = world.spawn_actor( - blueprint=lidar_bp, - transform=carla.Transform(carla.Location(x=1.0, z=1.8)), - attach_to=vehicle) - - # Build the K projection matrix: - # K = [[Fx, 0, image_w/2], - # [ 0, Fy, image_h/2], - # [ 0, 0, 1]] - image_w = 
camera_bp.get_attribute("image_size_x").as_int() - image_h = camera_bp.get_attribute("image_size_y").as_int() - fov = camera_bp.get_attribute("fov").as_float() - focal = image_w / (2.0 * np.tan(fov * np.pi / 360.0)) - - # In this case Fx and Fy are the same since the pixel aspect - # ratio is 1 - K = np.identity(3) - K[0, 0] = K[1, 1] = focal - K[0, 2] = image_w / 2.0 - K[1, 2] = image_h / 2.0 - - # The sensor data will be saved in thread-safe Queues - image_queue = Queue() - lidar_queue = Queue() - - camera.listen(lambda data: sensor_callback(data, image_queue)) - lidar.listen(lambda data: sensor_callback(data, lidar_queue)) - - for frame in range(args.frames): - world.tick() - world_frame = world.get_snapshot().frame - - try: - # Get the data once it's received. - image_data = image_queue.get(True, 1.0) - lidar_data = lidar_queue.get(True, 1.0) - except Empty: - print("[Warning] Some sensor data has been missed") - continue - - assert image_data.frame == lidar_data.frame == world_frame - # At this point, we have the synchronized information from the 2 sensors. - sys.stdout.write("\r(%d/%d) Simulation: %d Camera: %d Lidar: %d" % - (frame, args.frames, world_frame, image_data.frame, lidar_data.frame) + ' ') - sys.stdout.flush() - - # Get the raw BGRA buffer and convert it to an array of RGB of - # shape (image_data.height, image_data.width, 3). - im_array = np.copy(np.frombuffer(image_data.raw_data, dtype=np.dtype("uint8"))) - im_array = np.reshape(im_array, (image_data.height, image_data.width, 4)) - im_array = im_array[:, :, :3][:, :, ::-1] - - # Get the lidar data and convert it to a numpy array. - p_cloud_size = len(lidar_data) - p_cloud = np.copy(np.frombuffer(lidar_data.raw_data, dtype=np.dtype('f4'))) - p_cloud = np.reshape(p_cloud, (p_cloud_size, 4)) - - # Lidar intensity array of shape (p_cloud_size,) but, for now, let's - # focus on the 3D points. 
- intensity = np.array(p_cloud[:, 3]) - - # Point cloud in lidar sensor space array of shape (3, p_cloud_size). - local_lidar_points = np.array(p_cloud[:, :3]).T - - # Add an extra 1.0 at the end of each 3d point so it becomes of - # shape (4, p_cloud_size) and it can be multiplied by a (4, 4) matrix. - local_lidar_points = np.r_[ - local_lidar_points, [np.ones(local_lidar_points.shape[1])]] - - # This (4, 4) matrix transforms the points from lidar space to world space. - lidar_2_world = lidar.get_transform().get_matrix() - - # Transform the points from lidar space to world space. - world_points = np.dot(lidar_2_world, local_lidar_points) - - # This (4, 4) matrix transforms the points from world to sensor coordinates. - world_2_camera = np.array(camera.get_transform().get_inverse_matrix()) - - # Transform the points from world space to camera space. - sensor_points = np.dot(world_2_camera, world_points) - - # New we must change from UE4's coordinate system to an "standard" - # camera coordinate system (the same used by OpenCV): - - # ^ z . z - # | / - # | to: +-------> x - # | . x | - # |/ | - # +-------> y v y - - # This can be achieved by multiplying by the following matrix: - # [[ 0, 1, 0 ], - # [ 0, 0, -1 ], - # [ 1, 0, 0 ]] - - # Or, in this case, is the same as swapping: - # (x, y ,z) -> (y, -z, x) - point_in_camera_coords = np.array([ - sensor_points[1], - sensor_points[2] * -1, - sensor_points[0]]) - - # Finally we can use our K matrix to do the actual 3D -> 2D. - points_2d = np.dot(K, point_in_camera_coords) - - # Remember to normalize the x, y values by the 3rd value. - points_2d = np.array([ - points_2d[0, :] / points_2d[2, :], - points_2d[1, :] / points_2d[2, :], - points_2d[2, :]]) - - # At this point, points_2d[0, :] contains all the x and points_2d[1, :] - # contains all the y values of our points. 
In order to properly - # visualize everything on a screen, the points that are out of the screen - # must be discarted, the same with points behind the camera projection plane. - points_2d = points_2d.T - intensity = intensity.T - points_in_canvas_mask = \ - (points_2d[:, 0] > 0.0) & (points_2d[:, 0] < image_w) & \ - (points_2d[:, 1] > 0.0) & (points_2d[:, 1] < image_h) & \ - (points_2d[:, 2] > 0.0) - points_2d = points_2d[points_in_canvas_mask] - intensity = intensity[points_in_canvas_mask] - - # Extract the screen coords (uv) as integers. - u_coord = points_2d[:, 0].astype(np.int) - v_coord = points_2d[:, 1].astype(np.int) - - # Since at the time of the creation of this script, the intensity function - # is returning high values, these are adjusted to be nicely visualized. - intensity = 4 * intensity - 3 - color_map = np.array([ - np.interp(intensity, VID_RANGE, VIRIDIS[:, 0]) * 255.0, - np.interp(intensity, VID_RANGE, VIRIDIS[:, 1]) * 255.0, - np.interp(intensity, VID_RANGE, VIRIDIS[:, 2]) * 255.0]).astype(np.int).T - - if args.dot_extent <= 0: - # Draw the 2d points on the image as a single pixel using numpy. - im_array[v_coord, u_coord] = color_map - else: - # Draw the 2d points on the image as squares of extent args.dot_extent. - for i in range(len(points_2d)): - # I'm not a NumPy expert and I don't know how to set bigger dots - # without using this loop, so if anyone has a better solution, - # make sure to update this script. Meanwhile, it's fast enough :) - im_array[ - v_coord[i]-args.dot_extent : v_coord[i]+args.dot_extent, - u_coord[i]-args.dot_extent : u_coord[i]+args.dot_extent] = color_map[i] - - # Save the image using Pillow module. - image = Image.fromarray(im_array) - image.save("_out/%08d.png" % image_data.frame) - - finally: - # Apply the original settings when exiting. - world.apply_settings(original_settings) - - # Destroy the actors in the scene. 
- if camera: - camera.destroy() - if lidar: - lidar.destroy() - if vehicle: - vehicle.destroy() - - -def main(): - """Start function""" - argparser = argparse.ArgumentParser( - description='CARLA Sensor sync and projection tutorial') - argparser.add_argument( - '--host', - metavar='H', - default='127.0.0.1', - help='IP of the host server (default: 127.0.0.1)') - argparser.add_argument( - '-p', '--port', - metavar='P', - default=2000, - type=int, - help='TCP port to listen to (default: 2000)') - argparser.add_argument( - '--res', - metavar='WIDTHxHEIGHT', - default='680x420', - help='window resolution (default: 1280x720)') - argparser.add_argument( - '-f', '--frames', - metavar='N', - default=500, - type=int, - help='number of frames to record (default: 500)') - argparser.add_argument( - '-d', '--dot-extent', - metavar='SIZE', - default=2, - type=int, - help='visualization dot extent in pixels (Recomended [1-4]) (default: 2)') - argparser.add_argument( - '--no-noise', - action='store_true', - help='remove the drop off and noise from the normal (non-semantic) lidar') - argparser.add_argument( - '--upper-fov', - metavar='F', - default=30.0, - type=float, - help='lidar\'s upper field of view in degrees (default: 15.0)') - argparser.add_argument( - '--lower-fov', - metavar='F', - default=-25.0, - type=float, - help='lidar\'s lower field of view in degrees (default: -25.0)') - argparser.add_argument( - '-c', '--channels', - metavar='C', - default=64.0, - type=float, - help='lidar\'s channel count (default: 64)') - argparser.add_argument( - '-r', '--range', - metavar='R', - default=100.0, - type=float, - help='lidar\'s maximum range in meters (default: 100.0)') - argparser.add_argument( - '--points-per-second', - metavar='N', - default='100000', - type=int, - help='lidar points per second (default: 100000)') - args = argparser.parse_args() - args.width, args.height = [int(x) for x in args.res.split('x')] - args.dot_extent -= 1 - - try: - tutorial(args) - - except 
KeyboardInterrupt: - print('\nCancelled by user. Bye!') - - -if __name__ == '__main__': - - main() diff --git a/PythonAPI/examples/manual_control.py b/PythonAPI/examples/manual_control.py index f7b7aabd0..4d9014243 100755 --- a/PythonAPI/examples/manual_control.py +++ b/PythonAPI/examples/manual_control.py @@ -1294,56 +1294,36 @@ def game_loop(args): def main(): - argparser = argparse.ArgumentParser( - description='CARLA Manual Control Client') + argparser = argparse.ArgumentParser(description='CARLA Manual Control Client') argparser.add_argument( - '-v', '--verbose', - action='store_true', - dest='debug', + '-v', '--verbose', action='store_true', dest='debug', help='print debug information') argparser.add_argument( - '--host', - metavar='H', - default='127.0.0.1', + '--host', metavar='H', default='127.0.0.1', help='IP of the host server (default: 127.0.0.1)') argparser.add_argument( - '-p', '--port', - metavar='P', - default=2000, - type=int, + '-p', '--port', metavar='P', default=2000, type=int, help='TCP port to listen to (default: 2000)') argparser.add_argument( - '-a', '--autopilot', - action='store_true', + '-a', '--autopilot', action='store_true', help='enable autopilot') argparser.add_argument( - '--res', - metavar='WIDTHxHEIGHT', - default='1280x720', + '--res', metavar='WIDTHxHEIGHT', default='1280x720', help='window resolution (default: 1280x720)') argparser.add_argument( - '--filter', - metavar='PATTERN', - default='vehicle.*', + '--filter', metavar='PATTERN', default='vehicle.*', help='actor filter (default: "vehicle.*")') argparser.add_argument( - '--generation', - metavar='G', - default='All', + '--generation', metavar='G', default='All', help='restrict to certain actor generation (values: "2","3","All" - default: "All")') argparser.add_argument( - '--rolename', - metavar='NAME', - default='hero', + '--rolename', metavar='NAME', default='hero', help='actor role name (default: "hero")') argparser.add_argument( - '--gamma', - default=1.0, - type=float, 
+ '--gamma', default=1.0, type=float, help='Gamma correction of the camera (default: 1.0)') argparser.add_argument( - '--sync', - action='store_true', + '--sync', action='store_true', help='Activate synchronous mode execution') args = argparser.parse_args() diff --git a/PythonAPI/examples/manual_control_with_traffic.py b/PythonAPI/examples/manual_control_with_traffic.py new file mode 100644 index 000000000..04ffab14e --- /dev/null +++ b/PythonAPI/examples/manual_control_with_traffic.py @@ -0,0 +1,1590 @@ +#!/usr/bin/env python + +# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de +# Barcelona (UAB). +# +# This work is licensed under the terms of the MIT license. +# For a copy, see . + +# Allows controlling a vehicle with a keyboard. For a simpler and more +# documented example, please take a look at tutorial.py. + +""" +Welcome to CARLA manual control. + +Use ARROWS or WASD keys for control. + + W : throttle + S : brake + A/D : steer left/right + Q : toggle reverse + Space : hand-brake + P : toggle autopilot + M : toggle manual transmission + ,/. 
: gear up/down + CTRL + W : toggle constant velocity mode at 60 km/h + + L : toggle next light type + SHIFT + L : toggle high beam + Z/X : toggle right/left blinker + I : toggle interior light + + TAB : change sensor position + ` or N : next sensor + [1-9] : change to sensor [1-9] + G : toggle radar visualization + C : change weather (Shift+C reverse) + Backspace : change vehicle + + O : open/close all doors of vehicle + T : toggle vehicle's telemetry + + V : Select next map layer (Shift+V reverse) + B : Load current selected map layer (Shift+B to unload) + + R : toggle recording images to disk + + CTRL + R : toggle recording of simulation (replacing any previous) + CTRL + P : start replaying last recorded simulation + CTRL + + : increments the start time of the replay by 1 second (+SHIFT = 10 seconds) + CTRL + - : decrements the start time of the replay by 1 second (+SHIFT = 10 seconds) + + F1 : toggle HUD + H/? : toggle help + ESC : quit +""" + +from __future__ import print_function + + +# ============================================================================== +# -- find carla module --------------------------------------------------------- +# ============================================================================== + +# ============================================================================== +# -- imports ------------------------------------------------------------------- +# ============================================================================== + +import carla + +from carla import ColorConverter as cc + +import argparse +import collections +import datetime +import logging +import math +import random +import re +import os +import weakref + +try: + import pygame + from pygame.locals import KMOD_CTRL + from pygame.locals import KMOD_SHIFT + from pygame.locals import K_0 + from pygame.locals import K_9 + from pygame.locals import K_BACKQUOTE + from pygame.locals import K_BACKSPACE + from pygame.locals import K_COMMA + from pygame.locals 
import K_DOWN + from pygame.locals import K_ESCAPE + from pygame.locals import K_F1 + from pygame.locals import K_LEFT + from pygame.locals import K_PERIOD + from pygame.locals import K_RIGHT + from pygame.locals import K_SLASH + from pygame.locals import K_SPACE + from pygame.locals import K_TAB + from pygame.locals import K_UP + from pygame.locals import K_a + from pygame.locals import K_b + from pygame.locals import K_c + from pygame.locals import K_d + from pygame.locals import K_f + from pygame.locals import K_g + from pygame.locals import K_h + from pygame.locals import K_i + from pygame.locals import K_l + from pygame.locals import K_m + from pygame.locals import K_n + from pygame.locals import K_o + from pygame.locals import K_p + from pygame.locals import K_q + from pygame.locals import K_r + from pygame.locals import K_s + from pygame.locals import K_t + from pygame.locals import K_v + from pygame.locals import K_w + from pygame.locals import K_x + from pygame.locals import K_z + from pygame.locals import K_MINUS + from pygame.locals import K_EQUALS +except ImportError: + raise RuntimeError('cannot import pygame, make sure pygame package is installed') + +try: + import numpy as np +except ImportError: + raise RuntimeError('cannot import numpy, make sure numpy package is installed') + + +# ============================================================================== +# -- Global functions ---------------------------------------------------------- +# ============================================================================== + + +def find_weather_presets(): + rgx = re.compile('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)') + name = lambda x: ' '.join(m.group(0) for m in rgx.finditer(x)) + presets = [x for x in dir(carla.WeatherParameters) if re.match('[A-Z].+', x)] + return [(getattr(carla.WeatherParameters, x), name(x)) for x in presets] + + +def get_actor_display_name(actor, truncate=250): + name = ' '.join(actor.type_id.replace('_', 
'.').title().split('.')[1:]) + return (name[:truncate - 1] + u'\u2026') if len(name) > truncate else name + +def get_actor_blueprints(world, filter, generation): + bps = world.get_blueprint_library().filter(filter) + + if generation.lower() == "all": + return bps + + # If the filter returns only one bp, we assume that this one needed + # and therefore, we ignore the generation + if len(bps) == 1: + return bps + + try: + int_generation = int(generation) + # Check if generation is in available generations + if int_generation in [1, 2, 3, 4]: + bps = [x for x in bps if int(x.get_attribute('generation')) == int_generation] + return bps + else: + print(" Warning! Actor Generation is not valid. No actor will be spawned.") + return [] + except: + print(" Warning! Actor Generation is not valid. No actor will be spawned.") + return [] + + +# ============================================================================== +# -- World --------------------------------------------------------------------- +# ============================================================================== + + +class World(object): + def __init__(self, carla_world, hud, args): + self.world = carla_world + self.sync = args.sync + self.actor_role_name = args.rolename + try: + self.map = self.world.get_map() + except RuntimeError as error: + print('RuntimeError: {}'.format(error)) + print(' The server could not send the OpenDRIVE (.xodr) file:') + print(' Make sure it exists, has the same name of your town, and is correct.') + sys.exit(1) + self.hud = hud + self.player = None + self.collision_sensor = None + self.lane_invasion_sensor = None + self.gnss_sensor = None + self.imu_sensor = None + self.radar_sensor = None + self.camera_manager = None + self._weather_presets = find_weather_presets() + self._weather_index = 0 + self._actor_filter = args.filter + self._actor_generation = args.generation + self._gamma = args.gamma + self.restart() + self.world.on_tick(hud.on_world_tick) + self.recording_enabled 
= False + self.recording_start = 0 + self.constant_velocity_enabled = False + self.show_vehicle_telemetry = False + self.doors_are_open = False + self.current_map_layer = 0 + self.map_layer_names = [ + carla.MapLayer.NONE, + carla.MapLayer.Buildings, + carla.MapLayer.Decals, + carla.MapLayer.Foliage, + carla.MapLayer.Ground, + carla.MapLayer.ParkedVehicles, + carla.MapLayer.Particles, + carla.MapLayer.Props, + carla.MapLayer.StreetLights, + carla.MapLayer.Walls, + carla.MapLayer.All + ] + + def restart(self): + self.player_max_speed = 1.589 + self.player_max_speed_fast = 3.713 + # Keep same camera config if the camera manager exists. + cam_index = self.camera_manager.index if self.camera_manager is not None else 0 + cam_pos_index = self.camera_manager.transform_index if self.camera_manager is not None else 0 + # Get a random blueprint. + blueprint_list = get_actor_blueprints(self.world, self._actor_filter, self._actor_generation) + if self.args.safe: + blueprint_list = [x for x in blueprint_list if x.get_attribute('base_type') == 'car'] + if not blueprint_list: + raise ValueError("Couldn't find any blueprints with the specified filters") + blueprint = random.choice(blueprint_list) + blueprint.set_attribute('role_name', self.actor_role_name) + if blueprint.has_attribute('terramechanics'): + blueprint.set_attribute('terramechanics', 'true') + if blueprint.has_attribute('color'): + color = random.choice(blueprint.get_attribute('color').recommended_values) + blueprint.set_attribute('color', color) + if blueprint.has_attribute('driver_id'): + driver_id = random.choice(blueprint.get_attribute('driver_id').recommended_values) + blueprint.set_attribute('driver_id', driver_id) + if blueprint.has_attribute('is_invincible'): + blueprint.set_attribute('is_invincible', 'true') + # set the max speed + if blueprint.has_attribute('speed'): + self.player_max_speed = float(blueprint.get_attribute('speed').recommended_values[1]) + self.player_max_speed_fast = 
float(blueprint.get_attribute('speed').recommended_values[2]) + + # Spawn the player. + if self.player is not None: + spawn_point = self.player.get_transform() + spawn_point.location.z += 2.0 + spawn_point.rotation.roll = 0.0 + spawn_point.rotation.pitch = 0.0 + self.destroy() + self.player = self.world.try_spawn_actor(blueprint, spawn_point) + self.show_vehicle_telemetry = False + self.modify_vehicle_physics(self.player) + while self.player is None: + if not self.map.get_spawn_points(): + print('There are no spawn points available in your map/town.') + print('Please add some Vehicle Spawn Point to your UE5 scene.') + sys.exit(1) + spawn_points = self.map.get_spawn_points() + spawn_point = random.choice(spawn_points) if spawn_points else carla.Transform() + self.player = self.world.try_spawn_actor(blueprint, spawn_point) + self.show_vehicle_telemetry = False + self.modify_vehicle_physics(self.player) + # Set up the sensors. + self.collision_sensor = CollisionSensor(self.player, self.hud) + self.lane_invasion_sensor = LaneInvasionSensor(self.player, self.hud) + self.gnss_sensor = GnssSensor(self.player) + self.imu_sensor = IMUSensor(self.player) + self.camera_manager = CameraManager(self.player, self.hud, self._gamma) + self.camera_manager.transform_index = cam_pos_index + self.camera_manager.set_sensor(cam_index, notify=False) + actor_type = get_actor_display_name(self.player) + self.hud.notification(actor_type) + + if self.sync: + self.world.tick() + else: + self.world.wait_for_tick() + + def next_weather(self, reverse=False): + self._weather_index += -1 if reverse else 1 + self._weather_index %= len(self._weather_presets) + preset = self._weather_presets[self._weather_index] + self.hud.notification('Weather: %s' % preset[1]) + self.player.get_world().set_weather(preset[0]) + + def next_map_layer(self, reverse=False): + self.current_map_layer += -1 if reverse else 1 + self.current_map_layer %= len(self.map_layer_names) + selected = 
self.map_layer_names[self.current_map_layer] + self.hud.notification('LayerMap selected: %s' % selected) + + def load_map_layer(self, unload=False): + selected = self.map_layer_names[self.current_map_layer] + if unload: + self.hud.notification('Unloading map layer: %s' % selected) + self.world.unload_map_layer(selected) + else: + self.hud.notification('Loading map layer: %s' % selected) + self.world.load_map_layer(selected) + + def toggle_radar(self): + if self.radar_sensor is None: + self.radar_sensor = RadarSensor(self.player) + elif self.radar_sensor.sensor is not None: + self.radar_sensor.sensor.destroy() + self.radar_sensor = None + + def modify_vehicle_physics(self, actor): + #If actor is not a vehicle, we cannot use the physics control + try: + physics_control = actor.get_physics_control() + physics_control.use_sweep_wheel_collision = True + actor.apply_physics_control(physics_control) + except Exception: + pass + + def tick(self, clock): + self.hud.tick(self, clock) + + def render(self, display): + self.camera_manager.render(display) + self.hud.render(display) + + def destroy_sensors(self): + self.camera_manager.sensor.destroy() + self.camera_manager.sensor = None + self.camera_manager.index = None + + def destroy(self): + if self.radar_sensor is not None: + self.toggle_radar() + sensors = [ + self.camera_manager.sensor, + self.collision_sensor.sensor, + self.lane_invasion_sensor.sensor, + self.gnss_sensor.sensor, + self.imu_sensor.sensor] + for sensor in sensors: + if sensor is not None: + sensor.stop() + sensor.destroy() + if self.player is not None: + self.player.destroy() + + +# ============================================================================== +# -- KeyboardControl ----------------------------------------------------------- +# ============================================================================== + + +class KeyboardControl(object): + """Class that handles keyboard input.""" + def __init__(self, world, start_in_autopilot): + 
        self._autopilot_enabled = start_in_autopilot
        self._ackermann_enabled = False
        self._ackermann_reverse = 1
        if isinstance(world.player, carla.Vehicle):
            self._control = carla.VehicleControl()
            self._ackermann_control = carla.VehicleAckermannControl()
            self._lights = carla.VehicleLightState.NONE
            world.player.set_autopilot(self._autopilot_enabled)
            world.player.set_light_state(self._lights)
        elif isinstance(world.player, carla.Walker):
            # Walkers have no autopilot; steering is done by rotating the yaw.
            self._control = carla.WalkerControl()
            self._autopilot_enabled = False
            self._rotation = world.player.get_transform().rotation
        else:
            raise NotImplementedError("Actor type not supported")
        self._steer_cache = 0.0
        world.hud.notification("Press 'H' or '?' for help.", seconds=4.0)

    def parse_events(self, client, world, clock, sync_mode):
        """Process the pygame event queue and apply the resulting control.

        Returns True when the user asked to quit, otherwise None. Discrete
        actions (camera/weather/recorder toggles, light switching...) are
        handled per KEYUP event; continuous driving input is sampled from the
        current pressed-key state at the end of each call.
        """
        if isinstance(self._control, carla.VehicleControl):
            current_lights = self._lights
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                return True
            elif event.type == pygame.KEYUP:
                if self._is_quit_shortcut(event.key):
                    return True
                elif event.key == K_BACKSPACE:
                    # Respawn the player; autopilot is re-attached afterwards
                    # because restart() creates a new actor.
                    if self._autopilot_enabled:
                        world.player.set_autopilot(False)
                        world.restart()
                        world.player.set_autopilot(True)
                    else:
                        world.restart()
                elif event.key == K_F1:
                    world.hud.toggle_info()
                elif event.key == K_v and pygame.key.get_mods() & KMOD_SHIFT:
                    world.next_map_layer(reverse=True)
                elif event.key == K_v:
                    world.next_map_layer()
                elif event.key == K_b and pygame.key.get_mods() & KMOD_SHIFT:
                    world.load_map_layer(unload=True)
                elif event.key == K_b:
                    world.load_map_layer()
                elif event.key == K_h or (event.key == K_SLASH and pygame.key.get_mods() & KMOD_SHIFT):
                    world.hud.help.toggle()
                elif event.key == K_TAB:
                    world.camera_manager.toggle_camera()
                elif event.key == K_c and pygame.key.get_mods() & KMOD_SHIFT:
                    world.next_weather(reverse=True)
                elif event.key == K_c:
                    world.next_weather()
                elif event.key == K_g:
                    world.toggle_radar()
                elif event.key == K_BACKQUOTE:
                    world.camera_manager.next_sensor()
                elif event.key == K_n:
                    world.camera_manager.next_sensor()
                elif event.key == K_w and (pygame.key.get_mods() & KMOD_CTRL):
                    if world.constant_velocity_enabled:
                        world.player.disable_constant_velocity()
                        world.constant_velocity_enabled = False
                        world.hud.notification("Disabled Constant Velocity Mode")
                    else:
                        # NOTE(review): 17 m/s is ~61.2 km/h, slightly above the
                        # "60 km/h" stated in the notification below.
                        world.player.enable_constant_velocity(carla.Vector3D(17, 0, 0))
                        world.constant_velocity_enabled = True
                        world.hud.notification("Enabled Constant Velocity Mode at 60 km/h")
                elif event.key == K_o:
                    # Doors are only available on some vehicles; errors are
                    # deliberately swallowed (best-effort toggle).
                    try:
                        if world.doors_are_open:
                            world.hud.notification("Closing Doors")
                            world.doors_are_open = False
                            world.player.close_door(carla.VehicleDoor.All)
                        else:
                            world.hud.notification("Opening doors")
                            world.doors_are_open = True
                            world.player.open_door(carla.VehicleDoor.All)
                    except Exception:
                        pass
                elif event.key == K_t:
                    if world.show_vehicle_telemetry:
                        world.player.show_debug_telemetry(False)
                        world.show_vehicle_telemetry = False
                        world.hud.notification("Disabled Vehicle Telemetry")
                    else:
                        # Telemetry is not supported by every actor type.
                        try:
                            world.player.show_debug_telemetry(True)
                            world.show_vehicle_telemetry = True
                            world.hud.notification("Enabled Vehicle Telemetry")
                        except Exception:
                            pass
                elif event.key > K_0 and event.key <= K_9:
                    # Keys 1-9 select a sensor; CTRL offsets into the second
                    # bank of sensors (indices 9+).
                    index_ctrl = 0
                    if pygame.key.get_mods() & KMOD_CTRL:
                        index_ctrl = 9
                    world.camera_manager.set_sensor(event.key - 1 - K_0 + index_ctrl)
                elif event.key == K_r and not (pygame.key.get_mods() & KMOD_CTRL):
                    world.camera_manager.toggle_recording()
                elif event.key == K_r and (pygame.key.get_mods() & KMOD_CTRL):
                    if (world.recording_enabled):
                        client.stop_recorder()
                        world.recording_enabled = False
                        world.hud.notification("Recorder is OFF")
                    else:
                        client.start_recorder("manual_recording.rec")
                        world.recording_enabled = True
                        world.hud.notification("Recorder is ON")
                elif event.key == K_p and (pygame.key.get_mods() & KMOD_CTRL):
                    # stop recorder
                    client.stop_recorder()
                    world.recording_enabled = False
                    # work around to fix camera at start of replaying
                    current_index = world.camera_manager.index
                    world.destroy_sensors()
                    # disable autopilot
                    self._autopilot_enabled = False
                    world.player.set_autopilot(self._autopilot_enabled)
                    world.hud.notification("Replaying file 'manual_recording.rec'")
                    # replayer
                    client.replay_file("manual_recording.rec", world.recording_start, 0, 0)
                    world.camera_manager.set_sensor(current_index)
                elif event.key == K_MINUS and (pygame.key.get_mods() & KMOD_CTRL):
                    # CTRL +/- adjusts the replay start offset (SHIFT = x10).
                    if pygame.key.get_mods() & KMOD_SHIFT:
                        world.recording_start -= 10
                    else:
                        world.recording_start -= 1
                    world.hud.notification("Recording start time is %d" % (world.recording_start))
                elif event.key == K_EQUALS and (pygame.key.get_mods() & KMOD_CTRL):
                    if pygame.key.get_mods() & KMOD_SHIFT:
                        world.recording_start += 10
                    else:
                        world.recording_start += 1
                    world.hud.notification("Recording start time is %d" % (world.recording_start))
                if isinstance(self._control, carla.VehicleControl):
                    if event.key == K_f:
                        # Toggle ackermann controller
                        self._ackermann_enabled = not self._ackermann_enabled
                        world.hud.show_ackermann_info(self._ackermann_enabled)
                        world.hud.notification("Ackermann Controller %s" %
                                               ("Enabled" if self._ackermann_enabled else "Disabled"))
                    if event.key == K_q:
                        if not self._ackermann_enabled:
                            self._control.gear = 1 if self._control.reverse else -1
                        else:
                            self._ackermann_reverse *= -1
                            # Reset ackermann control
                            self._ackermann_control = carla.VehicleAckermannControl()
                    elif event.key == K_m:
                        self._control.manual_gear_shift = not self._control.manual_gear_shift
                        self._control.gear = world.player.get_control().gear
                        world.hud.notification('%s Transmission' %
                                               ('Manual' if self._control.manual_gear_shift else 'Automatic'))
                    elif self._control.manual_gear_shift and event.key == K_COMMA:
                        self._control.gear = max(-1, self._control.gear - 1)
                    elif self._control.manual_gear_shift and event.key == K_PERIOD:
                        self._control.gear = self._control.gear + 1
                    elif event.key == K_p and not pygame.key.get_mods() & KMOD_CTRL:
                        if not self._autopilot_enabled and not sync_mode:
                            print("WARNING: You are currently in asynchronous mode and could "
                                  "experience some issues with the traffic simulation")
                        self._autopilot_enabled = not self._autopilot_enabled
                        world.player.set_autopilot(self._autopilot_enabled)
                        world.hud.notification(
                            'Autopilot %s' % ('On' if self._autopilot_enabled else 'Off'))
                    elif event.key == K_l and pygame.key.get_mods() & KMOD_CTRL:
                        current_lights ^= carla.VehicleLightState.Special1
                    elif event.key == K_l and pygame.key.get_mods() & KMOD_SHIFT:
                        current_lights ^= carla.VehicleLightState.HighBeam
                    elif event.key == K_l:
                        # Use 'L' key to switch between lights:
                        # closed -> position -> low beam -> fog
                        if not self._lights & carla.VehicleLightState.Position:
                            world.hud.notification("Position lights")
                            current_lights |= carla.VehicleLightState.Position
                        else:
                            world.hud.notification("Low beam lights")
                            current_lights |= carla.VehicleLightState.LowBeam
                        if self._lights & carla.VehicleLightState.LowBeam:
                            world.hud.notification("Fog lights")
                            current_lights |= carla.VehicleLightState.Fog
                        if self._lights & carla.VehicleLightState.Fog:
                            world.hud.notification("Lights off")
                            current_lights ^= carla.VehicleLightState.Position
                            current_lights ^= carla.VehicleLightState.LowBeam
                            current_lights ^= carla.VehicleLightState.Fog
                    elif event.key == K_i:
                        current_lights ^= carla.VehicleLightState.Interior
                    elif event.key == K_z:
                        current_lights ^= carla.VehicleLightState.LeftBlinker
                    elif event.key == K_x:
                        current_lights ^= carla.VehicleLightState.RightBlinker

        if not self._autopilot_enabled:
            if isinstance(self._control, carla.VehicleControl):
                self._parse_vehicle_keys(pygame.key.get_pressed(), clock.get_time())
                self._control.reverse = self._control.gear < 0
                # Set automatic control-related vehicle lights
                if self._control.brake:
                    current_lights |= carla.VehicleLightState.Brake
                else:  # Remove the Brake flag
                    current_lights &= ~carla.VehicleLightState.Brake
                if self._control.reverse:
                    current_lights |= carla.VehicleLightState.Reverse
                else:  # Remove the Reverse flag
                    current_lights &= ~carla.VehicleLightState.Reverse
                if current_lights != self._lights:  # Change the light state only if necessary
                    self._lights = current_lights
                    world.player.set_light_state(carla.VehicleLightState(self._lights))
                # Apply control
                if not self._ackermann_enabled:
                    world.player.apply_control(self._control)
                else:
                    world.player.apply_ackermann_control(self._ackermann_control)
                    # Update control to the last one applied by the ackermann controller.
                    self._control = world.player.get_control()
                    # Update hud with the newest ackermann control
                    world.hud.update_ackermann_control(self._ackermann_control)

            elif isinstance(self._control, carla.WalkerControl):
                self._parse_walker_keys(pygame.key.get_pressed(), clock.get_time(), world)
                world.player.apply_control(self._control)

    def _parse_vehicle_keys(self, keys, milliseconds):
        """Map the pressed-key state to throttle/brake/steer.

        `keys` is the pygame pressed-key array; `milliseconds` is the frame
        time, used to make steering (and ackermann speed) frame-rate
        independent.
        """
        if keys[K_UP] or keys[K_w]:
            if not self._ackermann_enabled:
                self._control.throttle = min(self._control.throttle + 0.1, 1.00)
            else:
                self._ackermann_control.speed += round(milliseconds * 0.005, 2) * self._ackermann_reverse
        else:
            if not self._ackermann_enabled:
                self._control.throttle = 0.0

        if keys[K_DOWN] or keys[K_s]:
            if not self._ackermann_enabled:
                self._control.brake = min(self._control.brake + 0.2, 1)
            else:
                # Decelerate towards zero without flipping the sign of speed.
                self._ackermann_control.speed -= min(abs(self._ackermann_control.speed), round(milliseconds * 0.005, 2)) * self._ackermann_reverse
                self._ackermann_control.speed = max(0, abs(self._ackermann_control.speed)) * self._ackermann_reverse
        else:
            if not self._ackermann_enabled:
                self._control.brake = 0

        steer_increment = 5e-4 * milliseconds
        if keys[K_LEFT] or keys[K_a]:
            # Changing direction resets the cached steering first.
            if self._steer_cache > 0:
                self._steer_cache = 0
            else:
                self._steer_cache -= steer_increment
        elif keys[K_RIGHT] or keys[K_d]:
            if self._steer_cache < 0:
                self._steer_cache = 0
            else:
                self._steer_cache += steer_increment
        else:
            self._steer_cache = 0.0
        # Clamp steering to [-0.7, 0.7] and quantize to one decimal.
        self._steer_cache = min(0.7, max(-0.7, self._steer_cache))
        if not self._ackermann_enabled:
            self._control.steer = round(self._steer_cache, 1)
            self._control.hand_brake = keys[K_SPACE]
        else:
            self._ackermann_control.steer = round(self._steer_cache, 1)

    def _parse_walker_keys(self, keys, milliseconds, world):
        """Map the pressed-key state to walker speed/direction/jump."""
        self._control.speed = 0.0
        if keys[K_DOWN] or keys[K_s]:
            self._control.speed = 0.0
        if keys[K_LEFT] or keys[K_a]:
            # A small forward speed makes the turn visible while rotating.
            self._control.speed = .01
            self._rotation.yaw -= 0.08 * milliseconds
        if keys[K_RIGHT] or keys[K_d]:
            self._control.speed = .01
            self._rotation.yaw += 0.08 * milliseconds
        if keys[K_UP] or keys[K_w]:
            self._control.speed = world.player_max_speed_fast if pygame.key.get_mods() & KMOD_SHIFT else world.player_max_speed
        self._control.jump = keys[K_SPACE]
        self._rotation.yaw = round(self._rotation.yaw, 1)
        self._control.direction = self._rotation.get_forward_vector()

    @staticmethod
    def _is_quit_shortcut(key):
        """Return True for ESC or CTRL+Q."""
        return (key == K_ESCAPE) or (key == K_q and pygame.key.get_mods() & KMOD_CTRL)


# ==============================================================================
# -- HUD -----------------------------------------------------------------------
# ==============================================================================


class HUD(object):
    def __init__(self, width, height):
        """Build the on-screen HUD for a `width` x `height` pygame display."""
        self.dim = (width, height)
        font = pygame.font.Font(pygame.font.get_default_font(), 20)
        # Pick a monospaced font; prefer 'ubuntumono' when available.
        font_name = 'courier' if os.name == 'nt' else 'mono'
        fonts = [x for x in pygame.font.get_fonts() if font_name in x]
        default_font = 'ubuntumono'
        # NOTE(review): fonts[0] raises IndexError when no font name matches
        # the filter — consider falling back to the pygame default font.
        mono = default_font if default_font in fonts else fonts[0]
        mono = pygame.font.match_font(mono)
        self._font_mono = pygame.font.Font(mono, 12 if os.name == 'nt' else 14)
        self._notifications = FadingText(font, (width, 40), (0, height - 40))
        self.help = HelpText(pygame.font.Font(mono, 16), width, height)
        self.server_fps = 0
        self.frame = 0
        self.simulation_time = 0
        self._show_info = True
        self._info_text = []
        self._server_clock = pygame.time.Clock()

        self._show_ackermann_info = False
        self._ackermann_control = carla.VehicleAckermannControl()

    def on_world_tick(self, timestamp):
        """World on_tick callback: track server FPS and simulation time."""
        self._server_clock.tick()
        self.server_fps = self._server_clock.get_fps()
        self.frame = timestamp.frame
        self.simulation_time = timestamp.elapsed_seconds

    def tick(self, world, clock):
        """Rebuild the info panel contents for the current frame."""
        self._notifications.tick(world, clock)
        if not self._show_info:
            return
        t = world.player.get_transform()
        v = world.player.get_velocity()
        c = world.player.get_control()
        # Derive a rough N/S/E/W heading label from the IMU compass (degrees).
        compass = world.imu_sensor.compass
        heading = 'N' if compass > 270.5 or compass < 89.5 else ''
        heading += 'S' if 90.5 < compass < 269.5 else ''
        heading += 'E' if 0.5 < compass < 179.5 else ''
        heading += 'W' if 180.5 < compass < 359.5 else ''
        # Normalized collision-intensity history over the last 200 frames,
        # used to draw the sparkline in render().
        colhist = world.collision_sensor.get_collision_history()
        collision = [colhist[x + self.frame - 200] for x in range(0, 200)]
        max_col = max(1.0, max(collision))
        collision = [x / max_col for x in collision]
        vehicles = world.world.get_actors().filter('vehicle.*')
        self._info_text = [
            'Server:  % 16.0f FPS' % self.server_fps,
            'Client:  % 16.0f FPS' % clock.get_fps(),
            '',
            'Vehicle: % 20s' % get_actor_display_name(world.player, truncate=20),
            'Map:     % 20s' % world.map.name.split('/')[-1],
            'Simulation time: % 12s' % datetime.timedelta(seconds=int(self.simulation_time)),
            '',
            'Speed:   % 15.0f km/h' % (3.6 * math.sqrt(v.x**2 + v.y**2 + v.z**2)),
            u'Compass:% 17.0f\N{DEGREE SIGN} % 2s' % (compass, heading),
            'Accelero: (%5.1f,%5.1f,%5.1f)' % (world.imu_sensor.accelerometer),
            'Gyroscop: (%5.1f,%5.1f,%5.1f)' % (world.imu_sensor.gyroscope),
            'Location:% 20s' % ('(% 5.1f, % 5.1f)' % (t.location.x, t.location.y)),
            'GNSS:% 24s' % ('(% 2.6f, % 3.6f)' % (world.gnss_sensor.lat, world.gnss_sensor.lon)),
            'Height:  % 18.0f m' % t.location.z,
            '']
        # Tuples become bars/checkboxes in render(); plain strings are text.
        if isinstance(c, carla.VehicleControl):
            self._info_text += [
                ('Throttle:', c.throttle, 0.0, 1.0),
                ('Steer:', c.steer, -1.0, 1.0),
                ('Brake:', c.brake, 0.0, 1.0),
                ('Reverse:', c.reverse),
                ('Hand brake:', c.hand_brake),
                ('Manual:', c.manual_gear_shift),
                'Gear:        %s' % {-1: 'R', 0: 'N'}.get(c.gear, c.gear)]
            if self._show_ackermann_info:
                self._info_text += [
                    '',
                    'Ackermann Controller:',
                    '  Target speed: % 8.0f km/h' % (3.6 * self._ackermann_control.speed),
                ]
        elif isinstance(c, carla.WalkerControl):
            self._info_text += [
                ('Speed:', c.speed, 0.0, 5.556),
                ('Jump:', c.jump)]
        self._info_text += [
            '',
            'Collision:',
            collision,
            '',
            'Number of vehicles: % 8d' % len(vehicles)]
        if len(vehicles) > 1:
            # List vehicles within 200 m, nearest first.
            self._info_text += ['Nearby vehicles:']
            distance = lambda l: math.sqrt((l.x - t.location.x)**2 + (l.y - t.location.y)**2 + (l.z - t.location.z)**2)
            vehicles = [(distance(x.get_location()), x) for x in vehicles if x.id != world.player.id]
            for d, vehicle in sorted(vehicles, key=lambda vehicles: vehicles[0]):
                if d > 200.0:
                    break
                vehicle_type = get_actor_display_name(vehicle, truncate=22)
                self._info_text.append('% 4dm %s' % (d, vehicle_type))

    def show_ackermann_info(self, enabled):
        """Enable/disable the ackermann target-speed section of the panel."""
        self._show_ackermann_info = enabled

    def update_ackermann_control(self, ackermann_control):
        """Store the latest ackermann control for display."""
        self._ackermann_control = ackermann_control

    def toggle_info(self):
        """Show/hide the info panel (F1)."""
        self._show_info = not self._show_info

    def notification(self, text, seconds=2.0):
        """Show a fading notification at the bottom of the screen."""
        self._notifications.set_text(text, seconds=seconds)

    def error(self, text):
        """Show a red error notification."""
        self._notifications.set_text('Error: %s' % text, (255, 0, 0))

    def render(self, display):
        """Draw the info panel, notifications and help overlay."""
        if self._show_info:
            # Semi-transparent background strip on the left.
            info_surface = pygame.Surface((220, self.dim[1]))
            info_surface.set_alpha(100)
            display.blit(info_surface, (0, 0))
            v_offset = 4
            bar_h_offset = 100
            bar_width = 106
            for item in self._info_text:
                if v_offset + 18 > self.dim[1]:
                    break
                if isinstance(item, list):
                    # A list of floats is drawn as a sparkline (collision history).
                    if len(item) > 1:
                        points = [(x + 8, v_offset + 8 + (1.0 - y) * 30) for x, y in enumerate(item)]
                        pygame.draw.lines(display, (255, 136, 0), False, points, 2)
                    item = None
                    v_offset += 18
                elif isinstance(item, tuple):
                    if isinstance(item[1], bool):
                        # Boolean -> checkbox (filled when True).
                        rect = pygame.Rect((bar_h_offset, v_offset + 8), (6, 6))
                        pygame.draw.rect(display, (255, 255, 255), rect, 0 if item[1] else 1)
                    else:
                        # (label, value, min, max) -> horizontal gauge.
                        rect_border = pygame.Rect((bar_h_offset, v_offset + 8), (bar_width, 6))
                        pygame.draw.rect(display, (255, 255, 255), rect_border, 1)
                        f = (item[1] - item[2]) / (item[3] - item[2])
                        if item[2] < 0.0:
                            # Signed range: draw a slider knob instead of a bar.
                            rect = pygame.Rect((bar_h_offset + f * (bar_width - 6), v_offset + 8), (6, 6))
                        else:
                            rect = pygame.Rect((bar_h_offset, v_offset + 8), (f * bar_width, 6))
                        pygame.draw.rect(display, (255, 255, 255), rect)
                    item = item[0]
                if item:  # At this point has to be a str.
                    surface = self._font_mono.render(item, True, (255, 255, 255))
                    display.blit(surface, (8, v_offset))
                v_offset += 18
        self._notifications.render(display)
        self.help.render(display)


# ==============================================================================
# -- FadingText ----------------------------------------------------------------
# ==============================================================================


class FadingText(object):
    """Bottom-of-screen notification text that fades out over time."""

    def __init__(self, font, dim, pos):
        self.font = font
        self.dim = dim
        self.pos = pos
        self.seconds_left = 0
        self.surface = pygame.Surface(self.dim)

    def set_text(self, text, color=(255, 255, 255), seconds=2.0):
        """Replace the current text; it will fade out over `seconds`."""
        text_texture = self.font.render(text, True, color)
        self.surface = pygame.Surface(self.dim)
        self.seconds_left = seconds
        self.surface.fill((0, 0, 0, 0))
        self.surface.blit(text_texture, (10, 11))

    def tick(self, _, clock):
        """Advance the fade using the client frame time."""
        delta_seconds = 1e-3 * clock.get_time()
        self.seconds_left = max(0.0, self.seconds_left - delta_seconds)
        # Alpha above 255 is clamped by pygame, so the text stays fully
        # opaque until the last ~0.5 s of its lifetime.
        self.surface.set_alpha(500.0 * self.seconds_left)

    def render(self, display):
        display.blit(self.surface, self.pos)


# ==============================================================================
# -- HelpText ------------------------------------------------------------------
# ==============================================================================


class HelpText(object):
    """Helper class to handle text output using pygame"""
    def __init__(self, font, width, height):
        # The help content is the module docstring (key bindings).
        lines = __doc__.split('\n')
        self.font = font
        self.line_space = 18
        self.dim = (780, len(lines) * self.line_space + 12)
        self.pos = (0.5 * width - 0.5 * self.dim[0], 0.5 * height - 0.5 * self.dim[1])
        self.seconds_left = 0
        self.surface = pygame.Surface(self.dim)
        self.surface.fill((0, 0, 0, 0))
        for n, line in enumerate(lines):
            text_texture = self.font.render(line, True, (255, 255, 255))
            self.surface.blit(text_texture, (22, n * self.line_space))
        self._render = False
        self.surface.set_alpha(220)

    def toggle(self):
        """Show/hide the help overlay (H or ?)."""
        self._render = not self._render

    def render(self, display):
        if self._render:
            display.blit(self.surface, self.pos)


# ==============================================================================
# -- CollisionSensor -----------------------------------------------------------
# ==============================================================================


class CollisionSensor(object):
    """Wraps a 'sensor.other.collision' attached to `parent_actor` and keeps
    a bounded history of collision intensities for the HUD sparkline."""

    def __init__(self, parent_actor, hud):
        self.sensor = None
        self.history = []
        self._parent = parent_actor
        self.hud = hud
        world = self._parent.get_world()
        bp = world.get_blueprint_library().find('sensor.other.collision')
        self.sensor = world.spawn_actor(bp, carla.Transform(), attach_to=self._parent)
        # We need to pass the lambda a weak reference to self to avoid circular
        # reference.
        weak_self = weakref.ref(self)
        self.sensor.listen(lambda event: CollisionSensor._on_collision(weak_self, event))

    def get_collision_history(self):
        """Return a defaultdict mapping frame -> summed collision intensity."""
        history = collections.defaultdict(int)
        for frame, intensity in self.history:
            history[frame] += intensity
        return history

    @staticmethod
    def _on_collision(weak_self, event):
        """Sensor callback: record the impulse magnitude and notify the HUD."""
        self = weak_self()
        if not self:
            return
        actor_type = get_actor_display_name(event.other_actor)
        self.hud.notification('Collision with %r' % actor_type)
        impulse = event.normal_impulse
        intensity = math.sqrt(impulse.x**2 + impulse.y**2 + impulse.z**2)
        self.history.append((event.frame, intensity))
        # Bound the history so it does not grow forever.
        if len(self.history) > 4000:
            self.history.pop(0)


# ==============================================================================
# -- LaneInvasionSensor --------------------------------------------------------
# ==============================================================================


class LaneInvasionSensor(object):
    """Wraps a 'sensor.other.lane_invasion'; only spawned for vehicles."""

    def __init__(self, parent_actor, hud):
        self.sensor = None

        # If the spawn object is not a vehicle, we cannot use the Lane Invasion Sensor
        if parent_actor.type_id.startswith("vehicle."):
            self._parent = parent_actor
            self.hud = hud
            world = self._parent.get_world()
            bp = world.get_blueprint_library().find('sensor.other.lane_invasion')
            self.sensor = world.spawn_actor(bp, carla.Transform(), attach_to=self._parent)
            # We need to pass the lambda a weak reference to self to avoid circular
            # reference.
            weak_self = weakref.ref(self)
            self.sensor.listen(lambda event: LaneInvasionSensor._on_invasion(weak_self, event))

    @staticmethod
    def _on_invasion(weak_self, event):
        """Sensor callback: show which lane marking types were crossed."""
        self = weak_self()
        if not self:
            return
        lane_types = set(x.type for x in event.crossed_lane_markings)
        text = ['%r' % str(x).split()[-1] for x in lane_types]
        self.hud.notification('Crossed line %s' % ' and '.join(text))


# ==============================================================================
# -- GnssSensor ----------------------------------------------------------------
# ==============================================================================


class GnssSensor(object):
    """Wraps a 'sensor.other.gnss'; exposes the latest lat/lon fix."""

    def __init__(self, parent_actor):
        self.sensor = None
        self._parent = parent_actor
        self.lat = 0.0
        self.lon = 0.0
        world = self._parent.get_world()
        bp = world.get_blueprint_library().find('sensor.other.gnss')
        self.sensor = world.spawn_actor(bp, carla.Transform(carla.Location(x=1.0, z=2.8)), attach_to=self._parent)
        # We need to pass the lambda a weak reference to self to avoid circular
        # reference.
        weak_self = weakref.ref(self)
        self.sensor.listen(lambda event: GnssSensor._on_gnss_event(weak_self, event))

    @staticmethod
    def _on_gnss_event(weak_self, event):
        """Sensor callback: cache the latest latitude/longitude."""
        self = weak_self()
        if not self:
            return
        self.lat = event.latitude
        self.lon = event.longitude


# ==============================================================================
# -- IMUSensor -----------------------------------------------------------------
# ==============================================================================


class IMUSensor(object):
    """Wraps a 'sensor.other.imu'; exposes accelerometer, gyroscope and
    compass readings for the HUD."""

    def __init__(self, parent_actor):
        self.sensor = None
        self._parent = parent_actor
        self.accelerometer = (0.0, 0.0, 0.0)
        self.gyroscope = (0.0, 0.0, 0.0)
        self.compass = 0.0
        world = self._parent.get_world()
        bp = world.get_blueprint_library().find('sensor.other.imu')
        self.sensor = world.spawn_actor(
            bp, carla.Transform(), attach_to=self._parent)
        # We need to pass the lambda a weak reference to self to avoid circular
        # reference.
        weak_self = weakref.ref(self)
        self.sensor.listen(
            lambda sensor_data: IMUSensor._IMU_callback(weak_self, sensor_data))

    @staticmethod
    def _IMU_callback(weak_self, sensor_data):
        """Sensor callback: clamp readings to +/-99.9 so they fit the HUD."""
        self = weak_self()
        if not self:
            return
        limits = (-99.9, 99.9)
        self.accelerometer = (
            max(limits[0], min(limits[1], sensor_data.accelerometer.x)),
            max(limits[0], min(limits[1], sensor_data.accelerometer.y)),
            max(limits[0], min(limits[1], sensor_data.accelerometer.z)))
        # Gyroscope is reported in rad/s; convert to deg/s before clamping.
        self.gyroscope = (
            max(limits[0], min(limits[1], math.degrees(sensor_data.gyroscope.x))),
            max(limits[0], min(limits[1], math.degrees(sensor_data.gyroscope.y))),
            max(limits[0], min(limits[1], math.degrees(sensor_data.gyroscope.z))))
        self.compass = math.degrees(sensor_data.compass)


# ==============================================================================
# -- RadarSensor ---------------------------------------------------------------
# ==============================================================================


class RadarSensor(object):
    """Wraps a 'sensor.other.radar' and visualizes detections as colored
    debug points in the world."""

    def __init__(self, parent_actor):
        self.sensor = None
        self._parent = parent_actor
        bound_x = 0.5 + self._parent.bounding_box.extent.x
        bound_y = 0.5 + self._parent.bounding_box.extent.y
        bound_z = 0.5 + self._parent.bounding_box.extent.z

        self.velocity_range = 7.5  # m/s
        world = self._parent.get_world()
        self.debug = world.debug
        bp = world.get_blueprint_library().find('sensor.other.radar')
        bp.set_attribute('horizontal_fov', str(35))
        bp.set_attribute('vertical_fov', str(20))
        # Mounted just above the front of the bounding box, pitched up 5 deg.
        self.sensor = world.spawn_actor(
            bp,
            carla.Transform(
                carla.Location(x=bound_x + 0.05, z=bound_z+0.05),
                carla.Rotation(pitch=5)),
            attach_to=self._parent)
        # We need a weak reference to self to avoid circular reference.
        weak_self = weakref.ref(self)
        self.sensor.listen(
            lambda radar_data: RadarSensor._Radar_callback(weak_self, radar_data))

    @staticmethod
    def _Radar_callback(weak_self, radar_data):
        """Sensor callback: draw one debug point per detection, colored by
        relative velocity (red = approaching, blue = receding)."""
        self = weak_self()
        if not self:
            return
        # To get a numpy [[vel, altitude, azimuth, depth],...[,,,]]:
        # points = np.frombuffer(radar_data.raw_data, dtype=np.dtype('f4'))
        # points = np.reshape(points, (len(radar_data), 4))

        current_rot = radar_data.transform.rotation
        for detect in radar_data:
            azi = math.degrees(detect.azimuth)
            alt = math.degrees(detect.altitude)
            # The 0.25 adjusts a bit the distance so the dots can
            # be properly seen
            fw_vec = carla.Vector3D(x=detect.depth - 0.25)
            # Rotate the forward vector in place into the detection direction.
            carla.Transform(
                carla.Location(),
                carla.Rotation(
                    pitch=current_rot.pitch + alt,
                    yaw=current_rot.yaw + azi,
                    roll=current_rot.roll)).transform(fw_vec)

            def clamp(min_v, max_v, value):
                return max(min_v, min(value, max_v))

            norm_velocity = detect.velocity / self.velocity_range  # range [-1, 1]
            r = int(clamp(0.0, 1.0, 1.0 - norm_velocity) * 255.0)
            g = int(clamp(0.0, 1.0, 1.0 - abs(norm_velocity)) * 255.0)
            b = int(abs(clamp(- 1.0, 0.0, - 1.0 - norm_velocity)) * 255.0)
            self.debug.draw_point(
                radar_data.transform.location + fw_vec,
                size=0.075,
                life_time=0.06,
                persistent_lines=False,
                color=carla.Color(r, g, b))

# ==============================================================================
# -- CameraManager -------------------------------------------------------------
# ==============================================================================


class CameraManager(object):
    """Owns the spectator sensor (camera/lidar/...), its mounting point and
    the pygame surface it renders to."""

    def __init__(self, parent_actor, hud, gamma_correction):
        self.sensor = None
        self.surface = None
        self._parent = parent_actor
        self.hud = hud
        self.recording = False
        bound_x = 0.5 + self._parent.bounding_box.extent.x
        bound_y = 0.5 + self._parent.bounding_box.extent.y
        bound_z = 0.5 + self._parent.bounding_box.extent.z
        Attachment = carla.AttachmentType

        # Candidate camera mount points, scaled by the actor's bounding box
        # for vehicles; fixed offsets for pedestrians.
        if not self._parent.type_id.startswith("walker.pedestrian"):
            self._camera_transforms = [
                (carla.Transform(carla.Location(x=-2.0*bound_x, y=+0.0*bound_y, z=2.0*bound_z), carla.Rotation(pitch=8.0)), Attachment.SpringArmGhost),
                (carla.Transform(carla.Location(x=+0.8*bound_x, y=+0.0*bound_y, z=1.3*bound_z)), Attachment.Rigid),
                (carla.Transform(carla.Location(x=+1.9*bound_x, y=+1.0*bound_y, z=1.2*bound_z)), Attachment.SpringArmGhost),
                (carla.Transform(carla.Location(x=-2.8*bound_x, y=+0.0*bound_y, z=4.6*bound_z), carla.Rotation(pitch=6.0)), Attachment.SpringArmGhost),
                (carla.Transform(carla.Location(x=-1.0, y=-1.0*bound_y, z=0.4*bound_z)), Attachment.Rigid)]
        else:
            self._camera_transforms = [
                (carla.Transform(carla.Location(x=-2.5, z=0.0), carla.Rotation(pitch=-8.0)), Attachment.SpringArmGhost),
                (carla.Transform(carla.Location(x=1.6, z=1.7)), Attachment.Rigid),
                (carla.Transform(carla.Location(x=2.5, y=0.5, z=0.0), carla.Rotation(pitch=-8.0)), Attachment.SpringArmGhost),
                (carla.Transform(carla.Location(x=-4.0, z=2.0), carla.Rotation(pitch=6.0)), Attachment.SpringArmGhost),
                (carla.Transform(carla.Location(x=0, y=-2.5, z=-0.0), carla.Rotation(yaw=90.0)), Attachment.Rigid)]

        self.transform_index = 1
        # Each entry: [blueprint id, color converter, display name, attributes];
        # the configured blueprint is appended as a fifth element below.
        self.sensors = [
            ['sensor.camera.rgb', cc.Raw, 'Camera RGB', {}],
            ['sensor.camera.depth', cc.Raw, 'Camera Depth (Raw)', {}],
            ['sensor.camera.depth', cc.Depth, 'Camera Depth (Gray Scale)', {}],
            ['sensor.camera.depth', cc.LogarithmicDepth, 'Camera Depth (Logarithmic Gray Scale)', {}],
            ['sensor.camera.semantic_segmentation', cc.Raw, 'Camera Semantic Segmentation (Raw)', {}],
            ['sensor.camera.semantic_segmentation', cc.CityScapesPalette, 'Camera Semantic Segmentation (CityScapes Palette)', {}],
            ['sensor.camera.instance_segmentation', cc.CityScapesPalette, 'Camera Instance Segmentation (CityScapes Palette)', {}],
            ['sensor.camera.instance_segmentation', cc.Raw, 'Camera Instance Segmentation (Raw)', {}],
            ['sensor.lidar.ray_cast', None, 'Lidar (Ray-Cast)', {'range': '50'}],
            ['sensor.camera.dvs', cc.Raw, 'Dynamic Vision Sensor', {}],
            ['sensor.camera.rgb', cc.Raw, 'Camera RGB Distorted',
                {'lens_circle_multiplier': '3.0',
                'lens_circle_falloff': '3.0',
                'chromatic_aberration_intensity': '0.5',
                'chromatic_aberration_offset': '0'}],
            ['sensor.camera.optical_flow', cc.Raw, 'Optical Flow', {}],
            ['sensor.camera.normals', cc.Raw, 'Camera Normals', {}],
        ]
        world = self._parent.get_world()
        bp_library = world.get_blueprint_library()
        for item in self.sensors:
            bp = bp_library.find(item[0])
            if item[0].startswith('sensor.camera'):
                # Cameras render at the HUD resolution.
                bp.set_attribute('image_size_x', str(hud.dim[0]))
                bp.set_attribute('image_size_y', str(hud.dim[1]))
                if bp.has_attribute('gamma'):
                    bp.set_attribute('gamma', str(gamma_correction))
                for attr_name, attr_value in item[3].items():
                    bp.set_attribute(attr_name, attr_value)
            elif item[0].startswith('sensor.lidar'):
                # Remember the lidar range so _parse_image can scale points.
                self.lidar_range = 50

                for attr_name, attr_value in item[3].items():
                    bp.set_attribute(attr_name, attr_value)
                    if attr_name == 'range':
                        self.lidar_range = float(attr_value)

            item.append(bp)
        self.index = None

    def toggle_camera(self):
        """Cycle to the next mounting point, respawning the sensor (TAB)."""
        self.transform_index = (self.transform_index + 1) % len(self._camera_transforms)
        self.set_sensor(self.index, notify=False, force_respawn=True)

    def set_sensor(self, index, notify=True, force_respawn=False):
        """Switch to sensor `index` (wraps around), respawning only when the
        sensor type actually changes or `force_respawn` is set."""
        index = index % len(self.sensors)
        needs_respawn = True if self.index is None else \
            (force_respawn or (self.sensors[index][2] != self.sensors[self.index][2]))
        if needs_respawn:
            if self.sensor is not None:
                self.sensor.destroy()
                self.surface = None
            self.sensor = self._parent.get_world().spawn_actor(
                self.sensors[index][-1],
                self._camera_transforms[self.transform_index][0],
                attach_to=self._parent,
                attachment_type=self._camera_transforms[self.transform_index][1])
            # We need to pass the lambda a weak reference to self to avoid
            # circular reference.
            weak_self = weakref.ref(self)
            self.sensor.listen(lambda image: CameraManager._parse_image(weak_self, image))
        if notify:
            self.hud.notification(self.sensors[index][2])
        self.index = index

    def next_sensor(self):
        """Cycle to the next sensor in the list."""
        self.set_sensor(self.index + 1)

    def toggle_recording(self):
        """Toggle saving every rendered frame to disk (R)."""
        self.recording = not self.recording
        self.hud.notification('Recording %s' % ('On' if self.recording else 'Off'))

    def render(self, display):
        if self.surface is not None:
            display.blit(self.surface, (0, 0))

    @staticmethod
    def _parse_image(weak_self, image):
        """Sensor callback: convert the raw sensor data of whichever sensor is
        active into a pygame surface (lidar top-down, DVS events, optical
        flow, or a color-converted camera image)."""
        self = weak_self()
        if not self:
            return
        if self.sensors[self.index][0].startswith('sensor.lidar'):
            # Project the point cloud to a top-down 2D image.
            points = np.frombuffer(image.raw_data, dtype=np.dtype('f4'))
            points = np.reshape(points, (int(points.shape[0] / 4), 4))
            lidar_data = np.array(points[:, :2])
            lidar_data *= min(self.hud.dim) / (2.0 * self.lidar_range)
            lidar_data += (0.5 * self.hud.dim[0], 0.5 * self.hud.dim[1])
            lidar_data = np.fabs(lidar_data)  # pylint: disable=E1111
            lidar_data = lidar_data.astype(np.int32)
            lidar_data = np.reshape(lidar_data, (-1, 2))
            lidar_img_size = (self.hud.dim[0], self.hud.dim[1], 3)
            lidar_img = np.zeros((lidar_img_size), dtype=np.uint8)
            lidar_img[tuple(lidar_data.T)] = (255, 255, 255)
            self.surface = pygame.surfarray.make_surface(lidar_img)
        elif self.sensors[self.index][0].startswith('sensor.camera.dvs'):
            # Example of converting the raw_data from a carla.DVSEventArray
            # sensor into a NumPy array and using it as an image
            dvs_events = np.frombuffer(image.raw_data, dtype=np.dtype([
                ('x', np.uint16), ('y', np.uint16), ('t', np.int64), ('pol', np.bool_)]))
            dvs_img = np.zeros((image.height, image.width, 3), dtype=np.uint8)
            # Blue is positive, red is negative
            dvs_img[dvs_events[:]['y'], dvs_events[:]['x'], dvs_events[:]['pol'] * 2] = 255
            self.surface = pygame.surfarray.make_surface(dvs_img.swapaxes(0, 1))
        elif self.sensors[self.index][0].startswith('sensor.camera.optical_flow'):
            image = image.get_color_coded_flow()
            array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
            array = np.reshape(array, (image.height, image.width, 4))
            array = array[:, :, :3]
            array = array[:, :, ::-1]  # BGRA -> RGB
            self.surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
        else:
            image.convert(self.sensors[self.index][1])
            array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
            array = np.reshape(array, (image.height, image.width, 4))
            array = array[:, :, :3]
            array = array[:, :, ::-1]  # BGRA -> RGB
            self.surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
        if self.recording:
            image.save_to_disk('_out/%08d' % image.frame)


class Traffic(object):
    """Spawns and manages background traffic (TM-driven vehicles and
    AI-controlled walkers) around the player."""

    def __init__(self, client, world, traffic_manager, args):
        self.client = client
        self.world = world
        self.traffic_manager = traffic_manager
        self.args = args
        self.vehicle_ids = []
        self.vehicles = []  # Used to print "spawned X vehicles"
        self.walkers = []
        self.walker_and_control_ids = []  # These include the controllers too
        self.walkers_and_controls = []

        if self.args.sync:
            self.traffic_manager.set_synchronous_mode(True)
        if self.args.respawn:
            self.traffic_manager.set_respawn_dormant_vehicles(True)
        if self.args.hybrid:
            self.traffic_manager.set_hybrid_physics_mode(True)
            self.traffic_manager.set_hybrid_physics_radius(70.0)
        if self.args.seed is not None:
            self.traffic_manager.set_random_device_seed(self.args.seed)

        self.traffic_manager.set_global_distance_to_leading_vehicle(2.5)
        self.traffic_manager.global_percentage_speed_difference(0)

        if not args.sync:
            print("You are currently in asynchronous mode, and traffic might experience some issues")

        self.spawn_vehicles()
        self.spawn_pedestrians()

        # NOTE(review): self.vehicles is only populated inside spawn_vehicles
        # when --car-lights-on is set, so this print reports 0 vehicles
        # otherwise; len(self.vehicle_ids) would be the accurate count.
        print('Spawned %d vehicles and %d walkers, press Ctrl+C to exit.'
              % (len(self.vehicles), len(self.walkers)))

    def spawn_vehicles(self):
        """Batch-spawn up to `number_of_vehicles` TM-controlled vehicles."""

        if self.args.number_of_vehicles == 0:
            return

        # Get the vehicle blueprints
        blueprints_vehicles = get_actor_blueprints(self.world, self.args.filterv, self.args.generationv)
        if not blueprints_vehicles:
            raise ValueError("Couldn't find any vehicles with the specified filters")
        if self.args.safe:
            blueprints_vehicles = [x for x in blueprints_vehicles if x.get_attribute('base_type') == 'car']
        blueprints_vehicles = sorted(blueprints_vehicles, key=lambda bp: bp.id)

        # Get their spawn points
        spawn_points = self.world.get_map().get_spawn_points()
        number_of_spawn_points = len(spawn_points)
        number_vehicles = self.args.number_of_vehicles
        if number_vehicles < number_of_spawn_points:
            random.shuffle(spawn_points)
        elif self.args.number_of_vehicles > number_of_spawn_points:
            # NOTE(review): the clamped number_vehicles is not used below —
            # the spawn loop breaks on args.number_of_vehicles, relying on the
            # spawn_points list running out instead.
            print(f"Requested {number_vehicles} vehicles, but could only find {number_of_spawn_points} spawn points")
            number_vehicles = number_of_spawn_points

        hero = self.args.hero

        SpawnActor = carla.command.SpawnActor
        SetAutopilot = carla.command.SetAutopilot
        FutureActor = carla.command.FutureActor

        batch = []
        for n, transform in enumerate(spawn_points):
            if n >= self.args.number_of_vehicles:
                break
            blueprint = random.choice(blueprints_vehicles)
            if blueprint.has_attribute('color'):
                color = random.choice(blueprint.get_attribute('color').recommended_values)
                blueprint.set_attribute('color', color)
            if blueprint.has_attribute('driver_id'):
                driver_id = random.choice(blueprint.get_attribute('driver_id').recommended_values)
                blueprint.set_attribute('driver_id', driver_id)
            # Only the first spawned vehicle gets the 'hero' role.
            if hero:
                blueprint.set_attribute('role_name', 'hero')
                hero = False
            else:
                blueprint.set_attribute('role_name', 'autopilot')

            # Spawn the cars and set their autopilot
            batch.append(SpawnActor(blueprint, transform)
                .then(SetAutopilot(FutureActor, True, self.traffic_manager.get_port())))

        for response in self.client.apply_batch_sync(batch, self.args.sync):
            if response.error:
                logging.error(response.error)
            else:
                self.vehicle_ids.append(response.actor_id)

        # Set automatic vehicle lights update
        if self.args.car_lights_on:
            self.vehicles = self.world.get_actors(self.vehicle_ids)
            for actor in self.vehicles:
                self.traffic_manager.update_vehicle_lights(actor, True)

    def spawn_pedestrians(self):
        """Batch-spawn walkers plus one AI controller per walker."""

        if self.args.number_of_walkers == 0:
            return

        blueprints_walkers = get_actor_blueprints(self.world, self.args.filterw, self.args.generationw)
        if not blueprints_walkers:
            raise ValueError("Couldn't find any walkers with the specified filters")
        blueprints_walkers = sorted(blueprints_walkers, key=lambda bp: bp.id)

        controller_bp = self.world.get_blueprint_library().find('controller.ai.walker')

        SpawnActor = carla.command.SpawnActor

        percentagePedestriansRunning = 0.0      # how many pedestrians will run
        percentagePedestriansCrossing = 0.0     # how many pedestrians will walk through the road

        if self.args.seed:
            self.world.set_pedestrians_seed(self.args.seed)

        # Get the spawn points
        walker_spawn_points = []
        for i in range(self.args.number_of_walkers):
            spawn_point = carla.Transform()
            loc = self.world.get_random_location_from_navigation()
            if (loc != None):
                spawn_point.location = loc
                # Lift the spawn slightly to avoid collisions with the ground.
                spawn_point.location.z += 2
                walker_spawn_points.append(spawn_point)

        # Get the blueprints
        batch = []
        walker_speed = []
        for spawn_point in walker_spawn_points:
            walker_bp = random.choice(blueprints_walkers)

            if walker_bp.has_attribute('is_invincible'):
                walker_bp.set_attribute('is_invincible', 'false')

            # recommended_values[1] is walking speed, [2] is running speed.
            if walker_bp.has_attribute('speed'):
                if (random.random() > percentagePedestriansRunning):
                    walker_speed.append(walker_bp.get_attribute('speed').recommended_values[1])
                else:
                    walker_speed.append(walker_bp.get_attribute('speed').recommended_values[2])
            else:
                print("Found a walker has no speed")
                walker_speed.append(0.0)

batch.append(SpawnActor(walker_bp, spawn_point)) + + # Spawn the walkers + results = self.client.apply_batch_sync(batch, self.args.sync) + new_walker_speed = [] + for i in range(len(results)): + if results[i].error: + print(results[i].error) + else: + self.walkers.append({"id": results[i].actor_id}) + new_walker_speed.append(walker_speed[i]) + walker_speed = new_walker_speed + + # Spawn the controllers + batch = [] + for i in range(len(self.walkers)): + batch.append(SpawnActor(controller_bp, carla.Transform(), self.walkers[i]["id"])) + + results = self.client.apply_batch_sync(batch, self.args.sync) + for i in range(len(results)): + if results[i].error: + print(results[i].error) + else: + self.walkers[i]["controller"] = results[i].actor_id + + for i in range(len(self.walkers)): + self.walker_and_control_ids.append(self.walkers[i]["controller"]) + self.walker_and_control_ids.append(self.walkers[i]["id"]) + self.walkers_and_controls = self.world.get_actors(self.walker_and_control_ids) + + if not self.args.sync: + self.world.wait_for_tick() + else: + self.world.tick() + + self.world.set_pedestrians_cross_factor(percentagePedestriansCrossing) + for i in range(0, len(self.walker_and_control_ids), 2): + self.walkers_and_controls[i].start() + self.walkers_and_controls[i].go_to_location(self.world.get_random_location_from_navigation()) + self.walkers_and_controls[i].set_max_speed(float(walker_speed[int(i/2)])) + + def destroy(self): + self.client.apply_batch([carla.command.DestroyActor(x) for x in self.vehicle_ids]) + + # Stop walker controllers (list is [controller, actor, controller, actor ...]) + for i in range(0, len(self.walker_and_control_ids), 2): + self.walkers_and_controls[i].stop() + + self.client.apply_batch([carla.command.DestroyActor(x) for x in self.walker_and_control_ids]) + + +# ============================================================================== +# -- game_loop() --------------------------------------------------------------- +# 
============================================================================== + + +def game_loop(args): + pygame.init() + pygame.font.init() + world = None + traffic = None + original_settings = None + + try: + client = carla.Client(args.host, args.port) + client.set_timeout(2000.0) + traffic_manager = client.get_trafficmanager() + + sim_world = client.get_world() + if args.sync: + original_settings = sim_world.get_settings() + settings = sim_world.get_settings() + if not settings.synchronous_mode: + settings.synchronous_mode = True + settings.fixed_delta_seconds = 0.05 + sim_world.apply_settings(settings) + + if args.autopilot and not sim_world.get_settings().synchronous_mode: + print("WARNING: You are currently in asynchronous mode and could " + "experience some issues with the traffic simulation") + + display = pygame.display.set_mode( + (args.width, args.height), + pygame.HWSURFACE | pygame.DOUBLEBUF) + display.fill((0,0,0)) + pygame.display.flip() + + hud = HUD(args.width, args.height) + world = World(sim_world, hud, args) + controller = KeyboardControl(world, args.autopilot) + traffic = Traffic(client, sim_world, traffic_manager, args) + + if args.sync: + sim_world.tick() + else: + sim_world.wait_for_tick() + + clock = pygame.time.Clock() + while True: + if args.sync: + sim_world.tick() + clock.tick_busy_loop(60) + if controller.parse_events(client, world, clock, args.sync): + return + world.tick(clock) + world.render(display) + pygame.display.flip() + + finally: + + if original_settings: + if not original_settings.synchronous_mode: + traffic_manager.set_synchronous_mode(False) + sim_world.apply_settings(original_settings) + + if (world and world.recording_enabled): + client.stop_recorder() + + if world is not None: + world.destroy() + + if traffic is not None: + traffic.destroy() + + pygame.quit() + + +# ============================================================================== +# -- main() 
-------------------------------------------------------------------- +# ============================================================================== + + +def main(): + argparser = argparse.ArgumentParser(description='CARLA Manual Control Client') + argparser.add_argument( + '-v', '--verbose', action='store_true', dest='debug', + help='print debug information') + argparser.add_argument( + '--host', metavar='H', default='127.0.0.1', + help='IP of the host server (default: 127.0.0.1)') + argparser.add_argument( + '-p', '--port', metavar='P', default=2000, type=int, + help='TCP port to listen to (default: 2000)') + argparser.add_argument( + '-a', '--autopilot', action='store_true', + help='enable the autopilot for the manual controleld vehicle') + argparser.add_argument( + '--res', metavar='WIDTHxHEIGHT', default='1280x720', + help='window resolution (default: 1280x720)') + argparser.add_argument( + '--sync', action='store_true', + help='Activate synchronous mode execution') + argparser.add_argument( + '--rolename', metavar='NAME', default='hero', + help='actor role name (default: "hero")') + argparser.add_argument( + '-s', '--seed', metavar='S', type=int, + help='Sets the seed of all random behaviors') + argparser.add_argument( + '--car-lights-on', action='store_true', default=False, + help='Enable automatic car light management') + argparser.add_argument( + '--filter', metavar='PATTERN', default='vehicle.*', + help='actor filter (default: "vehicle.*")') + argparser.add_argument( + '--generation', metavar='G', default='All', + help='restrict to certain actor generation (values: "2","3","All" - default: "All")') + argparser.add_argument( + '--filterv', metavar='PATTERN', default='vehicle.*', + help='Traffic filter (default: "vehicle.*")') + argparser.add_argument( + '--generationv', metavar='G', default='All', + help='restrict the traffic to certain actor generation (values: "2","3","All" - default: "All")') + argparser.add_argument( + '--filterw', metavar='PATTERN', 
default='vehicle.*', + help='pedestrian filter (default: "vehicle.*")') + argparser.add_argument( + '--generationw', metavar='G', default='All', + help='restrict the pedestrians to certain actor generation (values: "2","3","All" - default: "All")') + argparser.add_argument( + '-n', '--number-of-vehicles', metavar='N', default=30, type=int, + help='Number of vehicles (default: 30)') + argparser.add_argument( + '-w', '--number-of-walkers', metavar='W', default=10, type=int, + help='Number of walkers (default: 10)') + argparser.add_argument( + '--safe', action='store_true', + help='Avoid spawning vehicles prone to accidents') + argparser.add_argument( + '--tm-port', metavar='P', default=8000, type=int, + help='Port to communicate with TM (default: 8000)') + argparser.add_argument( + '--hybrid', action='store_true', + help='Activate hybrid mode for Traffic Manager') + argparser.add_argument( + '--respawn', action='store_true', default=False, + help='Automatically respawn dormant vehicles (only in large maps)') + argparser.add_argument( + '--hero', action='store_true', default=False, + help='Set one of the vehicles as hero') + argparser.add_argument( + '--gamma', default=1.0, type=float, + help='Gamma correction of the camera (default: 1.0)') + args = argparser.parse_args() + + args.width, args.height = [int(x) for x in args.res.split('x')] + + log_level = logging.DEBUG if args.debug else logging.INFO + logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level) + + logging.info('listening to server %s:%s', args.host, args.port) + + print(__doc__) + + try: + + game_loop(args) + + except KeyboardInterrupt: + print('\nCancelled by user. 
Bye!') + + +if __name__ == '__main__': + + main() diff --git a/PythonAPI/examples/no_rendering_mode.py b/PythonAPI/examples/no_rendering_mode.py index a7c712d94..d292dd187 100755 --- a/PythonAPI/examples/no_rendering_mode.py +++ b/PythonAPI/examples/no_rendering_mode.py @@ -38,14 +38,6 @@ import glob import os import sys -try: - sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % ( - sys.version_info.major, - sys.version_info.minor, - 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0]) -except IndexError: - pass - # ============================================================================== # -- imports ------------------------------------------------------------------- # ============================================================================== @@ -942,7 +934,7 @@ class World(object): self.world, self.town_map = self._get_data_from_carla() settings = self.world.get_settings() - settings.no_rendering_mode = self.args.no_rendering + settings.no_rendering_mode = True self.world.apply_settings(settings) # Create Surfaces @@ -1358,6 +1350,11 @@ class World(object): def destroy(self): """Destroy the hero actor when class instance is destroyed""" + + settings = self.world.get_settings() + settings.no_rendering_mode = False + self.world.apply_settings(settings) + if self.spawned_hero is not None: self.spawned_hero.destroy() @@ -1552,6 +1549,7 @@ def game_loop(args): print('\nCancelled by user. 
Bye!') finally: + if world is not None: world.destroy() @@ -1573,51 +1571,31 @@ def main(): argparser = argparse.ArgumentParser( description='CARLA No Rendering Mode Visualizer') argparser.add_argument( - '-v', '--verbose', - action='store_true', - dest='debug', + '-v', '--verbose', action='store_true', dest='debug', help='print debug information') argparser.add_argument( - '--host', - metavar='H', - default='127.0.0.1', + '--host', metavar='H', default='127.0.0.1', help='IP of the host server (default: 127.0.0.1)') argparser.add_argument( - '-p', '--port', - metavar='P', - default=2000, - type=int, + '-p', '--port', metavar='P', default=2000, type=int, help='TCP port to listen to (default: 2000)') argparser.add_argument( - '--res', - metavar='WIDTHxHEIGHT', - default='1280x720', + '--res', metavar='WIDTHxHEIGHT', default='1280x720', help='window resolution (default: 1280x720)') argparser.add_argument( - '--filter', - metavar='PATTERN', - default='vehicle.*', + '--filter', metavar='PATTERN', default='vehicle.*', help='actor filter (default: "vehicle.*")') argparser.add_argument( - '--map', - metavar='TOWN', - default=None, + '--map', metavar='TOWN', default=None, help='start a new episode at the given TOWN') argparser.add_argument( - '--no-rendering', - action='store_true', - help='switch off server rendering') - argparser.add_argument( - '--show-triggers', - action='store_true', + '--show-triggers', action='store_true', help='show trigger boxes of traffic signs') argparser.add_argument( - '--show-connections', - action='store_true', + '--show-connections', action='store_true', help='show waypoint connections') argparser.add_argument( - '--show-spawn-points', - action='store_true', + '--show-spawn-points', action='store_true', help='show recommended spawn points') # Parse arguments diff --git a/PythonAPI/examples/open3d_lidar.py b/PythonAPI/examples/open3d_lidar.py index f3fbe4418..f17933553 100644 --- a/PythonAPI/examples/open3d_lidar.py +++ 
b/PythonAPI/examples/open3d_lidar.py @@ -6,30 +6,20 @@ # This work is licensed under the terms of the MIT license. # For a copy, see . -"""Open3D Lidar visuialization example for CARLA""" +"""Open3D Lidar visualization example for CARLA""" -import glob -import os import sys import argparse import time from datetime import datetime import random import numpy as np -from matplotlib import cm +import matplotlib import open3d as o3d -try: - sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % ( - sys.version_info.major, - sys.version_info.minor, - 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0]) -except IndexError: - pass - import carla -VIRIDIS = np.array(cm.get_cmap('plasma').colors) +VIRIDIS = np.array(matplotlib.colormaps['plasma'].colors) VID_RANGE = np.linspace(0.0, 1.0, VIRIDIS.shape[0]) LABEL_COLORS = np.array([ (255, 255, 255), # None @@ -155,10 +145,60 @@ def add_open3d_axis(vis): [0.0, 0.0, 1.0]])) vis.add_geometry(axis) +def main(): + argparser = argparse.ArgumentParser( + description=__doc__) + argparser.add_argument( + '--host', metavar='H', default='localhost', + help='IP of the host CARLA Simulator (default: localhost)') + argparser.add_argument( + '-p', '--port', metavar='P', default=2000, type=int, + help='TCP port of CARLA Simulator (default: 2000)') + argparser.add_argument( + '--no-rendering', action='store_true', + help='use the no-rendering mode which will provide some extra performance') + argparser.add_argument( + '--semantic', action='store_true', + help='use the semantic lidar instead, which provides ground truth information') + argparser.add_argument( + '--no-noise', action='store_true', + help='remove the drop off and noise from the normal (non-semantic) lidar') + argparser.add_argument( + '--no-autopilot', action='store_false', + help='disables the autopilot so the vehicle will remain stopped') + argparser.add_argument( + '--show-axis', action='store_true', + help='show the cartesian coordinates axis') + 
argparser.add_argument( + '--filter', metavar='PATTERN', default='vehicle.*', + help='actor filter (default: "vehicle.*")') + argparser.add_argument( + '--upper-fov', default=15.0, type=float, + help='lidar\'s upper field of view in degrees (default: 15.0)') + argparser.add_argument( + '--lower-fov', default=-25.0, type=float, + help='lidar\'s lower field of view in degrees (default: -25.0)') + argparser.add_argument( + '--channels', default=64.0, type=float, + help='lidar\'s channel count (default: 64)') + argparser.add_argument( + '--range', default=100.0, type=float, + help='lidar\'s maximum range in meters (default: 100.0)') + argparser.add_argument( + '--points-per-second', default=500000, type=int, + help='lidar\'s points per second (default: 500000)') + argparser.add_argument( + '-x', default=0.0, type=float, + help='offset in the sensor position in the X-axis in meters (default: 0.0)') + argparser.add_argument( + '-y', default=0.0, type=float, + help='offset in the sensor position in the Y-axis in meters (default: 0.0)') + argparser.add_argument( + '-z', default=0.0, type=float, + help='offset in the sensor position in the Z-axis in meters (default: 0.0)') + args = argparser.parse_args() -def main(arg): - """Main function of the script""" - client = carla.Client(arg.host, arg.port) + client = carla.Client(args.host, args.port) client.set_timeout(2.0) world = client.get_world() @@ -172,24 +212,24 @@ def main(arg): settings.fixed_delta_seconds = delta settings.synchronous_mode = True - settings.no_rendering_mode = arg.no_rendering + settings.no_rendering_mode = args.no_rendering world.apply_settings(settings) blueprint_library = world.get_blueprint_library() - vehicle_bp = blueprint_library.filter(arg.filter)[0] + vehicle_bp = blueprint_library.filter(args.filter)[0] vehicle_transform = random.choice(world.get_map().get_spawn_points()) vehicle = world.spawn_actor(vehicle_bp, vehicle_transform) - vehicle.set_autopilot(arg.no_autopilot) + 
vehicle.set_autopilot(args.no_autopilot) - lidar_bp = generate_lidar_bp(arg, world, blueprint_library, delta) + lidar_bp = generate_lidar_bp(args, world, blueprint_library, delta) - user_offset = carla.Location(arg.x, arg.y, arg.z) + user_offset = carla.Location(args.x, args.y, args.z) lidar_transform = carla.Transform(carla.Location(x=-0.5, z=1.8) + user_offset) lidar = world.spawn_actor(lidar_bp, lidar_transform, attach_to=vehicle) point_list = o3d.geometry.PointCloud() - if arg.semantic: + if args.semantic: lidar.listen(lambda data: semantic_lidar_callback(data, point_list)) else: lidar.listen(lambda data: lidar_callback(data, point_list)) @@ -205,7 +245,7 @@ def main(arg): vis.get_render_option().point_size = 1 vis.get_render_option().show_coordinate_frame = True - if arg.show_axis: + if args.show_axis: add_open3d_axis(vis) frame = 0 @@ -236,91 +276,11 @@ def main(arg): vis.destroy_window() -if __name__ == "__main__": - argparser = argparse.ArgumentParser( - description=__doc__) - argparser.add_argument( - '--host', - metavar='H', - default='localhost', - help='IP of the host CARLA Simulator (default: localhost)') - argparser.add_argument( - '-p', '--port', - metavar='P', - default=2000, - type=int, - help='TCP port of CARLA Simulator (default: 2000)') - argparser.add_argument( - '--no-rendering', - action='store_true', - help='use the no-rendering mode which will provide some extra' - ' performance but you will lose the articulated objects in the' - ' lidar, such as pedestrians') - argparser.add_argument( - '--semantic', - action='store_true', - help='use the semantic lidar instead, which provides ground truth' - ' information') - argparser.add_argument( - '--no-noise', - action='store_true', - help='remove the drop off and noise from the normal (non-semantic) lidar') - argparser.add_argument( - '--no-autopilot', - action='store_false', - help='disables the autopilot so the vehicle will remain stopped') - argparser.add_argument( - '--show-axis', - 
action='store_true', - help='show the cartesian coordinates axis') - argparser.add_argument( - '--filter', - metavar='PATTERN', - default='model3', - help='actor filter (default: "vehicle.*")') - argparser.add_argument( - '--upper-fov', - default=15.0, - type=float, - help='lidar\'s upper field of view in degrees (default: 15.0)') - argparser.add_argument( - '--lower-fov', - default=-25.0, - type=float, - help='lidar\'s lower field of view in degrees (default: -25.0)') - argparser.add_argument( - '--channels', - default=64.0, - type=float, - help='lidar\'s channel count (default: 64)') - argparser.add_argument( - '--range', - default=100.0, - type=float, - help='lidar\'s maximum range in meters (default: 100.0)') - argparser.add_argument( - '--points-per-second', - default=500000, - type=int, - help='lidar\'s points per second (default: 500000)') - argparser.add_argument( - '-x', - default=0.0, - type=float, - help='offset in the sensor position in the X-axis in meters (default: 0.0)') - argparser.add_argument( - '-y', - default=0.0, - type=float, - help='offset in the sensor position in the Y-axis in meters (default: 0.0)') - argparser.add_argument( - '-z', - default=0.0, - type=float, - help='offset in the sensor position in the Z-axis in meters (default: 0.0)') - args = argparser.parse_args() +if __name__ == '__main__': try: - main(args) + main() except KeyboardInterrupt: - print(' - Exited by user.') + pass + finally: + print('\ndone.') diff --git a/PythonAPI/examples/recorder_replay.py b/PythonAPI/examples/recorder_replay.py new file mode 100755 index 000000000..64ffcca87 --- /dev/null +++ b/PythonAPI/examples/recorder_replay.py @@ -0,0 +1,163 @@ +#!/usr/bin/env python + +# Copyright (c) 2022 Computer Vision Center (CVC) at the Universitat Autonoma de +# Barcelona (UAB). +# +# This work is licensed under the terms of the MIT license. +# For a copy, see . 
+ +"""Script that helps users to manually check a specific log""" + +import argparse +import threading +import sys +import os +import pygame + +import carla + +FPS = 20 +FIRST_FRAME = 0 +LAST_FRAME = False +REPLAY_SPEED = 1 +TIME = 0 + +def tick(world): + global TIME, REPLAY_SPEED + world.tick() + TIME += world.get_snapshot().delta_seconds * REPLAY_SPEED + +def recorder_utilities(client): + global LAST_FRAME, REPLAY_SPEED + stop = False + + while not stop and not LAST_FRAME: + data = input("\nInput the next action: ") + try: + int_data = float(data) + print(" Setting the replayer factor to {}".format(int_data)) + client.set_replayer_time_factor(int_data) + REPLAY_SPEED = int_data + except ValueError: + if data not in ("S", "R"): + print("\033[93mIgnoring unknown command '{}'\033[0m".format(data)) + continue + + print(" Time: {}".format(round(TIME, 3))) + if data == 'S': + stop = True + LAST_FRAME = True + +def main(): + argparser = argparse.ArgumentParser(description=__doc__) + argparser.add_argument( + '--host', metavar='H', default='localhost', + help='IP of the host CARLA Simulator (default: localhost)') + argparser.add_argument( + '--port', metavar='P', default=2000, type=int, + help='TCP port of CARLA Simulator (default: 2000)') + argparser.add_argument( + '-f', '--file', default='', required=True, + help='File to be executed') + argparser.add_argument( + '--start-time', default=0, type=float, + help='Start time of the recorder') + argparser.add_argument( + '--end-time', default=0, type=float, + help='End time of the recorder') + argparser.add_argument( + '--follow-id', default=0, type=int, + help='ID to follow') + argparser.add_argument( + '--follow-ego', action="store_true", + help='follow the ego vehicle') + argparser.add_argument( + '--factor', default=1, type=float, + help='Initial recorder factor') + + args = argparser.parse_args() + + if args.follow_id and args.follow_ego: + print("Choose to either follow an id, or the ego vehicle, but not both") + 
sys.exit(0) + + global TIME, LAST_FRAME, REPLAY_SPEED + + TIME = args.start_time + REPLAY_SPEED = args.factor + + client = None + world = None + + if not os.path.exists(args.file): + print("WARNING: The specified '.log' file does not exist. Shutting down") + sys.exit(-1) + + # Get the client + print("\n\033[1m> Setting the simulation\033[0m") + client = carla.Client(args.host, args.port) + client.set_timeout(200.0) + file_info = client.show_recorder_file_info(args.file, True) + + # Synchronous mode provides a smoother motion of the camera that follows the ego + world = client.get_world() + settings = world.get_settings() + settings.synchronous_mode = True + settings.fixed_delta_seconds = 1/FPS + world.apply_settings(settings) + + # Get the ego vehicle id so that the spectator focuses on it + follow_id = args.follow_id + if args.follow_ego: + file_split = file_info.split("Create ") + for data in file_split: + if not 'hero' in data: + continue + follow_id = int(data.split(": ")[0]) + print("Detected an ego vehicle with id '{}'".format(follow_id)) + break + + # Get the duration of the recorder (only if the end time is 0, aka until the recorder end) + duration = args.end_time + if not duration: + duration = float(file_info.split("Duration: ")[-1].split(" ")[0]) + + print("\033[1m> Starting the replayer\033[0m") + client.replay_file(args.file, args.start_time, args.end_time, follow_id) + client.set_replayer_time_factor(args.factor) + + tick(world) + + try: + print("\033[1m> Running the recorder. 
Use\033[0m") + print("\033[1m - R: to record the replayer timestamp data\033[0m") + print("\033[1m - S: to stop the script\033[0m") + print("\033[1m - A number: to change the speed's factor of the replayer\033[0m") + + t1 = threading.Thread(target=recorder_utilities, args=(client, )) + t1.start() + + clock = pygame.time.Clock() + while not LAST_FRAME: + clock.tick_busy_loop(20) + tick(world) + if TIME >= duration: + LAST_FRAME = True + + except KeyboardInterrupt: + pass + finally: + + if world is not None: + settings = world.get_settings() + settings.synchronous_mode = False + settings.fixed_delta_seconds = None + world.apply_settings(settings) + + if client is not None: + client.stop_replayer(True) + client.set_replayer_time_factor(1) + +if __name__ == '__main__': + main() + diff --git a/PythonAPI/examples/sensor_synchronization.py b/PythonAPI/examples/sensor_synchronization.py index fedb76875..3fc703e19 100644 --- a/PythonAPI/examples/sensor_synchronization.py +++ b/PythonAPI/examples/sensor_synchronization.py @@ -21,20 +21,9 @@ not the case, the clients needs to take in account at each frame how many sensors are going to tick at each frame. """ -import glob -import os -import sys from queue import Queue from queue import Empty -try: - sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % ( - sys.version_info.major, - sys.version_info.minor, - 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0]) -except IndexError: - pass - import carla diff --git a/PythonAPI/examples/show_recorder_collisions.py b/PythonAPI/examples/show_recorder_collisions.py deleted file mode 100755 index af1ad2e4b..000000000 --- a/PythonAPI/examples/show_recorder_collisions.py +++ /dev/null @@ -1,76 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2024 Computer Vision Center (CVC) at the Universitat Autonoma de -# Barcelona (UAB). -# -# This work is licensed under the terms of the MIT license. -# For a copy, see . 
- -import glob -import os -import sys - -try: - sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % ( - sys.version_info.major, - sys.version_info.minor, - 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0]) -except IndexError: - pass - -import carla - -import argparse - - -def main(): - - argparser = argparse.ArgumentParser( - description=__doc__) - argparser.add_argument( - '--host', - metavar='H', - default='127.0.0.1', - help='IP of the host server (default: 127.0.0.1)') - argparser.add_argument( - '-p', '--port', - metavar='P', - default=2000, - type=int, - help='TCP port to listen to (default: 2000)') - argparser.add_argument( - '-f', '--recorder_filename', - metavar='F', - default="test1.rec", - help='recorder filename (test1.rec)') - argparser.add_argument( - '-t', '--types', - metavar='T', - default="aa", - help='pair of types (a=any, h=hero, v=vehicle, w=walkers, t=trafficLight, o=others') - args = argparser.parse_args() - - try: - - client = carla.Client(args.host, args.port) - client.set_timeout(60.0) - - # types pattern samples: - # -t aa == any to any == show every collision (the default) - # -t vv == vehicle to vehicle == show every collision between vehicles only - # -t vt == vehicle to traffic light == show every collision between a vehicle and a traffic light - # -t hh == hero to hero == show collision between a hero and another hero - print(client.show_recorder_collisions(args.recorder_filename, args.types[0], args.types[1])) - - finally: - pass - - -if __name__ == '__main__': - - try: - main() - except KeyboardInterrupt: - pass - finally: - print('\ndone.') diff --git a/PythonAPI/examples/start_recording.py b/PythonAPI/examples/start_recording.py deleted file mode 100755 index 5e809744b..000000000 --- a/PythonAPI/examples/start_recording.py +++ /dev/null @@ -1,158 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2024 Computer Vision Center (CVC) at the Universitat Autonoma de -# Barcelona (UAB). 
-# -# This work is licensed under the terms of the MIT license. -# For a copy, see . - -import glob -import os -import sys - -try: - sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % ( - sys.version_info.major, - sys.version_info.minor, - 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0]) -except IndexError: - pass - -import carla - -import argparse -import random -import time -import logging - - -def main(): - argparser = argparse.ArgumentParser( - description=__doc__) - argparser.add_argument( - '--host', - metavar='H', - default='127.0.0.1', - help='IP of the host server (default: 127.0.0.1)') - argparser.add_argument( - '-p', '--port', - metavar='P', - default=2000, - type=int, - help='TCP port to listen to (default: 2000)') - argparser.add_argument( - '-n', '--number-of-vehicles', - metavar='N', - default=10, - type=int, - help='number of vehicles (default: 10)') - argparser.add_argument( - '-d', '--delay', - metavar='D', - default=2.0, - type=float, - help='delay in seconds between spawns (default: 2.0)') - argparser.add_argument( - '--safe', - action='store_true', - help='avoid spawning vehicles prone to accidents') - argparser.add_argument( - '-f', '--recorder_filename', - metavar='F', - default="test1.log", - help='recorder filename (test1.log)') - argparser.add_argument( - '-t', '--recorder_time', - metavar='T', - default=0, - type=int, - help='recorder duration (auto-stop)') - args = argparser.parse_args() - - actor_list = [] - logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO) - - try: - - client = carla.Client(args.host, args.port) - client.set_timeout(2.0) - world = client.get_world() - blueprints = world.get_blueprint_library().filter('vehicle.*') - - spawn_points = world.get_map().get_spawn_points() - random.shuffle(spawn_points) - - print('found %d spawn points.' 
% len(spawn_points)) - - count = args.number_of_vehicles - - print("Recording on file: %s" % client.start_recorder(args.recorder_filename)) - - if args.safe: - blueprints = [x for x in blueprints if int(x.get_attribute('number_of_wheels')) == 4] - blueprints = [x for x in blueprints if not x.id.endswith('microlino')] - blueprints = [x for x in blueprints if not x.id.endswith('carlacola')] - blueprints = [x for x in blueprints if not x.id.endswith('cybertruck')] - blueprints = [x for x in blueprints if not x.id.endswith('t2')] - blueprints = [x for x in blueprints if not x.id.endswith('sprinter')] - blueprints = [x for x in blueprints if not x.id.endswith('firetruck')] - blueprints = [x for x in blueprints if not x.id.endswith('ambulance')] - - spawn_points = world.get_map().get_spawn_points() - number_of_spawn_points = len(spawn_points) - - if count < number_of_spawn_points: - random.shuffle(spawn_points) - elif count > number_of_spawn_points: - msg = 'requested %d vehicles, but could only find %d spawn points' - logging.warning(msg, count, number_of_spawn_points) - count = number_of_spawn_points - - # @todo cannot import these directly. - SpawnActor = carla.command.SpawnActor - SetAutopilot = carla.command.SetAutopilot - FutureActor = carla.command.FutureActor - - batch = [] - for n, transform in enumerate(spawn_points): - if n >= count: - break - blueprint = random.choice(blueprints) - if blueprint.has_attribute('color'): - color = random.choice(blueprint.get_attribute('color').recommended_values) - blueprint.set_attribute('color', color) - blueprint.set_attribute('role_name', 'autopilot') - batch.append(SpawnActor(blueprint, transform).then(SetAutopilot(FutureActor, True))) - - for response in client.apply_batch_sync(batch): - if response.error: - logging.error(response.error) - else: - actor_list.append(response.actor_id) - - print('spawned %d vehicles, press Ctrl+C to exit.' 
% len(actor_list)) - - if (args.recorder_time > 0): - time.sleep(args.recorder_time) - else: - while True: - world.wait_for_tick() - # time.sleep(0.1) - - finally: - - print('\ndestroying %d actors' % len(actor_list)) - client.apply_batch_sync([carla.command.DestroyActor(x) for x in actor_list]) - - print("Stop recording") - client.stop_recorder() - - -if __name__ == '__main__': - - try: - main() - except KeyboardInterrupt: - pass - finally: - print('\ndone.') diff --git a/PythonAPI/examples/synchronous_mode.py b/PythonAPI/examples/synchronous_mode.py deleted file mode 100755 index ef8897990..000000000 --- a/PythonAPI/examples/synchronous_mode.py +++ /dev/null @@ -1,207 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2024 Computer Vision Center (CVC) at the Universitat Autonoma de -# Barcelona (UAB). -# -# This work is licensed under the terms of the MIT license. -# For a copy, see . - -import glob -import os -import sys - -try: - sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % ( - sys.version_info.major, - sys.version_info.minor, - 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0]) -except IndexError: - pass - -import carla - -import random - -try: - import pygame -except ImportError: - raise RuntimeError('cannot import pygame, make sure pygame package is installed') - -try: - import numpy as np -except ImportError: - raise RuntimeError('cannot import numpy, make sure numpy package is installed') - -try: - import queue -except ImportError: - import Queue as queue - - -class CarlaSyncMode(object): - """ - Context manager to synchronize output from different sensors. 
Synchronous - mode is enabled as long as we are inside this context - - with CarlaSyncMode(world, sensors) as sync_mode: - while True: - data = sync_mode.tick(timeout=1.0) - - """ - - def __init__(self, world, *sensors, **kwargs): - self.world = world - self.sensors = sensors - self.frame = None - self.delta_seconds = 1.0 / kwargs.get('fps', 20) - self._queues = [] - self._settings = None - - def __enter__(self): - self._settings = self.world.get_settings() - self.frame = self.world.apply_settings(carla.WorldSettings( - no_rendering_mode=False, - synchronous_mode=True, - fixed_delta_seconds=self.delta_seconds)) - - def make_queue(register_event): - q = queue.Queue() - register_event(q.put) - self._queues.append(q) - - make_queue(self.world.on_tick) - for sensor in self.sensors: - make_queue(sensor.listen) - return self - - def tick(self, timeout): - self.frame = self.world.tick() - data = [self._retrieve_data(q, timeout) for q in self._queues] - assert all(x.frame == self.frame for x in data) - return data - - def __exit__(self, *args, **kwargs): - self.world.apply_settings(self._settings) - - def _retrieve_data(self, sensor_queue, timeout): - while True: - data = sensor_queue.get(timeout=timeout) - if data.frame == self.frame: - return data - - -def draw_image(surface, image, blend=False): - array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8")) - array = np.reshape(array, (image.height, image.width, 4)) - array = array[:, :, :3] - array = array[:, :, ::-1] - image_surface = pygame.surfarray.make_surface(array.swapaxes(0, 1)) - if blend: - image_surface.set_alpha(100) - surface.blit(image_surface, (0, 0)) - - -def get_font(): - fonts = [x for x in pygame.font.get_fonts()] - default_font = 'ubuntumono' - font = default_font if default_font in fonts else fonts[0] - font = pygame.font.match_font(font) - return pygame.font.Font(font, 14) - - -def should_quit(): - for event in pygame.event.get(): - if event.type == pygame.QUIT: - return True - elif event.type 
== pygame.KEYUP: - if event.key == pygame.K_ESCAPE: - return True - return False - - -def main(): - actor_list = [] - pygame.init() - - display = pygame.display.set_mode( - (800, 600), - pygame.HWSURFACE | pygame.DOUBLEBUF) - font = get_font() - clock = pygame.time.Clock() - - client = carla.Client('localhost', 2000) - client.set_timeout(2.0) - - world = client.get_world() - - try: - m = world.get_map() - start_pose = random.choice(m.get_spawn_points()) - waypoint = m.get_waypoint(start_pose.location) - - blueprint_library = world.get_blueprint_library() - - vehicle = world.spawn_actor( - random.choice(blueprint_library.filter('vehicle.*')), - start_pose) - actor_list.append(vehicle) - vehicle.set_simulate_physics(False) - - camera_rgb = world.spawn_actor( - blueprint_library.find('sensor.camera.rgb'), - carla.Transform(carla.Location(x=-5.5, z=2.8), carla.Rotation(pitch=-15)), - attach_to=vehicle) - actor_list.append(camera_rgb) - - camera_semseg = world.spawn_actor( - blueprint_library.find('sensor.camera.semantic_segmentation'), - carla.Transform(carla.Location(x=-5.5, z=2.8), carla.Rotation(pitch=-15)), - attach_to=vehicle) - actor_list.append(camera_semseg) - - # Create a synchronous mode context. - with CarlaSyncMode(world, camera_rgb, camera_semseg, fps=30) as sync_mode: - while True: - if should_quit(): - return - clock.tick() - - # Advance the simulation and wait for the data. - snapshot, image_rgb, image_semseg = sync_mode.tick(timeout=2.0) - - # Choose the next waypoint and update the car location. - waypoint = random.choice(waypoint.next(1.5)) - vehicle.set_transform(waypoint.transform) - - image_semseg.convert(carla.ColorConverter.CityScapesPalette) - fps = round(1.0 / snapshot.timestamp.delta_seconds) - - # Draw the display. 
- draw_image(display, image_rgb) - draw_image(display, image_semseg, blend=True) - display.blit( - font.render('% 5d FPS (real)' % clock.get_fps(), True, (255, 255, 255)), - (8, 10)) - display.blit( - font.render('% 5d FPS (simulated)' % fps, True, (255, 255, 255)), - (8, 28)) - pygame.display.flip() - - finally: - - print('destroying actors.') - for actor in actor_list: - actor.destroy() - - pygame.quit() - print('done.') - - -if __name__ == '__main__': - - try: - - main() - - except KeyboardInterrupt: - print('\nCancelled by user. Bye!') diff --git a/PythonAPI/util/showcase_doors.py b/PythonAPI/examples/vehicle_doors_demo.py similarity index 81% rename from PythonAPI/util/showcase_doors.py rename to PythonAPI/examples/vehicle_doors_demo.py index 23cfbdd7b..6fa657380 100644 --- a/PythonAPI/util/showcase_doors.py +++ b/PythonAPI/examples/vehicle_doors_demo.py @@ -1,17 +1,12 @@ #!/usr/bin/env python -# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de +# Copyright (c) 2024 Computer Vision Center (CVC) at the Universitat Autonoma de # Barcelona (UAB). # # This work is licensed under the terms of the MIT license. # For a copy, see . -# Allows controlling a vehicle with a keyboard. For a simpler and more -# documented example, please take a look at tutorial.py. - import argparse -import sys -import time import math import carla @@ -28,16 +23,23 @@ def get_transform(vehicle, angle, d=2.5): def main(): argparser = argparse.ArgumentParser( description='CARLA Manual Control Client') - argparser.add_argument('--host', metavar='H', default='127.0.0.1', help='IP of the host server (default: 127.0.0.1)') - argparser.add_argument('-p', '--port', metavar='P', default=2000, type=int, help='TCP port to listen to (default: 2000)') - argparser.add_argument('-b', '--blueprint', default='lincoln', help="Blueprint to be used. 
'all' cycles through all vehicles") - argparser.add_argument('--speed', default=36, type=int, help="Camera rotation speed") + argparser.add_argument( + '--host', metavar='H', default='127.0.0.1', + help='IP of the host server (default: 127.0.0.1)') + argparser.add_argument( + '-p', '--port', metavar='P', default=2000, + type=int, help='TCP port to listen to (default: 2000)') + argparser.add_argument( + '-b', '--blueprint', default='all', + help="Blueprint to be used. 'all' cycles through all vehicles") + argparser.add_argument( + '--speed', default=60, type=int, + help="Camera rotation speed") args = argparser.parse_args() client = carla.Client(args.host, args.port) client.set_timeout(10) world = client.load_world('Town10HD_Opt') - tmap = world.get_map() bp_lib = world.get_blueprint_library() diff --git a/PythonAPI/examples/vehicle_gallery.py b/PythonAPI/examples/vehicle_gallery.py index fd0e35b01..407cc807b 100755 --- a/PythonAPI/examples/vehicle_gallery.py +++ b/PythonAPI/examples/vehicle_gallery.py @@ -6,38 +6,29 @@ # This work is licensed under the terms of the MIT license. # For a copy, see . 
-import glob -import os -import sys - -try: - sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % ( - sys.version_info.major, - sys.version_info.minor, - 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0]) -except IndexError: - pass import carla import math -import random -def get_transform(vehicle_location, angle, d=6.4): +def get_transform(vehicle, angle, d=2.5): + vehicle_location = vehicle.get_location() + radius = vehicle.bounding_box.extent.x * d + height = vehicle.bounding_box.extent.x * 0.8 a = math.radians(angle) - location = carla.Location(d * math.cos(a), d * math.sin(a), 2.0) + vehicle_location + location = carla.Location(radius * math.cos(a), radius * math.sin(a), height) + vehicle_location return carla.Transform(location, carla.Rotation(yaw=180 + angle, pitch=-15)) def main(): - client = carla.Client('localhost', 2000) - client.set_timeout(2.0) + client = carla.Client() + client.set_timeout(10.0) world = client.get_world() spectator = world.get_spectator() vehicle_blueprints = world.get_blueprint_library().filter('vehicle') - location = random.choice(world.get_map().get_spawn_points()).location + location = carla.Location(-47, 20, 0.3) for blueprint in vehicle_blueprints: transform = carla.Transform(location, carla.Rotation(yaw=-45.0)) @@ -51,7 +42,7 @@ def main(): while angle < 356: timestamp = world.wait_for_tick().timestamp angle += timestamp.delta_seconds * 60.0 - spectator.set_transform(get_transform(vehicle.get_location(), angle - 90)) + spectator.set_transform(get_transform(vehicle, angle - 90)) finally: diff --git a/PythonAPI/util/showcase_vehicle_lights.py b/PythonAPI/examples/vehicle_lights_demo.py similarity index 86% rename from PythonAPI/util/showcase_vehicle_lights.py rename to PythonAPI/examples/vehicle_lights_demo.py index 0dd688817..c924d9d1b 100644 --- a/PythonAPI/util/showcase_vehicle_lights.py +++ b/PythonAPI/examples/vehicle_lights_demo.py @@ -1,17 +1,12 @@ #!/usr/bin/env python -# Copyright (c) 2019 Computer 
Vision Center (CVC) at the Universitat Autonoma de +# Copyright (c) 2024 Computer Vision Center (CVC) at the Universitat Autonoma de # Barcelona (UAB). # # This work is licensed under the terms of the MIT license. # For a copy, see . -# Allows controlling a vehicle with a keyboard. For a simpler and more -# documented example, please take a look at tutorial.py. - import argparse -import sys -import time import math import carla @@ -26,13 +21,22 @@ def get_transform(vehicle, angle, d=2.5): location = carla.Location(radius * math.cos(a), radius * math.sin(a), height) + vehicle_location return carla.Transform(location, carla.Rotation(yaw=180 + angle, pitch=-15)) + def main(): argparser = argparse.ArgumentParser( description='CARLA Manual Control Client') - argparser.add_argument('--host', metavar='H', default='127.0.0.1', help='IP of the host server (default: 127.0.0.1)') - argparser.add_argument('-p', '--port', metavar='P', default=2000, type=int, help='TCP port to listen to (default: 2000)') - argparser.add_argument('-b', '--blueprint', default='lincoln', help="Blueprint to be used. 'all' cycles through all vehicles") - argparser.add_argument('--speed', default=36, type=int, help="Camera rotation speed") + argparser.add_argument( + '--host', metavar='H', default='127.0.0.1', + help='IP of the host server (default: 127.0.0.1)') + argparser.add_argument( + '-p', '--port', metavar='P', default=2000, type=int, + help='TCP port to listen to (default: 2000)') + argparser.add_argument( + '-b', '--blueprint', default='lincoln', + help="Blueprint to be used. 
'all' cycles through all vehicles") + argparser.add_argument( + '--speed', default=60, type=int, + help="Camera rotation speed") args = argparser.parse_args() client = carla.Client(args.host, args.port) diff --git a/PythonAPI/examples/vehicle_physics.py b/PythonAPI/examples/vehicle_physics.py deleted file mode 100644 index 69dc31d20..000000000 --- a/PythonAPI/examples/vehicle_physics.py +++ /dev/null @@ -1,142 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2020 Computer Vision Center (CVC) at the Universitat Autonoma de -# Barcelona (UAB). -# -# This work is licensed under the terms of the MIT license. -# For a copy, see . - -""" -Vehicle physics example for CARLA -Small example that shows the effect of different impulse and force aplication -methods to a vehicle. -""" - -import glob -import os -import sys -import argparse - -try: - sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % ( - sys.version_info.major, - sys.version_info.minor, - 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0]) -except IndexError: - pass - -import carla - - -def print_step_info(world, vehicle): - snapshot = world.get_snapshot() - print("%d %06.03f %+8.03f %+8.03f %+8.03f %+8.03f %+8.03f %+8.03f %+8.03f %+8.03f %+8.03f" % - (snapshot.frame, snapshot.timestamp.elapsed_seconds, \ - vehicle.get_acceleration().x, vehicle.get_acceleration().y, vehicle.get_acceleration().z, \ - vehicle.get_velocity().x, vehicle.get_velocity().y, vehicle.get_velocity().z, \ - vehicle.get_location().x, vehicle.get_location().y, vehicle.get_location().z)) - - -def wait(world, frames=100): - for i in range(0, frames): - world.tick() - - -def main(arg): - """Main function of the script""" - client = carla.Client(arg.host, arg.port) - client.set_timeout(2.0) - world = client.get_world() - - try: - # Setting the world and the spawn properties - original_settings = world.get_settings() - settings = world.get_settings() - - delta = 0.1 - settings.fixed_delta_seconds = delta - 
settings.synchronous_mode = True - world.apply_settings(settings) - - blueprint_library = world.get_blueprint_library() - vehicle_bp = blueprint_library.filter(arg.filter)[0] - - vehicle_transform = world.get_map().get_spawn_points()[0] - vehicle_transform.location.z += 3 - vehicle = world.spawn_actor(vehicle_bp, vehicle_transform) - - physics_vehicle = vehicle.get_physics_control() - car_mass = physics_vehicle.mass - - spectator_transform = carla.Transform(vehicle_transform.location, vehicle_transform.rotation) - spectator_transform.location += vehicle_transform.get_forward_vector() * 20 - spectator_transform.rotation.yaw += 180 - spectator = world.get_spectator() - spectator.set_transform(spectator_transform) - - - # We let the vehicle stabilize and save the transform to reset it after each test. - wait(world) - vehicle.set_target_velocity(carla.Vector3D(0, 0, 0)) - vehicle_transform = vehicle.get_transform() - wait(world) - - - # Impulse/Force at the center of mass of the object - impulse = 10 * car_mass - - print("# Adding an Impulse of {:.1f} N s".format(impulse)) - vehicle.add_impulse(carla.Vector3D(0, 0, impulse)) - - wait(world) - vehicle.set_transform(vehicle_transform) - vehicle.set_target_velocity(carla.Vector3D(0, 0, 0)) - wait(world) - - print("# Adding a Force of {:.1f} N".format(impulse / delta)) - # The add_force method should not be use for instantaneous forces like this one, - # it is more useful for constant or variable forces acting in a finite amount of time. - # In this script it is done with the proper scaling to show the equivalence - # between the add_impulse and add_force methods. - # As in this case the force is going to be applied during the whole step dt=delta - # a force more or less equivalent is impulse / delta. 
- vehicle.add_force(carla.Vector3D(0, 0, impulse / delta)) - - wait(world) - vehicle.set_transform(vehicle_transform) - vehicle.set_target_velocity(carla.Vector3D(0, 0, 0)) - wait(world) - - - finally: - world.apply_settings(original_settings) - - vehicle.destroy() - - -if __name__ == "__main__": - - argparser = argparse.ArgumentParser( - description=__doc__) - argparser.add_argument( - '--host', - metavar='H', - default='localhost', - help='IP of the host CARLA Simulator (default: localhost)') - argparser.add_argument( - '-p', '--port', - metavar='P', - default=2000, - type=int, - help='TCP port of CARLA Simulator (default: 2000)') - argparser.add_argument( - '--filter', - metavar='PATTERN', - default='vehicle.*', - help='actor filter (default: "vehicle.*")') - args = argparser.parse_args() - - try: - main(args) - except KeyboardInterrupt: - print(' - Exited by user.') diff --git a/PythonAPI/examples/visualize_multiple_sensors.py b/PythonAPI/examples/visualize_multiple_sensors.py index 6ad1985a3..705b51b99 100644 --- a/PythonAPI/examples/visualize_multiple_sensors.py +++ b/PythonAPI/examples/visualize_multiple_sensors.py @@ -14,24 +14,12 @@ It can easily be configure for any different number of sensors. To do that, check lines 290-308. 
""" -import glob -import os -import sys - -try: - sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % ( - sys.version_info.major, - sys.version_info.minor, - 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0]) -except IndexError: - pass - import carla import argparse import random import time import numpy as np - +from numpy import random try: import pygame @@ -40,6 +28,7 @@ try: except ImportError: raise RuntimeError('cannot import pygame, make sure pygame package is installed') + class CustomTimer: def __init__(self): try: @@ -268,17 +257,16 @@ def run_simulation(args, client): world = client.get_world() original_settings = world.get_settings() - if args.sync: - traffic_manager = client.get_trafficmanager(8000) - settings = world.get_settings() - traffic_manager.set_synchronous_mode(True) - settings.synchronous_mode = True - settings.fixed_delta_seconds = 0.05 - world.apply_settings(settings) + traffic_manager = client.get_trafficmanager(8000) + traffic_manager.set_synchronous_mode(True) + settings = world.get_settings() + settings.synchronous_mode = True + settings.fixed_delta_seconds = 0.05 + world.apply_settings(settings) # Instanciating the vehicle to which we attached the sensors - bp = world.get_blueprint_library().filter('charger_2020')[0] + bp = random.choice(world.get_blueprint_library().filter(args.filter)) vehicle = world.spawn_actor(bp, random.choice(world.get_map().get_spawn_points())) vehicle_list.append(vehicle) vehicle.set_autopilot(True) @@ -303,16 +291,12 @@ def run_simulation(args, client): SensorManager(world, display_manager, 'SemanticLiDAR', carla.Transform(carla.Location(x=0, z=2.4)), vehicle, {'channels' : '64', 'range' : '100', 'points_per_second': '100000', 'rotation_frequency': '20'}, display_pos=[1, 2]) - #Simulation loop call_exit = False time_init_sim = timer.time() while True: # Carla Tick - if args.sync: - world.tick() - else: - world.wait_for_tick() + world.tick() # Render received data display_manager.render() @@ 
-337,36 +321,21 @@ def run_simulation(args, client): world.apply_settings(original_settings) - def main(): argparser = argparse.ArgumentParser( description='CARLA Sensor tutorial') argparser.add_argument( - '--host', - metavar='H', - default='127.0.0.1', + '--host', metavar='H', default='127.0.0.1', help='IP of the host server (default: 127.0.0.1)') argparser.add_argument( - '-p', '--port', - metavar='P', - default=2000, - type=int, + '-p', '--port', metavar='P', default=2000, type=int, help='TCP port to listen to (default: 2000)') argparser.add_argument( - '--sync', - action='store_true', - help='Synchronous mode execution') - argparser.add_argument( - '--async', - dest='sync', - action='store_false', - help='Asynchronous mode execution') - argparser.set_defaults(sync=True) - argparser.add_argument( - '--res', - metavar='WIDTHxHEIGHT', - default='1280x720', + '--res', metavar='WIDTHxHEIGHT', default='1280x720', help='window resolution (default: 1280x720)') + argparser.add_argument( + '--filter', metavar='PATTERN', default='vehicle.*', + help='actor filter (default: "vehicle.*")') args = argparser.parse_args() @@ -384,4 +353,4 @@ def main(): if __name__ == '__main__': - main() + main() \ No newline at end of file diff --git a/PythonAPI/examples/visualize_radar.py b/PythonAPI/examples/visualize_radar.py new file mode 100644 index 000000000..e4c1a24df --- /dev/null +++ b/PythonAPI/examples/visualize_radar.py @@ -0,0 +1,133 @@ +#!/usr/bin/env python + +# Copyright (c) 2024 Computer Vision Center (CVC) at the Universitat Autonoma de +# Barcelona (UAB). +# +# This work is licensed under the terms of the MIT license. +# For a copy, see . 
+ + +import argparse +import math +from numpy import random +import pygame + +import carla + + +def radar_callback(radar_data, world): + current_rot = radar_data.transform.rotation + for detect in radar_data: + azi = math.degrees(detect.azimuth) + alt = math.degrees(detect.altitude) + fw_vec = carla.Vector3D(x=detect.depth - 0.25) + carla.Transform( + carla.Location(), + carla.Rotation( + pitch=current_rot.pitch + alt, + yaw=current_rot.yaw + azi, + roll=current_rot.roll) + ).transform(fw_vec) + + norm_velocity = detect.velocity / 7.5 # range [-1, 1] + r = int(max(0.0, min(1.0, 1.0 - norm_velocity)) * 255.0) + g = int(max(0.0, min(1.0, 1.0 - abs(norm_velocity))) * 255.0) + b = int(abs(max(- 1.0, min(0.0, - 1.0 - norm_velocity))) * 255.0) + world.debug.draw_point(radar_data.transform.location + fw_vec, size=0.1, life_time=0.06, color=carla.Color(r, g, b)) + + +def main(): + argparser = argparse.ArgumentParser( + description='CARLA Manual Control Client') + argparser.add_argument( + '--host', metavar='H', default='127.0.0.1', + help='IP of the host server (default: 127.0.0.1)') + argparser.add_argument( + '-p', '--port', metavar='P', default=2000, type=int, + help='TCP port to listen to (default: 2000)') + argparser.add_argument( + '-hfov', '--horizontal-fov', default='35', + help="RADAR's horizontal fov") + argparser.add_argument( + '-vfov', '--vertical-fov', default='20', + help="RADAR's vertical fov") + argparser.add_argument( + '-pps', '--points', default='1500', + help="RADAR's points per second") + argparser.add_argument( + '-r', '--range', default='100', + help="RADAR's range") + args = argparser.parse_args() + + client = carla.Client(args.host, args.port) + world = client.get_world() + tmap = world.get_map() + + settings = world.get_settings() + settings.synchronous_mode = True + settings.fixed_delta_seconds = 0.05 + world.apply_settings(settings) + + blueprint_library = world.get_blueprint_library() + spawn_bp = blueprint_library.filter('lincoln')[0] + + 
spawn_points = tmap.get_spawn_points() + spawn_transform = random.choice(spawn_points) + + vehicle = world.spawn_actor(spawn_bp, spawn_transform) + + world.tick() + vehicle.set_autopilot(True) + tm = client.get_trafficmanager() + tm.ignore_lights_percentage(vehicle, 100) + tm.ignore_signs_percentage(vehicle, 100) + + radar_bp = blueprint_library.find('sensor.other.radar') + radar_bp.set_attribute('horizontal_fov', args.horizontal_fov) + radar_bp.set_attribute('vertical_fov', args.vertical_fov) + radar_bp.set_attribute('points_per_second', args.points) + radar_bp.set_attribute('range', args.range) + + radar = world.spawn_actor( + radar_bp, carla.Transform(carla.Location(x=3, z=2), carla.Rotation(pitch=5)), attach_to=vehicle) + radar.listen(lambda radar_data: radar_callback(radar_data, world)) + + world.tick() + clock = pygame.time.Clock() + + spectator = world.get_spectator() + spec_offset_x = -5 + spec_offset_z = 3 + spec_offset_pitch = -15 + + try: + while True: + clock.tick_busy_loop(20) + world.tick() + + vehicle_tran = vehicle.get_transform() + yaw = vehicle_tran.rotation.yaw + spectator_l = vehicle_tran.location + carla.Location( + spec_offset_x * math.cos(math.radians(yaw)), + spec_offset_x * math.sin(math.radians(yaw)), + spec_offset_z, + ) + spectator_t = carla.Transform(spectator_l, carla.Rotation(pitch=spec_offset_pitch, yaw=yaw)) + spectator.set_transform(spectator_t) + + + except KeyboardInterrupt: + pass + + finally: + radar.stop() + radar.destroy() + vehicle.destroy() + + settings = world.get_settings() + settings.synchronous_mode = False + settings.fixed_delta_seconds = None + world.apply_settings(settings) + +if __name__ == '__main__': + main() diff --git a/PythonAPI/util/test_connection.py b/PythonAPI/test/test_connection.py similarity index 71% rename from PythonAPI/util/test_connection.py rename to PythonAPI/test/test_connection.py index 2aac0564a..c88ba3f61 100755 --- a/PythonAPI/util/test_connection.py +++ b/PythonAPI/test/test_connection.py @@ 
-8,18 +8,8 @@ """Blocks until the simulator is ready or the time-out is met.""" -import glob -import os import sys -try: - sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % ( - sys.version_info.major, - sys.version_info.minor, - 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0]) -except IndexError: - pass - import carla @@ -31,21 +21,13 @@ def main(): argparser = argparse.ArgumentParser( description=__doc__) argparser.add_argument( - '--host', - metavar='H', - default='127.0.0.1', + '--host', metavar='H', default='127.0.0.1', help='IP of the host server (default: 127.0.0.1)') argparser.add_argument( - '-p', '--port', - metavar='P', - default=2000, - type=int, + '-p', '--port', metavar='P', default=2000, type=int, help='TCP port to listen to (default: 2000)') argparser.add_argument( - '--timeout', - metavar='T', - default=10.0, - type=float, + '--timeout', metavar='T', default=10.0, type=float, help='time-out in seconds (default: 10)') args = argparser.parse_args() diff --git a/PythonAPI/util/raycast_sensor_testing.py b/PythonAPI/test/test_raycast_sensor.py similarity index 92% rename from PythonAPI/util/raycast_sensor_testing.py rename to PythonAPI/test/test_raycast_sensor.py index 4f185642f..d13119012 100644 --- a/PythonAPI/util/raycast_sensor_testing.py +++ b/PythonAPI/test/test_raycast_sensor.py @@ -29,18 +29,6 @@ And for profiling one radar: """ -import glob -import os -import sys - -try: - sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % ( - sys.version_info.major, - sys.version_info.minor, - 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0]) -except IndexError: - pass - import carla import argparse import random @@ -446,90 +434,56 @@ def main(): argparser = argparse.ArgumentParser( description='CARLA Sensor tutorial') argparser.add_argument( - '--host', - metavar='H', - default='127.0.0.1', + '--host', metavar='H', default='127.0.0.1', help='IP of the host server (default: 127.0.0.1)') argparser.add_argument( - '-p', 
'--port', - metavar='P', - default=2000, - type=int, + '-p', '--port', metavar='P', default=2000, type=int, help='TCP port to listen to (default: 2000)') argparser.add_argument( - '--sync', - action='store_true', + '--sync', action='store_true', help='Synchronous mode execution') argparser.add_argument( - '--async', - dest='sync', - action='store_false', + '--async', dest='sync', action='store_false', help='Asynchronous mode execution') argparser.set_defaults(sync=True) argparser.add_argument( - '--res', - metavar='WIDTHxHEIGHT', - default='1280x720', + '--res', metavar='WIDTHxHEIGHT', default='1280x720', help='window resolution (default: 1280x720)') argparser.add_argument( - '-lp', '--lidar_points', - metavar='LP', - default='100000', + '-lp', '--lidar_points', metavar='LP', default='100000', help='lidar points per second (default: "100000")') argparser.add_argument( - '-ln', '--lidar_number', - metavar='LN', - default=3, - type=int, + '-ln', '--lidar_number', metavar='LN', default=3, type=int, choices=range(0, 4), help='Number of lidars to render (from zero to three)') argparser.add_argument( - '-slp', '--semanticlidar_points', - metavar='SLP', - default='100000', + '-slp', '--semanticlidar_points', metavar='SLP', default='100000', help='semantic lidar points per second (default: "100000")') argparser.add_argument( - '-sln', '--semanticlidar_number', - metavar='SLN', - default=0, - type=int, - choices=range(0, 4), + '-sln', '--semanticlidar_number', metavar='SLN', default=0, type=int, choices=range(0, 4), help='Number of semantic lidars to render (from zero to three)') argparser.add_argument( - '-rp', '--radar_points', - metavar='RP', - default='100000', + '-rp', '--radar_points', metavar='RP', default='100000', help='radar points per second (default: "100000")') argparser.add_argument( - '-rn', '--radar_number', - metavar='LN', - default=0, - type=int, - choices=range(0, 4), + '-rn', '--radar_number', metavar='LN', default=0, type=int, choices=range(0, 4), 
help='Number of radars to render (from zero to three)') argparser.add_argument( - '--camera', - dest='render_cam', action='store_true', + '--camera', dest='render_cam', action='store_true', help='render also RGB camera (camera enable by default)') - argparser.add_argument('--no-camera', - dest='render_cam', action='store_false', + argparser.add_argument( + '--no-camera', dest='render_cam', action='store_false', default=True, help='no render RGB camera (camera disable by default)') - argparser.set_defaults(render_cam=True) argparser.add_argument( - '--profiling', - action='store_true', + '--profiling', action='store_true', default=False, help='Use the script in profiling mode. It measures the performance of \ - the lidar for different number of points.') - argparser.set_defaults(profiling=False) + the lidar for different number of points.') argparser.add_argument( - '--no-render-window', - action='store_false', - dest='render_window', + '--no-render-window', action='store_false', dest='render_window', default=True, help='Render visualization window.') - argparser.set_defaults(render_window=True) + args = argparser.parse_args() - args.width, args.height = [int(x) for x in args.res.split('x')] try: diff --git a/PythonAPI/util/vehicle_physics_tester.py b/PythonAPI/test/test_vehicle_physics.py similarity index 98% rename from PythonAPI/util/vehicle_physics_tester.py rename to PythonAPI/test/test_vehicle_physics.py index ca63d4518..2441da54b 100644 --- a/PythonAPI/util/vehicle_physics_tester.py +++ b/PythonAPI/test/test_vehicle_physics.py @@ -15,21 +15,10 @@ Uses: python vehicle_physics_tester.py --filter vehicle_id --turn """ -import glob -import os -import sys import argparse import time import numpy as np -try: - sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % ( - sys.version_info.major, - sys.version_info.minor, - 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0]) -except IndexError: - pass - import carla class VehicleControlStop: diff --git 
a/PythonAPI/util/apply_texture.py b/PythonAPI/util/apply_texture.py index 19e773f00..5e12b840c 100644 --- a/PythonAPI/util/apply_texture.py +++ b/PythonAPI/util/apply_texture.py @@ -11,27 +11,9 @@ # -- imports ------------------------------------------------------------------- # ============================================================================== -import glob -import os -import sys import argparse -import math -import time -import queue import imageio -# ============================================================================== -# -- find carla module --------------------------------------------------------- -# ============================================================================== - -try: - sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % ( - sys.version_info.major, - sys.version_info.minor, - 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0]) -except IndexError: - pass - import carla def get_8bit_texture(image): @@ -69,38 +51,25 @@ def get_float_texture(image): def main(): argparser = argparse.ArgumentParser() argparser.add_argument( - '--host', - metavar='H', - default='127.0.0.1', + '--host', metavar='H', default='127.0.0.1', help='IP of the host server (default: 127.0.0.1)') argparser.add_argument( - '-p', '--port', - metavar='P', - default=2000, - type=int, + '-p', '--port', metavar='P', default=2000, type=int, help='TCP port to listen to (default: 2000)') argparser.add_argument( - '-d', '--diffuse', - type=str, - default='', - help='Path to diffuse image to update') - argparser.add_argument( - '-o', '--object-name', - type=str, - help='Object name') - argparser.add_argument( - '-l', '--list', - action='store_true', + '-l', '--list', action='store_true', help='Prints names of all objects in the scene') argparser.add_argument( - '-n', '--normal', - type=str, - default='', + '-d', '--diffuse', type=str, default='', + help='Path to diffuse image to update') + argparser.add_argument( + '-o', '--object-name', type=str, + 
help='Object name') + argparser.add_argument( + '-n', '--normal', type=str, default='', help='Path to normal map to update') argparser.add_argument( - '--ao_roughness_metallic_emissive', - type=str, - default='', + '--ao_roughness_metallic_emissive', type=str, default='', help='Path to normal map to update') args = argparser.parse_args() @@ -110,23 +79,23 @@ def main(): world = client.get_world() if args.list: - names = world.get_names_of_all_objects() + names = list(filter(lambda k: 'Apartment' in k, world.get_names_of_all_objects())) for name in names: print(name) return - if args.object_name is '': + if args.object_name == '': print('Error: missing object name to apply texture') return diffuse = None normal = None ao_r_m_e = None - if args.diffuse is not '': + if args.diffuse != '': diffuse = imageio.imread(args.diffuse) - if args.normal is not '': + if args.normal != '': normal = imageio.imread(args.normal) - if args.ao_roughness_metallic_emissive is not '': + if args.ao_roughness_metallic_emissive != '': ao_r_m_e = imageio.imread(args.ao_roughness_metallic_emissive) tex_diffuse = get_8bit_texture(diffuse) diff --git a/PythonAPI/util/change_map_layer.py b/PythonAPI/util/change_map_layer.py index bcc6cd20f..ef17be514 100644 --- a/PythonAPI/util/change_map_layer.py +++ b/PythonAPI/util/change_map_layer.py @@ -1,14 +1,11 @@ #!/usr/bin/env python -# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de +# Copyright (c) 2024 Computer Vision Center (CVC) at the Universitat Autonoma de # Barcelona (UAB). # # This work is licensed under the terms of the MIT license. # For a copy, see . -# Allows controlling a vehicle with a keyboard. For a simpler and more -# documented example, please take a look at tutorial.py. 
- import argparse import sys import carla @@ -27,13 +24,22 @@ MAP_LAYERS = { 'All': carla.MapLayer.All } + def main(): argparser = argparse.ArgumentParser( description='CARLA Manual Control Client') - argparser.add_argument('--host', metavar='H', default='127.0.0.1', help='IP of the host server (default: 127.0.0.1)') - argparser.add_argument('-p', '--port', metavar='P', default=2000, type=int, help='TCP port to listen to (default: 2000)') - argparser.add_argument('-l', '--layer',required=True, help='Layer to load / unload') - argparser.add_argument('--action', choices=["load", "unload"], help='Layer to load / unload') + argparser.add_argument( + '--host', metavar='H', default='127.0.0.1', + help='IP of the host server (default: 127.0.0.1)') + argparser.add_argument( + '-p', '--port', metavar='P', default=2000, type=int, + help='TCP port to listen to (default: 2000)') + argparser.add_argument( + '-l', '--layer',required=True, + help='Layer to load / unload') + argparser.add_argument( + '--action', choices=["load", "unload"], + help='Layer to load / unload') args = argparser.parse_args() client = carla.Client(args.host, args.port) @@ -46,9 +52,11 @@ def main(): sys.exit() if args.action == 'load': + print(f"Loading layer {layer}") world.load_map_layer(layer) elif args.action == 'unload': - world_unload_map_layer(layer) + print(f"Unloading layer {layer}") + world.unload_map_layer(layer) if __name__ == '__main__': main() diff --git a/PythonAPI/util/change_textures.py b/PythonAPI/util/change_textures.py deleted file mode 100644 index bcc6cd20f..000000000 --- a/PythonAPI/util/change_textures.py +++ /dev/null @@ -1,54 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de -# Barcelona (UAB). -# -# This work is licensed under the terms of the MIT license. -# For a copy, see . - -# Allows controlling a vehicle with a keyboard. For a simpler and more -# documented example, please take a look at tutorial.py. 
- -import argparse -import sys -import carla - - -MAP_LAYERS = { - 'Buildings': carla.MapLayer.Buildings, - 'Decals': carla.MapLayer.Decals, - 'Foliage': carla.MapLayer.Foliage, - 'Ground': carla.MapLayer.Ground, - 'ParkedVehicles': carla.MapLayer.ParkedVehicles, - 'Particles': carla.MapLayer.Particles, - 'Props': carla.MapLayer.Props, - 'StreetLights': carla.MapLayer.StreetLights, - 'Walls': carla.MapLayer.Walls, - 'All': carla.MapLayer.All -} - -def main(): - argparser = argparse.ArgumentParser( - description='CARLA Manual Control Client') - argparser.add_argument('--host', metavar='H', default='127.0.0.1', help='IP of the host server (default: 127.0.0.1)') - argparser.add_argument('-p', '--port', metavar='P', default=2000, type=int, help='TCP port to listen to (default: 2000)') - argparser.add_argument('-l', '--layer',required=True, help='Layer to load / unload') - argparser.add_argument('--action', choices=["load", "unload"], help='Layer to load / unload') - args = argparser.parse_args() - - client = carla.Client(args.host, args.port) - world = client.get_world() - try: - layer = MAP_LAYERS[args.layer] - except KeyError: - print(f"Map layer '{args.layer}' doesn't exist. The possible map layers are:") - print(list(MAP_LAYERS.keys())) - sys.exit() - - if args.action == 'load': - world.load_map_layer(layer) - elif args.action == 'unload': - world_unload_map_layer(layer) - -if __name__ == '__main__': - main() diff --git a/PythonAPI/util/config.py b/PythonAPI/util/config.py new file mode 100644 index 000000000..cd842df09 --- /dev/null +++ b/PythonAPI/util/config.py @@ -0,0 +1,264 @@ +#!/usr/bin/env python + +# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de +# Barcelona (UAB). +# +# This work is licensed under the terms of the MIT license. +# For a copy, see . + +""" +Configure and inspect an instance of CARLA Simulator. 
+ +For further details, visit +https://carla.readthedocs.io/en/latest/configuring_the_simulation/ +""" + +import os +import sys +import argparse +import datetime +import socket +import textwrap + +import carla + + +def get_ip(host): + if host in ['localhost', '127.0.0.1']: + sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + try: + sock.connect(('10.255.255.255', 1)) + host = sock.getsockname()[0] + except RuntimeError: + pass + finally: + sock.close() + return host + + +def list_options(client): + maps = [m.replace('/Game/Carla/Maps/', '') for m in client.get_available_maps()] + indent = 4 * ' ' + def wrap(text): + return '\n'.join(textwrap.wrap(text, initial_indent=indent, subsequent_indent=indent)) + print('available maps:\n') + print(wrap(', '.join(sorted(maps))) + '.\n') + + +def list_blueprints(world, bp_filter): + blueprint_library = world.get_blueprint_library() + blueprints = [bp.id for bp in blueprint_library.filter(bp_filter)] + print('available blueprints (filter %r):\n' % bp_filter) + for bp in sorted(blueprints): + print(' ' + bp) + print('') + + +def inspect(args, client): + address = '%s:%d' % (get_ip(args.host), args.port) + + world = client.get_world() + elapsed_time = world.get_snapshot().timestamp.elapsed_seconds + elapsed_time = datetime.timedelta(seconds=int(elapsed_time)) + + actors = world.get_actors() + s = world.get_settings() + + if s.fixed_delta_seconds is None: + frame_rate = 'variable' + else: + frame_rate = '%.2f ms (%d FPS)' % ( + 1000.0 * s.fixed_delta_seconds, + 1.0 / s.fixed_delta_seconds) + + print('---------------------------------') + print('address:% 26s' % address) + print('version:% 26s\n' % client.get_server_version()) + print('map: % 22s' % world.get_map().name) + print('time: % 22s\n' % elapsed_time) + print('frame rate: % 22s' % frame_rate) + print('rendering: % 22s' % ('disabled' if s.no_rendering_mode else 'enabled')) + print('sync mode: % 22s\n' % ('disabled' if not s.synchronous_mode else 'enabled')) + 
print('actors: % 22d' % len(actors)) + print(' * spectator:% 20d' % len(actors.filter('spectator'))) + print(' * static: % 20d' % len(actors.filter('static.*'))) + print(' * traffic: % 20d' % len(actors.filter('traffic.*'))) + print(' * vehicles: % 20d' % len(actors.filter('vehicle.*'))) + print(' * walkers: % 20d' % len(actors.filter('walker.*'))) + print('---------------------------------') + + +def main(): + argparser = argparse.ArgumentParser( + description=__doc__) + argparser.add_argument( + '--host', metavar='H', default='localhost', + help='IP of the host CARLA Simulator (default: localhost)') + argparser.add_argument( + '-p', '--port', metavar='P', default=2000, type=int, + help='TCP port of CARLA Simulator (default: 2000)') + argparser.add_argument( + '-l', '--list', action='store_true', + help='list available options') + argparser.add_argument( + '-b', '--list-blueprints', metavar='FILTER', + help='list available blueprints matching FILTER (use \'*\' to list them all)') + argparser.add_argument( + '-m', '--map', + help='load a new map, use --list to see available maps') + argparser.add_argument( + '-r', '--reload-map', action='store_true', + help='reload current map') + argparser.add_argument( + '--delta-seconds', metavar='S', + type=float, + help='set fixed delta seconds, zero for variable frame rate') + argparser.add_argument( + '--fps', metavar='N', type=float, + help='set fixed FPS, zero for variable FPS (similar to --delta-seconds)') + argparser.add_argument( + '--rendering', action='store_true', + help='enable rendering') + argparser.add_argument( + '--no-rendering', action='store_true', + help='disable rendering') + argparser.add_argument( + '--no-sync', action='store_true', + help='disable synchronous mode') + argparser.add_argument( + '-i', '--inspect', action='store_true', + help='inspect simulation') + argparser.add_argument( + '-x', '--xodr-path', metavar='XODR_FILE_PATH', + help='load a new map with a minimum physical road representation of 
the provided OpenDRIVE') + argparser.add_argument( + '--osm-path', metavar='OSM_FILE_PATH', + help='load a new map with a minimum physical road representation of the provided OpenStreetMaps') + argparser.add_argument( + '--tile-stream-distance', metavar='N', type=float, + help='Set tile streaming distance (large maps only)') + argparser.add_argument( + '--actor-active-distance', metavar='N', type=float, + help='Set actor active distance (large maps only)') + argparser.add_argument('--default', action='store_true', help='set default settings') + args = argparser.parse_args() + + if len(sys.argv) < 2: + argparser.print_help() + return + + client = carla.Client(args.host, args.port, worker_threads=1) + client.set_timeout(10.0) + + if args.default: + args.rendering = True + args.delta_seconds = None + args.no_sync = True + + if args.map is not None: + print('Load map %r' % args.map) + world = client.load_world(args.map) + + elif args.reload_map: + print('Reload map') + world = client.reload_world() + + elif args.xodr_path is not None: + if os.path.exists(args.xodr_path): + with open(args.xodr_path, encoding='utf-8') as od_file: + try: + data = od_file.read() + except OSError: + print('File could not be read') + sys.exit() + print('Loading opendrive map %r' % os.path.basename(args.xodr_path)) + world = client.generate_opendrive_world( + data, carla.OpendriveGenerationParameters( + vertex_distance=2.0, + max_road_length=500.0, + wall_height=1.0, + additional_width=0.6, + smooth_junctions=True, + enable_mesh_visibility=True)) + + else: + print('File not found') + + elif args.osm_path is not None: + if os.path.exists(args.osm_path): + with open(args.osm_path, encoding='utf-8') as od_file: + try: + data = od_file.read() + except OSError: + print('File could not be read') + sys.exit() + print('Converting OSM data to opendrive') + xodr_data = carla.Osm2Odr.convert(data) + print('Loading opendrive map') + world = client.generate_opendrive_world( + xodr_data, carla.OpendriveGenerationParameters( + vertex_distance=2.0, + max_road_length=500.0, 
wall_height=1.0, + additional_width=0.6, + smooth_junctions=True, + enable_mesh_visibility=True)) + + else: + print('File not found.') + + else: + world = client.get_world() + + settings = world.get_settings() + + if args.no_rendering: + print('Disabling rendering mode') + settings.no_rendering_mode = True + elif args.rendering: + print('Enabling rendering mode') + settings.no_rendering_mode = False + + if args.no_sync: + print('Disabling synchronous mode') + settings.synchronous_mode = False + + if args.delta_seconds is not None: + settings.fixed_delta_seconds = args.delta_seconds + elif args.fps is not None: + settings.fixed_delta_seconds = (1.0 / args.fps) if args.fps > 0.0 else 0.0 + + if args.delta_seconds is not None or args.fps is not None: + if settings.fixed_delta_seconds > 0.0: + print('Set fixed frame rate %.2f milliseconds (%d FPS)' % ( + 1000.0 * settings.fixed_delta_seconds, + 1.0 / settings.fixed_delta_seconds)) + else: + print('Set variable frame rate.') + settings.fixed_delta_seconds = None + + if args.tile_stream_distance is not None: + settings.tile_stream_distance = args.tile_stream_distance + if args.actor_active_distance is not None: + settings.actor_active_distance = args.actor_active_distance + + world.apply_settings(settings) + + if args.inspect: + inspect(args, client) + if args.list: + list_options(client) + if args.list_blueprints: + list_blueprints(world, args.list_blueprints) + + +if __name__ == '__main__': + + try: + + main() + + except KeyboardInterrupt: + print('\nCancelled by user. Bye!') + except RuntimeError as e: + print(e) diff --git a/PythonAPI/util/extract_spawn_points.py b/PythonAPI/util/extract_spawn_points.py index 647b6e335..d88e94a51 100644 --- a/PythonAPI/util/extract_spawn_points.py +++ b/PythonAPI/util/extract_spawn_points.py @@ -1,93 +1,58 @@ -""" CARLA map spawn points extractor """ +#!/usr/bin/env python + +# Copyright (c) 2024 Computer Vision Center (CVC) at the Universitat Autonoma de +# Barcelona (UAB). 
+# +# This work is licensed under the terms of the MIT license. +# For a copy, see . -from __future__ import print_function import argparse -import logging -import glob import os -import sys - -try: - sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % ( - sys.version_info.major, - sys.version_info.minor, - 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0]) -except IndexError: - pass import carla - -def extract(args): - try: - client = carla.Client(args.host, args.port, worker_threads=1) - client.set_timeout(2.0) - - world = client.get_world() - try: - _map = world.get_map() - except RuntimeError as error: - logging.info('RuntimeError: %s', error) - sys.exit(1) - - if not _map.get_spawn_points(): - logging.info('There are no spawn points available in your map/town.') - logging.info('Please add some Vehicle Spawn Point to your UE4 scene.') - sys.exit(1) - spawn_points = _map.get_spawn_points() - with open(args.output_dir + "/spawn_points.csv", "w", encoding='utf8') as file: - index = 0 - for index, spawn_point in enumerate(spawn_points): - file.write(f'{index},{spawn_point.location.x},{spawn_point.location.y},{spawn_point.location.z}\n') - - finally: - world = None - -# ============================================================================== -# -- main() -------------------------------------------------------------------- -# ============================================================================== - - def main(): argparser = argparse.ArgumentParser( - description='CARLA map spawn points extractor') + description='CARLA Manual Control Client') argparser.add_argument( - '--host', - metavar='H', - default='127.0.0.1', + '--host', metavar='H', default='127.0.0.1', help='IP of the host server (default: 127.0.0.1)') argparser.add_argument( - '-p', '--port', - metavar='P', - default=2000, - type=int, + '-p', '--port', metavar='P', default=2000, type=int, help='TCP port to listen to (default: 2000)') argparser.add_argument( - '-o', 
'--output-dir', - required=True, - help='Output directory path for extraction result') + '-o', '--output-dir', default='', + help='Folder path where the spawn points will be stored (empty string is deactivated)') + argparser.add_argument( + '--show', action='store_true', + help='Show the spawning points in the scene') args = argparser.parse_args() - if args.output_dir is None or not os.path.exists(args.output_dir): + if args.output_dir and not os.path.exists(args.output_dir): print('output directory not found.') + return - logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO) + client = carla.Client(args.host, args.port) + world = client.get_world() + tmap = world.get_map() + spawn_points = tmap.get_spawn_points() - logging.info('listening to server %s:%s', args.host, args.port) + if args.show: + print("Showing the spawn points in the scene") + for i, spawn_point in enumerate(spawn_points): + world.debug.draw_point(spawn_point.location, life_time=-1, size=0.2, color=carla.Color(0,0,128)) + world.debug.draw_string(spawn_point.location + carla.Location(z=3), str(i), life_time=0, color=carla.Color(0, 0, 0)) - print(__doc__) - try: - extract(args) - except: - print('\nAn error has occurred in extraction.') + if args.output_dir: + file_name = args.output_dir + "/spawn_points.csv" + print(f"Saving the spawn points to {file_name}") + with open(file_name, "w", encoding='utf8') as file: + for index, spawn_point in enumerate(spawn_points): + loc = spawn_point.location + rot = spawn_point.rotation + file.write(f'{index},{loc.x},{loc.y},{loc.z},{rot.roll},{rot.pitch},{rot.yaw}\n') if __name__ == '__main__': - - try: - main() - except KeyboardInterrupt: - print('\nCancelled by user. 
Bye!') - except RuntimeError as e: - print(e) + main() diff --git a/PythonAPI/util/manage_environment_objects.py b/PythonAPI/util/manage_environment_objects.py index 56ca22aa2..295e15599 100644 --- a/PythonAPI/util/manage_environment_objects.py +++ b/PythonAPI/util/manage_environment_objects.py @@ -6,9 +6,6 @@ # This work is licensed under the terms of the MIT license. # For a copy, see . -# Allows controlling a vehicle with a keyboard. For a simpler and more -# documented example, please take a look at tutorial.py. - import argparse import sys import carla @@ -49,47 +46,100 @@ OBJECT_LABELS = { def main(): argparser = argparse.ArgumentParser( description='CARLA Manual Control Client') - argparser.add_argument('--host', metavar='H', default='127.0.0.1', help='IP of the host server (default: 127.0.0.1)') - argparser.add_argument('-p', '--port', metavar='P', default=2000, type=int, help='TCP port to listen to (default: 2000)') - argparser.add_argument('-l', '--label', default='Any', help='Layer to load / unload') - argparser.add_argument('--show', default='0.0', type=float, help='Duration of the visualization of objects data. 0 = disabled') - argparser.add_argument('--action', choices=["enable", "disable"], help='Action done to the selected layer') - argparser.add_argument('--summary', action='store_true', help='Print a summary of the amount of objects in each label') + argparser.add_argument( + '--host', metavar='H', default='127.0.0.1', + help='IP of the host server (default: 127.0.0.1)') + argparser.add_argument( + '-p', '--port', metavar='P', default=2000, type=int, + help='TCP port to listen to (default: 2000)') + argparser.add_argument( + '-l', '--label', default='Any', + help='Layer to manage') + argparser.add_argument( + '--id', default=None, type=int, + help='ID to manage') + argparser.add_argument( + '--show', default=None, type=float, + help='Duration of the visualization of objects data. 
0 = disabled') + argparser.add_argument( + '--action', choices=["enable", "disable"], + help='Action done to the selected layer') + argparser.add_argument( + '--summary', action='store_true', + help='Print a summary of the amount of objects in each label') args = argparser.parse_args() client = carla.Client(args.host, args.port) world = client.get_world() + if args.id and args.label != 'Any': + print("Cannot use '--id' and '--label' at the same time") + print("Use '--id' to manage one specific object or '--label' for all the ones part of the label") + if args.summary: + print("Showcasing a summary of the labels and amount of object in them") for v in list(OBJECT_LABELS.values()): print(f"{v} - {len(world.get_environment_objects(v))}") sys.exit() - try: - objects = world.get_environment_objects(OBJECT_LABELS[args.label]) - except KeyError: - print(f"CityObject label '{args.label}' doesn't exist. The possible labels are:") - print(list(OBJECT_LABELS.keys())) - sys.exit() + # Manage one object + if args.id: + objects = list(filter(lambda k: k.id == args.id, world.get_environment_objects())) + if not objects: + print(f"Couldn't find an object with id '{args.id}'. Stopping") + sys.exit() + elif len(objects) > 1: + print(f"Found more than one object with id '{args.id}'. Stopping") + sys.exit() + object = objects[0] - color = carla.Color(0, 0, 128) - - if args.show: - for i, obj in enumerate(objects): - obj_location = obj.transform.location - obj_bb = obj.bounding_box - text = f"[{obj.id}] {obj.name}" + if args.show: + print(f"Showing the data of the environment object with id {args.id}. 
Stopping") + obj_location = object.transform.location + obj_bb = object.bounding_box + text = f"[{object.id}] {object.name}" text_location = carla.Location(x=obj_bb.location.x, y=obj_bb.location.y, z=obj_bb.location.z) - text_location += 1.2*obj_bb.extent.z * obj.transform.get_up_vector() + text_location += 1.2*obj_bb.extent.z * object.transform.get_up_vector() - world.debug.draw_point(obj_location, life_time=args.show, size=0.05, color=color) - world.debug.draw_box(obj_bb, obj_bb.rotation, life_time=args.show, thickness=0.05, color=color) - world.debug.draw_string(text_location, text, life_time=args.show, color=color) + world.debug.draw_point(obj_location, life_time=args.show, size=0.05, color=carla.Color(0, 0, 200)) + world.debug.draw_box(obj_bb, obj_bb.rotation, life_time=args.show, thickness=0.05, color=carla.Color(0, 0, 200)) + world.debug.draw_string(text_location, text, life_time=args.show, color=carla.Color(0, 0, 200)) - if args.action == 'enable': - world.enable_environment_objects([obj.id for obj in objects], True) - elif args.action == 'disable': - world.enable_environment_objects([obj.id for obj in objects], False) + if args.action == 'enable': + print(f"Enabling the enviroment object with id {args.id}") + world.enable_environment_objects([object.id], True) + elif args.action == 'disable': + print(f"Disabling the enviroment object with id {args.id}") + world.enable_environment_objects([object.id], False) + + # Manage an object label + else: + try: + objects = world.get_environment_objects(OBJECT_LABELS[args.label]) + except KeyError: + print(f"CityObject label '{args.label}' doesn't exist. 
The possible labels are:") + print(list(OBJECT_LABELS.keys())) + sys.exit() + + if args.show: + print(f"Showing all the data of the environment objects with label {args.label}") + for obj in objects: + obj_location = obj.transform.location + obj_bb = obj.bounding_box + text = f"[{obj.id}] {obj.name}" + text_location = carla.Location(x=obj_bb.location.x, y=obj_bb.location.y, z=obj_bb.location.z) + text_location += 1.2*obj_bb.extent.z * obj.transform.get_up_vector() + + world.debug.draw_point(obj_location, life_time=args.show, size=0.05, color=carla.Color(0, 0, 200)) + world.debug.draw_box(obj_bb, obj_bb.rotation, life_time=args.show, thickness=0.05, color=carla.Color(0, 0, 200)) + world.debug.draw_string(text_location, text, life_time=args.show, color=carla.Color(0, 0, 200)) + + if args.action == 'enable': + print(f"Enabling the enviroment objects with label {args.label}") + world.enable_environment_objects([obj.id for obj in objects], True) + elif args.action == 'disable': + print(f"Disabling enviroment objects with label {args.label}") + world.enable_environment_objects([obj.id for obj in objects], False) if __name__ == '__main__': diff --git a/PythonAPI/util/manage_lights.py b/PythonAPI/util/manage_lights.py deleted file mode 100644 index 0ef0b12d0..000000000 --- a/PythonAPI/util/manage_lights.py +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de -# Barcelona (UAB). -# -# This work is licensed under the terms of the MIT license. -# For a copy, see . - -# Allows controlling a vehicle with a keyboard. For a simpler and more -# documented example, please take a look at tutorial.py. 
- -import argparse -import sys -import carla - - -LIGHT_GROUP = { - 'NONE': carla.LightGroup.NONE, - 'Vehicle': carla.LightGroup.Vehicle, - 'Street': carla.LightGroup.Street, - 'Building': carla.LightGroup.Building, - 'Other': carla.LightGroup.Other, -} - -def main(): - argparser = argparse.ArgumentParser( - description='CARLA Manual Control Client') - argparser.add_argument('--host', metavar='H', default='127.0.0.1', help='IP of the host server (default: 127.0.0.1)') - argparser.add_argument('-p', '--port', metavar='P', default=2000, type=int, help='TCP port to listen to (default: 2000)') - argparser.add_argument('-g', '--group', default='NONE', help='Light group') - - args = argparser.parse_args() - - client = carla.Client(args.host, args.port) - world = client.get_world() - light_manager = world.get_lightmanager() - - try: - group = LIGHT_GROUP[args.group] - except KeyError: - print(f"Light group '{args.group}' doesn't exist. The possible ones are:") - print(list(LIGHT_GROUP.keys())) - sys.exit() - - lights = light_manager.get_all_lights(group) - - # get_color(lights) - # get_intensity(lights) - # get_light_group(lights) - # get_light_state(lights) - # get_turned_off_lights(group) - # get_turned_on_lights(group) - - # light_manager.set_active(lights, [True] * len(lights)) - light_manager.set_color(lights, carla.Color(255,0,0)) - # light_manager.set_intensity(lights, 1000) - # light_manager.set_light_state(lights, ) - # light_manager.turn_on(lights) - # light_manager.turn_off(lights) - -if __name__ == '__main__': - main() diff --git a/PythonAPI/util/manage_traffic_light.py b/PythonAPI/util/manage_traffic_light.py new file mode 100644 index 000000000..fe154156c --- /dev/null +++ b/PythonAPI/util/manage_traffic_light.py @@ -0,0 +1,110 @@ +#!/usr/bin/env python + +# Copyright (c) 2024 Computer Vision Center (CVC) at the Universitat Autonoma de +# Barcelona (UAB). +# +# This work is licensed under the terms of the MIT license. +# For a copy, see . 
+ +import argparse +import sys + +import carla + + +TRAFFIC_LIGHT_STATES = { + 'Green': carla.TrafficLightState.Green, + 'Yellow': carla.TrafficLightState.Yellow, + 'Red': carla.TrafficLightState.Red, + 'Off': carla.TrafficLightState.Off, +} + +def get_traffic_light(world, tl_id): + tmap = world.get_map() + topology = tmap.get_topology() + junction_ids = [] + junctions = [] + for (entry_wp, _) in topology: + if entry_wp.is_junction and entry_wp.junction_id not in junction_ids: + junctions.append(entry_wp.get_junction()) + junction_ids.append(entry_wp.junction_id) + + tls = [] + for junction in junctions: + tls.extend(world.get_traffic_lights_in_junction(junction.id)) + + for tl in tls: + if tl.get_opendrive_id() == tl_id: + return tl + + +def main(): + argparser = argparse.ArgumentParser( + description='CARLA Manual Control Client') + argparser.add_argument( + '--host', metavar='H', default='127.0.0.1', + help='IP of the host server (default: 127.0.0.1)') + argparser.add_argument( + '-p', '--port', metavar='P', default=2000, type=int, + help='TCP port to listen to (default: 2000)') + argparser.add_argument( + '--id', required=True, + help='OpenDrive ID of the traffic light to control') + argparser.add_argument( + '-gt', '--green-time', default=-1, type=int, + help='Sets the green time of the traffic light') + argparser.add_argument( + '-yt', '--yellow-time', default=-1, type=int, + help='Sets the yellow time of show the traffic light') + argparser.add_argument( + '-rt', '--red-time', default=-1, type=int, + help='Sets the red time of the traffic light') + argparser.add_argument( + '-st', '--state', default="NONE", + help='Sets the state time of the traffic light') + argparser.add_argument( + '-f', '--freeze', action="store_true", + help='Freezes the traffic light') + argparser.add_argument( + '-rg', '--reset-group', action="store_true", + help="Resets the traffic light's group") + args = argparser.parse_args() + + client = carla.Client(args.host, args.port) + world 
= client.get_world() + + traffic_light = get_traffic_light(world, args.id) + if traffic_light is None: + print(f"Couldn't find a traffic light with OpenDrive ID {args.id}") + sys.exit() + + if args.green_time >= 0: + print(f"Changing the green time to {args.green_time}") + traffic_light.set_green_time(args.green_time) + if args.yellow_time >= 0: + print(f"Changing the yellow time to {args.yellow_time}") + traffic_light.set_yellow_time(args.yellow_time) + if args.red_time >= 0: + print(f"Changing the red time to {args.red_time}") + traffic_light.set_red_time(args.red_time) + + if args.state != 'NONE': + try: + state = TRAFFIC_LIGHT_STATES[args.state] + print(f"Changing the state to {state}") + traffic_light.set_state(state) + except KeyError: + print(f"Light State '{args.state}' doesn't exist. The possible ones are:") + print(list(TRAFFIC_LIGHT_STATES.keys())) + sys.exit() + + if args.reset_group: + print("Resetting the traffic light group") + traffic_light.reset_group() + + if args.freeze: + print("Freezing all traffic lights") + traffic_light.freeze(True) + +if __name__ == '__main__': + main() diff --git a/PythonAPI/util/osm_to_xodr.py b/PythonAPI/util/osm_to_xodr.py index ecfadd204..5de2af782 100644 --- a/PythonAPI/util/osm_to_xodr.py +++ b/PythonAPI/util/osm_to_xodr.py @@ -1,18 +1,17 @@ +#!/usr/bin/env python + +# Copyright (c) 2024 Computer Vision Center (CVC) at the Universitat Autonoma de +# Barcelona (UAB). +# +# This work is licensed under the terms of the MIT license. +# For a copy, see . + """ Convert OpenStreetMap file to OpenDRIVE file. 
""" import argparse -import glob import os import sys -try: - sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % ( - sys.version_info.major, - sys.version_info.minor, - 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0]) -except IndexError: - pass - import carla @@ -60,30 +59,22 @@ def main(): argparser = argparse.ArgumentParser( description=__doc__) argparser.add_argument( - '-i', '--input-path', - required=True, - metavar='OSM_FILE_PATH', + '-i', '--input-path', required=True, metavar='OSM_FILE_PATH', help='set the input OSM file path') argparser.add_argument( - '-o', '--output-path', - required=True, - metavar='XODR_FILE_PATH', + '-o', '--output-path', required=True, metavar='XODR_FILE_PATH', help='set the output XODR file path') argparser.add_argument( - '--lane-width', - default=6.0, + '--lane-width', default=6.0, help='width of each road lane in meters') argparser.add_argument( - '--traffic-lights', - action='store_true', + '--traffic-lights', action='store_true', help='enable traffic light generation from OSM data') argparser.add_argument( - '--all-junctions-lights', - action='store_true', + '--all-junctions-lights', action='store_true', help='set traffic lights for all junctions') argparser.add_argument( - '--center-map', - action='store_true', + '--center-map', action='store_true', help='set center of map to the origin coordinates') if len(sys.argv) < 2: diff --git a/PythonAPI/util/performance_benchmark.py b/PythonAPI/util/performance_benchmark.py deleted file mode 100755 index 032f1f9a9..000000000 --- a/PythonAPI/util/performance_benchmark.py +++ /dev/null @@ -1,595 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright (c) 2019 Intel Labs. -# authors: German Ros (german.ros@intel.com) -# -# This work is licensed under the terms of the MIT license. -# For a copy, see . - -""" -This is a benchmarking script for CARLA. It serves to analyze the performance of CARLA in different scenarios and -conditions, for both sensors and traffic. 
- -Please, make sure you install the following dependencies: - - * python -m pip install -U py-cpuinfo - * python -m pip install psutil - * python -m pip install python-tr - * python -m pip install gpuinfo - -""" - -# @todo Include this file in the Pylint checks. -# pylint: skip-file - -import sys - -if sys.version_info[0] < 3: - print('This script is only available for Python 3') - sys.exit(1) - -from tr import tr -import argparse -import cpuinfo -import glob -import math -import numpy as np -import os -import psutil -import pygame -import shutil -import GPUtil -import threading -import time -import logging - -try: - sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % ( - sys.version_info.major, - sys.version_info.minor, - 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0]) -except IndexError: - pass - -import carla - -# ====================================================================================================================== -# -- Global variables. So sorry... ------------------------------------------------------------------------------------- -# ====================================================================================================================== -sensors_callback = [] - -def define_weather(): - list_weather = [] - - if args.tm: - weather00 = { 'parameter' : carla.WeatherParameters.ClearNoon, 'name': 'ClearNoon'} - - list_weather.append(weather00) - - else: - weather00 = { 'parameter' : carla.WeatherParameters.ClearNoon, 'name' : 'ClearNoon'} - weather01 = { 'parameter' : carla.WeatherParameters.CloudyNoon, 'name' : 'CloudyNoon'} - weather02 = { 'parameter' : carla.WeatherParameters.SoftRainSunset, 'name' : 'SoftRainSunset'} - - list_weather.append(weather00) - list_weather.append(weather01) - list_weather.append(weather02) - - if args.weather is not None: - try: - new_list = [list_weather[int(i)] for i in args.weather] - list_weather = new_list - except IndexError as error: - print("Warning!! 
The list of types of weather introduced is not valid. Using all available.") - - return list_weather - - -def define_sensors(): - list_sensor_specs = [] - - if args.tm: - sensors00 = [{'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, - 'width': 300, 'height': 200, 'fov': 100, 'label': '0. cam-300x200'}] - - list_sensor_specs.append(sensors00) - - else: - sensors00 = [{'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, - 'width': 1920, 'height': 1080, 'fov': 100, 'label': '0. cam-1920x1080'}] - - sensors01 = [{'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, - 'width': 1920, 'height': 1080, 'fov': 100, 'label': '1. cam-1920x1080'}, - {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 180.0, - 'width': 1920, 'height': 1080, 'fov': 100, 'label': 'cam-1920x1080'}] - - sensors02 = [{'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, - 'width': 1920, 'height': 1080, 'fov': 100, 'label': '2. cam-1920x1080'}, - {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 90.0, - 'width': 1920, 'height': 1080, 'fov': 100, 'label': 'cam-1920x1080'}, - {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 180.0, - 'width': 1920, 'height': 1080, 'fov': 100, 'label': 'cam-1920x1080'}] - - sensors03 = [{'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0, - 'width': 1920, 'height': 1080, 'fov': 100, 'label': '3. 
cam-1920x1080'}, - {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 90.0, - 'width': 1920, 'height': 1080, 'fov': 100, 'label': 'cam-1920x1080'}, - {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 180.0, - 'width': 1920, 'height': 1080, 'fov': 100, 'label': 'cam-1920x1080'}, - {'type': 'sensor.camera.rgb', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0, 'yaw': 270.0, - 'width': 1920, 'height': 1080, 'fov': 100, 'label': 'cam-1920x1080'}] - - sensors04 = [{'type': 'sensor.lidar.ray_cast', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'yaw': 0.0, 'pitch': 0.0, 'roll': 0.0, - 'pts_per_sec': '100000', 'label': '4. LIDAR: 100k'}] - - sensors05 = [{'type': 'sensor.lidar.ray_cast', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'yaw': 0.0, 'pitch': 0.0, 'roll': 0.0, - 'pts_per_sec': '500000', 'label': '5. LIDAR: 500k'}] - - sensors06 = [{'type': 'sensor.lidar.ray_cast', 'x': 0.7, 'y': 0.0, 'z': 1.60, 'yaw': 0.0, 'pitch': 0.0, 'roll': 0.0, - 'pts_per_sec': '1000000', 'label': '6. LIDAR: 1M'}] - - list_sensor_specs.append(sensors00) - list_sensor_specs.append(sensors01) - list_sensor_specs.append(sensors02) - list_sensor_specs.append(sensors03) - list_sensor_specs.append(sensors04) - list_sensor_specs.append(sensors05) - list_sensor_specs.append(sensors06) - - if args.sensors is not None: - try: - new_list = [list_sensor_specs[int(i)] for i in args.sensors] - list_sensor_specs = new_list - except IndexError as error: - print("Warning!! The list of sensors introduced is not valid. 
Using all available.") - - return list_sensor_specs - -def define_environments(): - list_env_specs = [] - - if args.tm: - env00 = {'vehicles': 10, 'walkers': 0} - env01 = {'vehicles': 50, 'walkers': 50} - env02 = {'vehicles': 250, 'walkers': 0} - env03 = {'vehicles': 150, 'walkers': 50} - - list_env_specs.append(env00) - list_env_specs.append(env01) - list_env_specs.append(env02) - list_env_specs.append(env03) - - else: - env00 = {'vehicles': 1, 'walkers': 0} - - list_env_specs.append(env00) - - return list_env_specs - -def define_maps(client): - maps = [m.replace('/Game/Carla/Maps/', '') for m in client.get_available_maps()] - maps = sorted(maps) - - if args.maps is not None: - all_good = all(elem in maps for elem in args.maps) - if all_good: - maps = sorted(args.maps) - else: - print("Warning!! The list of maps introduced is not valid. Using all available.") - - return maps -class CallBack(object): - def __init__(self): - self._lock = threading.Lock() - self._pygame_clock = pygame.time.Clock() - self._current_fps = 0 - - def __call__(self, data): - self._pygame_clock.tick() - self._current_fps = self._pygame_clock.get_fps() - - def get_fps(self): - with self._lock: - return self._current_fps - - -def create_environment(world, sensors, n_vehicles, n_walkers, spawn_points, client, tick): - global sensors_callback - sensors_ret = [] - blueprint_library = world.get_blueprint_library() - - # setup sensors - for sensor_spec in sensors: - bp = blueprint_library.find(sensor_spec['type']) - - if sensor_spec['type'].startswith('sensor.camera'): - bp.set_attribute('image_size_x', str(sensor_spec['width'])) - bp.set_attribute('image_size_y', str(sensor_spec['height'])) - bp.set_attribute('fov', str(sensor_spec['fov'])) - sensor_location = carla.Location( - x=sensor_spec['x'], - y=sensor_spec['y'], - z=sensor_spec['z']) - sensor_rotation = carla.Rotation( - pitch=sensor_spec['pitch'], - roll=sensor_spec['roll'], - yaw=sensor_spec['yaw']) - - elif 
sensor_spec['type'].startswith('sensor.lidar'): - bp.set_attribute('range', '200') - bp.set_attribute('rotation_frequency', '10') - bp.set_attribute('channels', '32') - bp.set_attribute('upper_fov', '15') - bp.set_attribute('lower_fov', '-30') - bp.set_attribute('points_per_second', str(sensor_spec['pts_per_sec'])) - sensor_location = carla.Location( - x=sensor_spec['x'], - y=sensor_spec['y'], - z=sensor_spec['z']) - sensor_rotation = carla.Rotation( - pitch=sensor_spec['pitch'], - roll=sensor_spec['roll'], - yaw=sensor_spec['yaw']) - - elif sensor_spec['type'].startswith('sensor.other.gnss'): - sensor_location = carla.Location(x=sensor_spec['x'], y=sensor_spec['y'], z=sensor_spec['z']) - sensor_rotation = carla.Rotation() - - # create sensor - sensor_transform = carla.Transform(sensor_location, sensor_rotation) - sensor = world.spawn_actor(bp, sensor_transform) - - # add callbacks - sc = CallBack() - sensor.listen(sc) - - sensors_callback.append(sc) - sensors_ret.append(sensor) - - vehicles_list = [] - walkers_list = [] - all_id = [] - - blueprint = world.get_blueprint_library().filter('vehicle.audi.a2')[0] - walker_bp = world.get_blueprint_library().filter("walker.pedestrian.0001")[0] - - # @todo cannot import these directly. 
- SpawnActor = carla.command.SpawnActor - SetAutopilot = carla.command.SetAutopilot - FutureActor = carla.command.FutureActor - - # -------------- - # Spawn vehicles - # -------------- - batch = [] - for num, transform in enumerate(spawn_points): - if num >= n_vehicles: - break - blueprint.set_attribute('role_name', 'autopilot') - batch.append(SpawnActor(blueprint, transform).then(SetAutopilot(FutureActor, True))) - - for response in client.apply_batch_sync(batch, False): - if response.error: - logging.error(response.error) - else: - vehicles_list.append(response.actor_id) - - # ------------- - # Spawn Walkers - # ------------- - # some settings - percentagePedestriansRunning = 0.0 # how many pedestrians will run - percentagePedestriansCrossing = 0.0 # how many pedestrians will walk through the road - if n_walkers > 0: - # 1. take all the random locations to spawn - spawn_points = [] - for i in range(n_walkers): - spawn_point = carla.Transform() - loc = world.get_random_location_from_navigation() - if (loc != None): - spawn_point.location = loc - spawn_points.append(spawn_point) - # 2. we spawn the walker object - batch = [] - walker_speed = [] - for spawn_point in spawn_points: - # set as not invincible - if walker_bp.has_attribute('is_invincible'): - walker_bp.set_attribute('is_invincible', 'false') - # set the max speed - if walker_bp.has_attribute('speed'): - # walking - walker_speed.append(walker_bp.get_attribute('speed').recommended_values[1]) - else: - print("Walker has no speed") - walker_speed.append(0.0) - batch.append(SpawnActor(walker_bp, spawn_point)) - results = client.apply_batch_sync(batch, True) - walker_speed2 = [] - for i in range(len(results)): - if results[i].error: - logging.error(results[i].error) - else: - walkers_list.append({"id": results[i].actor_id}) - walker_speed2.append(walker_speed[i]) - walker_speed = walker_speed2 - # 3. 
we spawn the walker controller - batch = [] - walker_controller_bp = world.get_blueprint_library().find('controller.ai.walker') - for i in range(len(walkers_list)): - batch.append(SpawnActor(walker_controller_bp, carla.Transform(), walkers_list[i]["id"])) - results = client.apply_batch_sync(batch, True) - for i in range(len(results)): - if results[i].error: - logging.error(results[i].error) - else: - walkers_list[i]["con"] = results[i].actor_id - # 4. we put altogether the walkers and controllers id to get the objects from their id - for i in range(len(walkers_list)): - all_id.append(walkers_list[i]["con"]) - all_id.append(walkers_list[i]["id"]) - - all_actors = world.get_actors(all_id) - - # ensures client has received the last transform of the walkers we have just created - tick() - - # 5. initialize each controller and set target to walk to (list is [controler, actor, controller, actor ...]) - # set how many pedestrians can cross the road - world.set_pedestrians_cross_factor(percentagePedestriansCrossing) - for i in range(0, len(all_id), 2): - # start walker - all_actors[i].start() - # set walk to random point - all_actors[i].go_to_location(world.get_random_location_from_navigation()) - # max speed - all_actors[i].set_max_speed(float(walker_speed[int(i/2)])) - - print('Spawned %d vehicles and %d walkers.' 
% (len(vehicles_list), len(walkers_list))) - - - return vehicles_list, walkers_list, all_id, all_actors, sensors_ret - - -# ====================================================================================================================== -# -- Benchmarking functions -------------------------------------------------------------------------------------------- -# ====================================================================================================================== - -def set_world_settings(world, args = None): - - if args == None: - settings = world.get_settings() - settings.synchronous_mode = False - settings.fixed_delta_seconds = 0.0 - settings.no_rendering_mode = False - world.apply_settings(settings) - else: - settings = world.get_settings() - settings.synchronous_mode = args.sync - settings.fixed_delta_seconds = args.fixed_dt if args.sync else 0.0 - settings.no_rendering_mode = args.no_render_mode - world.apply_settings(settings) - -def run_benchmark(world, sensors, n_vehicles, n_walkers, client, debug=False): - global sensors_callback - - spawn_points = world.get_map().get_spawn_points() - n = min(n_vehicles, len(spawn_points)) - list_fps = [] - sensor_list = None - - tick = world.tick if args.sync else world.wait_for_tick - set_world_settings(world, args) - - vehicles_list, walkers_list, all_id, all_actors, sensors_ret = create_environment(world, sensors, n, n_walkers, spawn_points, client, tick) - - if sensors_ret: - sensor_list = sensors_ret - - # Allow some time for the server to finish the initialization - for _i in range(0, 50): - tick() - - ticks = 0 - while ticks < int(args.ticks): - _ = tick() - if debug: - print("== Samples {} / {}".format(ticks + 1, args.ticks)) - - min_fps = float('inf') - for sc in sensors_callback: - fps = sc.get_fps() - if fps < min_fps: - min_fps = fps - if math.isinf(min_fps): - min_fps = 0 - list_fps.append(min_fps) - - ticks += 1 - - for sensor in sensor_list: - sensor.stop() - sensor.destroy() - 
sensors_callback.clear() - - print('Destroying %d vehicles.\n' % len(vehicles_list)) - client.apply_batch([carla.command.DestroyActor(x) for x in vehicles_list]) - - # stop walker controllers (list is [controller, actor, controller, actor ...]) - for i in range(0, len(all_id), 2): - all_actors[i].stop() - - print('\ndestroying %d walkers' % len(walkers_list)) - client.apply_batch([carla.command.DestroyActor(x) for x in all_id]) - - set_world_settings(world) - - return list_fps - - -def compute_mean_std(list_values): - np_values = np.array(list_values) - - mean = np.mean(np_values) - std = np.std(np_values) - - return mean, std - - -def serialize_records(records, system_specs, filename): - with open(filename, 'w+') as fd: - s = "| Town | Sensors | Weather | # of Vehicles | # of Walkers | Samples | Mean FPS | Std FPS |\n" - s += "| ----------- | ----------- | ----------- | ----------- | ----------- | ----------- | ----------- |\n" - fd.write(s) - - for sensor_key in sorted(records.keys()): - list_records = records[sensor_key] - for record in list_records: - s = "| {} | {} | {} | {} | {} | {} | {:03.2f} | {:03.2f} |\n".format(record['town'], - record['sensors'], - record['weather'], - record['n_vehicles'], - record['n_walkers'], - record['samples'], - record['fps_mean'], - record['fps_std']) - fd.write(s) - - s = "\n| Global mean FPS | Global std FPS |\n" - s += "| **{:03.2f}** | **{:03.2f}** |\n".format(*get_total(records)) - fd.write(s) - - s = "Table: {}.\n".format(system_specs) - fd.write(s) - - -def get_total(records): - record_vals = [item for sublist in records.values() for item in sublist] - total_mean_fps = sum([r['fps_mean'] for r in record_vals]) / len(record_vals) - total_mean_std = sum([r['fps_std'] for r in record_vals]) / len(record_vals) - return total_mean_fps, total_mean_std - - -def get_system_specs(): - str_system = "" - cpu_info = cpuinfo.get_cpu_info() - str_system += "CPU {} {}. 
".format(cpu_info.get('brand', 'Unknown'), cpu_info.get('family', 'Unknown')) - - memory_info = psutil.virtual_memory() - str_system += "{:03.2f} GB RAM memory. ".format(memory_info.total / (1024 * 1024 * 1024)) - nvidia_cmd = shutil.which("nvidia-smi") - if nvidia_cmd: - str_system += "GPU " - gpu_info = GPUtil.getGPUs() - for gpu in gpu_info: - str_system += "{} ".format(gpu.name) - - return str_system - - -def show_benchmark_scenarios(maps): - print("Available maps") - for map in sorted(maps): - print(" - %s" % map) - print("Available sensors") - for i,sensors in enumerate(define_sensors()): - sensor_str = "" - for sensor in sensors: - sensor_str += (sensor['label'] + " ") - print(' - %s' % (sensor_str)) - print("Available types of weather") - for i,weather in enumerate(define_weather()): - print(' - %i: %s' % (i, weather['name'])) - print("Available Enviroments") - for i,env in enumerate(define_environments()): - print(' - %i: %s' % (i, str(env))) - - -def main(args): - - try: - client = carla.Client(args.host, int(args.port)) - client.set_timeout(150.0) - pygame.init() - - records = {} - maps = define_maps(client) - - if args.show_scenarios: - show_benchmark_scenarios(maps) - return - - #maps = ["Town04_Opt"] - - for town in maps: - world = client.load_world(town) - time.sleep(5) - - # set to async mode - set_world_settings(world) - - # spectator pointing to the sky to reduce rendering impact - spectator = world.get_spectator() - spectator.set_transform(carla.Transform(carla.Location(z=500), carla.Rotation(pitch=90))) - - for weather in define_weather(): - world.set_weather(weather["parameter"]) - for env in define_environments(): - for sensors in define_sensors(): - list_fps = run_benchmark(world, sensors, env["vehicles"], env["walkers"], client) - mean, std = compute_mean_std(list_fps) - sensor_str = "" - for sensor in sensors: - sensor_str += (sensor['label'] + " ") - - record = { - 'town': town, - 'sensors': sensor_str, - 'weather': weather["name"], - 
'n_vehicles': env["vehicles"], - 'n_walkers': env["walkers"], - 'samples': args.ticks, - 'fps_mean': mean, - 'fps_std': std - } - - env_str = str(env["vehicles"]) + str(env["walkers"]) - - if env_str not in records: - records[env_str] = [] - records[env_str].append(record) - print(record) - - system_specs = get_system_specs() - serialize_records(records, system_specs, args.file) - pygame.quit() - - except KeyboardInterrupt: - set_world_settings(world) - client.reload_world() - print('\nCancelled by user. Bye!') - - -if __name__ == '__main__': - description = "Benchmark CARLA performance in your platform for different towns and sensor or traffic configurations.\n" - - parser = argparse.ArgumentParser(description=description) - parser.add_argument('--host', default='localhost', help='IP of the host server (default: localhost)') - parser.add_argument('--port', default='2000', help='TCP port to listen to (default: 2000)') - parser.add_argument('--file', type=str, help='Write results into a txt file', default="benchmark.md") - parser.add_argument('--tm', action='store_true', help='Switch to traffic manager benchmark') - parser.add_argument('--ticks', default=100, help='Number of ticks for each scenario (default: 100)') - parser.add_argument('--sync', default=True, action='store_true', help='Synchronous mode execution (default)') - parser.add_argument('--async', dest='sync', action='store_false', help='Asynchronous mode execution') - parser.add_argument('--fixed_dt', type=float, default=0.05, help='Time interval for the simulator in synchronous mode (default: 0.05)') - parser.add_argument('--render_mode', dest='no_render_mode', action='store_false', help='Execute with spectator') - parser.add_argument('--no_render_mode', default=True, action='store_true', help='Execute in no rendering mode (default)') - parser.add_argument('--show_scenarios', default=False, action='store_true', help='Show the scenarios to benchmark and return (default=False)') - 
parser.add_argument('--sensors', nargs="+", default=None, help='List of sensors to benchmark, by default all defined ones') - parser.add_argument('--maps', nargs="+", default=None, help='List of maps to benchmark, by default all defined ones') - parser.add_argument('--weather', nargs="+", default=None, help='List of weather types to benchmark, by default all defined ones') - - args = parser.parse_args() - - main(args) - diff --git a/PythonAPI/util/recorder_comparer.py b/PythonAPI/util/recorder_comparer.py new file mode 100755 index 000000000..5984cf120 --- /dev/null +++ b/PythonAPI/util/recorder_comparer.py @@ -0,0 +1,133 @@ +#!/usr/bin/env python + +# Copyright (c) 2024 Computer Vision Center (CVC) at the Universitat Autonoma de +# Barcelona (UAB). +# +# This work is licensed under the terms of the MIT license. +# For a copy, see . + +"""Compare two recorder files (gotten from 'show_recorder_file_info') line by line""" + +import argparse + +def main(): + + argparser = argparse.ArgumentParser( + description=__doc__) + argparser.add_argument( + '--host', default='127.0.0.1', + help='IP of the host server (default: 127.0.0.1)') + argparser.add_argument( + '-p', '--port', default=2000, type=int, + help='TCP port to listen to (default: 2000)') + argparser.add_argument( + '-f1', '--file1', + help='recorder filename (test1.txt)') + argparser.add_argument( + '-f2', '--file2', + help='recorder filename (test2.txt)') + + args = argparser.parse_args() + + identical = True + FRAME_SPACES = 5 + TIME_SPACES = 7 + + with open(args.file1, 'r') as fd: + recorder_1_str = fd.read() + with open(args.file2, 'r') as fd: + recorder_2_str = fd.read() + + list_of_rows_1 = recorder_1_str.split("\n")[3:] + list_of_rows_2 = recorder_2_str.split("\n")[3:] + len_1 = len(list_of_rows_1) + len_2 = len(list_of_rows_2) + + if len_1 < 10 or len_2 < 10: + raise ValueError('Recorder have less than 10 lines. 
This might be due to an error') + + ignored_ids_1 = [] + ignored_ids_2 = [] + + frame_1 = 0 + frame_2 = 0 + time_1 = 0 + time_2 = 0 + + print("") + try: + for i in range(len_1): + row1 = list_of_rows_1[i].strip() + row2 = list_of_rows_2[i].strip() + + row1_id = None + row2_id = None + + # Get the recorder's frame info. Part of the output when a difference is detected + if row1.startswith('Frame '): + frame_1 = row1.split(' ')[1] + time_1 = row1.split(' ')[3] + if row2.startswith('Frame '): + frame_2 = row2.split(' ')[1] + time_2 = row2.split(' ')[3] + + # Ignore some lines + if row1.startswith("Parenting") and row2.startswith("Parenting"): # Based on ids, which vary + continue + elif row1.startswith("Collision") and row2.startswith("Collision"): # Based on ids, which vary + continue + elif row1.startswith("Destroy"): # No more info apart from the id, which vary + continue + elif row1.startswith("Current platform time:"): # Real time, so it can vary without affecting determinism + continue + + # Ignoring the Id part, as it can differ between simulations + elif row1.startswith("Id:"): + _, row1_id, *row1_info = row1.split(" ") + _, row2_id, *row2_info = row2.split(" ") + + # Same with Create and Destroy, but + elif row1.startswith("Create"): + _, row1_id, *row1_info = row1.split(" ") + _, row2_id, *row2_info = row2.split(" ") + + row1_id = row1_id[:-1] + row2_id = row2_id[:-1] + + # Ignore the spectator, as it can vary throughout simulations + if 'spectator' in row1: + ignored_ids_1.append(row1_id) + if 'spectator' in row2: + ignored_ids_2.append(row2_id) + + else: + row1_info = row1.split(" ") + row2_info = row2.split(" ") + + # Check if the ID has to be ignored + if row1_id and row2_id and row1_id in ignored_ids_1 and row2_id in ignored_ids_2: + continue + + # Compare the data + if row1_info != row2_info: + identical = False + print(f" --------------------------------------------------------------------------------- ") + print(f" Line being compared: 
\033[1m{i+4}\033[0m") + print(f" [Frame \033[1m{frame_1}{' ' * max(0, FRAME_SPACES-len(frame_1))}\033[0m - Time \033[1m{time_1}{' ' * max(0, TIME_SPACES-len(time_1))}\033[0m] -> (\033[1m" + row1 + "\033[0m)") + print(f" [Frame \033[1m{frame_2}{' ' * max(0, FRAME_SPACES-len(frame_2))}\033[0m - Time \033[1m{time_2}{' ' * max(0, TIME_SPACES-len(time_2))}\033[0m] -> (\033[1m" + row2 + "\033[0m)") + input() + + if identical: + print("Both files are identical") + else: + print("No more lines to compare") + + except KeyboardInterrupt: + pass + + except Exception as e: + print(f"Detected an exception at line {i+4}") + print(e) + +if __name__ == '__main__': + main() diff --git a/PythonAPI/util/show_crosswalks.py b/PythonAPI/util/show_crosswalks.py index 8553b0202..d07957835 100644 --- a/PythonAPI/util/show_crosswalks.py +++ b/PythonAPI/util/show_crosswalks.py @@ -1,16 +1,13 @@ #!/usr/bin/env python -# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de +# Copyright (c) 2024 Computer Vision Center (CVC) at the Universitat Autonoma de # Barcelona (UAB). # # This work is licensed under the terms of the MIT license. # For a copy, see . -# Allows controlling a vehicle with a keyboard. For a simpler and more -# documented example, please take a look at tutorial.py. 
import argparse -import sys import math import carla @@ -56,9 +53,15 @@ def get_traffic_light_bbs(traffic_light): def main(): argparser = argparse.ArgumentParser( description='CARLA Manual Control Client') - argparser.add_argument('--host', metavar='H', default='127.0.0.1', help='IP of the host server (default: 127.0.0.1)') - argparser.add_argument('-p', '--port', metavar='P', default=2000, type=int, help='TCP port to listen to (default: 2000)') - argparser.add_argument('--show', default='100', type=float, help='Duration of the visualization of the junctions') + argparser.add_argument( + '--host', metavar='H', default='127.0.0.1', + help='IP of the host server (default: 127.0.0.1)') + argparser.add_argument( + '-p', '--port', metavar='P', default=2000, type=int, + help='TCP port to listen to (default: 2000)') + argparser.add_argument( + '--show', default='100', type=float, + help='Duration of the visualization of the junctions') args = argparser.parse_args() client = carla.Client(args.host, args.port) @@ -66,7 +69,6 @@ def main(): tmap = world.get_map() crosswalks = tmap.get_crosswalks() - # crosswalks = crosswalks[0:5] for i in range(len(crosswalks)): world.debug.draw_string(crosswalks[i] + carla.Location(z=1), str(i), life_time=args.show, color=carla.Color(0, 0, 0)) @@ -84,6 +86,5 @@ def main(): world.debug.draw_line(d, e, life_time=args.show, thickness=0.1, color=carla.Color(0, 0, 128)) - if __name__ == '__main__': main() diff --git a/PythonAPI/util/show_junctions.py b/PythonAPI/util/show_junctions.py index d705d87ba..5680f4923 100644 --- a/PythonAPI/util/show_junctions.py +++ b/PythonAPI/util/show_junctions.py @@ -1,24 +1,26 @@ #!/usr/bin/env python -# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de +# Copyright (c) 2024 Computer Vision Center (CVC) at the Universitat Autonoma de # Barcelona (UAB). # # This work is licensed under the terms of the MIT license. # For a copy, see . -# Allows controlling a vehicle with a keyboard. 
For a simpler and more -# documented example, please take a look at tutorial.py. - import argparse -import sys import carla def main(): argparser = argparse.ArgumentParser( description='CARLA Manual Control Client') - argparser.add_argument('--host', metavar='H', default='127.0.0.1', help='IP of the host server (default: 127.0.0.1)') - argparser.add_argument('-p', '--port', metavar='P', default=2000, type=int, help='TCP port to listen to (default: 2000)') - argparser.add_argument('--show', default='100', type=float, help='Duration of the visualization of the junctions') + argparser.add_argument( + '--host', metavar='H', default='127.0.0.1', + help='IP of the host server (default: 127.0.0.1)') + argparser.add_argument( + '-p', '--port', metavar='P', default=2000, type=int, + help='TCP port to listen to (default: 2000)') + argparser.add_argument( + '--show', default='100', type=float, + help='Duration of the visualization of the junctions') args = argparser.parse_args() client = carla.Client(args.host, args.port) diff --git a/PythonAPI/examples/show_recorder_file_info.py b/PythonAPI/util/show_recorder_file_info.py similarity index 69% rename from PythonAPI/examples/show_recorder_file_info.py rename to PythonAPI/util/show_recorder_file_info.py index 66fe3b2bf..edd82aeb8 100755 --- a/PythonAPI/examples/show_recorder_file_info.py +++ b/PythonAPI/util/show_recorder_file_info.py @@ -6,18 +6,6 @@ # This work is licensed under the terms of the MIT license. # For a copy, see . 
-import glob -import os -import sys - -try: - sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % ( - sys.version_info.major, - sys.version_info.minor, - 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0]) -except IndexError: - pass - import carla import argparse @@ -28,28 +16,19 @@ def main(): argparser = argparse.ArgumentParser( description=__doc__) argparser.add_argument( - '--host', - metavar='H', - default='127.0.0.1', + '--host', metavar='H', default='127.0.0.1', help='IP of the host server (default: 127.0.0.1)') argparser.add_argument( - '-p', '--port', - metavar='P', - default=2000, - type=int, + '-p', '--port', metavar='P', default=2000, type=int, help='TCP port to listen to (default: 2000)') argparser.add_argument( - '-f', '--recorder_filename', - metavar='F', - default="test1.rec", + '-f', '--recorder_filename', metavar='F', default="test1.rec", help='recorder filename (test1.rec)') argparser.add_argument( - '-a', '--show_all', - action='store_true', + '-a', '--show_all', action='store_true', help='show detailed info about all frames content') argparser.add_argument( - '-s', '--save_to_file', - metavar='S', + '-s', '--save_to_file', metavar='S', help='save result to file (specify name and extension)') args = argparser.parse_args() diff --git a/PythonAPI/util/show_spawn_points.py b/PythonAPI/util/show_spawn_points.py deleted file mode 100644 index 2f38f4745..000000000 --- a/PythonAPI/util/show_spawn_points.py +++ /dev/null @@ -1,76 +0,0 @@ -#!/usr/bin/env python - -# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de -# Barcelona (UAB). -# -# This work is licensed under the terms of the MIT license. -# For a copy, see . - -# Allows controlling a vehicle with a keyboard. For a simpler and more -# documented example, please take a look at tutorial.py. 
- -import argparse -import sys -import math - -import carla - -def get_traffic_light_bbs(traffic_light): - def rotate_point(point, angle): - """ - rotate a given point by a given angle - """ - x_ = math.cos(math.radians(angle)) * point.x - math.sin(math.radians(angle)) * point.y - y_ = math.sin(math.radians(angle)) * point.x - math.cos(math.radians(angle)) * point.y - - return carla.Vector3D(x_, y_, point.z) - - base_transform = traffic_light.get_transform() - base_rot = base_transform.rotation.yaw - - area_loc = base_transform.transform(traffic_light.trigger_volume.location) - area_ext = traffic_light.trigger_volume.extent - point = rotate_point(carla.Vector3D(0, 0, area_ext.z), base_rot) - point_location = area_loc + carla.Location(x=point.x, y=point.y) - - trigger_volume = carla.BoundingBox( - carla.Location(point_location.x, point_location.y, point_location.z), - area_ext - ) - trigger_volume.rotation = traffic_light.get_transform().rotation - - area_loc = base_transform.transform(traffic_light.bounding_box.location) - area_ext = traffic_light.bounding_box.extent - point = rotate_point(carla.Vector3D(0, 0, area_ext.z), base_rot) - point_location = area_loc + carla.Location(x=point.x, y=point.y) - - bounding_box = carla.BoundingBox( - carla.Location(point_location.x, point_location.y, point_location.z), - area_ext - ) - bounding_box.rotation = traffic_light.get_transform().rotation - - return bounding_box, trigger_volume - - -def main(): - argparser = argparse.ArgumentParser( - description='CARLA Manual Control Client') - argparser.add_argument('--host', metavar='H', default='127.0.0.1', help='IP of the host server (default: 127.0.0.1)') - argparser.add_argument('-p', '--port', metavar='P', default=2000, type=int, help='TCP port to listen to (default: 2000)') - argparser.add_argument('--show', default='100', type=float, help='Duration of the visualization of the junctions') - args = argparser.parse_args() - - client = carla.Client(args.host, args.port) - world = 
client.get_world() - tmap = world.get_map() - - spawn_points = tmap.get_spawn_points() - - for i, spawn_point in enumerate(spawn_points): - world.debug.draw_string(spawn_point.location + carla.Location(z=1), str(i), life_time=args.show, color=carla.Color(0, 0, 0)) - world.debug.draw_point(spawn_point.location, life_time=args.show, size=0.2, color=carla.Color(0,0,128)) - - -if __name__ == '__main__': - main() diff --git a/PythonAPI/util/show_topology.py b/PythonAPI/util/show_topology.py index a6e73e64b..69113a609 100644 --- a/PythonAPI/util/show_topology.py +++ b/PythonAPI/util/show_topology.py @@ -6,8 +6,6 @@ # This work is licensed under the terms of the MIT license. # For a copy, see . -# Allows controlling a vehicle with a keyboard. For a simpler and more -# documented example, please take a look at tutorial.py. import argparse import sys @@ -56,9 +54,15 @@ def get_traffic_light_bbs(traffic_light): def main(): argparser = argparse.ArgumentParser( description='CARLA Manual Control Client') - argparser.add_argument('--host', metavar='H', default='127.0.0.1', help='IP of the host server (default: 127.0.0.1)') - argparser.add_argument('-p', '--port', metavar='P', default=2000, type=int, help='TCP port to listen to (default: 2000)') - argparser.add_argument('--show', default='100', type=float, help='Duration of the visualization of the junctions') + argparser.add_argument( + '--host', metavar='H', default='127.0.0.1', + help='IP of the host server (default: 127.0.0.1)') + argparser.add_argument( + '-p', '--port', metavar='P', default=2000, type=int, + help='TCP port to listen to (default: 2000)') + argparser.add_argument( + '--show', default='100', type=float, + help='Duration of the visualization of the junctions') args = argparser.parse_args() client = carla.Client(args.host, args.port) diff --git a/PythonAPI/util/show_traffic_lights.py b/PythonAPI/util/show_traffic_lights.py index d2ce6622b..d51a7ed37 100644 --- a/PythonAPI/util/show_traffic_lights.py +++ 
b/PythonAPI/util/show_traffic_lights.py @@ -56,9 +56,15 @@ def get_traffic_light_bbs(traffic_light): def main(): argparser = argparse.ArgumentParser( description='CARLA Manual Control Client') - argparser.add_argument('--host', metavar='H', default='127.0.0.1', help='IP of the host server (default: 127.0.0.1)') - argparser.add_argument('-p', '--port', metavar='P', default=2000, type=int, help='TCP port to listen to (default: 2000)') - argparser.add_argument('--show', default='100', type=float, help='Duration of the visualization of the junctions') + argparser.add_argument( + '--host', metavar='H', default='127.0.0.1', + help='IP of the host server (default: 127.0.0.1)') + argparser.add_argument( + '-p', '--port', metavar='P', default=2000, type=int, + help='TCP port to listen to (default: 2000)') + argparser.add_argument( + '--show', default='100', type=float, + help='Duration of the visualization of the junctions') args = argparser.parse_args() client = carla.Client(args.host, args.port) @@ -86,37 +92,39 @@ def main(): bounding_box.location.z += offset trigger_volume.location.z += offset - world.debug.draw_box(bounding_box, bounding_box.rotation, life_time=args.show, thickness=0.2, color=carla.Color(0, 0, 128)) - # world.debug.draw_box(trigger_volume, trigger_volume.rotation, life_time=args.show, thickness=0.2, color=carla.Color(0, 128, 0)) + world.debug.draw_box(bounding_box, bounding_box.rotation, life_time=args.show, thickness=0.1, color=carla.Color(0, 0, 200)) + world.debug.draw_box(trigger_volume, trigger_volume.rotation, life_time=args.show, thickness=0.1, color=carla.Color(0, 200, 0)) + + text = f"[{tl.get_pole_index()}] - [{tl.get_opendrive_id()}]" + world.debug.draw_string(location, text, life_time=args.show, color=color) light_boxes = tl.get_light_boxes() for lb in light_boxes: lb.location.z += offset - world.debug.draw_box(lb, lb.rotation, life_time=args.show, thickness=0.2, color=carla.Color(0, 128, 128)) - - # tl.get_pole_index() - # 
tl.get_opendrive_id() - # tl.get_red_time() - # tl.get_yellow_time() - # tl.get_green_time() - # tl.get_elapsed_time() - # tl.freeze() - # tl.is_frozen() - # tl.reset_group() + world.debug.draw_box(lb, lb.rotation, life_time=args.show, thickness=0.1, color=carla.Color(200, 0, 0)) for wp in tl.get_affected_lane_waypoints(): - world.debug.draw_point(wp.transform.location, life_time=args.show, size=0.1, color=carla.Color(128,128,0)) + world.debug.draw_point(wp.transform.location, life_time=args.show, size=0.3, color=carla.Color(200, 0, 200)) world.debug.draw_arrow( location + carla.Location(z=offset), wp.transform.location + carla.Location(z=offset), - life_time=args.show, arrow_size=0.5, thickness=0.1, color=carla.Color(128, 128, 0) + life_time=args.show, arrow_size=0.5, thickness=0.1, color=carla.Color(200, 0, 200) ) for wp in tl.get_stop_waypoints(): - world.debug.draw_point(wp.transform.location, life_time=args.show, size=0.1, color=carla.Color(128,128,0)) + world.debug.draw_point(wp.transform.location, life_time=args.show, size=0.3, color=carla.Color(0, 200, 200)) world.debug.draw_arrow( location + carla.Location(z=offset), wp.transform.location + carla.Location(z=offset), - life_time=args.show, arrow_size=0.5, thickness=0.1, color=carla.Color(128, 128, 0) + life_time=args.show, arrow_size=0.5, thickness=0.1, color=carla.Color(0, 200, 200) ) + print("Showing all the static information available for the traffic light. 
This includes:") + print("- Bounding box (blue box)") + print("- Trigger volume (green box)") + print("- Light boxes (Red boxes)") + print("- Affected waypoints (Pink arrows and points)") + print("- Stop waypoints (Teal arrows and points)") + print("- Pole index and OpenDrive ID (Dark blue text)") + + if __name__ == '__main__': main() diff --git a/PythonAPI/util/spectator_transform.py b/PythonAPI/util/spectator_transform.py new file mode 100755 index 000000000..aaec27855 --- /dev/null +++ b/PythonAPI/util/spectator_transform.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python + +# Copyright (c) 2024 Computer Vision Center (CVC) at the Universitat Autonoma de +# Barcelona (UAB). +# +# This work is licensed under the terms of the MIT license. +# For a copy, see . + +"""Script to get the transform of the spectator. Useful when roaming the city to remember specific points of interest""" + +import argparse +import sys +import carla + +def main(): + argparser = argparse.ArgumentParser( + description=__doc__) + argparser.add_argument( + '--host', metavar='H', default='localhost', + help='IP of the host CARLA Simulator (default: localhost)') + argparser.add_argument( + '--port', metavar='P', default=2000, type=int, + help='TCP port of CARLA Simulator (default: 2000)') + argparser.add_argument( + '-w', '--use-waypoint', action='store_true', + help='Return the driving waypoint closest to the spectator') + argparser.add_argument( + '-p', '--use-projection', action='store_true', + help="Projects the spectator's location onto the ground") + argparser.add_argument( + '-z', '--height-diff', default=0, type=int, + help="Adds a height diff to the output point. 
Useful with '-w' or '-p' to get a point z meters above ground") + argparser.add_argument( + '-if', '--interpolation-format', action='store_true', + help="Return the data as an interpolation format, to be used by 'interpolate_camera.py'") + + args = argparser.parse_args() + if args.use_waypoint and args.use_projection: + print("Cannot use waypoint and projection at the same time") + return + + # Get the client + client = carla.Client(args.host, args.port) + client.set_timeout(10.0) + world = client.get_world() + spectator = world.get_spectator() + + spec_transform = spectator.get_transform() + + if args.use_waypoint: + tmap = world.get_map() + wp = tmap.get_waypoint(spec_transform.location) + transform = wp.transform + + elif args.use_projection: + ground_loc = world.ground_projection(spec_transform.location, 100) + if not ground_loc: + print("Couldn't find a valid projection") + sys.exit(0) + transform = carla.Transform(ground_loc.location, spec_transform.rotation) + + else: + transform = spec_transform + + transform.location.z += args.height_diff + world.debug.draw_point(transform.location + carla.Location(z=0.2), size=0.3, color=carla.Color(0, 100, 0), life_time=30) + + if args.interpolation_format: + print(f"<waypoint x=\"{transform.location.x}\" y=\"{transform.location.y}\" z=\"{transform.location.z}\" pitch=\"{transform.rotation.pitch}\" yaw=\"{transform.rotation.yaw}\" roll=\"{transform.rotation.roll}\" />") + else: + print(transform) + +if __name__ == '__main__': + try: + main() + except RuntimeError as e: + print(e) diff --git a/PythonAPI/examples/start_replaying.py b/PythonAPI/util/start_replaying.py similarity index 64% rename from PythonAPI/examples/start_replaying.py rename to PythonAPI/util/start_replaying.py index 9eab25b75..03c6edbc4 100755 --- a/PythonAPI/examples/start_replaying.py +++ b/PythonAPI/util/start_replaying.py @@ -6,18 +6,6 @@ # This work is licensed under the terms of the MIT license. # For a copy, see <https://opensource.org/licenses/MIT>.
-import glob -import os -import sys - -try: - sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % ( - sys.version_info.major, - sys.version_info.minor, - 'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0]) -except IndexError: - pass - import carla import argparse @@ -28,56 +16,34 @@ def main(): argparser = argparse.ArgumentParser( description=__doc__) argparser.add_argument( - '--host', - metavar='H', - default='127.0.0.1', + '--host', metavar='H', default='127.0.0.1', help='IP of the host server (default: 127.0.0.1)') argparser.add_argument( - '-p', '--port', - metavar='P', - default=2000, - type=int, + '-p', '--port', metavar='P', default=2000, type=int, help='TCP port to listen to (default: 2000)') argparser.add_argument( - '-s', '--start', - metavar='S', - default=0.0, - type=float, + '-s', '--start', metavar='S', default=0.0, type=float, help='starting time (default: 0.0)') argparser.add_argument( - '-d', '--duration', - metavar='D', - default=0.0, - type=float, + '-d', '--duration', metavar='D', default=0.0, type=float, help='duration (default: 0.0)') argparser.add_argument( - '-f', '--recorder-filename', - metavar='F', - default="test1.log", + '-f', '--recorder-filename', metavar='F', default="test1.log", help='recorder filename (test1.log)') argparser.add_argument( - '-c', '--camera', - metavar='C', - default=0, - type=int, + '-c', '--camera', metavar='C', default=0, type=int, help='camera follows an actor (ex: 82)') argparser.add_argument( - '-x', '--time-factor', - metavar='X', - default=1.0, - type=float, + '-x', '--time-factor', metavar='X', default=1.0, type=float, help='time factor (default 1.0)') argparser.add_argument( - '-i', '--ignore-hero', - action='store_true', + '-i', '--ignore-hero', action='store_true', help='ignore hero vehicles') argparser.add_argument( - '--move-spectator', - action='store_true', + '--move-spectator', action='store_true', help='move spectator camera') argparser.add_argument( - '--spawn-sensors', - 
action='store_true', + '--spawn-sensors', action='store_true', help='spawn sensors in the replayed world') args = argparser.parse_args()