Adapt to separate documentation pull request
- remove documentation from this request again
- Prefix RoadBoundariesMode and VisualizationMode under Python with 'Rss' for clarity
parent a07077c0d8
commit 64f7b7cda4

(Binary image file removed; before: 241 KiB.)
@@ -1132,86 +1132,21 @@ Parses the axis' orientations to string.

---

## carla.RssEgoDynamicsOnRoute<a name="carla.RssEgoDynamicsOnRoute"></a>
Data contained inside a [carla.RssResponse.ego_dynamics_on_route](#carla.RssResponse.ego_dynamics_on_route).

<h3>Instance Variables</h3>
- <a name="carla.RssEgoDynamicsOnRoute.ego_speed"></a>**<font color="#f8805a">ego_speed</font>** (_<a href="https://ad-map-access.readthedocs.io/en/latest/ad_physics/apidoc/html/classad_1_1physics_1_1Speed.html">libad_physics_python.Speed</a>_)
The ego speed.
- <a name="carla.RssEgoDynamicsOnRoute.min_stopping_distance"></a>**<font color="#f8805a">min_stopping_distance</font>** (_<a href="https://ad-map-access.readthedocs.io/en/latest/ad_physics/apidoc/html/classad_1_1physics_1_1Distance.html">libad_physics_python.Distance</a>_)
The current minimum stopping distance.
- <a name="carla.RssEgoDynamicsOnRoute.ego_center"></a>**<font color="#f8805a">ego_center</font>** (_<a href="https://ad-map-access.readthedocs.io/en/latest/ad_map_access/apidoc/html/structad_1_1map_1_1point_1_1ENUPoint.html">libad_map_access_python.ENUPoint</a>_)
The considered ENU position of the ego vehicle.
- <a name="carla.RssEgoDynamicsOnRoute.ego_heading"></a>**<font color="#f8805a">ego_heading</font>** (_<a href="https://ad-map-access.readthedocs.io/en/latest/ad_map_access/apidoc/html/classad_1_1map_1_1point_1_1ENUHeading.html">libad_map_access_python.ENUHeading</a>_)
The considered heading of the ego vehicle.
- <a name="carla.RssEgoDynamicsOnRoute.ego_center_within_route"></a>**<font color="#f8805a">ego_center_within_route</font>** (_bool_)
Indicates if the ego center is within the route.
- <a name="carla.RssEgoDynamicsOnRoute.crossing_border"></a>**<font color="#f8805a">crossing_border</font>** (_bool_)
Indicates if the vehicle is already crossing one of the lane borders.
- <a name="carla.RssEgoDynamicsOnRoute.route_heading"></a>**<font color="#f8805a">route_heading</font>** (_<a href="https://ad-map-access.readthedocs.io/en/latest/ad_map_access/apidoc/html/classad_1_1map_1_1point_1_1ENUHeading.html">libad_map_access_python.ENUHeading</a>_)
The considered heading of the route.
- <a name="carla.RssEgoDynamicsOnRoute.route_nominal_center"></a>**<font color="#f8805a">route_nominal_center</font>** (_<a href="https://ad-map-access.readthedocs.io/en/latest/ad_map_access/apidoc/html/structad_1_1map_1_1point_1_1ENUPoint.html">libad_map_access_python.ENUPoint</a>_)
The considered nominal center of the current route.
- <a name="carla.RssEgoDynamicsOnRoute.heading_diff"></a>**<font color="#f8805a">heading_diff</font>** (_<a href="https://ad-map-access.readthedocs.io/en/latest/ad_map_access/apidoc/html/classad_1_1map_1_1point_1_1ENUHeading.html">libad_map_access_python.ENUHeading</a>_)
The considered heading difference towards the route.
- <a name="carla.RssEgoDynamicsOnRoute.route_speed_lat"></a>**<font color="#f8805a">route_speed_lat</font>** (_<a href="https://ad-map-access.readthedocs.io/en/latest/ad_physics/apidoc/html/classad_1_1physics_1_1Speed.html">libad_physics_python.Speed</a>_)
The lateral component of the ego speed with respect to the route.
- <a name="carla.RssEgoDynamicsOnRoute.route_speed_lon"></a>**<font color="#f8805a">route_speed_lon</font>** (_<a href="https://ad-map-access.readthedocs.io/en/latest/ad_physics/apidoc/html/classad_1_1physics_1_1Speed.html">libad_physics_python.Speed</a>_)
The longitudinal component of the ego speed with respect to the route.
- <a name="carla.RssEgoDynamicsOnRoute.route_accel_lat"></a>**<font color="#f8805a">route_accel_lat</font>** (_<a href="https://ad-map-access.readthedocs.io/en/latest/ad_physics/apidoc/html/classad_1_1physics_1_1Acceleration.html">libad_physics_python.Acceleration</a>_)
The lateral component of the ego acceleration with respect to the route.
- <a name="carla.RssEgoDynamicsOnRoute.route_accel_lon"></a>**<font color="#f8805a">route_accel_lon</font>** (_<a href="https://ad-map-access.readthedocs.io/en/latest/ad_physics/apidoc/html/classad_1_1physics_1_1Acceleration.html">libad_physics_python.Acceleration</a>_)
The longitudinal component of the ego acceleration with respect to the route.
- <a name="carla.RssEgoDynamicsOnRoute.avg_route_accel_lat"></a>**<font color="#f8805a">avg_route_accel_lat</font>** (_<a href="https://ad-map-access.readthedocs.io/en/latest/ad_physics/apidoc/html/classad_1_1physics_1_1Acceleration.html">libad_physics_python.Acceleration</a>_)
The lateral component of the ego acceleration with respect to the route, smoothed by an average filter.
- <a name="carla.RssEgoDynamicsOnRoute.avg_route_accel_lon"></a>**<font color="#f8805a">avg_route_accel_lon</font>** (_<a href="https://ad-map-access.readthedocs.io/en/latest/ad_physics/apidoc/html/classad_1_1physics_1_1Acceleration.html">libad_physics_python.Acceleration</a>_)
The longitudinal component of the ego acceleration with respect to the route, smoothed by an average filter.

<h3>Methods</h3>

<h3>Dunder methods</h3>
- <a name="carla.RssEgoDynamicsOnRoute.__str__"></a>**<font color="#7fb800">\__str__</font>**(<font color="#00a6ed">**self**</font>)

---

## carla.RssResponse<a name="carla.RssResponse"></a>
The response data of the RSS sensor, containing the result of the RSS calculation.

<h3>Instance Variables</h3>
- <a name="carla.RssResponse.response_valid"></a>**<font color="#f8805a">response_valid</font>** (_bool_)
Indicates if the current response data is valid.
- <a name="carla.RssResponse.proper_response"></a>**<font color="#f8805a">proper_response</font>** (_<a href="https://intel.github.io/ad-rss-lib/doxygen/ad_rss/structad_1_1rss_1_1state_1_1ProperResponse.html">libad_rss_python.ProperResponse</a>_)
The proper response of the RSS calculation.
- <a name="carla.RssResponse.acceleration_restriction"></a>**<font color="#f8805a">acceleration_restriction</font>** (_<a href="https://intel.github.io/ad-rss-lib/doxygen/ad_rss/structad_1_1rss_1_1world_1_1AccelerationRestriction.html">libad_rss_python.AccelerationRestriction</a>_)
The acceleration restrictions of the RSS calculation.
- <a name="carla.RssResponse.rss_state_snapshot"></a>**<font color="#f8805a">rss_state_snapshot</font>** (_<a href="https://intel.github.io/ad-rss-lib/doxygen/ad_rss/structad_1_1rss_1_1state_1_1RssStateSnapshot.html">libad_rss_python.RssStateSnapshot</a>_)
The detailed RSS states at the current point in time.
- <a name="carla.RssResponse.ego_dynamics_on_route"></a>**<font color="#f8805a">ego_dynamics_on_route</font>** (_[carla.RssEgoDynamicsOnRoute](#carla.RssEgoDynamicsOnRoute)_)
The current ego vehicle dynamics with respect to the current route.

<h3>Methods</h3>

<h3>Dunder methods</h3>
- <a name="carla.RssResponse.__str__"></a>**<font color="#7fb800">\__str__</font>**(<font color="#00a6ed">**self**</font>)
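For illustration, a minimal sketch of reading these fields in an RSS sensor callback; it assumes an RSS-enabled build and an already spawned `vehicle`:

```py
import carla

client = carla.Client('localhost', 2000)
world = client.get_world()
rss_bp = world.get_blueprint_library().find('sensor.other.rss')
rss_sensor = world.spawn_actor(rss_bp, carla.Transform(), attach_to=vehicle)

def on_rss_response(response):
    # `response` is a carla.RssResponse.
    if response.response_valid:
        dynamics = response.ego_dynamics_on_route  # carla.RssEgoDynamicsOnRoute
        print('ego speed:', dynamics.ego_speed,
              'min stopping distance:', dynamics.min_stopping_distance)

rss_sensor.listen(on_rss_response)
```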
---
## carla.Sensor<a name="carla.Sensor"></a>
<div style="padding-left:30px;margin-top:-20px"><small><b>Inherited from _[carla.Actor](#carla.Actor)_</b></small></div></p><p>Sensors compound a specific family of actors, quite diverse and unique. They are normally spawned as attachments/sons of a vehicle (take a look at [carla.World](#carla.World) to learn about actor spawning). Sensors are thoroughly designed to retrieve different types of data that they are listening to. The data they receive is shaped as different subclasses inherited from [carla.SensorData](#carla.SensorData) (depending on the sensor).

Most sensors can be divided in two groups: those receiving data on every tick (cameras, point clouds and some specific sensors) and those that only receive it under certain circumstances (trigger detectors). CARLA provides a specific set of sensors and their blueprints can be found in [carla.BlueprintLibrary](#carla.BlueprintLibrary). All the information on their preferences and settlement can be found [here](ref_sensors.md), but the list of those available in CARLA so far goes as follows (a usage sketch follows the lists):
<b>Receive data on every tick:</b>
- [Gnss sensor](ref_sensors.md#gnss-sensor).
- [IMU sensor](ref_sensors.md#imu-sensor).
- [Radar](ref_sensors.md#radar-sensor).
- [Depth camera](ref_sensors.md#depth-camera).
- [Lidar raycast](ref_sensors.md#lidar-raycast-sensor).
- [RGB camera](ref_sensors.md#rgb-camera).
- [Semantic Segmentation camera](ref_sensors.md#semantic-segmentation-camera).
-- [RSS sensor](ref_sensors.md#rss-sensor).
<b>Only receive data when triggered:</b>
- [Collision detector](ref_sensors.md#collision-detector).
- [Lane invasion detector](ref_sensors.md#lane-invasion-detector).
+- [Obstacle detector](ref_sensors.md#obstacle-detector).
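A minimal usage sketch, assuming a running simulator and an already spawned `vehicle`: spawn a camera attached to the vehicle and print the frame number of every image it delivers.

```py
import carla

client = carla.Client('localhost', 2000)
world = client.get_world()
camera_bp = world.get_blueprint_library().find('sensor.camera.rgb')
camera = world.spawn_actor(camera_bp,
                           carla.Transform(carla.Location(z=2.0)),
                           attach_to=vehicle)
camera.listen(lambda image: print('frame:', image.frame))
# ... later, stop receiving data and clean up.
camera.stop()
camera.destroy()
```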
<h3>Instance Variables</h3>
@@ -1232,16 +1167,15 @@ Commands the sensor to stop listening for data.
---

## carla.SensorData<a name="carla.SensorData"></a>
Base class for all the objects containing data generated by a [carla.Sensor](#carla.Sensor). These objects should be the argument of the function said sensor is listening to, in order to work with them. Each of these sensors needs a specific type of sensor data. The relation between available sensors and their corresponding data goes like this:
- Cameras (RGB, depth and semantic segmentation): [carla.Image](#carla.Image).
- Collision detector: [carla.CollisionEvent](#carla.CollisionEvent).
- Gnss sensor: [carla.GnssMeasurement](#carla.GnssMeasurement).
- IMU sensor: [carla.IMUMeasurement](#carla.IMUMeasurement).
- Lane invasion detector: [carla.LaneInvasionEvent](#carla.LaneInvasionEvent).
- Lidar raycast: [carla.LidarMeasurement](#carla.LidarMeasurement).
- Obstacle detector: [carla.ObstacleDetectionEvent](#carla.ObstacleDetectionEvent).
- Radar detector: [carla.RadarMeasurement](#carla.RadarMeasurement).
-- RSS response: [carla.RssResponse](#carla.RssResponse).

<h3>Instance Variables</h3>
- <a name="carla.SensorData.frame"></a>**<font color="#f8805a">frame</font>** (_int_)
@@ -1,16 +1,15 @@
# Sensors reference

* [__Collision detector__](#collision-detector)
* [__Depth camera__](#depth-camera)
* [__GNSS sensor__](#gnss-sensor)
* [__IMU sensor__](#imu-sensor)
* [__Lane invasion detector__](#lane-invasion-detector)
* [__Lidar raycast sensor__](#lidar-raycast-sensor)
* [__Obstacle detector__](#obstacle-detector)
* [__Radar sensor__](#radar-sensor)
* [__RGB camera__](#rgb-camera)
* [__Semantic segmentation camera__](#semantic-segmentation-camera)
-* [__RSS sensor__](#rss-sensor)

---
@@ -19,7 +18,7 @@
* __Blueprint:__ sensor.other.collision
* __Output:__ [carla.CollisionEvent](python_api.md#carla.CollisionEvent) per collision.

This sensor registers an event each time its parent actor collides against something in the world. Several collisions may be detected during a single simulation step.
To ensure that collisions with any kind of object are detected, the server creates "fake" actors for elements such as buildings or bushes so the semantic tag can be retrieved to identify it.

Collision detectors do not have any configurable attribute.
@@ -64,9 +63,9 @@ Collision detectors do not have any configurable attribute.
## Depth camera

* __Blueprint:__ sensor.camera.depth
* __Output:__ [carla.Image](python_api.md#carla.Image) per step (unless `sensor_tick` says otherwise).

The camera provides raw data of the scene encoding the distance of each pixel to the camera (also known as **depth buffer** or **z-buffer**) to create a depth map of the elements.

The image encodes the depth value per pixel using 3 channels of the RGB color space, from least to most significant bytes: _R -> G -> B_. The actual distance in meters can be
decoded with:
@@ -76,8 +75,8 @@ normalized = (R + G * 256 + B * 256 * 256) / (256 * 256 * 256 - 1)
in_meters = 1000 * normalized
```
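As an illustration, a minimal sketch (a hypothetical helper, assuming `numpy` is available) that applies this formula to the raw BGRA buffer of a [carla.Image](python_api.md#carla.Image):

```py
import numpy as np

def depth_in_meters(image):
    # carla.Image.raw_data is a BGRA byte buffer: B, G, R, A per pixel.
    bgra = np.frombuffer(image.raw_data, dtype=np.uint8)
    bgra = bgra.reshape((image.height, image.width, 4)).astype(np.float32)
    b, g, r = bgra[..., 0], bgra[..., 1], bgra[..., 2]
    normalized = (r + g * 256.0 + b * 256.0 * 256.0) / (256.0 ** 3 - 1)
    return 1000.0 * normalized  # per-pixel distance in meters
```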

The output [carla.Image](python_api.md#carla.Image) should then be saved to disk using a [carla.ColorConverter](python_api.md#carla.ColorConverter) that will turn the distance stored in the RGB channels into a __[0,1]__ float containing the distance and then translate this to grayscale.
There are two options in [carla.ColorConverter](python_api.md#carla.ColorConverter) to get a depth view: __Depth__ and __Logarithmic depth__. The precision is millimetric in both, but the logarithmic approach provides better results for closer objects.

![ImageDepth](img/capture_depth.png)
@@ -204,7 +203,7 @@ There are two options in [carla.ColorConverter](python_api.md#carla.ColorConverter)
## GNSS sensor

* __Blueprint:__ sensor.other.gnss
* __Output:__ [carla.GnssMeasurement](python_api.md#carla.GnssMeasurement) per step (unless `sensor_tick` says otherwise).

Reports the current [GNSS position](https://www.gsa.europa.eu/european-gnss/what-gnss) of its parent object. This is calculated by adding the metric position to an initial geo reference location defined within the OpenDRIVE map definition.
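A minimal sketch that prints the reported geolocation; `vehicle` is assumed to be an already spawned actor:

```py
import carla

client = carla.Client('localhost', 2000)
world = client.get_world()
gnss_bp = world.get_blueprint_library().find('sensor.other.gnss')
gnss = world.spawn_actor(gnss_bp, carla.Transform(), attach_to=vehicle)
# carla.GnssMeasurement exposes latitude, longitude and altitude.
gnss.listen(lambda data: print(data.latitude, data.longitude, data.altitude))
```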
@@ -417,17 +416,17 @@ Provides measures that accelerometer, gyroscope and compass would retrieve for the parent object.
* __Blueprint:__ sensor.other.lane_invasion
* __Output:__ [carla.LaneInvasionEvent](python_api.md#carla.LaneInvasionEvent) per crossing.

Registers an event each time its parent crosses a lane marking.
The sensor uses road data provided by the OpenDRIVE description of the map to determine whether the parent vehicle is invading another lane by considering the space between wheels.
However, there are some things to be taken into consideration (a usage sketch follows the list):

* Discrepancies between the OpenDRIVE file and the map will create irregularities such as crossing lanes that are not visible in the map.
* The output retrieves a list of crossed lane markings: the computation is done in OpenDRIVE and considers the space between the four wheels as a whole. Thus, there may be more than one lane being crossed at the same time.
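A minimal sketch (assuming the `world` and `vehicle` objects from the GNSS sketch above) that prints the type of every crossed marking:

```py
lane_bp = world.get_blueprint_library().find('sensor.other.lane_invasion')
lane_sensor = world.spawn_actor(lane_bp, carla.Transform(), attach_to=vehicle)
lane_sensor.listen(lambda event: print(
    'crossed:', [str(marking.type) for marking in event.crossed_lane_markings]))
```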

This sensor does not have any configurable attribute.

!!! Important
    This sensor works fully on the client-side.

#### Output attributes
@@ -467,11 +466,11 @@
* __Blueprint:__ sensor.lidar.ray_cast
* __Output:__ [carla.LidarMeasurement](python_api.md#carla.LidarMeasurement) per step (unless `sensor_tick` says otherwise).

This sensor simulates a rotating Lidar implemented using ray-casting.
The points are computed by adding a laser for each channel distributed in the vertical FOV. The rotation is simulated by computing the horizontal angle that the Lidar rotated in a frame. The point cloud is calculated by doing a ray-cast for each laser in every step:
`points_per_channel_each_step = points_per_second / (FPS * channels)`

A Lidar measurement contains a packet with all the points generated during a `1/FPS` interval. During this interval the physics are not updated, so all the points in a measurement reflect the same "static picture" of the scene.

This output contains a cloud of simulation points and thus can be iterated to retrieve a list of their [`carla.Location`](python_api.md#carla.Location):
@@ -578,10 +577,10 @@ for location in lidar_measurement:
## Obstacle detector

* __Blueprint:__ sensor.other.obstacle
* __Output:__ [carla.ObstacleDetectionEvent](python_api.md#carla.ObstacleDetectionEvent) per obstacle (unless `sensor_tick` says otherwise).

Registers an event every time the parent actor has an obstacle ahead.
In order to anticipate obstacles, the sensor creates a capsular shape ahead of the parent vehicle and uses it to check for collisions.
To ensure that collisions with any kind of object are detected, the server creates "fake" actors for elements such as buildings or bushes so the semantic tag can be retrieved to identify it.

<table class="defTable">
@@ -661,19 +660,19 @@ To ensure that collisions with any kind of object are detected, the server creates
## Radar sensor

* __Blueprint:__ sensor.other.radar
* __Output:__ [carla.RadarMeasurement](python_api.md#carla.RadarMeasurement) per step (unless `sensor_tick` says otherwise).

The sensor creates a conic view that is translated to a 2D point map of the elements in sight and their speed relative to the sensor. This can be used to shape elements and evaluate their movement and direction. Due to the use of polar coordinates, the points will concentrate around the center of the view.

Points measured are contained in [carla.RadarMeasurement](python_api.md#carla.RadarMeasurement) as an array of [carla.RadarDetection](python_api.md#carla.RadarDetection), which specifies their polar coordinates, distance and velocity.
The raw data provided by the radar sensor can be easily converted to a format manageable by __numpy__:
```py
# To get a numpy [[vel, altitude, azimuth, depth],...[,,,]]:
points = np.frombuffer(radar_data.raw_data, dtype=np.dtype('f4'))
points = np.reshape(points, (len(radar_data), 4))
```

The provided script `manual_control.py` uses this sensor to show the points being detected and paint them white when static, red when moving towards the object and blue when moving away:

![ImageRadar](img/sensor_radar.png)
@@ -763,17 +762,17 @@ The provided script `manual_control.py` uses this sensor to show the points being detected
* __Blueprint:__ sensor.camera.rgb
* __Output:__ [carla.Image](python_api.md#carla.Image) per step (unless `sensor_tick` says otherwise).

The "RGB" camera acts as a regular camera capturing images from the scene. See [carla.ColorConverter](python_api.md#carla.ColorConverter) for the conversions that can be applied to the output image.

If `enable_postprocess_effects` is enabled, a set of post-process effects is applied to the image for the sake of realism:

* __Vignette:__ darkens the border of the screen.
* __Grain jitter:__ adds some noise to the render.
* __Bloom:__ intense lights burn the area around them.
* __Auto exposure:__ modifies the image gamma to simulate the eye adaptation to darker or brighter areas.
* __Lens flares:__ simulates the reflection of bright objects on the lens.
* __Depth of field:__ blurs objects near or very far away from the camera.

The `sensor_tick` tells how fast we want the sensor to capture the data.
@@ -1072,13 +1071,13 @@ Since these effects are provided by UE, please make sure to check their documentation.
## Semantic segmentation camera

* __Blueprint:__ sensor.camera.semantic_segmentation
* __Output:__ [carla.Image](python_api.md#carla.Image) per step (unless `sensor_tick` says otherwise).

This camera classifies every object in sight by displaying it in a different color according to its tags (e.g., pedestrians in a different color than vehicles).
When the simulation starts, every element in the scene is created with a tag, and the same happens when an actor is spawned. The objects are classified by their relative file path in the project. For example, meshes stored in `Unreal/CarlaUE4/Content/Static/Pedestrians` are tagged as `Pedestrian`.

The server provides an image with the tag information __encoded in the red channel__: a pixel with a red value of `x` belongs to an object with tag `x`.
This raw [carla.Image](python_api.md#carla.Image) can be stored and converted with the help of __CityScapesPalette__ in [carla.ColorConverter](python_api.md#carla.ColorConverter) to apply the tag information and show a picture with the semantic segmentation.
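A minimal sketch (assuming `world` and `vehicle` as in the earlier sketches) that saves every frame with the palette applied:

```py
seg_bp = world.get_blueprint_library().find('sensor.camera.semantic_segmentation')
seg_cam = world.spawn_actor(seg_bp,
                            carla.Transform(carla.Location(z=2.0)),
                            attach_to=vehicle)
# Convert the red-channel tags to CityScapes colors while saving.
seg_cam.listen(lambda image: image.save_to_disk(
    '_out/%06d.png' % image.frame, carla.ColorConverter.CityScapesPalette))
```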

The following tags are currently available:

<table class="defTable">
@@ -1268,118 +1267,3 @@ The following tags are currently available:

<br>

---
## RSS sensor

* __Blueprint:__ sensor.other.rss
* __Output:__ [carla.RssResponse](python_api.md#carla.RssResponse) per step (unless `sensor_tick` says otherwise).

This sensor is disabled by default and has to be explicitly enabled and built. Details on additional requirements and the scope of the sensor are described [here](rss_lib_integration.md).
The RSS sensor calculates the RSS state of a vehicle and reports the current RSS response. The output can be used in conjunction with a [RssRestrictor](rss_lib_integration.md#RssRestrictor) to adapt
a _VehicleControl_ command before sending it to a vehicle (see _PythonAPI/examples/manual_control_rss.py_).

!!! Important
    This sensor works fully on the client-side.

#### Configuration attributes

!!! Important
    Changing the attributes only has an effect AFTER `sensor.listen()` has been called, since no actual server-side blueprint is available.

<table class="defTable">
<thead>
<th>Configuration attribute</th>
<th>Type</th>
<th>Default</th>
<th>Description</th>
</thead>
<tbody>
<tr>
<td><code>ego_vehicle_dynamics</code></td>
<td>libad_rss_python.RssDynamics</td>
<td>see <a href="https://intel.github.io/ad-rss-lib/ad_rss/Appendix-ParameterDiscussion">RSS parameter discussion</a></td>
<td>Get/set the RSS parameters to be applied for the ego vehicle</td>
</tr>
<tr>
<td><code>other_vehicle_dynamics</code></td>
<td>libad_rss_python.RssDynamics</td>
<td>see <a href="https://intel.github.io/ad-rss-lib/ad_rss/Appendix-ParameterDiscussion">RSS parameter discussion</a></td>
<td>Get/set the RSS parameters to be applied for the other vehicles</td>
</tr>
<tr>
<td><code>road_boundaries_mode</code></td>
<td>carla.RoadBoundariesMode</td>
<td>carla.RoadBoundariesMode.On</td>
<td>Switch the <a href="https://intel.github.io/ad-rss-lib/ad_rss_map_integration/HandleRoadBoundaries">stay-on-road feature</a> <i>On</i> and <i>Off</i></td>
</tr>
<tr>
<td><code>visualization_mode</code></td>
<td>carla.VisualizationMode</td>
<td>carla.VisualizationMode.All</td>
<td>The RSS sensor makes use of the <code>DebugHelper</code> to draw the RSS visualization on the server side. Possible values are <i>Off</i>, <i>RouteOnly</i>,
<i>VehicleStateOnly</i>, <i>VehicleStateAndRoute</i> and <i>All</i>. Because debug drawing takes some time, some RSS responses will
be delayed. Therefore, it is best to disable the visualization when performing automated RSS evaluations.
</td>
</tr>
</table>
<br>
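A minimal configuration sketch. Note that this commit renames the Python-exposed enums to `carla.RssRoadBoundariesMode` and `carla.RssVisualizationMode`, so the prefixed names are used here; `world` and `vehicle` are assumed as in the earlier sketches:

```py
rss_bp = world.get_blueprint_library().find('sensor.other.rss')
rss_sensor = world.spawn_actor(rss_bp, carla.Transform(), attach_to=vehicle)
rss_sensor.listen(lambda response: print(response))
# Attributes only take effect AFTER listen() has been called.
rss_sensor.road_boundaries_mode = carla.RssRoadBoundariesMode.On
rss_sensor.visualization_mode = carla.RssVisualizationMode.Off
```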

#### Routing functionality

The RSS calculations are always based on a route of the ego vehicle through the road network.
Therefore, the sensor provides the possibility to control the considered route from outside by providing some key points on the way.
These points are best selected after the intersections to force the route to take the desired turn.
If no routing targets are defined, a random route is selected automatically.

<table class="defTable">
<thead>
<th>Routing functions</th>
<th>Type</th>
<th>Description</th>
</thead>
<tbody>
<tr>
<td><code>routing_targets</code></td>
<td>vector&lt;<a href="../python_api#carlatransform">carla.Transform</a>&gt;</td>
<td>Get the current list of routing targets used for the route.</td>
</tr>
<tr>
<td><code>append_routing_target</code></td>
<td><a href="../python_api#carlatransform">carla.Transform</a></td>
<td>Append an additional position to the current routing targets.</td>
</tr>
<tr>
<td><code>reset_routing_targets</code></td>
<td>(void)</td>
<td>Reset the current list of routing targets.</td>
</tr>
<tr>
<td><code>drop_route</code></td>
<td>(void)</td>
<td>Drop the currently selected route.</td>
</tr>
</table>
<br>
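A minimal sketch of steering the considered route (the coordinates are hypothetical; key points are best placed right after intersections):

```py
rss_sensor.reset_routing_targets()
rss_sensor.append_routing_target(
    carla.Transform(carla.Location(x=30.0, y=5.0, z=0.0)))
```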

#### RSS Restrictor functionality

The carla.RssRestrictor object can be instantiated to calculate restrictions based on the RSS acceleration restrictions calculated by the RSS sensor.
For this, the <code>restrict_vehicle_control</code> function has to be called, returning the adapted <a href="../python_api#carlavehiclecontrol">carla.VehicleControl</a>
data, with the following input parameters:

<table class="defTable">
<thead>
<th>Parameter Name</th>
<th>Parameter Type</th>
<th>Description</th>
</thead>
<tbody>
<tr>
<td><code>vehicle_control</code></td>
<td><a href="../python_api#carlavehiclecontrol">carla.VehicleControl</a></td>
<td>The current input vehicle control data to be restricted.</td>
</tr>
<tr>
<td><code>restriction</code></td>
<td><a href="https://intel.github.io/ad-rss-lib/doxygen/ad_rss/structad_1_1rss_1_1world_1_1AccelerationRestriction.html">libad_rss_python.AccelerationRestriction</a></td>
<td>Usually the RSS sensor output <code>libad_rss_python.RssResponse.acceleration_restriction</code> is passed in as a parameter.</td>
</tr>
<tr>
<td><code>ego_dynamics_on_route</code></td>
<td>carla.RssEgoDynamicsOnRoute</td>
<td>Usually the RSS sensor output <code>libad_rss_python.RssResponse.ego_dynamics_on_route</code> is passed in as a parameter.</td>
</tr>
<tr>
<td><code>vehicle_physics</code></td>
<td><a href="../python_api#carlavehiclephysicscontrol">carla.VehiclePhysicsControl</a></td>
<td>The physics control of the ego vehicle.</td>
</tr>
</table>
<br>
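A minimal sketch of applying the restriction inside the RSS callback, assuming the `vehicle` and `rss_sensor` objects from the sketches above:

```py
restrictor = carla.RssRestrictor()
physics = vehicle.get_physics_control()

def on_rss_response(response):
    control = vehicle.get_control()
    restricted = restrictor.restrict_vehicle_control(
        control, response.acceleration_restriction,
        response.ego_dynamics_on_route, physics)
    vehicle.apply_control(restricted)

rss_sensor.listen(on_rss_response)
```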
@@ -1,63 +0,0 @@
<h1>AD Responsibility Sensitive Safety model (RSS) integration</h1>

> _This feature is a work in progress, only a Linux build variant is available._

This feature integrates the [C++ Library for Responsibility Sensitive Safety](https://github.com/intel/ad-rss-lib) into the CARLA Client library.

It provides basic implementations of a **RssSensor**, performing the situation analysis and response generation via the **ad-rss-lib**, and a basic **RssRestrictor** class which applies the restrictions to given vehicle commands.
The following image sketches the integration of **RSS** into the CARLA architecture:
![Integrate RSS into CARLA](img/rss_carla_integration_architecture.png)

The **RssSensor** results can be visualized within CARLA.
[![RSS sensor in CARLA](img/rss_carla_integration.png)](https://www.youtube.com/watch?v=UxKPXPT2T8Q)

Please see the [C++ Library for Responsibility Sensitive Safety documentation](https://intel.github.io/ad-rss-lib/) and especially the [Background documentation](https://intel.github.io/ad-rss-lib/ad_rss/Overview/) for further details.

<h2>Compilation</h2>

RSS integration is a Linux-only build variant.
Please see [Build System](build_system.md) for general information.
Furthermore, there are additional prerequisites required for building RSS and its dependencies (see also [**ad-rss-lib**](https://intel.github.io/ad-rss-lib/BUILDING)); briefly:

Dependencies provided by Ubuntu (>= 16.04):
```sh
sudo apt-get install libgtest-dev libpython-dev libpugixml-dev libproj-dev libtbb-dev
```

As the dependencies are built using [colcon](https://colcon.readthedocs.io/en/released/user/installation.html), you have to install the following:
```sh
pip3 install --user -U colcon-common-extensions
```

Additional dependencies for the Python bindings:
```sh
sudo apt-get install castxml
pip install --user pygccxml
pip install --user https://bitbucket.org/ompl/pyplusplus/get/1.8.1.zip
```

Once this is done, the full set of dependencies and RSS components can be built with the following calls.

_LibCarla_ with RSS has to be explicitly compiled by

```sh
make LibCarla.client.rss
```

The _PythonAPI_ with RSS is built by

```sh
make PythonAPI.rss
```

<h2>Current state</h2>

<h3>RssSensor</h3>

The RssSensor supports the full [**ad-rss-lib** v3.0.0 feature set](https://intel.github.io/ad-rss-lib/RELEASE_NOTES_AND_DISCLAIMERS), including intersections and [stay on road](https://intel.github.io/ad-rss-lib/ad_rss_map_integration/HandleRoadBoundaries/) support.

<h3>RssRestrictor</h3>

The current implementation of the RssRestrictor checks and potentially modifies a given _VehicleControl_ generated by e.g. an Automated Driving stack or user input via a _manual_control_ client (see _PythonAPI/examples/manual_control_rss.py_).

Due to the structure of _VehicleControl_ (just throttle, brake and steering values for the car under control), the Restrictor modifies and sets these values to best reach the desired accelerations or decelerations given by a restriction. Due to car physics and the simple control options these might not be met. While an automated vehicle controller might be able to adapt the planned trajectory to meet the requirements of the restrictions, and to use a fast control loop (e.g. > 1 kHz) to ensure these are followed, the simple _RssRestrictor_ intervenes in the lateral direction simply by counter-steering towards the parallel lane direction, and activates the brake if deceleration is requested by RSS, depending on the vehicle mass and brake torques provided by the CARLA vehicle.
@@ -118,11 +118,11 @@ void export_ad_rss() {
      .def_readwrite("avg_route_accel_lon", &carla::rss::EgoDynamicsOnRoute::avg_route_accel_lon)
      .def(self_ns::str(self_ns::self));

-  enum_<carla::rss::RoadBoundariesMode>("RoadBoundariesMode")
+  enum_<carla::rss::RoadBoundariesMode>("RssRoadBoundariesMode")
      .value("Off", carla::rss::RoadBoundariesMode::Off)
      .value("On", carla::rss::RoadBoundariesMode::On);

-  enum_<carla::rss::VisualizationMode>("VisualizationMode")
+  enum_<carla::rss::VisualizationMode>("RssVisualizationMode")
      .value("Off", carla::rss::VisualizationMode::Off)
      .value("RouteOnly", carla::rss::VisualizationMode::RouteOnly)
      .value("VehicleStateOnly", carla::rss::VisualizationMode::VehicleStateOnly)

@@ -144,7 +144,7 @@ void export_ad_rss() {
      .add_property("road_boundaries_mode", &GetRoadBoundariesMode, &cc::RssSensor::SetRoadBoundariesMode)
      .add_property("visualization_mode", &GetVisualizationMode, &cc::RssSensor::SetVisualizationMode)
      .add_property("routing_targets", &GetRoutingTargets)
-     .def("append_routing_target", &cc::RssSensor::AppendRoutingTarget)
+     .def("append_routing_target", &cc::RssSensor::AppendRoutingTarget, (arg("routing_target")))
      .def("reset_routing_targets", &cc::RssSensor::ResetRoutingTargets)
      .def("drop_route", &cc::RssSensor::DropRoute)
      .def(self_ns::str(self_ns::self));
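With this rename in place, client scripts reference the prefixed enum names; a minimal sketch of the resulting Python usage (assuming an already spawned and listening `rss_sensor`):

```py
# The Python-exposed enums now carry the 'Rss' prefix.
rss_sensor.road_boundaries_mode = carla.RssRoadBoundariesMode.Off
rss_sensor.visualization_mode = carla.RssVisualizationMode.RouteOnly
```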
@@ -6,21 +6,20 @@
  parent: carla.Actor
  # - DESCRIPTION ------------------------
  doc: >
    Sensors compound a specific family of actors, quite diverse and unique. They are normally spawned as attachments/sons of a vehicle (take a look at carla.World to learn about actor spawning). Sensors are thoroughly designed to retrieve different types of data that they are listening to. The data they receive is shaped as different subclasses inherited from carla.SensorData (depending on the sensor).

    Most sensors can be divided in two groups: those receiving data on every tick (cameras, point clouds and some specific sensors) and those that only receive it under certain circumstances (trigger detectors). CARLA provides a specific set of sensors and their blueprints can be found in carla.BlueprintLibrary. All the information on their preferences and settlement can be found [here](ref_sensors.md), but the list of those available in CARLA so far goes as follows:
    <b>Receive data on every tick:</b>
    - [Gnss sensor](ref_sensors.md#gnss-sensor).
    - [IMU sensor](ref_sensors.md#imu-sensor).
    - [Radar](ref_sensors.md#radar-sensor).
    - [Depth camera](ref_sensors.md#depth-camera).
    - [Lidar raycast](ref_sensors.md#lidar-raycast-sensor).
    - [RGB camera](ref_sensors.md#rgb-camera).
    - [Semantic Segmentation camera](ref_sensors.md#semantic-segmentation-camera).
-   - [RSS sensor](ref_sensors.md#rss-sensor).
    <b>Only receive data when triggered:</b>
    - [Collision detector](ref_sensors.md#collision-detector).
    - [Lane invasion detector](ref_sensors.md#lane-invasion-detector).
+   - [Obstacle detector](ref_sensors.md#obstacle-detector).

  # - PROPERTIES -------------------------

@@ -28,7 +27,7 @@
  - var_name: is_listening
    type: boolean
    doc: >
      When <b>True</b> the sensor will be waiting for data.
  # - METHODS ----------------------------
  methods:
  - def_name: listen

@@ -38,7 +37,7 @@
      doc: >
        The called function with one argument containing the sensor data.
    doc: >
      The function the sensor will be calling every time a new measurement is received. This function needs an argument containing an object of type carla.SensorData to work with.
  # --------------------------------------
  - def_name: stop
    doc: >
@@ -5,16 +5,15 @@
- class_name: SensorData
  # - DESCRIPTION ------------------------
  doc: >
    Base class for all the objects containing data generated by a carla.Sensor. These objects should be the argument of the function said sensor is listening to, in order to work with them. Each of these sensors needs a specific type of sensor data. The relation between available sensors and their corresponding data goes like this:
    - Cameras (RGB, depth and semantic segmentation): carla.Image.
    - Collision detector: carla.CollisionEvent.
    - Gnss sensor: carla.GnssMeasurement.
    - IMU sensor: carla.IMUMeasurement.
    - Lane invasion detector: carla.LaneInvasionEvent.
    - Lidar raycast: carla.LidarMeasurement.
    - Obstacle detector: carla.ObstacleDetectionEvent.
    - Radar detector: carla.RadarMeasurement.
    - RSS response: carla.RssResponse.
  # - PROPERTIES -------------------------
  instance_variables:
  - var_name: frame

@@ -34,21 +33,21 @@
- class_name: ColorConverter
  # - DESCRIPTION ------------------------
  doc: >
    Class that defines conversion patterns that can be applied to a carla.Image in order to show information provided by carla.Sensor. Depth conversions cause a loss of accuracy, as sensors detect depth as <b>float</b> that is then converted to a grayscale value between 0 and 255. Take a look at this [recipe](ref_code_recipes.md#converted-image-recipe) to see an example of how to create and save image data for <b>sensor.camera.semantic_segmentation</b>.
  # - PROPERTIES -------------------------
  instance_variables:
  - var_name: CityScapesPalette
    doc: >
      Converts the image to a segmented map using tags provided by the blueprint library. Used by <b>sensor.camera.semantic_segmentation</b>.
  - var_name: Depth
    doc: >
      Converts the image to a linear depth map. Used by <b>sensor.camera.depth</b>.
  - var_name: LogarithmicDepth
    doc: >
      Converts the image to a depth map using a logarithmic scale, leading to better precision for small distances at the expense of losing it when further away.
  - var_name: Raw
    doc: >
      No changes applied to the image.

- class_name: Image
  parent: carla.SensorData
@@ -64,7 +63,7 @@
  - var_name: height
    type: int
    doc: >
      Image height in pixels.
  - var_name: width
    type: int
    doc: >

@@ -85,12 +84,12 @@
    - param_name: path
      type: str
      doc: >
        Path that will contain the image.
    - param_name: color_converter
      type: carla.ColorConverter
      default: Raw
      doc: >
        Default <b>Raw</b> will make no changes.
    doc: >
      Saves the image to disk using a converter pattern stated as `color_converter`. The default conversion pattern is <b>Raw</b> that will make no changes to the image.
  # --------------------------------------

@@ -123,7 +122,7 @@
  - var_name: channels
    type: int
    doc: >
      Number of lasers shot.
  - var_name: horizontal_angle
    type: float
    doc: >

@@ -131,7 +130,7 @@
  - var_name: raw_data
    type: bytes
    doc: >
      List of 3D points received as data.
  # - METHODS ----------------------------
  methods:
  - def_name: save_to_disk

@@ -139,7 +138,7 @@
    - param_name: path
      type: str
    doc: >
      Saves the point cloud to disk as a <b>.ply</b> file describing data from 3D scanners. The files generated are ready to be used within [MeshLab](http://www.meshlab.net/), an open-source system for processing said files. Just take into account that axes may differ from Unreal Engine and so need to be reallocated.
  # --------------------------------------
  - def_name: get_point_count
    params:

@@ -171,7 +170,7 @@
  parent: carla.SensorData
  # - DESCRIPTION ------------------------
  doc: >
    Class that defines a collision data for <b>sensor.other.collision</b>. The sensor creates one of these for every collision detected, which may be many for one simulation step. Learn more about this [here](ref_sensors.md#collision-detector).
  # - PROPERTIES -------------------------
  instance_variables:
  - var_name: actor

@@ -181,7 +180,7 @@
  - var_name: other_actor
    type: carla.Actor
    doc: >
      The second actor involved in the collision.
  - var_name: normal_impulse
    type: carla.Vector3D
    doc: >
@@ -191,13 +190,13 @@
  parent: carla.SensorData
  # - DESCRIPTION ------------------------
  doc: >
    Class that defines the obstacle data for <b>sensor.other.obstacle</b>. Learn more about this [here](ref_sensors.md#obstacle-detector).
  # - PROPERTIES -------------------------
  instance_variables:
  - var_name: actor
    type: carla.Actor
    doc: >
      The actor the sensor is attached to.
  - var_name: other_actor
    type: carla.Actor
    doc: >

@@ -205,7 +204,7 @@
  - var_name: distance
    type: float
    doc: >
      Distance between `actor` and `other_actor`.
  # - METHODS ----------------------------
  methods:
  - def_name: __str__

@@ -215,7 +214,7 @@
  parent: carla.SensorData
  # - DESCRIPTION ------------------------
  doc: >
    Class that defines lane invasions for <b>sensor.other.lane_invasion</b>. It works only client-side and is dependent on OpenDRIVE to provide reliable information. The sensor creates one of these every time there is a lane invasion, which may be more than once per simulation step. Learn more about this [here](ref_sensors.md#lane-invasion-detector).
  # - PROPERTIES -------------------------
  instance_variables:
  - var_name: actor

@@ -259,7 +258,7 @@
  parent: carla.SensorData
  # - DESCRIPTION ------------------------
  doc: >
    Class that defines the data registered by a <b>sensor.other.imu</b>, regarding the sensor's transformation according to the current carla.World. It essentially acts as accelerometer, gyroscope and compass.
  # - PROPERTIES -------------------------
  instance_variables:
  - var_name: accelerometer

@@ -289,12 +288,12 @@
  - var_name: raw_data
    type: bytes
    doc: >
      The complete information of the carla.RadarDetection the radar has registered.
  # - METHODS ----------------------------
  methods:
  - def_name: get_detection_count
    doc: >
      Retrieves the number of entries generated, same as **<font color="#7fb800">\__str__()</font>**.
  # --------------------------------------
  - def_name: __getitem__
    params:

@@ -318,7 +317,7 @@
- class_name: RadarDetection
  # - DESCRIPTION ------------------------
  doc: >
    Data contained inside a carla.RadarMeasurement. Each of these represents one of the points in the cloud that a <b>sensor.other.radar</b> registers and contains the distance, angle and velocity in relation to the radar.
  # - PROPERTIES -------------------------
  instance_variables:
  - var_name: altitude
@ -344,125 +343,4 @@
|
|||
methods:
|
||||
- def_name: __str__
|
||||
# --------------------------------------
|
||||

  - class_name: RssResponse
    # - DESCRIPTION ------------------------
    doc: >
      The response data generated by a <b>sensor.other.rss</b>.
    # - PROPERTIES -------------------------
    instance_variables:
    - var_name: response_valid
      type: bool
      doc: >
        States whether the current response data is valid.
    # --------------------------------------
    - var_name: proper_response
      type: <a href="https://intel.github.io/ad-rss-lib/doxygen/ad_rss/structad_1_1rss_1_1state_1_1ProperResponse.html">libad_rss_python.ProperResponse</a>
      doc: >
        The proper response of the RSS calculation.
    # --------------------------------------
    - var_name: acceleration_restriction
      type: <a href="https://intel.github.io/ad-rss-lib/doxygen/ad_rss/structad_1_1rss_1_1world_1_1AccelerationRestriction.html">libad_rss_python.AccelerationRestriction</a>
      doc: >
        The acceleration restrictions of the RSS calculation.
    # --------------------------------------
    - var_name: rss_state_snapshot
      type: <a href="https://intel.github.io/ad-rss-lib/doxygen/ad_rss/structad_1_1rss_1_1state_1_1RssStateSnapshot.html">libad_rss_python.RssStateSnapshot</a>
      doc: >
        The detailed RSS states at the current point in time.
    # --------------------------------------
    - var_name: ego_dynamics_on_route
      type: carla.RssEgoDynamicsOnRoute
      doc: >
        The current ego vehicle dynamics with respect to the current route.
    # - METHODS ----------------------------
    methods:
    - def_name: __str__
    # --------------------------------------
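A sketch of consuming these fields in a response callback; `rss_sensor` is assumed to be a spawned <b>sensor.other.rss</b>:

```python
def _on_rss_response(response):
    # Skip responses that the RSS calculation flagged as invalid.
    if not response.response_valid:
        return
    print(response.proper_response)           # libad_rss_python.ProperResponse
    print(response.acceleration_restriction)  # acceleration limits from RSS
    print(response.ego_dynamics_on_route.ego_speed)

rss_sensor.listen(_on_rss_response)
```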

  - class_name: RssEgoDynamicsOnRoute
    # - DESCRIPTION ------------------------
    doc: >
      Data contained inside a carla.RssResponse.ego_dynamics_on_route.
    # - PROPERTIES -------------------------
    instance_variables:
    - var_name: ego_speed
      type: <a href="https://ad-map-access.readthedocs.io/en/latest/ad_physics/apidoc/html/classad_1_1physics_1_1Speed.html">libad_physics_python.Speed</a>
      doc: >
        The ego speed.
    # --------------------------------------
    - var_name: min_stopping_distance
      type: <a href="https://ad-map-access.readthedocs.io/en/latest/ad_physics/apidoc/html/classad_1_1physics_1_1Distance.html">libad_physics_python.Distance</a>
      doc: >
        The current minimum stopping distance.
    # --------------------------------------
    - var_name: ego_center
      type: <a href="https://ad-map-access.readthedocs.io/en/latest/ad_map_access/apidoc/html/structad_1_1map_1_1point_1_1ENUPoint.html">libad_map_access_python.ENUPoint</a>
      doc: >
        The considered ENU position of the ego vehicle.
    # --------------------------------------
    - var_name: ego_heading
      type: <a href="https://ad-map-access.readthedocs.io/en/latest/ad_map_access/apidoc/html/classad_1_1map_1_1point_1_1ENUHeading.html">libad_map_access_python.ENUHeading</a>
      doc: >
        The considered heading of the ego vehicle.
    # --------------------------------------
    - var_name: ego_center_within_route
      type: bool
      doc: >
        Indicates if the ego center is within the route.
    # --------------------------------------
    - var_name: crossing_border
      type: bool
      doc: >
        Indicates if the vehicle is already crossing one of the lane borders.
    # --------------------------------------
    - var_name: route_heading
      type: <a href="https://ad-map-access.readthedocs.io/en/latest/ad_map_access/apidoc/html/classad_1_1map_1_1point_1_1ENUHeading.html">libad_map_access_python.ENUHeading</a>
      doc: >
        The considered heading of the route.
    # --------------------------------------
    - var_name: route_nominal_center
      type: <a href="https://ad-map-access.readthedocs.io/en/latest/ad_map_access/apidoc/html/structad_1_1map_1_1point_1_1ENUPoint.html">libad_map_access_python.ENUPoint</a>
      doc: >
        The considered nominal center of the current route.
    # --------------------------------------
    - var_name: heading_diff
      type: <a href="https://ad-map-access.readthedocs.io/en/latest/ad_map_access/apidoc/html/classad_1_1map_1_1point_1_1ENUHeading.html">libad_map_access_python.ENUHeading</a>
      doc: >
        The considered heading difference towards the route.
    # --------------------------------------
    - var_name: route_speed_lat
      type: <a href="https://ad-map-access.readthedocs.io/en/latest/ad_physics/apidoc/html/classad_1_1physics_1_1Speed.html">libad_physics_python.Speed</a>
      doc: >
        The lateral component of the ego speed with respect to the route.
    # --------------------------------------
    - var_name: route_speed_lon
      type: <a href="https://ad-map-access.readthedocs.io/en/latest/ad_physics/apidoc/html/classad_1_1physics_1_1Speed.html">libad_physics_python.Speed</a>
      doc: >
        The longitudinal component of the ego speed with respect to the route.
    # --------------------------------------
    - var_name: route_accel_lat
      type: <a href="https://ad-map-access.readthedocs.io/en/latest/ad_physics/apidoc/html/classad_1_1physics_1_1Acceleration.html">libad_physics_python.Acceleration</a>
      doc: >
        The lateral component of the ego acceleration with respect to the route.
    # --------------------------------------
    - var_name: route_accel_lon
      type: <a href="https://ad-map-access.readthedocs.io/en/latest/ad_physics/apidoc/html/classad_1_1physics_1_1Acceleration.html">libad_physics_python.Acceleration</a>
      doc: >
        The longitudinal component of the ego acceleration with respect to the route.
    # --------------------------------------
    - var_name: avg_route_accel_lat
      type: <a href="https://ad-map-access.readthedocs.io/en/latest/ad_physics/apidoc/html/classad_1_1physics_1_1Acceleration.html">libad_physics_python.Acceleration</a>
      doc: >
        The lateral component of the ego acceleration with respect to the route, smoothed by an averaging filter.
    # --------------------------------------
    - var_name: avg_route_accel_lon
      type: <a href="https://ad-map-access.readthedocs.io/en/latest/ad_physics/apidoc/html/classad_1_1physics_1_1Acceleration.html">libad_physics_python.Acceleration</a>
      doc: >
        The longitudinal component of the ego acceleration with respect to the route, smoothed by an averaging filter.
    # - METHODS ----------------------------
    methods:
    - def_name: __str__
    # --------------------------------------

...
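A sketch of inspecting the route-relative dynamics above, given a received response:

```python
def log_dynamics(dynamics):
    # `dynamics` is a carla.RssEgoDynamicsOnRoute, e.g. response.ego_dynamics_on_route.
    print('ego speed:', dynamics.ego_speed)
    print('min stopping distance:', dynamics.min_stopping_distance)
    print('within route:', dynamics.ego_center_within_route,
          '/ crossing border:', dynamics.crossing_border)
    print('route speed lat/lon:', dynamics.route_speed_lat, dynamics.route_speed_lon)
    print('avg route accel lat/lon:', dynamics.avg_route_accel_lat, dynamics.avg_route_accel_lon)
```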
@@ -352,17 +352,17 @@ class KeyboardControl(object):
                 if self._world and self._world.rss_sensor:
                     visualization_mode = self._world.rss_sensor.sensor.visualization_mode
                     visualization_mode = visualization_mode + 1
-                    if visualization_mode > carla.VisualizationMode.All:
-                        visualization_mode = carla.VisualizationMode.Off
-                    self._world.rss_sensor.sensor.visualization_mode = carla.VisualizationMode(visualization_mode)
+                    if visualization_mode > carla.RssVisualizationMode.All:
+                        visualization_mode = carla.RssVisualizationMode.Off
+                    self._world.rss_sensor.sensor.visualization_mode = carla.RssVisualizationMode(visualization_mode)
             elif event.key == K_b:
                 if self._world and self._world.rss_sensor:
-                    if self._world.rss_sensor.sensor.road_boundaries_mode == carla.RoadBoundariesMode.Off:
-                        self._world.rss_sensor.sensor.road_boundaries_mode = carla.RoadBoundariesMode.On
-                        print("carla.RoadBoundariesMode.On")
+                    if self._world.rss_sensor.sensor.road_boundaries_mode == carla.RssRoadBoundariesMode.Off:
+                        self._world.rss_sensor.sensor.road_boundaries_mode = carla.RssRoadBoundariesMode.On
+                        print("carla.RssRoadBoundariesMode.On")
                     else:
-                        self._world.rss_sensor.sensor.road_boundaries_mode = carla.RoadBoundariesMode.Off
-                        print("carla.RoadBoundariesMode.Off")
+                        self._world.rss_sensor.sensor.road_boundaries_mode = carla.RssRoadBoundariesMode.Off
+                        print("carla.RssRoadBoundariesMode.Off")
             elif event.key == K_g:
                 if self._world and self._world.rss_sensor:
                     self._world.rss_sensor.drop_route()
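With the rename applied, cycling and toggling the modes from user code reads as follows; a standalone sketch assuming `sensor` is a spawned RSS sensor:

```python
import carla

# Cycle to the next visualization mode, wrapping from All back to Off.
mode = sensor.visualization_mode + 1
if mode > carla.RssVisualizationMode.All:
    mode = carla.RssVisualizationMode.Off
sensor.visualization_mode = carla.RssVisualizationMode(mode)

# Toggle the road boundaries handling with the prefixed enum name.
if sensor.road_boundaries_mode == carla.RssRoadBoundariesMode.Off:
    sensor.road_boundaries_mode = carla.RssRoadBoundariesMode.On
else:
    sensor.road_boundaries_mode = carla.RssRoadBoundariesMode.Off
```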
@@ -802,9 +802,9 @@ class RssSensor(object):
            raise RuntimeError('CARLA PythonAPI not compiled in RSS variant, please "make PythonAPI.rss"')
        weak_self = weakref.ref(self)
        self.sensor.listen(lambda event: RssSensor._on_rss_response(weak_self, event))
-       self.sensor.visualization_mode = carla.VisualizationMode.All
+       self.sensor.visualization_mode = carla.RssVisualizationMode.All
        self.sensor.visualize_results = True
-       self.sensor.road_boundaries_mode = carla.RoadBoundariesMode.On
+       self.sensor.road_boundaries_mode = carla.RssRoadBoundariesMode.On
        self.set_default_parameters()
        self.sensor.reset_routing_targets()
        if routing_targets:
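The same defaults outside the wrapper class, as a sketch; `world` and `parent` (the ego vehicle) are assumed to exist:

```python
import carla

bp = world.get_blueprint_library().find('sensor.other.rss')
sensor = world.spawn_actor(bp, carla.Transform(), attach_to=parent)

# Defaults matching the RssSensor wrapper above, using the Rss-prefixed enums.
sensor.visualization_mode = carla.RssVisualizationMode.All
sensor.visualize_results = True
sensor.road_boundaries_mode = carla.RssRoadBoundariesMode.On
```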