Add a minimal ros bridge: camera, lidar, control

First version of the bridge written in python. Inspired by the work of
WenchaoDing (https://gist.github.com/WenchaoDing/8bdf2b6753f0cc6cb1c4019529a79c9f).
This commit is contained in:
Laurent George 2018-03-22 15:28:36 +01:00
parent 332d40b58e
commit 0f934cd1f1
11 changed files with 1333 additions and 0 deletions

View File

@ -0,0 +1,199 @@
# CMakeLists for the carla_ros_bridge package.
# The bridge itself is pure Python (src/client.py); this file only wires the
# package into catkin and installs the node script.
cmake_minimum_required(VERSION 2.8.3)
project(carla_ros_bridge)

# Catkin components used by the Python node.
find_package(catkin REQUIRED COMPONENTS
  rospy
  sensor_msgs
  tf
)

# No headers, libraries, messages or services are exported by this package.
catkin_package()

include_directories(
  ${catkin_INCLUDE_DIRS}
)

# Install the bridge node so it is also available in the install space
# (roslaunch resolves type="client.py" from the package tree in devel builds).
install(PROGRAMS
  src/client.py
  DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
)

View File

@ -0,0 +1,90 @@
# Ros bridge for Carla simulator
This ros package aims at providing a simple ros bridge for carla simulator.
![rviz setup](./assets/rviz_carla_default.png "rviz")
![depthcloud](./assets/depth_cloud_and_lidar.png "depthcloud")
[Short video](https://youtu.be/S_NoN2GBtdY)
# Features/Roadmap/TODO
- [x] Cameras (depth, segmentation, rgb) support
- [x] Add camera matrix
- [x] Lidar sensor support
- [x] Transform publications
- [x] Manual control using ackermann msg
- [x] Autopilot mode using rosparam
- [x] Rosbag recording inside the bridge (avoids the small timing errors of `rosbag record -a`)
- [x] Handle ros dependencies
- [x] Marker/bounding box messages for cars/pedestrian
- [ ] Better looking color map for segmentation
- [ ] Support dynamic change (restarting simulation using a topic/rosparam)
# Setup
Create a catkin workspace and install carla_ros_bridge package
mkdir -p ~/catkin_ws/src
ln -s path_to_carla_ros_bridge ~/catkin_ws/src/
source /opt/ros/lunar/setup.bash # if you use kinetic source /opt/ros/kinetic/setup.bash
cd ~/catkin_ws ; catkin_make
source ~/catkin_ws/devel/setup.bash
Then install dependencies using rosdep (you can also use apt-get)
rosdep install --from-paths ~/catkin_ws/
# Start the ros bridge
First run the simulator (see carla documentation)
./CarlaUE4.sh /Game/Maps/Town01 -carla-server -benchmark -fps=15 -windowed -ResX=320 -ResY=240
Then start the ros bridge:
roslaunch carla_ros_client client.launch
To start the ros bridge with rviz use:
roslaunch carla_ros_client client_with_rviz.launch
You can setup the wanted camera/sensors in config/settings.yaml.
# Autopilot control
To enable autopilot control set the ros param carla_autopilot to True
rosparam set carla_autopilot True
# Manual control
To enable manual control set the ros param carla_autopilot to False
rosparam set carla_autopilot False
Then you can send command to the car using the /ackermann_cmd topic.
Example of forward movement; speed is in meters/sec.
rostopic pub /cmd_vel ackermann_msgs/AckermannDrive "{steering_angle: 0.0, steering_angle_velocity: 0.0, speed: 10, acceleration: 0.0,
jerk: 0.0}" -r 10
Example of forward with steering
rostopic pub /cmd_vel ackermann_msgs/AckermannDrive "{steering_angle: 5.41, steering_angle_velocity: 0.0, speed: 10, acceleration: 0.0,
jerk: 0.0}" -r 10
Warning: the steering_angle is the steering-wheel angle (in radians), not the road-wheel angle; for now the maximum steering-wheel angle is set to 500 degrees.
Example for backward :
rostopic pub /cmd_vel ackermann_msgs/AckermannDrive "{steering_angle: 0, steering_angle_velocity: 0.0, speed: -10, acceleration: 0.0,
jerk: 0.0}" -r 10

Binary file not shown.

After

Width:  |  Height:  |  Size: 411 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 326 KiB

View File

@ -0,0 +1,7 @@
<launch>
  <!-- Set to True to record all published topics from inside the bridge. -->
  <arg name='enable_rosbag' default='False'/>
  <!-- Sensor definitions (cameras, lidar) read by the bridge node. -->
  <rosparam file="$(find carla_ros_bridge)/config/settings.yaml" command="load" />
  <!-- Fix: roslaunch param types are str/int/double/bool; "boolean" is rejected. -->
  <param name="carla_autopilot" type="bool" value="True" />
  <param name='enable_rosbag' type="bool" value="$(arg enable_rosbag)"/>
  <node pkg="carla_ros_bridge" name="carla_ros_bridge" type="client.py" output="screen"/>
</launch>

View File

@ -0,0 +1,5 @@
<launch>
<!-- Reuse the plain bridge launch file; pass_all_args forwards this file's
     args (e.g. enable_rosbag) to it.
     NOTE(review): roslaunch boolean attributes usually expect lowercase
     "true"/"false" - confirm "True" is accepted by the installed version. -->
<include file="$(find carla_ros_bridge)/client.launch" pass_all_args="True"/>
<!-- The bridge publishes /clock, so make all nodes follow simulation time. -->
<param name ="/use_sim_time" value="true"/>
<node pkg="rviz" type="rviz" name="rviz" args="-d $(find carla_ros_bridge)/config/carla_default_rviz.cfg"/>
</launch>

View File

@ -0,0 +1,238 @@
Panels:
- Class: rviz/Displays
Help Height: 102
Name: Displays
Property Tree Widget:
Expanded:
- /Global Options1
- /Status1
- /DepthCloud1/Auto Size1
Splitter Ratio: 0.5
Tree Height: 334
- Class: rviz/Selection
Name: Selection
- Class: rviz/Tool Properties
Expanded:
- /2D Pose Estimate1
- /2D Nav Goal1
- /Publish Point1
Name: Tool Properties
Splitter Ratio: 0.5886790156364441
- Class: rviz/Views
Expanded:
- /Current View1
Name: Views
Splitter Ratio: 0.5
- Class: rviz/Time
Experimental: true
Name: Time
SyncMode: 0
SyncSource: Front_cam
- Class: rviz/Displays
Help Height: 0
Name: Displays
Property Tree Widget:
Expanded:
- /Global Options1
- /Grid1
- /DepthCloud1/Auto Size1
Splitter Ratio: 0.496614009141922
Tree Height: 191
Visualization Manager:
Class: ""
Displays:
- Alpha: 0.10000000149011612
Cell Size: 1
Class: rviz/Grid
Color: 170; 164; 162
Enabled: true
Line Style:
Line Width: 0.029999999329447746
Value: Lines
Name: Grid
Normal Cell Count: 0
Offset:
X: 0
Y: 0
Z: 0
Plane: XY
Plane Cell Count: 1000
Reference Frame: <Fixed Frame>
Value: true
- Class: rviz/Image
Enabled: true
Image Topic: /CameraRGB/image_raw
Max Value: 1
Median window: 5
Min Value: 0
Name: Front_cam
Normalize Range: true
Queue Size: 2
Transport Hint: raw
Unreliable: false
Value: true
- Class: rviz/Image
Enabled: true
Image Topic: /camera_depth/image_raw
Max Value: 30
Median window: 5
Min Value: 0
Name: Depth
Normalize Range: false
Queue Size: 2
Transport Hint: raw
Unreliable: false
Value: true
- Alpha: 1
Auto Size:
Auto Size Factor: 1
Value: true
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -10
Value: true
Axis: Z
Channel Name: intensity
Class: rviz/DepthCloud
Color: 255; 255; 255
Color Image Topic: /CameraRGB/image_raw
Color Transformer: RGB8
Color Transport Hint: raw
Decay Time: 0
Depth Map Topic: /camera_depth/image_raw
Depth Map Transport Hint: raw
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Max Intensity: 4096
Min Color: 0; 0; 0
Min Intensity: 0
Name: DepthCloud
Occlusion Compensation:
Occlusion Time-Out: 30
Value: false
Position Transformer: XYZ
Queue Size: 5
Selectable: true
Size (Pixels): 3
Style: Flat Squares
Topic Filter: true
Use Fixed Frame: true
Use rainbow: true
Value: true
- Class: rviz/MarkerArray
Enabled: true
Marker Topic: /vehicles
Name: non_player_vehicles
Namespaces:
"": true
Queue Size: 100
Value: true
- Class: rviz/Marker
Enabled: true
Marker Topic: /player_vehicle
Name: Marker
Namespaces:
"": true
Queue Size: 100
Value: true
- Alpha: 1
Autocompute Intensity Bounds: true
Autocompute Value Bounds:
Max Value: 10
Min Value: -10
Value: true
Axis: Z
Channel Name: intensity
Class: rviz/PointCloud2
Color: 255; 255; 255
Color Transformer: Intensity
Decay Time: 0
Enabled: true
Invert Rainbow: false
Max Color: 255; 255; 255
Max Intensity: 4096
Min Color: 0; 0; 0
Min Intensity: 0
Name: PointCloud2
Position Transformer: XYZ
Queue Size: 10
Selectable: true
Size (Pixels): 3
Size (m): 0.009999999776482582
Style: Flat Squares
Topic: /lidar_0
Unreliable: false
Use Fixed Frame: true
Use rainbow: true
Value: true
Enabled: true
Global Options:
Background Color: 48; 48; 48
Default Light: true
Fixed Frame: base_link
Frame Rate: 30
Name: root
Tools:
- Class: rviz/Interact
Hide Inactive Objects: true
- Class: rviz/MoveCamera
- Class: rviz/Select
- Class: rviz/FocusCamera
- Class: rviz/Measure
- Class: rviz/SetInitialPose
Topic: /initialpose
- Class: rviz/SetGoal
Topic: /move_base_simple/goal
- Class: rviz/PublishPoint
Single click: true
Topic: /clicked_point
Value: true
Views:
Current:
Class: rviz/Orbit
Distance: 20.504690170288086
Enable Stereo Rendering:
Stereo Eye Separation: 0.05999999865889549
Stereo Focal Distance: 1
Swap Stereo Eyes: false
Value: false
Focal Point:
X: 0
Y: 0
Z: 0
Focal Shape Fixed Size: false
Focal Shape Size: 0.05000000074505806
Invert Z Axis: false
Name: Current View
Near Clip Distance: 0.009999999776482582
Pitch: 0.37039822340011597
Target Frame: <Fixed Frame>
Value: Orbit (rviz)
Yaw: 3.0753960609436035
Saved: ~
Window Geometry:
"&Depth":
collapsed: false
"&Time":
collapsed: false
"&Views":
collapsed: false
D&isplays:
collapsed: false
Displays:
collapsed: false
Front_&cam:
collapsed: false
Height: 994
Hide Left Dock: false
Hide Right Dock: false
QMainWindow State: 000000ff00000000fd0000000400000000000001c30000032bfc020000000bfb0000001200530065006c0065006300740069006f006e00000001e10000009b0000007601000003fb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c00610079007300000000430000032d000000f601000003fb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb000000100044006900730070006c00610079007301000000430000010b000000f601000003fb0000001200460072006f006e0074005f00630061006d010000014f000001180000002201000003fb0000000a00440065007000740068010000026800000106000000220100000300000001000001d70000032bfc0200000008fb0000000a004400650070007400680000000043000001e80000000000000000fb00000018005300650067006d0065006e0074006100740069006f006e000000017f0000011f0000000000000000fb0000000a0049006d0061006700650000000043000000f60000000000000000fb0000000a0049006d0061006700650000000043000001630000000000000000fb0000001a00630061006d005f00660072006f006e0074005f00720067006200000001a7000001c70000000000000000fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a0056006900650077007300000000430000032b000000ce01000003fb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000690000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000006900000004dfc0100000002fb0000000800540069006d0065010000000000000690000002bb01000003fb0000000800540069006d00650100000000000004500000000000000000000004cc0000032b00000004000000040000000800000008fc000000010000000200000
0010000000a0054006f006f006c00730100000000ffffffff0000000000000000
Selection:
collapsed: false
Tool Properties:
collapsed: false
Width: 1680
X: 0
Y: 30

View File

@ -0,0 +1,97 @@
# carla original variable names are used unchanged here (i.e. CamelCase, etc.)
carla:
host: localhost
port: 2000
QualityLevel: Low # Low or Epic
SynchronousMode: True
sensors:
CameraRGB:
SensorType: CAMERA
carla_settings:
PostProcessing: SceneFinal
ImageSizeX: 800
ImageSizeY: 600
FOV: 90
PositionX: -5.00
PositionY: 0
PositionZ: 1.30
RotationPitch: 0
RotationRoll: 0
RotationYaw: 0
camera_depth:
SensorType: CAMERA
carla_settings:
PostProcessing: Depth
ImageSizeX: 800
ImageSizeY: 600
FOV: 90
PositionX: -5.00
PositionY: 0
PositionZ: 1.30
RotationPitch: 0
RotationRoll: 0
RotationYaw: 0
camera_depth_front:
SensorType: CAMERA
carla_settings:
PostProcessing: Depth
ImageSizeX: 800
ImageSizeY: 600
FOV: 90
PositionX: 7.00
PositionY: 0
PositionZ: 1.30
RotationPitch: 0
RotationRoll: 0
RotationYaw: 180
CameraRGB_front:
SensorType: CAMERA
carla_settings:
PostProcessing: SceneFinal
ImageSizeX: 800
ImageSizeY: 600
FOV: 90
PositionX: 7.00
PositionY: 0
PositionZ: 1.30
RotationPitch: 0
RotationRoll: 0
RotationYaw: 180
#camera_semantic_segmentation:
# SensorType: CAMERA
# carla_settings:
# PostProcessing: SemanticSegmentation
# ImageSizeX: 800
# ImageSizeY: 600
# FOV: 90
# PositionX: 0.00
# PositionY: 0
# PositionZ: 1.30
# RotationPitch: 0
# RotationRoll: 0
# RotationYaw: 0
lidar_0:
SensorType: LIDAR_RAY_CAST
carla_settings:
Channels: 4
Range: 120
PointsPerSecond: 200000
RotationFrequency: 25
UpperFovLimit: 1.6
LowerFovLimit: -1.6
PositionX: 0.0
PositionY: 0
PositionZ: 1.50
RotationPitch: 0
RotationYaw: 0
RotationRoll: 0

View File

@ -0,0 +1,66 @@
# carla original variable names are used unchanged here (i.e. CamelCase, etc.)
carla:
host: localhost
port: 2000
QualityLevel: Low # Low or Epic
SynchronousMode: True
sensors:
CameraRGB:
SensorType: CAMERA
carla_settings:
PostProcessing: SceneFinal
ImageSizeX: 800
ImageSizeY: 600
FOV: 90
PositionX: 0.00
PositionY: 0
PositionZ: 1.00
RotationPitch: 0
RotationRoll: 0
RotationYaw: 0
camera_depth:
SensorType: CAMERA
carla_settings:
PostProcessing: Depth
ImageSizeX: 800
ImageSizeY: 600
FOV: 90
PositionX: 0.00
PositionY: 0
PositionZ: 1.00
RotationPitch: 0
RotationRoll: 0
RotationYaw: 0
camera_semantic_segmentation:
SensorType: CAMERA
carla_settings:
PostProcessing: SemanticSegmentation
ImageSizeX: 800
ImageSizeY: 600
FOV: 90
PositionX: 0.00
PositionY: 0
PositionZ: 1.30
RotationPitch: 0
RotationRoll: 0
RotationYaw: 0
lidar_0:
SensorType: LIDAR_RAY_CAST
carla_settings:
Channels: 4
Range: 120
PointsPerSecond: 200000
RotationFrequency: 25
UpperFovLimit: 1.6
LowerFovLimit: -1.6
PositionX: 0.0
PositionY: 0
PositionZ: 1.50
RotationPitch: 0
RotationYaw: 0
RotationRoll: 0

View File

@ -0,0 +1,75 @@
<?xml version="1.0"?>
<package format="2">
  <name>carla_ros_bridge</name>
  <version>0.0.1</version>
  <description>The carla_ros_bridge package: a minimal ROS bridge (cameras, lidar, control) for the Carla simulator.</description>
  <maintainer email="laurent.george@valeo.com">lgeorge</maintainer>
  <license>MIT</license>

  <buildtool_depend>catkin</buildtool_depend>

  <build_depend>rospy</build_depend>
  <build_depend>sensor_msgs</build_depend>
  <build_depend>tf</build_depend>

  <build_export_depend>rospy</build_export_depend>
  <build_export_depend>sensor_msgs</build_export_depend>
  <build_export_depend>tf</build_export_depend>

  <exec_depend>rospy</exec_depend>
  <exec_depend>sensor_msgs</exec_depend>
  <exec_depend>tf</exec_depend>
  <exec_depend>tf2</exec_depend>
  <!-- tf2_msgs provides TFMessage, imported by src/client.py -->
  <exec_depend>tf2_msgs</exec_depend>
  <exec_depend>rviz</exec_depend>
  <exec_depend>ackermann_msgs</exec_depend>
  <exec_depend>cv_bridge</exec_depend>
  <exec_depend>geometry_msgs</exec_depend>
  <exec_depend>std_msgs</exec_depend>
  <!-- rosgraph_msgs provides Clock; visualization_msgs provides Marker/MarkerArray,
       both imported by src/client.py but previously undeclared -->
  <exec_depend>rosgraph_msgs</exec_depend>
  <exec_depend>visualization_msgs</exec_depend>
  <!-- the python "import rosbag" API lives in the 'rosbag' package;
       rosbag_storage is kept as well -->
  <exec_depend>rosbag</exec_depend>
  <exec_depend>rosbag_storage</exec_depend>

  <export>
  </export>
</package>

556
carla_ros_bridge/src/client.py Executable file
View File

@ -0,0 +1,556 @@
#!/usr/bin/env python
"""
Ros Bridge for carla
Using python2 for now, simpler for user on ubuntu to have python2
(default setup for kinetic and lunar)
# TODO: group tf in same message ?
"""
import math
import numpy as np
import random
import time
import copy
import rosbag
import rospy
import tf
from carla.client import make_carla_client
from carla.sensor import Camera, Lidar, LidarMeasurement, Image
from carla.sensor import Transform as carla_Transform
from carla.settings import CarlaSettings
from ackermann_msgs.msg import AckermannDrive
from cv_bridge import CvBridge
from geometry_msgs.msg import TransformStamped, Transform, Pose
from rosgraph_msgs.msg import Clock
from sensor_msgs.msg import Image as RosImage
from sensor_msgs.msg import PointCloud2, CameraInfo
from sensor_msgs.point_cloud2 import create_cloud_xyz32
from std_msgs.msg import Header
from tf2_msgs.msg import TFMessage
from visualization_msgs.msg import MarkerArray, Marker
def carla_transform_to_ros_transform(carla_transform):
    """Convert a carla Transform (left-handed, Unreal) into a ROS
    geometry_msgs/Transform (right-handed).

    The y translation and the roll/yaw angles are negated to switch
    handedness; x, z and pitch are kept unchanged.

    :param carla_transform: carla Transform wrapper exposing .matrix
    :return: geometry_msgs.msg.Transform
    """
    matrix = carla_transform.matrix
    translation = tf.transformations.translation_from_matrix(matrix)
    rotation = tf.transformations.quaternion_from_matrix(matrix)

    result = Transform()
    # Left-handed (Unreal) -> right-handed (ROS): flip the y translation.
    result.translation.x = translation[0]
    result.translation.y = -translation[1]
    result.translation.z = translation[2]

    # Same handedness change on orientation: flip roll and yaw, keep pitch.
    roll, pitch, yaw = tf.transformations.euler_from_quaternion(rotation)
    flipped = tf.transformations.quaternion_from_euler(-roll, pitch, -yaw)
    result.rotation.x = flipped[0]
    result.rotation.y = flipped[1]
    result.rotation.z = flipped[2]
    result.rotation.w = flipped[3]
    return result
def carla_transform_to_ros_pose(carla_transform):
    """Convert a raw carla (protobuf) transform into a ROS
    geometry_msgs/Transform, without any handedness flip (compare
    carla_transform_to_ros_transform, which negates y/roll/yaw).

    :param carla_transform: raw carla transform, e.g. agent.transform
    :return: geometry_msgs.msg.Transform
        NOTE(review): despite the function name, a Transform (not a Pose)
        is returned; use _ros_transform_to_pose to obtain a Pose.
    """
    # Bug fix: the original called Transform(carla_transform), which resolves
    # to geometry_msgs.msg.Transform and has no .matrix attribute (runtime
    # AttributeError).  Wrap with the carla Transform helper instead, as done
    # in update_marker_pose and compute_player_pose_msg.
    transform_matrix = carla_Transform(carla_transform).matrix
    x, y, z = tf.transformations.translation_from_matrix(transform_matrix)
    quat = tf.transformations.quaternion_from_matrix(transform_matrix)
    ros_transform = Transform()
    ros_transform.translation.x = x
    ros_transform.translation.y = y
    ros_transform.translation.z = z
    ros_transform.rotation.x = quat[0]
    ros_transform.rotation.y = quat[1]
    ros_transform.rotation.z = quat[2]
    ros_transform.rotation.w = quat[3]
    return ros_transform
def _ros_transform_to_pose(ros_transform):
    """Build a geometry_msgs/Pose whose position and orientation copy the
    translation and rotation of the given geometry_msgs/Transform.

    :param ros_transform: geometry_msgs.msg.Transform
    :return: geometry_msgs.msg.Pose
    """
    pose = Pose()
    pose.position.x = ros_transform.translation.x
    pose.position.y = ros_transform.translation.y
    pose.position.z = ros_transform.translation.z
    pose.orientation.x = ros_transform.rotation.x
    pose.orientation.y = ros_transform.rotation.y
    pose.orientation.z = ros_transform.rotation.z
    pose.orientation.w = ros_transform.rotation.w
    return pose
def update_marker_pose(object, base_marker):
    """Fill base_marker's pose, scale and type from a carla agent.

    :param object: carla pb2 agent (vehicle, pedestrian, traffic light)
        exposing .transform and .box_extent
    :param base_marker: visualization_msgs Marker, updated in place
    """
    ros_transform = carla_transform_to_ros_transform(carla_Transform(object.transform))
    base_marker.pose = _ros_transform_to_pose(ros_transform)
    # box_extent holds half-extents, hence the factor 2 on each axis.
    base_marker.scale.x = object.box_extent.x * 2.0
    base_marker.scale.y = object.box_extent.y * 2.0
    base_marker.scale.z = object.box_extent.z * 2.0 * 2.0 # <-- carla seems to report only half of the bounding box on thix axis
    base_marker.type = Marker.CUBE
lookup_table_marker_id = {} # <-- TODO: migrate this in a class
def get_vehicle_marker(object, header, agent_id=88, player=False):
    """
    Build a cube Marker for one carla agent.

    :param object: carla pb2 agent (vehicle, pedestrian or traffic light)
        exposing .transform and .box_extent
    :param header: std_msgs Header (stamp + frame) attached to the marker
    :param agent_id: carla agent id, remapped to a small consecutive marker id
    :param player: True for the ego vehicle (green), False otherwise (red)
    :return: a visualization_msgs Marker
    """
    marker = Marker(header=header)
    # Semi-transparent so overlapping geometry stays visible in rviz.
    marker.color.a = 0.3
    if player:
        marker.color.g = 1
        marker.color.r = 0
        marker.color.b = 0
    else:
        marker.color.r = 1
        marker.color.g = 0
        marker.color.b = 0
    # Remap the carla agent id to a small consecutive id (see module table).
    if agent_id not in lookup_table_marker_id:
        lookup_table_marker_id[agent_id] = len(lookup_table_marker_id)
    _id = lookup_table_marker_id[agent_id]
    marker.id = _id
    marker.text = "id = {}".format(_id)
    update_marker_pose(object, marker)
    # Non-player extents appear to be reported in centimeters, so scale down.
    if not player: # related to bug request #322
        marker.scale.x = marker.scale.x / 100.0
        marker.scale.y = marker.scale.y / 100.0
        marker.scale.z = marker.scale.z / 100.0
        # the box pose seems to require a little bump to be well aligned with camera depth
        marker.pose.position.z += marker.scale.z / 2.0
    return marker
class CarlaRosBridge(object):
"""
Carla Ros bridge
"""
def __init__(self, client, params, rate=15):
    """
    Bridge between a carla client and ROS: republishes sensor data and
    agent/vehicle markers, and listens on /ackermann_cmd for control.

    :param client: connected carla client (see carla.client.make_carla_client)
    :param params: dict of parameters, see settings.yaml
    :param rate: rate to query data from carla in Hz
    """
    # Outgoing messages are buffered as (publisher_name, message) tuples
    # and flushed by send_msgs().
    self.message_to_publish = []
    self.param_sensors = params['sensors']
    self.client = client
    self.carla_settings = CarlaSettings()
    self.carla_settings.set(
        SendNonPlayerAgentsInfo=True,
        NumberOfVehicles=20,
        NumberOfPedestrians=40,
        WeatherId=random.choice([1, 3, 7, 8, 14]),
        SynchronousMode=params['SynchronousMode'],
        QualityLevel=params['QualityLevel'])
    self.carla_settings.randomize_seeds()
    self.cv_bridge = CvBridge()
    # Simulation clock, starts at zero at the beginning of simulation.
    self.cur_time = rospy.Time.from_sec(0)
    self.carla_game_stamp = 0
    self.carla_platform_stamp = 0
    self.rate = rospy.Rate(rate)
    self.publishers = {}
    # CameraInfo messages per camera name, filled by compute_camera_transform.
    self._camera_infos = {}
    self.processes = []
    self.publishers['tf'] = rospy.Publisher("/tf", TFMessage, queue_size=100)
    self.publishers['vehicles'] = rospy.Publisher("/vehicles", MarkerArray, queue_size=10)
    self.publishers['vehicles_text'] = rospy.Publisher("/vehicles_text", MarkerArray, queue_size=10)
    self.publishers['player_vehicle'] = rospy.Publisher("/player_vehicle", Marker, queue_size=10)
    self.publishers['pedestrians'] = rospy.Publisher("/pedestrians", MarkerArray, queue_size=10)
    self.publishers['traffic_lights'] = rospy.Publisher("/traffic_lights", MarkerArray, queue_size=10)
    # default control command
    self.cur_control = {'steer': 0.0, 'throttle': 0.0, 'brake': 0.0, 'hand_brake': False, 'reverse': False}
    self.cmd_vel_subscriber = rospy.Subscriber('/ackermann_cmd', AckermannDrive, self.set_new_control_callback)
    # Fixed frame every transform/marker is expressed in.
    self.world_link = 'map'
    self.sensors = {}
    self.tf_to_publish = []
def set_new_control_callback(self, data):
"""
Convert a Ackerman drive msg into carla control msg
Right now the control is really simple and don't enforce acceleration and jerk command, nor the steering acceleration too
:param data: AckermannDrive msg
:return:
"""
steering_angle_ctrl = data.steering_angle
speed_ctrl = data.speed
max_steering_angle = math.radians(500) # 500 degrees is the max steering angle that I have on my car,
# would be nice if we could use the value provided by carla
max_speed = 27 # just a value for me, 27 m/sec seems to be a reasonable max speed for now
control = {}
if abs(steering_angle_ctrl) > max_steering_angle:
rospy.logerr("Max steering angle reached, clipping value")
steering_angle_ctrl = np.clip(steering_angle_ctrl, -max_steering_angle, max_steering_angle)
if abs(speed_ctrl) > max_speed:
rospy.logerr("Max speed reached, clipping value")
speed_ctrl = np.clip(speed_ctrl, -max_speed, max_speed)
if speed_ctrl == 0:
control['brake'] = True
control['steer'] = steering_angle_ctrl / max_steering_angle
control['throttle'] = abs(speed_ctrl / max_speed)
control['reverse'] = True if speed_ctrl < 0 else False
self.cur_control = control
def send_msgs(self):
"""
Publish all message store then clean the list of message to publish
:return:
"""
for name, message in self.message_to_publish:
self.publishers[name].publish(message)
self.message_to_publish = []
def add_publishers(self):
    """Create the /clock publisher plus one sensor (publisher + state) per
    entry declared in settings.yaml."""
    self.publishers['clock'] = rospy.Publisher("clock", Clock, queue_size=10)
    for sensor_name in self.param_sensors:
        self.add_sensor(sensor_name)
def compute_cur_time_msg(self):
    # Queue a rosgraph_msgs/Clock message carrying the current simulation time.
    self.message_to_publish.append(('clock', Clock(self.cur_time)))
def compute_sensor_msg(self, name, sensor_data):
    """
    Dispatch one carla sensor sample to the matching transform + message
    builders; log an error for sensor types that are not handled yet.

    :param name: sensor name (used as topic prefix / frame id downstream)
    :param sensor_data: carla Image or LidarMeasurement sample
    """
    if isinstance(sensor_data, Image):
        self.compute_camera_transform(name, sensor_data)
        self.compute_camera_sensor_msg(name, sensor_data)
        return
    if isinstance(sensor_data, LidarMeasurement):
        self.compute_lidar_transform(name, sensor_data)
        self.compute_lidar_sensor_msg(name, sensor_data)
        return
    rospy.logerr("{}, {} is not handled yet".format(name, sensor_data))
def compute_camera_sensor_msg(self, name, sensor):
    """
    Queue an Image + CameraInfo message pair built from one carla camera frame.

    Depth frames follow ROS REP 118: 32-bit float, one depth (along the
    camera Z axis, in meters) per pixel; carla reports 1.0 == 1 km, hence
    the x1000 scaling.  Semantic segmentation is published as mono16 so the
    displayed range can be limited in rviz; everything else as rgb8.

    :param name: camera name, used as frame id and topic prefix
    :param sensor: carla Image sample
    """
    if sensor.type == 'Depth':
        data = np.float32(sensor.data * 1000.0)  # in carla 1.0 = 1km
        encoding = 'passthrough'
    elif sensor.type == 'SemanticSegmentation':
        data = np.uint16(sensor.data)
        encoding = 'mono16'
    else:
        data = sensor.data
        encoding = 'rgb8'
    img_msg = self.cv_bridge.cv2_to_imgmsg(data, encoding=encoding)
    img_msg.header.frame_id = name
    img_msg.header.stamp = self.cur_time
    self.message_to_publish.append((name + '/image_raw', img_msg))
    # Re-publish the camera intrinsics with a header matching this frame.
    cam_info = self._camera_infos[name]
    cam_info.header = img_msg.header
    self.message_to_publish.append((name + '/camera_info', cam_info))
def compute_lidar_sensor_msg(self, name, sensor):
    """
    Queue a PointCloud2 built from one carla lidar sweep.

    Carla expresses lidar points in a left-handed coordinate system while
    ROS needs a right-handed one, so the coordinates are negated; x and y
    also need to be permuted (todo: find why).

    :param name: lidar name, used both as frame id and topic name
    :param sensor: carla LidarMeasurement sample
    """
    header = Header()
    header.frame_id = name
    header.stamp = self.cur_time
    # carla's array is read-only, so work on a copy; negate the axes and
    # swap x/y in a single fancy-indexing step.
    points = -(sensor.data.copy())
    points = points[..., [1, 0, 2]]
    self.message_to_publish.append((name, create_cloud_xyz32(header, points)))
def compute_player_pose_msg(self, player_measurement):
    """
    Queue the ego-vehicle marker and record the map -> base_link transform
    (flushed later with the other tf transforms).

    :param player_measurement: carla player measurement exposing .transform
    """
    stamped = TransformStamped()
    stamped.header.stamp = self.cur_time
    stamped.header.frame_id = self.world_link
    stamped.child_frame_id = "base_link"
    stamped.transform = carla_transform_to_ros_transform(
        carla_Transform(player_measurement.transform))
    marker_header = Header()
    marker_header.stamp = self.cur_time
    marker_header.frame_id = self.world_link
    marker = get_vehicle_marker(
        player_measurement, header=marker_header, agent_id=0, player=True)
    self.message_to_publish.append(('player_vehicle', marker))
    self.tf_to_publish.append(stamped)
def compute_non_player_agent_msg(self, non_player_agents):
"""
:param non_player_agents: list of carla_server_pb2.Agent return by carla API,
with field 'id', 'vehicle', 'pedestrian', 'traffic_light', 'speed_limit_sign'
:return:
"""
vehicles = [(agent.id, agent.vehicle) for agent in non_player_agents if agent.HasField('vehicle')]
pedestrians = [(agent.id, agent.pedestrian) for agent in non_player_agents if agent.HasField('pedestrian')]
traffic_lights = [(agent.id, agent.traffic_light) for agent in non_player_agents if agent.HasField('traffic_light')]
# TODO: add traffic signs
#traffic_lights = [(agent.id, agent.traffic_light) for agent in non_player_agents if agent.HasField('traffic_light')]
header = Header(stamp=self.cur_time, frame_id=self.world_link)
self.compute_vehicle_msgs(vehicles, header)
#self.compute_pedestrian_msgs(pedestrians)
#self.compute_traffic_light_msgs(traffic_lights)
def compute_vehicle_msgs(self, vehicles, header, agent_id=8):
"""
Add MarkerArray msg for vehicle to the list of message to be publish
:param vehicles: list of carla pb2 vehicle
"""
if not(vehicles):
return
markers = [get_vehicle_marker(vehicle, header, agent_id) for agent_id, vehicle in vehicles]
marker_array = MarkerArray(markers)
self.message_to_publish.append(('vehicles', marker_array))
# adding text in rviz (TODO: refactor)
markers_text = [copy.copy(marker) for marker in markers]
for marker in markers_text:
marker.type = Marker.TEXT_VIEW_FACING
marker_array = MarkerArray(markers_text)
self.message_to_publish.append(('vehicles_text', marker_array))
def run(self):
# creating ros publishers, and adding sensors to carla settings
self.add_publishers()
# load settings into the server
scene = self.client.load_settings(self.carla_settings)
# Choose one player start at random.
number_of_player_starts = len(scene.player_start_spots)
player_start = random.randint(0, max(0, number_of_player_starts - 1))
# start
self.client.start_episode(player_start)
while not rospy.is_shutdown():
measurements, sensor_data = self.client.read_data()
self.carla_game_stamp = measurements.game_timestamp
self.carla_platform_stamp = measurements.platform_timestamp
self.cur_time = rospy.Time.from_sec(self.carla_game_stamp * 1000.0)
self.compute_cur_time_msg()
self.compute_player_pose_msg(measurements.player_measurements)
self.compute_non_player_agent_msg(measurements.non_player_agents)
self.send_msgs()
for name, sensor in sensor_data.items():
self.compute_sensor_msg(name, sensor)
tf_msg = TFMessage(self.tf_to_publish)
self.publishers['tf'].publish(tf_msg)
self.tf_to_publish = []
if rospy.get_param('carla_autopilot', True):
control = measurements.player_measurements.autopilot_control
self.client.send_control(control)
else:
control = self.cur_control
self.client.send_control(**control)
#self.rate.sleep() # <-- no need of sleep the read call should already be blocking
def add_sensor(self, name):
"""
Add sensor base on sensor name in settings
:param name:
:return:
"""
sensor_type = self.param_sensors[name]['SensorType']
params = self.param_sensors[name]['carla_settings']
rospy.loginfo("Adding publisher for sensor {}".format(name))
if sensor_type == 'LIDAR_RAY_CAST':
self.add_lidar_sensor(name, params)
elif sensor_type == 'CAMERA':
self.add_camera_sensor(name, params)
else:
rospy.logerr("{sensor_type} not implemented".format(sensor_type=sensor_type))
def compute_camera_transform(self, name, sensor_data):
parent_frame_id = "base_link"
child_frame_id = name
t = TransformStamped()
t.header.stamp = self.cur_time
t.header.frame_id = parent_frame_id
t.child_frame_id = child_frame_id
# for camera we reorient it to look at the same axis as the opencv projection
# in order to get easy depth cloud for rgbd camera
t.transform = carla_transform_to_ros_transform(self.sensors[name].get_transform())
rotation = t.transform.rotation
quat = [rotation.x, rotation.y, rotation.z, rotation.w]
roll, pitch, yaw = tf.transformations.euler_from_quaternion(quat)
roll -= math.pi/2.0
yaw -= math.pi/2.0
quat = tf.transformations.quaternion_from_euler(roll, pitch, yaw)
t.transform.rotation.x = quat[0]
t.transform.rotation.y = quat[1]
t.transform.rotation.z = quat[2]
t.transform.rotation.w = quat[3]
self.tf_to_publish.append(t)
def compute_lidar_transform(self, name, sensor_data):
parent_frame_id = "base_link"
child_frame_id = name
t = TransformStamped()
t.header.stamp = self.cur_time
t.header.frame_id = parent_frame_id
t.child_frame_id = child_frame_id
t.transform = carla_transform_to_ros_transform(self.sensors[name].get_transform())
self.tf_to_publish.append(t)
def add_camera_sensor(self, name, params):
"""
:param name:
:param params:
:return:
"""
# The default camera captures RGB images of the scene.
cam = Camera(name, **params)
self.carla_settings.add_sensor(cam)
self.sensors[name] = cam # we also add the sensor to our lookup
topic_image = name + '/image_raw'
topic_camera = name + '/camera_info'
self.publishers[topic_image] = rospy.Publisher(topic_image, RosImage, queue_size=10)
self.publishers[topic_camera] = rospy.Publisher(topic_camera, CameraInfo, queue_size=10)
# computing camera info, when publishing update the stamp
camera_info = CameraInfo()
camera_info.header.frame_id = name
camera_info.width = cam.ImageSizeX
camera_info.height = cam.ImageSizeY
camera_info.distortion_model = 'plumb_bob'
cx = cam.ImageSizeX / 2.0
cy = cam.ImageSizeY / 2.0
fx = cam.ImageSizeX / (2.0 * math.tan(cam.FOV * math.pi / 360.0))
fy = fx
camera_info.K = [fx, 0, cx,
0, fy, cy,
0, 0, 1]
camera_info.D = [0, 0, 0, 0, 0]
camera_info.R = [1.0, 0, 0,
0, 1.0, 0,
0, 0, 1.0]
camera_info.P = [fx, 0, cx, 0,
0, fy, cy, 0,
0, 0, 1.0, 0]
self._camera_infos[name] = camera_info
def add_lidar_sensor(self, name, params):
"""
:param name:
:param params:
:return:
"""
lidar = Lidar(name, **params)
self.carla_settings.add_sensor(lidar)
self.sensors[name] = lidar # we also add the sensor to our lookup
self.publishers[name] = rospy.Publisher(name, PointCloud2, queue_size=10)
    def __enter__(self):
        # context manager entry point: nothing to set up here
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # log on exit; returning None lets any pending exception propagate
        rospy.loginfo("Exiting Bridge")
        return None
class CarlaRosBridgeWithBag(CarlaRosBridge):
    """Bridge variant that additionally records every published message to a rosbag."""

    def __init__(self, *args, **kwargs):
        super(CarlaRosBridgeWithBag, self).__init__(*args, **kwargs)
        # one bag per session, named after the start time
        bag_path = '/tmp/output_{}.bag'.format(time.strftime("%Y%m%d-%H%M%S"))
        self.bag = rosbag.Bag(bag_path, mode='w')

    def send_msgs(self):
        # write every pending message to the bag, then publish them as usual
        for topic, msg in self.message_to_publish:
            self.bag.write(topic, msg, self.cur_time)
        super(CarlaRosBridgeWithBag, self).send_msgs()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        rospy.loginfo("Closing the bag file")
        self.bag.close()
        super(CarlaRosBridgeWithBag, self).__exit__(exc_type, exc_value, traceback)
def main():
    """Entry point: read connection params, connect to carla and run the bridge."""
    rospy.init_node("carla_client", anonymous=True)
    params = rospy.get_param('carla')
    host, port = params['host'], params['port']
    rospy.loginfo("Trying to connect to {host}:{port}".format(host=host, port=port))
    with make_carla_client(host, port) as client:
        rospy.loginfo("Connection is ok")
        # optionally wrap the bridge with rosbag recording
        use_bag = rospy.get_param('enable_rosbag')
        bridge_cls = CarlaRosBridgeWithBag if use_bag else CarlaRosBridge
        with bridge_cls(client=client, params=params) as carla_ros_bridge:
            carla_ros_bridge.run()
# run the bridge when executed as a script (e.g. via rosrun)
if __name__ == "__main__":
    main()