Optical Flow
commit fa9f55a592 (parent c1407a7ddf)
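Taken together, the hunks below add an optical flow camera to CARLA: a new AOpticalFlowCamera sensor that renders the engine's velocity buffer through the VelocityMaterial post-process, a 16-bit-per-channel Color16bit/Image16bit data type with its own Image16bitSerializer, the sensor-registry entries and Python bindings that expose it as sensor.camera.optical_flow, plus supporting changes (a PF_A16B16G16R16 render-target option in ASceneCaptureSensor, float pixel readback in FPixelReader) and an OpenCV-based visualization in manual_control.py.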
@@ -19,6 +19,7 @@
 #include "carla/sensor/s11n/EpisodeStateSerializer.h"
 #include "carla/sensor/s11n/GnssSerializer.h"
 #include "carla/sensor/s11n/ImageSerializer.h"
+#include "carla/sensor/s11n/Image16bitSerializer.h"
 #include "carla/sensor/s11n/IMUSerializer.h"
 #include "carla/sensor/s11n/LidarSerializer.h"
 #include "carla/sensor/s11n/NoopSerializer.h"
@@ -34,6 +35,7 @@ class AGnssSensor;
 class AInertialMeasurementUnit;
 class ALaneInvasionSensor;
 class AObstacleDetectionSensor;
+class AOpticalFlowCamera;
 class ARadar;
 class ARayCastSemanticLidar;
 class ARayCastLidar;
@@ -60,6 +62,7 @@ namespace sensor {
     std::pair<AInertialMeasurementUnit *, s11n::IMUSerializer>,
     std::pair<ALaneInvasionSensor *, s11n::NoopSerializer>,
     std::pair<AObstacleDetectionSensor *, s11n::ObstacleDetectionEventSerializer>,
+    std::pair<AOpticalFlowCamera *, s11n::Image16bitSerializer>,
     std::pair<ARadar *, s11n::RadarSerializer>,
     std::pair<ARayCastSemanticLidar *, s11n::SemanticLidarSerializer>,
     std::pair<ARayCastLidar *, s11n::LidarSerializer>,
@@ -84,6 +87,7 @@ namespace sensor {
 #include "Carla/Sensor/InertialMeasurementUnit.h"
 #include "Carla/Sensor/LaneInvasionSensor.h"
 #include "Carla/Sensor/ObstacleDetectionSensor.h"
+#include "Carla/Sensor/OpticalFlowCamera.h"
 #include "Carla/Sensor/Radar.h"
 #include "Carla/Sensor/RayCastLidar.h"
 #include "Carla/Sensor/RayCastSemanticLidar.h"
@@ -46,6 +46,38 @@ namespace data {
 
 static_assert(sizeof(Color) == sizeof(uint32_t), "Invalid color size!");
 
+#pragma pack(push, 1)
+/// A 64-bit PF_FloatRGBA color [16bit / channel]. (still uses uint16 types to transport 16bit floats)
+struct Color16bit {
+  Color16bit() = default;
+  Color16bit(const Color16bit &) = default;
+
+  Color16bit(uint16_t r, uint16_t g, uint16_t b, uint16_t a = 255u)
+    : b(b), g(g), r(r), a(a) {}
+
+  Color16bit &operator=(const Color16bit &) = default;
+
+  bool operator==(const Color16bit &rhs) const {
+    return (r == rhs.r) && (g == rhs.g) && (b == rhs.b) && (a == rhs.a);
+  }
+
+  bool operator!=(const Color16bit &rhs) const {
+    return !(*this == rhs);
+  }
+
+  // operator rpc::Color16bit() const {
+  //   return {r, g, b};
+  // }
+
+  uint16_t b = 0u;
+  uint16_t g = 0u;
+  uint16_t r = 0u;
+  uint16_t a = 0u;
+};
+#pragma pack(pop)
+
+static_assert(sizeof(Color16bit) == sizeof(uint64_t), "Invalid 16bit color size!");
+
 } // namespace data
 } // namespace sensor
 } // namespace carla
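For readers who want to poke at these buffers from Python, the packed layout above maps one-to-one onto a ctypes structure. The sketch below is only an illustration (the Python mirror is not part of the commit); the field order and the 8-byte size follow the struct and its static_assert.

```python
import ctypes

# Hypothetical Python mirror of carla::sensor::data::Color16bit (not in the commit).
class Color16bit(ctypes.Structure):
    _pack_ = 1  # same effect as #pragma pack(push, 1)
    _fields_ = [("b", ctypes.c_uint16),
                ("g", ctypes.c_uint16),
                ("r", ctypes.c_uint16),
                ("a", ctypes.c_uint16)]

# Mirrors static_assert(sizeof(Color16bit) == sizeof(uint64_t), ...)
assert ctypes.sizeof(Color16bit) == 8
```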
@@ -13,9 +13,12 @@ namespace carla {
 namespace sensor {
 namespace data {
 
-  /// An image of 32-bit BGRA colors.
+  /// An image of 32-bit BGRA colors (8-bit channels)
   using Image = ImageTmpl<Color>;
 
+  /// An image of 64-bit BGRA colors (16-bit channels)
+  using Image16bit = ImageTmpl<Color16bit>;
+
 } // namespace data
 } // namespace sensor
 } // namespace carla
@@ -9,6 +9,7 @@
 #include "carla/Debug.h"
 #include "carla/sensor/data/Array.h"
 #include "carla/sensor/s11n/ImageSerializer.h"
+#include "carla/sensor/s11n/Image16bitSerializer.h"
 
 namespace carla {
 namespace sensor {
@@ -21,8 +22,10 @@ namespace data {
   protected:
 
     using Serializer = s11n::ImageSerializer;
+    using Serializer16bit = s11n::Image16bitSerializer;
 
     friend Serializer;
+    friend Serializer16bit;
 
     explicit ImageTmpl(RawData &&data)
       : Super(Serializer::header_offset, std::move(data)) {
@@ -0,0 +1,21 @@
+//
+// Created by flo on 09.11.20.
+//
+
+#include "Image16bitSerializer.h"
+#include "carla/sensor/s11n/Image16bitSerializer.h"
+
+#include "carla/sensor/data/Image.h"
+
+namespace carla {
+namespace sensor {
+namespace s11n {
+
+  SharedPtr<SensorData> Image16bitSerializer::Deserialize(RawData &&data) {
+    auto image = SharedPtr<data::Image16bit>(new data::Image16bit{std::move(data)});
+    return image;
+  }
+
+} // namespace s11n
+} // namespace sensor
+} // namespace carla
@@ -0,0 +1,59 @@
+//
+// Created by flo on 09.11.20.
+//
+
+#pragma once
+
+#include "carla/Memory.h"
+#include "carla/sensor/RawData.h"
+
+#include <cstdint>
+#include <cstring>
+
+namespace carla {
+namespace sensor {
+
+  class SensorData;
+
+namespace s11n {
+
+  /// Serializes image buffers generated by camera sensors.
+  class Image16bitSerializer {
+  public:
+
+#pragma pack(push, 1)
+    struct ImageHeader {
+      uint32_t width;
+      uint32_t height;
+      float fov_angle;
+    };
+#pragma pack(pop)
+
+    constexpr static auto header_offset = sizeof(ImageHeader);
+
+    static const ImageHeader &DeserializeHeader(const RawData &data) {
+      return *reinterpret_cast<const ImageHeader *>(data.begin());
+    }
+
+    template <typename Sensor>
+    static Buffer Serialize(const Sensor &sensor, Buffer &&bitmap);
+
+    static SharedPtr<SensorData> Deserialize(RawData &&data);
+  };
+
+  template <typename Sensor>
+  inline Buffer Image16bitSerializer::Serialize(const Sensor &sensor, Buffer &&bitmap) {
+    DEBUG_ASSERT(bitmap.size() > sizeof(ImageHeader));
+    ImageHeader header = {
+      sensor.GetImageWidth(),
+      sensor.GetImageHeight(),
+      sensor.GetFOVAngle()
+    };
+    std::memcpy(bitmap.data(), reinterpret_cast<const void *>(&header), sizeof(header));
+    return std::move(bitmap);
+  }
+
+} // namespace s11n
+} // namespace sensor
+} // namespace carla
+
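The serializer above fixes the wire layout: a packed 12-byte ImageHeader (uint32 width, uint32 height, float fov_angle) followed by the pixel payload. Below is a hedged sketch of reading that layout from Python, assuming a little-endian host and a raw byte buffer `buf` that still carries the header; the helper name is illustrative.

```python
import struct

HEADER_FMT = "<IIf"                        # uint32 width, uint32 height, float fov_angle, packed
HEADER_SIZE = struct.calcsize(HEADER_FMT)  # 12 bytes, i.e. header_offset

def split_image16bit_buffer(buf):
    # Returns the decoded header and the trailing pixel payload
    # (width * height Color16bit values, 8 bytes per pixel).
    width, height, fov_angle = struct.unpack_from(HEADER_FMT, buf, 0)
    return (width, height, fov_angle), buf[HEADER_SIZE:]
```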
@@ -40,6 +40,14 @@ namespace data {
     return out;
   }
 
+  std::ostream &operator<<(std::ostream &out, const Image16bit &image) {
+    out << "Image16bit(frame=" << std::to_string(image.GetFrame())
+        << ", timestamp=" << std::to_string(image.GetTimestamp())
+        << ", size=" << std::to_string(image.GetWidth()) << 'x' << std::to_string(image.GetHeight())
+        << ')';
+    return out;
+  }
+
   std::ostream &operator<<(std::ostream &out, const LidarMeasurement &meas) {
     out << "LidarMeasurement(frame=" << std::to_string(meas.GetFrame())
         << ", timestamp=" << std::to_string(meas.GetTimestamp())
@@ -272,6 +280,24 @@ void export_sensor_data() {
     .def(self_ns::str(self_ns::self))
   ;
 
+  class_<csd::Image16bit, bases<cs::SensorData>, boost::noncopyable, boost::shared_ptr<csd::Image16bit>>("Image16bit", no_init)
+    .add_property("width", &csd::Image16bit::GetWidth)
+    .add_property("height", &csd::Image16bit::GetHeight)
+    .add_property("fov", &csd::Image16bit::GetFOVAngle)
+    .add_property("raw_data", &GetRawDataAsBuffer<csd::Image16bit>)
+    // .def("convert", &ConvertImage<csd::Image>, (arg("color_converter"))) unimplemented
+    // .def("save_to_disk", &SaveImageToDisk<csd::Image>, (arg("path"), arg("color_converter")=EColorConverter::Raw)) unimplemented
+    .def("__len__", &csd::Image16bit::size)
+    .def("__iter__", iterator<csd::Image16bit>())
+    .def("__getitem__", +[](const csd::Image16bit &self, size_t pos) -> csd::Color16bit {
+      return self.at(pos);
+    })
+    .def("__setitem__", +[](csd::Image16bit &self, size_t pos, csd::Color16bit color) {
+      self.at(pos) = color;
+    })
+    .def(self_ns::str(self_ns::self))
+  ;
+
   class_<csd::LidarMeasurement, bases<cs::SensorData>, boost::noncopyable, boost::shared_ptr<csd::LidarMeasurement>>("LidarMeasurement", no_init)
     .add_property("horizontal_angle", &csd::LidarMeasurement::GetHorizontalAngle)
     .add_property("channels", &csd::LidarMeasurement::GetChannelCount)
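The new binding mirrors the existing Image one: width, height, fov and raw_data properties plus len() and iteration. A minimal consumption sketch with numpy, assuming `img` is an Image16bit handed to a sensor callback (the helper name is illustrative):

```python
import numpy as np

def image16bit_to_array(img):
    # raw_data holds width * height Color16bit pixels, i.e. 4 uint16 channels each.
    arr = np.frombuffer(img.raw_data, dtype=np.uint16)
    return arr.reshape(img.height, img.width, 4)
```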
@@ -63,6 +63,13 @@ import glob
 import os
 import sys
 
+OPENCV_INSTALLED = True
+try:
+    import cv2
+except ImportError:
+    OPENCV_INSTALLED = False
+    pass
+
 try:
     sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
         sys.version_info.major,
@@ -155,6 +162,27 @@ def get_actor_display_name(actor, truncate=250):
     return (name[:truncate - 1] + u'\u2026') if len(name) > truncate else name
 
 
+def render_optical_flow_data(data):
+    intensity = np.linalg.norm(data, axis=2)
+    angle = np.arctan2(data[:, :, 0], data[:, :, 1])
+    max_intensity = 100
+    # N.B.: an intensity of exactly 1.0 makes the output black (perhaps showing the over-saturation), so keep it < 1
+    intensity = np.clip(intensity, 0, max_intensity - 1) / max_intensity
+    # log scaling
+    basis = 30
+    intensity = np.log1p((basis - 1) * intensity) / np.log1p(basis - 1)
+    # for the angle they use 360° scale, see https://stackoverflow.com/a/57203497/14467327
+    angle = (np.pi + angle) * 360 / (2 * np.pi)
+    # print(F"Ranges, angle: [{np.min(angle)}, {np.max(angle)}], "
+    #       F"intensity: [{np.min(intensity)}, {np.max(intensity)}]")
+    intensity = intensity[:, :, np.newaxis]
+    angle = angle[:, :, np.newaxis]
+    hsv_img = np.concatenate((angle, np.ones_like(intensity), intensity), axis=2)
+    img_out = np.array(cv2.cvtColor(np.array(hsv_img, dtype=np.float32), cv2.COLOR_HSV2RGB) * 256,
+                       dtype=np.dtype("uint8"))
+    return img_out
+
+
 # ==============================================================================
 # -- World ---------------------------------------------------------------------
 # ==============================================================================
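render_optical_flow_data expects an (H, W, 2) array of per-pixel flow vectors in pixels and returns an RGB uint8 image whose hue encodes direction and whose log-scaled value encodes magnitude. A small usage sketch with a synthetic flow field, assuming OpenCV is available; the random data is only a stand-in for real sensor output:

```python
import numpy as np
import cv2

flow = (np.random.rand(240, 320, 2).astype(np.float32) - 0.5) * 20.0  # fake flow field in pixels
rgb = render_optical_flow_data(flow)
cv2.imshow("optical flow", cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR))  # imshow expects BGR
cv2.waitKey(0)
```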
@@ -981,6 +1009,7 @@ class CameraManager(object):
         self.transform_index = 1
         self.sensors = [
             ['sensor.camera.rgb', cc.Raw, 'Camera RGB', {}],
+            ['sensor.camera.optical_flow', cc.Raw, 'Optical Flow', {}],
             ['sensor.camera.depth', cc.Raw, 'Camera Depth (Raw)', {}],
             ['sensor.camera.depth', cc.Depth, 'Camera Depth (Gray Scale)', {}],
             ['sensor.camera.depth', cc.LogarithmicDepth, 'Camera Depth (Logarithmic Gray Scale)', {}],
@@ -1079,6 +1108,22 @@ class CameraManager(object):
             # Blue is positive, red is negative
             dvs_img[dvs_events[:]['y'], dvs_events[:]['x'], dvs_events[:]['pol'] * 2] = 255
             self.surface = pygame.surfarray.make_surface(dvs_img.swapaxes(0, 1))
+        elif self.sensors[self.index][0].startswith('sensor.camera.optical_flow'):
+            # print(image)
+            array = np.frombuffer(image.raw_data, dtype=np.uint16)
+            array = np.reshape(array, (image.height, image.width, 4))
+            if OPENCV_INSTALLED:
+                data_array = np.array(array[:, :, 0:2], dtype=np.float32)
+                data_array[:, :, 0] = (data_array[:, :, 0] - 32767) * (2 * image.width / 65535)
+                data_array[:, :, 1] = (32767 - data_array[:, :, 1]) * (2 * image.height / 65535)
+
+                img_out = render_optical_flow_data(data_array)
+
+                self.surface = pygame.surfarray.make_surface(img_out.swapaxes(0, 1))
+            else:
+                array = array[:, :, :3]
+                array = array[:, :, ::-1]
+                self.surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
         else:
             image.convert(self.sensors[self.index][1])
             array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
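The decode above implies the on-wire convention: the first two uint16 channels are centred at 32767, with the full range spanning ±width pixels horizontally and ±height pixels vertically (vertical axis flipped). A hedged standalone helper mirroring that logic outside pygame; `image` is assumed to expose raw_data, width and height like the Image16bit binding in this commit:

```python
import numpy as np

def decode_optical_flow(image):
    raw = np.frombuffer(image.raw_data, dtype=np.uint16).reshape(image.height, image.width, 4)
    flow = np.empty((image.height, image.width, 2), dtype=np.float32)
    # Channel 0: horizontal flow in pixels; channel 1: vertical flow with the sign flipped.
    flow[:, :, 0] = (raw[:, :, 0].astype(np.float32) - 32767) * (2 * image.width / 65535)
    flow[:, :, 1] = (32767 - raw[:, :, 1].astype(np.float32)) * (2 * image.height / 65535)
    return flow
```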
@@ -19,6 +19,8 @@ GlobalDefaultServerGameMode=/Game/Carla/Blueprints/Game/CarlaGameMode.CarlaGameM
 
 [/Script/Engine.RendererSettings]
 r.DefaultFeature.MotionBlur=True
+r.BasePassOutputsVelocity=True
+r.BasePassForceOutputsVelocity=True
 r.AllowStaticLighting=False
 r.DiscardUnusedQuality=True
 r.DefaultFeature.Bloom=False
@@ -0,0 +1,31 @@
+#include "Carla.h"
+#include "Carla/Sensor/OpticalFlowCamera.h"
+
+#include "Carla/Sensor/PixelReader.h"
+
+FActorDefinition AOpticalFlowCamera::GetSensorDefinition()
+{
+  return UActorBlueprintFunctionLibrary::MakeCameraDefinition(TEXT("optical_flow"));
+}
+
+AOpticalFlowCamera::AOpticalFlowCamera(const FObjectInitializer &ObjectInitializer)
+  : Super(ObjectInitializer)
+{
+  Enable16BitFormat(true);
+  AddPostProcessingMaterial(
+      TEXT("Material'/Carla/PostProcessingMaterials/PhysicLensDistortion.PhysicLensDistortion'"));
+  // AddPostProcessingMaterial(
+  //#if PLATFORM_LINUX
+  //    TEXT("Material'/Carla/PostProcessingMaterials/DepthEffectMaterial_GLSL.DepthEffectMaterial_GLSL'")
+  //#else
+  //    TEXT("Material'/Carla/PostProcessingMaterials/DepthEffectMaterial.DepthEffectMaterial'")
+  //#endif
+  // );
+  AddPostProcessingMaterial(
+      TEXT("Material'/Carla/PostProcessingMaterials/VelocityMaterial.VelocityMaterial'"));
+}
+
+void AOpticalFlowCamera::PostPhysTick(UWorld *World, ELevelTick TickType, float DeltaSeconds)
+{
+  FPixelReader::SendPixelsInRenderThread(*this, true);
+}
@@ -0,0 +1,24 @@
+#pragma once
+
+#include "Carla/Sensor/ShaderBasedSensor.h"
+
+#include "Carla/Actor/ActorDefinition.h"
+
+#include "OpticalFlowCamera.generated.h"
+
+/// Sensor that produces "optical flow" images.
+UCLASS()
+class CARLA_API AOpticalFlowCamera : public AShaderBasedSensor
+{
+  GENERATED_BODY()
+
+public:
+
+  static FActorDefinition GetSensorDefinition();
+
+  AOpticalFlowCamera(const FObjectInitializer &ObjectInitializer);
+
+protected:
+
+  void PostPhysTick(UWorld *World, ELevelTick TickType, float DeltaSeconds) override;
+};
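With GetSensorDefinition registering the camera as "optical_flow", the sensor should be reachable from the standard Python API as sensor.camera.optical_flow (the same id used in manual_control.py below). A hedged client-side sketch; host, transform and the callback body are illustrative only:

```python
import carla

client = carla.Client("localhost", 2000)
world = client.get_world()

bp = world.get_blueprint_library().find("sensor.camera.optical_flow")
vehicle = world.get_actors().filter("vehicle.*")[0]  # assumes a vehicle already exists
camera = world.spawn_actor(
    bp, carla.Transform(carla.Location(x=1.5, z=2.4)), attach_to=vehicle)

# Each callback receives an Image16bit (see the bindings added in this commit).
camera.listen(lambda image: print(image.frame, image.width, image.height))
```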
@@ -73,6 +73,36 @@ static void WritePixelsToBuffer_Vulkan(
   }
 }
 
+// Temporal; this avoid allocating the array each time and also avoids checking
+// for a bigger texture, ReadSurfaceData will allocate the space needed.
+TArray<FFloat16Color> gFloatPixels;
+
+static void WriteFloatPixelsToBuffer_Vulkan(
+    const UTextureRenderTarget2D &RenderTarget,
+    carla::Buffer &Buffer,
+    uint32 Offset,
+    FRHICommandListImmediate &InRHICmdList)
+{
+  check(IsInRenderingThread());
+  auto RenderResource =
+      static_cast<const FTextureRenderTarget2DResource *>(RenderTarget.Resource);
+  FTexture2DRHIRef Texture = RenderResource->GetRenderTargetTexture();
+  if (!Texture)
+  {
+    return;
+  }
+
+  FIntPoint Rect = RenderResource->GetSizeXY();
+
+  // NS: Extra copy here, don't know how to avoid it.
+  InRHICmdList.ReadSurfaceFloatData(
+      Texture,
+      FIntRect(0, 0, Rect.X, Rect.Y),
+      gFloatPixels,
+      FReadSurfaceDataFlags(RCM_UNorm, CubeFace_MAX));
+  Buffer.copy_from(Offset, gFloatPixels);
+}
+
 // =============================================================================
 // -- FPixelReader -------------------------------------------------------------
 // =============================================================================
@@ -134,7 +164,8 @@ void FPixelReader::WritePixelsToBuffer(
     UTextureRenderTarget2D &RenderTarget,
     carla::Buffer &Buffer,
     uint32 Offset,
-    FRHICommandListImmediate &InRHICmdList
+    FRHICommandListImmediate &InRHICmdList,
+    bool use16BitFormat
 )
 {
   TRACE_CPUPROFILER_EVENT_SCOPE_STR(__FUNCTION__);
@@ -142,7 +173,14 @@ void FPixelReader::WritePixelsToBuffer(
 
   if (IsVulkanPlatform(GMaxRHIShaderPlatform) || IsD3DPlatform(GMaxRHIShaderPlatform, false))
   {
-    WritePixelsToBuffer_Vulkan(RenderTarget, Buffer, Offset, InRHICmdList);
+    if (use16BitFormat)
+    {
+      WriteFloatPixelsToBuffer_Vulkan(RenderTarget, Buffer, Offset, InRHICmdList);
+    }
+    else
+    {
+      WritePixelsToBuffer_Vulkan(RenderTarget, Buffer, Offset, InRHICmdList);
+    }
     return;
   }
 
@@ -155,7 +193,7 @@ void FPixelReader::WritePixelsToBuffer(
   FRHITexture2D *Texture = RenderTargetResource->GetRenderTargetTexture();
   checkf(Texture != nullptr, TEXT("FPixelReader: UTextureRenderTarget2D missing render target texture"));
 
-  const uint32 BytesPerPixel = 4u; // PF_R8G8B8A8
+  const uint32 BytesPerPixel = use16BitFormat ? 8u : 4u; // PF_R8G8B8A8 or PF_FloatRGBA
   const uint32 Width = Texture->GetSizeX();
   const uint32 Height = Texture->GetSizeY();
   const uint32 ExpectedStride = Width * BytesPerPixel;
@@ -62,7 +62,7 @@ public:
   ///
   /// @pre To be called from game-thread.
   template <typename TSensor>
-  static void SendPixelsInRenderThread(TSensor &Sensor);
+  static void SendPixelsInRenderThread(TSensor &Sensor, bool use16BitFormat = false);
 
 private:
 
@@ -73,7 +73,8 @@ private:
       UTextureRenderTarget2D &RenderTarget,
       carla::Buffer &Buffer,
      uint32 Offset,
-      FRHICommandListImmediate &InRHICmdList);
+      FRHICommandListImmediate &InRHICmdList,
+      bool use16BitFormat = false);
 
 };
 
@@ -82,7 +83,7 @@ private:
 // =============================================================================
 
 template <typename TSensor>
-void FPixelReader::SendPixelsInRenderThread(TSensor &Sensor)
+void FPixelReader::SendPixelsInRenderThread(TSensor &Sensor, bool use16BitFormat)
 {
   TRACE_CPUPROFILER_EVENT_SCOPE(FPixelReader::SendPixelsInRenderThread);
   check(Sensor.CaptureRenderTarget != nullptr);
@@ -100,7 +101,7 @@ void FPixelReader::SendPixelsInRenderThread(TSensor &Sensor)
   // game-thread.
   ENQUEUE_RENDER_COMMAND(FWritePixels_SendPixelsInRenderThread)
   (
-    [&Sensor, Stream=Sensor.GetDataStream(Sensor)](auto &InRHICmdList) mutable
+    [&Sensor, Stream=Sensor.GetDataStream(Sensor), use16BitFormat](auto &InRHICmdList) mutable
     {
       TRACE_CPUPROFILER_EVENT_SCOPE_STR("FWritePixels_SendPixelsInRenderThread");
 
@@ -112,7 +113,8 @@ void FPixelReader::SendPixelsInRenderThread(TSensor &Sensor)
           *Sensor.CaptureRenderTarget,
           Buffer,
          carla::sensor::SensorRegistry::get<TSensor *>::type::header_offset,
-          InRHICmdList);
+          InRHICmdList, use16BitFormat);
 
       if(Buffer.data())
       {
        SCOPE_CYCLE_COUNTER(STAT_CarlaSensorStreamSend);
@@ -122,6 +124,7 @@ void FPixelReader::SendPixelsInRenderThread(TSensor &Sensor)
       }
     }
   );
 
  // Blocks until the render thread has finished all it's tasks
+  Sensor.WaitForRenderThreadToFinsih();
 }
@@ -467,7 +467,8 @@ void ASceneCaptureSensor::BeginPlay()
   // Determine the gamma of the player.
   const bool bInForceLinearGamma = !bEnablePostProcessingEffects;
 
-  CaptureRenderTarget->InitCustomFormat(ImageWidth, ImageHeight, PF_B8G8R8A8, bInForceLinearGamma);
+  CaptureRenderTarget->InitCustomFormat(ImageWidth, ImageHeight, bEnable16BitFormat ? PF_A16B16G16R16 : PF_B8G8R8A8,
+      bInForceLinearGamma);
 
   if (bEnablePostProcessingEffects)
   {
@@ -63,6 +63,18 @@ public:
     return bEnablePostProcessingEffects;
   }
 
+  UFUNCTION(BlueprintCallable)
+  void Enable16BitFormat(bool Enable = false)
+  {
+    bEnable16BitFormat = Enable;
+  }
+
+  UFUNCTION(BlueprintCallable)
+  bool Is16BitFormatEnabled() const
+  {
+    return bEnable16BitFormat;
+  }
+
   UFUNCTION(BlueprintCallable)
   void SetFOVAngle(float FOVAngle);
 
@@ -320,6 +332,10 @@ protected:
   UPROPERTY(EditAnywhere)
   bool bEnablePostProcessingEffects = true;
 
+  /// Whether to change render target format to PF_A16B16G16R16, offering 16bit / channel
+  UPROPERTY(EditAnywhere)
+  bool bEnable16BitFormat = false;
+
   FRenderCommandFence RenderFence;
 
 };