Add frame number to sensor data

This commit is contained in:
nsubiron 2018-04-13 16:33:13 +02:00
parent 51f4d1271d
commit 2417d82dfd
9 changed files with 246 additions and 172 deletions

View File

@ -35,6 +35,11 @@ to a more human readable palette of colors. It can be found at
["Util/ImageConverter"][imgconvlink]. Alternatively, they can also be converted
using the functions at `carla.image_converter` Python module.
Note that all the sensor data comes with a _frame number_ stamp; this _frame
number_ matches the one received in the measurements. This is especially useful
when running the simulator in asynchronous mode, as it allows synchronizing the
sensor data on the client side.
[clientexamplelink]: https://github.com/carla-simulator/carla/blob/master/PythonClient/client_example.py
[settingslink]: https://github.com/carla-simulator/carla/blob/master/Docs/Example.CarlaSettings.ini
[imgconvlink]: https://github.com/carla-simulator/carla/tree/master/Util/ImageConverter

View File

@ -183,31 +183,36 @@ class CarlaClient(object):
def _make_sensor_parsers(sensors):
image_types = ['None', 'SceneFinal', 'Depth', 'SemanticSegmentation']
getimgtype = lambda id: image_types[id] if len(image_types) > id else 'Unknown'
getint = lambda data, index: struct.unpack('<L', data[index*4:index*4+4])[0]
getint32 = lambda data, index: struct.unpack('<L', data[index*4:index*4+4])[0]
getint64 = lambda data, index: struct.unpack('<Q', data[index*4:index*4+8])[0]
getfloat = lambda data, index: struct.unpack('<f', data[index*4:index*4+4])[0]
def parse_image(data):
width = getint(data, 0)
height = getint(data, 1)
image_type = getimgtype(getint(data, 2))
fov = getfloat(data, 3)
return sensor.Image(width, height, image_type, fov, data[16:])
frame_number = getint64(data, 0)
width = getint32(data, 2)
height = getint32(data, 3)
image_type = getimgtype(getint32(data, 4))
fov = getfloat(data, 5)
return sensor.Image(frame_number, width, height, image_type, fov, data[24:])
def parse_lidar(data):
horizontal_angle = getfloat(data, 0)
channels = getint(data, 1)
frame_number = getint64(data, 0)
horizontal_angle = getfloat(data, 2)
channels = getint32(data, 3)
header_size = 16
point_count_by_channel = numpy.frombuffer(
data[8:8+channels*4],
data[header_size:header_size+channels*4],
dtype=numpy.dtype('uint32'))
points = numpy.frombuffer(
data[8+channels*4:],
data[header_size+channels*4:],
dtype=numpy.dtype('f4'))
points = numpy.reshape(points, (int(points.shape[0]/3), 3))
return sensor.LidarMeasurement(
frame_number,
horizontal_angle,
channels,
point_count_by_channel,
sensor.PointCloud(points))
sensor.PointCloud(frame_number, points))
class SensorDefinition(object):
def __init__(self, s):

View File

@ -154,6 +154,9 @@ def depth_to_local_point_cloud(image, color=None, max_depth=0.9):
# [[X1,Y1,Z1,R1,G1,B1],[X2,Y2,Z2,R2,G2,B2], ... [Xn,Yn,Zn,Rn,Gn,Bn]]
if color is not None:
# numpy.concatenate((numpy.transpose(p3d), color), axis=1)
return sensor.PointCloud(numpy.transpose(p3d), color_array=color)
return sensor.PointCloud(
image.frame_number,
numpy.transpose(p3d),
color_array=color)
# [[X1,Y1,Z1],[X2,Y2,Z2], ... [Xn,Yn,Zn]]
return sensor.PointCloud(numpy.transpose(p3d))
return sensor.PointCloud(image.frame_number, numpy.transpose(p3d))

View File

@ -134,15 +134,18 @@ class Lidar(Sensor):
# -- SensorData ----------------------------------------------------------------
# ==============================================================================
class SensorData(object):
"""Base class for sensor data returned from the server."""
pass
def __init__(self, frame_number):
self.frame_number = frame_number
class Image(SensorData):
"""Data generated by a Camera."""
def __init__(self, width, height, image_type, fov, raw_data):
def __init__(self, frame_number, width, height, image_type, fov, raw_data):
super(Image, self).__init__(frame_number=frame_number)
assert len(raw_data) == 4 * width * height
self.width = width
self.height = height
@ -195,7 +198,8 @@ class Image(SensorData):
class PointCloud(SensorData):
"""A list of points."""
def __init__(self, array, color_array=None):
def __init__(self, frame_number, array, color_array=None):
super(PointCloud, self).__init__(frame_number=frame_number)
self._array = array
self._color_array = color_array
self._has_colors = color_array is not None
@ -306,7 +310,8 @@ class PointCloud(SensorData):
class LidarMeasurement(SensorData):
"""Data generated by a Lidar."""
def __init__(self, horizontal_angle, channels, point_count_by_channel, point_cloud):
def __init__(self, frame_number, horizontal_angle, channels, point_count_by_channel, point_cloud):
super(LidarMeasurement, self).__init__(frame_number=frame_number)
assert numpy.sum(point_count_by_channel) == len(point_cloud.array)
self.horizontal_angle = horizontal_angle
self.channels = channels

View File

@ -15,6 +15,7 @@
/// The header consists of an array of uint32's in the following layout
///
/// {
/// Frame number (uint64)
/// Horizontal angle (float),
/// Channel count,
/// Point count of channel 0,
@ -37,8 +38,8 @@ public:
explicit FLidarMeasurement(uint32 SensorId = 0u, uint32 ChannelCount = 0u)
: SensorId(SensorId)
{
Header.AddDefaulted(2u + ChannelCount);
Header[1] = ChannelCount;
Header.AddDefaulted(4u + ChannelCount);
Header[3] = ChannelCount;
}
FLidarMeasurement &operator=(FLidarMeasurement &&Other)
@ -50,31 +51,36 @@ public:
return *this;
}
/// Write the 64-bit frame number into the first two uint32 slots of the
/// header (see the header layout comment above).
void SetFrameNumber(uint64 FrameNumber)
{
  // Copy the full width of the value. The previous size argument of 2u
  // only copied two BYTES of the eight-byte frame number (confusing the
  // two uint32 header slots with a byte count), truncating it.
  std::memcpy(Header.GetData(), reinterpret_cast<const void *>(&FrameNumber), sizeof(FrameNumber));
}
float GetHorizontalAngle() const
{
return reinterpret_cast<const float &>(Header[0]);
return reinterpret_cast<const float &>(Header[2]);
}
void SetHorizontalAngle(float HorizontalAngle)
{
Header[0] = reinterpret_cast<const uint32 &>(HorizontalAngle);
Header[2] = reinterpret_cast<const uint32 &>(HorizontalAngle);
}
uint32 GetChannelCount() const
{
return Header[1];
return Header[3];
}
void Reset(uint32 TotalPointCount)
{
std::memset(Header.GetData() + 2u, 0, sizeof(uint32) * GetChannelCount());
std::memset(Header.GetData() + 4u, 0, sizeof(uint32) * GetChannelCount());
Points.Reset(3u * TotalPointCount);
}
void WritePoint(uint32 Channel, const FVector &Point)
{
check(Header[1] > Channel);
Header[2u + Channel] += 1u;
check(Header[3] > Channel);
Header[4u + Channel] += 1u;
constexpr float TO_METERS = 1e-2f;
Points.Emplace(TO_METERS * Point.X);
Points.Emplace(TO_METERS * Point.Y);

View File

@ -8,40 +8,63 @@
#include "SceneCaptureCamera.h"
#include "Sensor/SensorDataView.h"
#include "Game/CarlaGameInstance.h"
#include "Settings/CarlaSettings.h"
#include "Components/DrawFrustumComponent.h"
#include "Components/SceneCaptureComponent2D.h"
#include "Components/StaticMeshComponent.h"
#include "ConstructorHelpers.h"
#include "CoreGlobals.h"
#include "Engine/CollisionProfile.h"
#include "Engine/TextureRenderTarget2D.h"
#include "Materials/Material.h"
#include "Game/CarlaGameInstance.h"
#include "Kismet/KismetSystemLibrary.h"
#include <memory>
#include "ConstructorHelpers.h"
#include "Materials/Material.h"
#include <memory>
// =============================================================================
// -- Local static variables ---------------------------------------------------
// =============================================================================
static constexpr auto DEPTH_MAT_PATH =
#if PLATFORM_LINUX
TEXT("Material'/Carla/PostProcessingMaterials/DepthEffectMaterial_GLSL.DepthEffectMaterial_GLSL'");
TEXT("Material'/Carla/PostProcessingMaterials/DepthEffectMaterial_GLSL.DepthEffectMaterial_GLSL'");
#elif PLATFORM_WINDOWS
TEXT("Material'/Carla/PostProcessingMaterials/DepthEffectMaterial.DepthEffectMaterial'");
TEXT("Material'/Carla/PostProcessingMaterials/DepthEffectMaterial.DepthEffectMaterial'");
#else
# error No depth material defined for this platform
#endif
static constexpr auto SEMANTIC_SEGMENTATION_MAT_PATH =
TEXT("Material'/Carla/PostProcessingMaterials/GTMaterial.GTMaterial'");
TEXT("Material'/Carla/PostProcessingMaterials/GTMaterial.GTMaterial'");
// =============================================================================
// -- Local static methods and types -------------------------------------------
// =============================================================================
// Binary layout of the image header prepended to each captured frame before
// it is sent to the client. Field order and widths are the wire format and
// must stay in sync with the client-side parser, which reads the frame
// number as a uint64 followed by width/height/type as uint32 and FOV as a
// 32-bit float.
struct FImageHeaderData
{
uint64 FrameNumber;  // Game frame counter at capture time.
uint32 Width;        // Image width in pixels.
uint32 Height;       // Image height in pixels.
uint32 Type;         // Post-process effect id (None/SceneFinal/Depth/...).
float FOV;           // Camera horizontal field of view, in degrees.
};
static void RemoveShowFlags(FEngineShowFlags &ShowFlags);
// =============================================================================
// -- ASceneCaptureCamera ------------------------------------------------------
// =============================================================================
uint32 ASceneCaptureCamera::NumSceneCapture = 0;
ASceneCaptureCamera::ASceneCaptureCamera(const FObjectInitializer& ObjectInitializer) :
Super(ObjectInitializer),
SizeX(720u),
SizeY(512u),
PostProcessEffect(EPostProcessEffect::SceneFinal)
ASceneCaptureCamera::ASceneCaptureCamera(const FObjectInitializer &ObjectInitializer)
: Super(ObjectInitializer),
SizeX(720u),
SizeY(512u),
PostProcessEffect(EPostProcessEffect::SceneFinal)
{
PrimaryActorTick.bCanEverTick = true;
PrimaryActorTick.TickGroup = TG_PrePhysics;
@ -59,24 +82,28 @@ ASceneCaptureCamera::ASceneCaptureCamera(const FObjectInitializer& ObjectInitial
DrawFrustum->bIsEditorOnly = true;
DrawFrustum->SetupAttachment(MeshComp);
CaptureRenderTarget = CreateDefaultSubobject<UTextureRenderTarget2D>(FName(*FString::Printf(TEXT("CaptureRenderTarget%d"),NumSceneCapture)));
#if WITH_EDITORONLY_DATA
CaptureRenderTarget->CompressionNoAlpha = true;
CaptureRenderTarget->MipGenSettings = TextureMipGenSettings::TMGS_NoMipmaps;
CaptureRenderTarget->bUseLegacyGamma = false;
#endif
CaptureRenderTarget = CreateDefaultSubobject<UTextureRenderTarget2D>(
FName(*FString::Printf(TEXT("CaptureRenderTarget%d"), NumSceneCapture)));
#if WITH_EDITORONLY_DATA
CaptureRenderTarget->CompressionNoAlpha = true;
CaptureRenderTarget->MipGenSettings = TextureMipGenSettings::TMGS_NoMipmaps;
CaptureRenderTarget->bUseLegacyGamma = false;
#endif
CaptureRenderTarget->CompressionSettings = TextureCompressionSettings::TC_Default;
CaptureRenderTarget->SRGB = false;
CaptureRenderTarget->bAutoGenerateMips = false;
CaptureRenderTarget->AddressX = TextureAddress::TA_Clamp;
CaptureRenderTarget->AddressY = TextureAddress::TA_Clamp;
CaptureComponent2D = CreateDefaultSubobject<USceneCaptureComponent2D>(TEXT("SceneCaptureComponent2D"));
CaptureComponent2D->SetupAttachment(MeshComp);
CaptureComponent2D = CreateDefaultSubobject<USceneCaptureComponent2D>(
TEXT("SceneCaptureComponent2D"));
CaptureComponent2D->SetupAttachment(MeshComp);
// Load post-processing materials.
static ConstructorHelpers::FObjectFinder<UMaterial> DEPTH(DEPTH_MAT_PATH);
static ConstructorHelpers::FObjectFinder<UMaterial> DEPTH(
DEPTH_MAT_PATH);
PostProcessDepth = DEPTH.Object;
static ConstructorHelpers::FObjectFinder<UMaterial> SEMANTIC_SEGMENTATION(SEMANTIC_SEGMENTATION_MAT_PATH);
static ConstructorHelpers::FObjectFinder<UMaterial> SEMANTIC_SEGMENTATION(
SEMANTIC_SEGMENTATION_MAT_PATH);
PostProcessSemanticSegmentation = SEMANTIC_SEGMENTATION.Object;
NumSceneCapture++;
}
@ -87,18 +114,23 @@ void ASceneCaptureCamera::PostActorCreated()
// no need load the editor mesh when there is no editor
#if WITH_EDITOR
if(MeshComp)
if (MeshComp)
{
if (!IsRunningCommandlet())
{
if( !MeshComp->GetStaticMesh())
if (!MeshComp->GetStaticMesh())
{
UStaticMesh* CamMesh = LoadObject<UStaticMesh>(NULL, TEXT("/Engine/EditorMeshes/MatineeCam_SM.MatineeCam_SM"), NULL, LOAD_None, NULL);
UStaticMesh *CamMesh = LoadObject<UStaticMesh>(
NULL,
TEXT("/Engine/EditorMeshes/MatineeCam_SM.MatineeCam_SM"),
NULL,
LOAD_None,
NULL);
MeshComp->SetStaticMesh(CamMesh);
}
}
}
#endif // WITH_EDITOR
#endif // WITH_EDITOR
// Sync component with CameraActor frustum settings.
UpdateDrawFrustum();
@ -111,51 +143,53 @@ void ASceneCaptureCamera::BeginPlay()
// Setup render target.
const bool bInForceLinearGamma = bRemovePostProcessing;
CaptureRenderTarget->InitCustomFormat(SizeX, SizeY, PF_B8G8R8A8, bInForceLinearGamma);
if(!IsValid(CaptureComponent2D)||CaptureComponent2D->IsPendingKill())
if (!IsValid(CaptureComponent2D) || CaptureComponent2D->IsPendingKill())
{
CaptureComponent2D = NewObject<USceneCaptureComponent2D>(this,TEXT("SceneCaptureComponent2D"));
CaptureComponent2D = NewObject<USceneCaptureComponent2D>(this, TEXT("SceneCaptureComponent2D"));
CaptureComponent2D->SetupAttachment(MeshComp);
}
CaptureComponent2D->Deactivate();
CaptureComponent2D->TextureTarget = CaptureRenderTarget;
// Setup camera post-processing depending on the quality level:
const UCarlaGameInstance* GameInstance = Cast<UCarlaGameInstance>(GetWorld()->GetGameInstance());
check(GameInstance!=nullptr);
const UCarlaSettings& CarlaSettings = GameInstance->GetCarlaSettings();
switch(PostProcessEffect)
// Setup camera post-processing depending on the quality level:
const UCarlaGameInstance *GameInstance = Cast<UCarlaGameInstance>(GetWorld()->GetGameInstance());
check(GameInstance != nullptr);
const UCarlaSettings &CarlaSettings = GameInstance->GetCarlaSettings();
switch (PostProcessEffect)
{
case EPostProcessEffect::None: break;
case EPostProcessEffect::SceneFinal:
{
//we set LDR for high quality because it will include post-fx
//and HDR for low quality to avoid high contrast
switch(CarlaSettings.GetQualitySettingsLevel())
{
case EQualitySettingsLevel::Low:
CaptureComponent2D->CaptureSource = ESceneCaptureSource::SCS_SceneColorHDRNoAlpha;
break;
default:
//LDR is faster than HDR (smaller bitmap array)
CaptureComponent2D->CaptureSource = ESceneCaptureSource::SCS_FinalColorLDR;
break;
}
case EPostProcessEffect::None:
break;
case EPostProcessEffect::SceneFinal:
{
// We set LDR for high quality because it will include post-fx and HDR for
// low quality to avoid high contrast.
switch (CarlaSettings.GetQualitySettingsLevel())
{
case EQualitySettingsLevel::Low:
CaptureComponent2D->CaptureSource = ESceneCaptureSource::SCS_SceneColorHDRNoAlpha;
break;
default:
// LDR is faster than HDR (smaller bitmap array).
CaptureComponent2D->CaptureSource = ESceneCaptureSource::SCS_FinalColorLDR;
break;
}
break;
}
default:
CaptureComponent2D->CaptureSource = SCS_FinalColorLDR;
break;
}
default:
CaptureComponent2D->CaptureSource = SCS_FinalColorLDR;
break;
}
if (bRemovePostProcessing)
if (bRemovePostProcessing)
{
RemoveShowFlags(CaptureComponent2D->ShowFlags);
}
if (PostProcessEffect == EPostProcessEffect::Depth)
if (PostProcessEffect == EPostProcessEffect::Depth)
{
CaptureComponent2D->PostProcessSettings.AddBlendable(PostProcessDepth, 1.0f);
} else if (PostProcessEffect == EPostProcessEffect::SemanticSegmentation)
}
else if (PostProcessEffect == EPostProcessEffect::SemanticSegmentation)
{
CaptureComponent2D->PostProcessSettings.AddBlendable(PostProcessSemanticSegmentation, 1.0f);
}
@ -163,37 +197,50 @@ void ASceneCaptureCamera::BeginPlay()
CaptureComponent2D->UpdateContent();
CaptureComponent2D->Activate();
//Make sure that there is enough time in the render queue
UKismetSystemLibrary::ExecuteConsoleCommand(GetWorld(), FString("g.TimeoutForBlockOnRenderFence 300000"));
// Make sure that there is enough time in the render queue.
UKismetSystemLibrary::ExecuteConsoleCommand(
GetWorld(),
FString("g.TimeoutForBlockOnRenderFence 300000"));
Super::BeginPlay();
}
void ASceneCaptureCamera::EndPlay(const EEndPlayReason::Type EndPlayReason)
{
if(NumSceneCapture!=0) NumSceneCapture = 0;
if (NumSceneCapture != 0)
{
NumSceneCapture = 0;
}
}
void ASceneCaptureCamera::Tick(const float DeltaSeconds)
{
Super::Tick(DeltaSeconds);
if(IsVulkanPlatform(GMaxRHIShaderPlatform))
const auto FrameNumber = GFrameCounter;
if (IsVulkanPlatform(GMaxRHIShaderPlatform))
{
auto fn = [=](FRHICommandListImmediate& RHICmdList){WritePixelsNonBlocking(DeltaSeconds,RHICmdList);};
auto fn = [=](FRHICommandListImmediate &RHICmdList) {
WritePixelsNonBlocking(FrameNumber, RHICmdList);
};
ENQUEUE_UNIQUE_RENDER_COMMAND_ONEPARAMETER(
FWritePixelsNonBlocking,
decltype(fn),write_function_vulkan,fn,
{
write_function_vulkan(RHICmdList);
});
} else
{
auto fn = [=](){WritePixels(DeltaSeconds);};
ENQUEUE_UNIQUE_RENDER_COMMAND_ONEPARAMETER(
FWritePixels,
decltype(fn),write_function,fn,
FWritePixelsNonBlocking,
decltype(fn), write_function_vulkan, fn,
{
write_function();
write_function_vulkan(RHICmdList);
});
}
else
{
auto fn = [=]() {
WritePixels(FrameNumber);
};
ENQUEUE_UNIQUE_RENDER_COMMAND_ONEPARAMETER(
FWritePixels,
decltype(fn), write_function, fn,
{
write_function();
});
}
}
@ -214,7 +261,8 @@ void ASceneCaptureCamera::SetPostProcessEffect(EPostProcessEffect otherPostProce
{
PostProcessEffect = otherPostProcessEffect;
auto &PostProcessSettings = CaptureComponent2D->PostProcessSettings;
if (PostProcessEffect != EPostProcessEffect::SceneFinal) {
if (PostProcessEffect != EPostProcessEffect::SceneFinal)
{
PostProcessSettings.bOverride_AutoExposureMethod = false;
PostProcessSettings.bOverride_AutoExposureMinBrightness = false;
PostProcessSettings.bOverride_AutoExposureMaxBrightness = false;
@ -238,7 +286,8 @@ void ASceneCaptureCamera::Set(const UCameraDescription &CameraDescription)
{
Super::Set(CameraDescription);
if (CameraDescription.bOverrideCameraPostProcessParameters) {
if (CameraDescription.bOverrideCameraPostProcessParameters)
{
auto &Override = CameraDescription.CameraPostProcessParameters;
auto &PostProcessSettings = CaptureComponent2D->PostProcessSettings;
PostProcessSettings.bOverride_AutoExposureMethod = true;
@ -257,13 +306,15 @@ void ASceneCaptureCamera::Set(const UCameraDescription &CameraDescription)
bool ASceneCaptureCamera::ReadPixels(TArray<FColor> &BitMap) const
{
if(!CaptureRenderTarget)
if (!CaptureRenderTarget)
{
UE_LOG(LogCarla, Error, TEXT("SceneCaptureCamera: Missing render target"));
return false;
UE_LOG(LogCarla, Error, TEXT("SceneCaptureCamera: Missing render target"));
return false;
}
FTextureRenderTargetResource* RTResource = CaptureRenderTarget->GameThread_GetRenderTargetResource();
if (RTResource == nullptr) {
FTextureRenderTargetResource *RTResource =
CaptureRenderTarget->GameThread_GetRenderTargetResource();
if (RTResource == nullptr)
{
UE_LOG(LogCarla, Error, TEXT("SceneCaptureCamera: Missing render target"));
return false;
}
@ -272,76 +323,62 @@ bool ASceneCaptureCamera::ReadPixels(TArray<FColor> &BitMap) const
return RTResource->ReadPixels(BitMap, ReadPixelFlags);
}
void ASceneCaptureCamera::WritePixelsNonBlocking(float DeltaTime, FRHICommandListImmediate& rhi_cmd_list) const
{
void ASceneCaptureCamera::WritePixelsNonBlocking(
const uint64 FrameNumber,
FRHICommandListImmediate &rhi_cmd_list) const
{
check(IsInRenderingThread());
if(!CaptureRenderTarget)
if (!CaptureRenderTarget)
{
UE_LOG(LogCarla, Error, TEXT("SceneCaptureCamera: Missing render target"));
return ;
UE_LOG(LogCarla, Error, TEXT("SceneCaptureCamera: Missing render target"));
return;
}
FTextureRenderTarget2DResource* RenderResource = (FTextureRenderTarget2DResource*)CaptureRenderTarget->Resource;
FTextureRenderTarget2DResource *RenderResource =
(FTextureRenderTarget2DResource *) CaptureRenderTarget->Resource;
FTextureRHIParamRef texture = RenderResource->GetRenderTargetTexture();
if(!texture)
if (!texture)
{
UE_LOG(LogCarla, Error, TEXT("SceneCaptureCamera: Missing render target texture"));
return;
}
struct {
uint32 Width;
uint32 Height;
uint32 Type;
float FOV;
} ImageHeader = {
FImageHeaderData ImageHeader = {
FrameNumber,
SizeX,
SizeY,
PostProcessEffect::ToUInt(PostProcessEffect),
CaptureComponent2D->FOVAngle
};
struct FReadSurfaceContext
{
FRenderTarget* SrcRenderTarget;
TArray<FColor>* OutData;
FIntRect Rect;
FReadSurfaceDataFlags Flags;
};
TArray<FColor> Pixels;
TArray<FColor> Pixels;
rhi_cmd_list.ReadSurfaceData(
texture,
FIntRect(0, 0, RenderResource->GetSizeXY().X, RenderResource->GetSizeXY().Y),
Pixels,
FReadSurfaceDataFlags(RCM_UNorm, CubeFace_MAX)
);
texture,
FIntRect(0, 0, RenderResource->GetSizeXY().X, RenderResource->GetSizeXY().Y),
Pixels,
FReadSurfaceDataFlags(RCM_UNorm, CubeFace_MAX));
FSensorDataView DataView(
GetId(),
FReadOnlyBufferView{reinterpret_cast<const void *>(&ImageHeader), sizeof(ImageHeader)},
FReadOnlyBufferView{Pixels}
);
WriteSensorData(DataView);
GetId(),
FReadOnlyBufferView{reinterpret_cast<const void *>(&ImageHeader), sizeof(ImageHeader)},
FReadOnlyBufferView{Pixels});
WriteSensorData(DataView);
}
void ASceneCaptureCamera::WritePixels(float DeltaTime) const
void ASceneCaptureCamera::WritePixels(const uint64 FrameNumber) const
{
FRHITexture2D *texture = CaptureRenderTarget->GetRenderTargetResource()->GetRenderTargetTexture();
if(!texture)
if (!texture)
{
UE_LOG(LogCarla, Error, TEXT("SceneCaptureCamera: Missing render texture"));
return ;
UE_LOG(LogCarla, Error, TEXT("SceneCaptureCamera: Missing render texture"));
return;
}
const uint32 num_bytes_per_pixel = 4; // PF_R8G8B8A8
const uint32 width = texture->GetSizeX();
const uint32 height = texture->GetSizeY();
const uint32 dest_stride = width * height * num_bytes_per_pixel;
uint32 src_stride;
uint8 *src = reinterpret_cast<uint8*>(RHILockTexture2D(texture, 0, RLM_ReadOnly, src_stride, false));
struct {
uint32 Width;
uint32 Height;
uint32 Type;
float FOV;
} ImageHeader = {
uint8 *src = reinterpret_cast<uint8 *>(
RHILockTexture2D(texture, 0, RLM_ReadOnly, src_stride, false));
FImageHeaderData ImageHeader = {
FrameNumber,
width,
height,
PostProcessEffect::ToUInt(PostProcessEffect),
@ -349,49 +386,53 @@ void ASceneCaptureCamera::WritePixels(float DeltaTime) const
};
std::unique_ptr<uint8[]> dest = nullptr;
//Direct 3D uses additional rows in the buffer,so we need check the result stride from the lock:
if(IsD3DPlatform(GMaxRHIShaderPlatform,false) && (dest_stride!=src_stride))
// Direct3D may use additional padding rows in the locked buffer, so we need
// to check the stride returned by the lock:
if (IsD3DPlatform(GMaxRHIShaderPlatform, false) && (dest_stride != src_stride))
{
const uint32 copy_row_stride = width * num_bytes_per_pixel;
dest = std::make_unique<uint8[]>(dest_stride);
// Copy per row
uint8* dest_row = dest.get();
uint8* src_row = src;
uint8 *dest_row = dest.get();
uint8 *src_row = src;
for (uint32 Row = 0; Row < height; ++Row)
{
FMemory::Memcpy(dest_row, src_row, copy_row_stride);
dest_row += copy_row_stride;
src_row += src_stride;
FMemory::Memcpy(dest_row, src_row, copy_row_stride);
dest_row += copy_row_stride;
src_row += src_stride;
}
src = dest.get();
}
}
const FSensorDataView DataView(
GetId(),
FReadOnlyBufferView{reinterpret_cast<const void *>(&ImageHeader), sizeof(ImageHeader)},
FReadOnlyBufferView{src,dest_stride}
);
GetId(),
FReadOnlyBufferView{reinterpret_cast<const void *>(&ImageHeader), sizeof(ImageHeader)},
FReadOnlyBufferView{src, dest_stride});
WriteSensorData(DataView);
RHIUnlockTexture2D(texture, 0, false);
}
void ASceneCaptureCamera::UpdateDrawFrustum()
{
if(DrawFrustum && CaptureComponent2D)
if (DrawFrustum && CaptureComponent2D)
{
DrawFrustum->FrustumStartDist = GNearClippingPlane;
// 1000 is the default frustum distance, ideally this would be infinite but that might cause rendering issues
DrawFrustum->FrustumEndDist = (CaptureComponent2D->MaxViewDistanceOverride > DrawFrustum->FrustumStartDist)
// 1000 is the default frustum distance, ideally this would be infinite but
// that might cause rendering issues.
DrawFrustum->FrustumEndDist =
(CaptureComponent2D->MaxViewDistanceOverride > DrawFrustum->FrustumStartDist)
? CaptureComponent2D->MaxViewDistanceOverride : 1000.0f;
DrawFrustum->FrustumAngle = CaptureComponent2D->FOVAngle;
//DrawFrustum->FrustumAspectRatio = CaptureComponent2D->AspectRatio;
}
}
// =============================================================================
// -- Local static functions implementations -----------------------------------
// =============================================================================
// Remove the show flags that might interfere with post-processing effects like
// depth and semantic segmentation.
static void RemoveShowFlags(FEngineShowFlags &ShowFlags)

View File

@ -73,10 +73,16 @@ protected:
static uint32 NumSceneCapture;
private:
///Read the camera buffer and write it to the client with no lock of the resources (for Vulkan API)
void WritePixelsNonBlocking(float DeltaTime,FRHICommandListImmediate& rhi_cmd_list) const;
///Read the camera buffer and write it to the client with opengl or direct3d
void WritePixels(float DeltaTime) const;
/// Read the camera buffer and write it to the client with no lock of the
/// resources (for Vulkan API).
void WritePixelsNonBlocking(
uint64 FrameNumber,
FRHICommandListImmediate& rhi_cmd_list) const;
/// Read the camera buffer and write it to the client with opengl or direct3d.
void WritePixels(uint64 FrameNumber) const;
/// Used to synchronize the DrawFrustumComponent with the
/// SceneCaptureComponent2D settings.
void UpdateDrawFrustum();

View File

@ -17,11 +17,12 @@ namespace test {
std::lock_guard<std::mutex> lock(_mutex);
const struct {
uint64_t FrameNumber;
uint32_t Width;
uint32_t Height;
uint32_t Type;
float FOV;
} ImageHeader = {300u, 200u, 1u, 90.0f};
} ImageHeader = {++_frame_number, 300u, 200u, 1u, 90.0f};
_data.header_size = sizeof(ImageHeader);
auto header = std::make_unique<unsigned char[]>(_data.header_size);

View File

@ -34,6 +34,8 @@ namespace test {
Sensor(uint32_t id);
uint64_t _frame_number = 0u;
mutable std::mutex _mutex;
const std::string _name;