Changing classes of sensors a little

bernatx 2022-07-18 09:27:48 +02:00 committed by bernat
parent 2ff36dbfda
commit a6ed1f9453
12 changed files with 132 additions and 226 deletions

Changed file 1 of 12

@@ -19,6 +19,7 @@
 #include "carla/sensor/s11n/EpisodeStateSerializer.h"
 #include "carla/sensor/s11n/GnssSerializer.h"
 #include "carla/sensor/s11n/ImageSerializer.h"
+#include "carla/sensor/s11n/NormalsImageSerializer.h"
 #include "carla/sensor/s11n/OpticalFlowImageSerializer.h"
 #include "carla/sensor/s11n/IMUSerializer.h"
 #include "carla/sensor/s11n/LidarSerializer.h"
@@ -59,7 +60,7 @@ namespace sensor {
   using SensorRegistry = CompositeSerializer<
     std::pair<ACollisionSensor *, s11n::CollisionEventSerializer>,
     std::pair<ADepthCamera *, s11n::ImageSerializer>,
-    std::pair<ANormalsCamera *, s11n::ImageSerializer>,
+    std::pair<ANormalsCamera *, s11n::NormalsImageSerializer>,
     std::pair<ADVSCamera *, s11n::DVSEventArraySerializer>,
     std::pair<AGnssSensor *, s11n::GnssSerializer>,
     std::pair<AInertialMeasurementUnit *, s11n::IMUSerializer>,
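Note (not part of the commit): the registry resolves each sensor's serializer at compile time, so after this hunk the normals camera is paired with the new serializer. A minimal sketch of how that mapping could be checked, reusing the get<> trait that already appears in PixelReader.h further down; the header path and the static_assert are illustrative assumptions, and it only compiles inside the LibCarla/Carla plugin build where ANormalsCamera is declared:

    #include <type_traits>
    #include "carla/sensor/SensorRegistry.h"  // assumed path of the registry header shown above

    // Sketch: after this commit the trait should name s11n::NormalsImageSerializer.
    using NormalsSerializer = carla::sensor::SensorRegistry::get<ANormalsCamera *>::type;
    static_assert(
        std::is_same<NormalsSerializer, carla::sensor::s11n::NormalsImageSerializer>::value,
        "ANormalsCamera is expected to serialize through NormalsImageSerializer");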

Changed file 2 of 12

@@ -72,6 +72,7 @@ namespace data {
     float x = 0;
     float y = 0;
+    MSGPACK_DEFINE_ARRAY(x, y);
   };
 #pragma pack(pop)
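Note (not part of the commit): MSGPACK_DEFINE_ARRAY(x, y) is what lets the s11n layer pack this pixel as a plain two-element MessagePack array. A self-contained sketch of the mechanism using a stand-in struct and standalone msgpack-c (CARLA pulls msgpack in through its own vendored headers, so the include there differs):

    #include <msgpack.hpp>

    // Stand-in for the pixel struct in the hunk above.
    struct FlowPixelSketch {
      float x = 0;
      float y = 0;
      MSGPACK_DEFINE_ARRAY(x, y);  // packs/unpacks as the array [x, y]
    };

    int main() {
      FlowPixelSketch Pixel;
      Pixel.x = 0.25f;
      Pixel.y = -0.75f;
      msgpack::sbuffer Packed;
      msgpack::pack(Packed, Pixel);  // Packed now holds the array [0.25, -0.75]
      return 0;
    }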

Changed file 3 of 12

@@ -13,12 +13,15 @@ namespace carla {
 namespace sensor {
 namespace data {

-  /// An image of 32-bit BGRA colors (8-bit channels)
+  /// An image of 32-bit BGRA colors (8-bit channels, 4 bytes)
   using Image = ImageTmpl<Color>;

-  /// An image of 64-bit BGRA colors (16-bit channels)
+  /// An image of 64-bit BGRA colors (16-bit channels, 2 floats)
   using OpticalFlowImage = ImageTmpl<OpticalFlowPixel>;
+
+  /// An image of 32-bit BGRA colors (8-bit channels, 4 bytes)
+  using NormalsImage = ImageTmpl<Color>;

 } // namespace data
 } // namespace sensor
 } // namespace carla

Changed file 4 of 12

@@ -10,6 +10,7 @@
 #include "carla/sensor/data/Array.h"
 #include "carla/sensor/s11n/ImageSerializer.h"
 #include "carla/sensor/s11n/OpticalFlowImageSerializer.h"
+#include "carla/sensor/s11n/NormalsImageSerializer.h"

 namespace carla {
 namespace sensor {
@@ -23,9 +24,11 @@ namespace data {
     using Serializer = s11n::ImageSerializer;
     using SerializerOpticalFlow = s11n::OpticalFlowImageSerializer;
+    using SerializerNormals = s11n::NormalsImageSerializer;

     friend Serializer;
     friend SerializerOpticalFlow;
+    friend SerializerNormals;

     explicit ImageTmpl(RawData &&data)
       : Super(Serializer::header_offset, std::move(data)) {

Changed file 5 of 12

@@ -31,5 +31,5 @@ ADepthCamera::ADepthCamera(const FObjectInitializer &ObjectInitializer)
 void ADepthCamera::PostPhysTick(UWorld *World, ELevelTick TickType, float DeltaSeconds)
 {
   TRACE_CPUPROFILER_EVENT_SCOPE(ADepthCamera::PostPhysTick);
-  FPixelReader::SendPixelsInRenderThread(*this);
+  FPixelReader::SendPixelsInRenderThread<ADepthCamera, FColor>(*this);
 }

Changed file 6 of 12

@@ -62,6 +62,6 @@ void AInstanceSegmentationCamera::PostPhysTick(UWorld *World, ELevelTick TickTyp
     SceneCapture->ShowOnlyComponents.Emplace(Component);
   }
-  FPixelReader::SendPixelsInRenderThread(*this);
+  FPixelReader::SendPixelsInRenderThread<AInstanceSegmentationCamera, FColor>(*this);
 }

Changed file 7 of 12

@@ -17,7 +17,7 @@ FActorDefinition ANormalsCamera::GetSensorDefinition()
 ANormalsCamera::ANormalsCamera(const FObjectInitializer &ObjectInitializer)
   : Super(ObjectInitializer)
 {
-  Enable16BitFormat(true);
+  Enable16BitFormat(false);
   AddPostProcessingMaterial(
       TEXT("Material'/Carla/PostProcessingMaterials/PhysicLensDistortion.PhysicLensDistortion'"));
   AddPostProcessingMaterial(
@@ -27,5 +27,5 @@ ANormalsCamera::ANormalsCamera(const FObjectInitializer &ObjectInitializer)
 void ANormalsCamera::PostPhysTick(UWorld *World, ELevelTick TickType, float DeltaSeconds)
 {
   TRACE_CPUPROFILER_EVENT_SCOPE(ANormalsCamera::PostPhysTick);
-  FPixelReader::SendPixelsInRenderThread(*this);
+  FPixelReader::SendPixelsInRenderThread<ANormalsCamera, FColor>(*this);
 }

Changed file 8 of 12

@@ -24,6 +24,25 @@ void AOpticalFlowCamera::PostPhysTick(UWorld *World, ELevelTick TickType, float
   auto CVarForceOutputsVelocity = IConsoleManager::Get().FindConsoleVariable(TEXT("r.BasePassForceOutputsVelocity"));
   int32 OldValue = CVarForceOutputsVelocity->GetInt();
   CVarForceOutputsVelocity->Set(1);
-  FPixelReader::SendPixelsInRenderThread(*this, true);
+  std::function<TArray<float>(void *, uint32)> Conversor = [](void *Data, uint32 Size)
+  {
+    TArray<float> IntermediateBuffer;
+    int32 Count = Size / sizeof(FFloat16Color);
+    DEBUG_ASSERT(Count * sizeof(FFloat16Color) == Size);
+    FFloat16Color *Buf = reinterpret_cast<FFloat16Color *>(Data);
+    IntermediateBuffer.Reserve(Count * 2);
+    for (int i=0; i<Count; ++i)
+    {
+      float x = (Buf->R.GetFloat() - 0.5f) * 4.f;
+      float y = (Buf->G.GetFloat() - 0.5f) * 4.f;
+      IntermediateBuffer.Add(x);
+      IntermediateBuffer.Add(y);
+      ++Buf;
+    }
+    return std::move(IntermediateBuffer);
+  };
+  FPixelReader::SendPixelsInRenderThread<AOpticalFlowCamera, float>(*this, true, Conversor);
   CVarForceOutputsVelocity->Set(OldValue);
 }
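Note (not part of the commit): the lambda above repacks each FFloat16Color as two floats, remapping the R and G channels from [0, 1] to roughly [-2, 2], so the sensor payload becomes 8 bytes of raw (x, y) flow per pixel. A minimal sketch of how a consumer could walk that buffer; DataPtr, Width and Height are placeholder names, not CARLA API:

    // Sketch: interpret the converted payload as interleaved (x, y) flow pairs.
    const float *Flow = static_cast<const float *>(DataPtr);
    for (uint32 Pixel = 0u; Pixel < Width * Height; ++Pixel)
    {
      const float FlowX = Flow[2 * Pixel];      // horizontal component, about [-2, 2]
      const float FlowY = Flow[2 * Pixel + 1];  // vertical component, about [-2, 2]
      // ... use FlowX / FlowY ...
    }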

Changed file 9 of 12

@@ -33,146 +33,68 @@ struct LockTexture
   const uint8 *Source;
 };
-// =============================================================================
-// -- Static local functions ---------------------------------------------------
-// =============================================================================
-
-static void WritePixelsToBuffer_Vulkan(
-    const UTextureRenderTarget2D &RenderTarget,
-    uint32 Offset,
-    FRHICommandListImmediate &RHICmdList,
-    FPixelReader::Payload FuncForSending)
-{
-  TRACE_CPUPROFILER_EVENT_SCOPE_STR("WritePixelsToBuffer_Vulkan");
-  check(IsInRenderingThread());
-  auto RenderResource =
-      static_cast<const FTextureRenderTarget2DResource *>(RenderTarget.Resource);
-  FTexture2DRHIRef Texture = RenderResource->GetRenderTargetTexture();
-  if (!Texture)
-  {
-    return;
-  }
-  auto BackBufferReadback = std::make_unique<FRHIGPUTextureReadback>(TEXT("CameraBufferReadback"));
-  FIntPoint BackBufferSize = Texture->GetSizeXY();
-  EPixelFormat BackBufferPixelFormat = Texture->GetFormat();
-  {
-    TRACE_CPUPROFILER_EVENT_SCOPE_STR("EnqueueCopy");
-    BackBufferReadback->EnqueueCopy(RHICmdList, Texture, FResolveRect(0, 0, BackBufferSize.X, BackBufferSize.Y));
-  }
-  // workaround to force RHI with Vulkan to refresh the fences state in the middle of frame
-  {
-    FRenderQueryRHIRef Query = RHICreateRenderQuery(RQT_AbsoluteTime);
-    TRACE_CPUPROFILER_EVENT_SCOPE_STR("create query");
-    RHICmdList.EndRenderQuery(Query);
-    TRACE_CPUPROFILER_EVENT_SCOPE_STR("Flush");
-    RHICmdList.ImmediateFlush(EImmediateFlushType::FlushRHIThread);
-    TRACE_CPUPROFILER_EVENT_SCOPE_STR("query result");
-    uint64 OldAbsTime = 0;
-    RHICmdList.GetRenderQueryResult(Query, OldAbsTime, true);
-  }
-  AsyncTask(ENamedThreads::AnyNormalThreadNormalTask, [=, Readback=std::move(BackBufferReadback)]() mutable {
-    {
-      TRACE_CPUPROFILER_EVENT_SCOPE_STR("Wait GPU transfer");
-      while (!Readback->IsReady())
-      {
-        std::this_thread::yield();
-      }
-    }
-    FPixelFormatInfo PixelFormat = GPixelFormats[BackBufferPixelFormat];
-    int32 Count = (BackBufferSize.Y * (PixelFormat.BlockBytes * BackBufferSize.X));
-    void* LockedData = Readback->Lock(Count);
-    if (LockedData)
-    {
-      FuncForSending(LockedData, Count, Offset);
-    }
-    Readback->Unlock();
-    Readback.reset();
-  });
-}
-
-// Temporal; this avoid allocating the array each time
-TArray<FFloat16Color> gFloatPixels;
-
-static void WriteFloatPixelsToBuffer_Vulkan(
-    const UTextureRenderTarget2D &RenderTarget,
-    uint32 Offset,
-    FRHICommandListImmediate &RHICmdList,
-    FPixelReader::Payload FuncForSending)
-{
-  TRACE_CPUPROFILER_EVENT_SCOPE_STR("WritePixelsToBuffer_Vulkan");
-  check(IsInRenderingThread());
-  auto RenderResource =
-      static_cast<const FTextureRenderTarget2DResource *>(RenderTarget.Resource);
-  FTexture2DRHIRef Texture = RenderResource->GetRenderTargetTexture();
-  if (!Texture)
-  {
-    return;
-  }
-  auto BackBufferReadback = std::make_unique<FRHIGPUTextureReadback>(TEXT("CameraBufferReadback"));
-  FIntPoint BackBufferSize = Texture->GetSizeXY();
-  EPixelFormat BackBufferPixelFormat = Texture->GetFormat();
-  {
-    TRACE_CPUPROFILER_EVENT_SCOPE_STR("EnqueueCopy");
-    BackBufferReadback->EnqueueCopy(RHICmdList, Texture, FResolveRect(0, 0, BackBufferSize.X, BackBufferSize.Y));
-  }
-  // workaround to force RHI with Vulkan to refresh the fences state in the middle of frame
-  {
-    FRenderQueryRHIRef Query = RHICreateRenderQuery(RQT_AbsoluteTime);
-    TRACE_CPUPROFILER_EVENT_SCOPE_STR("create query");
-    RHICmdList.EndRenderQuery(Query);
-    TRACE_CPUPROFILER_EVENT_SCOPE_STR("Flush");
-    RHICmdList.ImmediateFlush(EImmediateFlushType::FlushRHIThread);
-    TRACE_CPUPROFILER_EVENT_SCOPE_STR("query result");
-    uint64 OldAbsTime = 0;
-    RHICmdList.GetRenderQueryResult(Query, OldAbsTime, true);
-  }
-  AsyncTask(ENamedThreads::AnyNormalThreadNormalTask, [=, Readback=std::move(BackBufferReadback)]() mutable {
-    {
-      TRACE_CPUPROFILER_EVENT_SCOPE_STR("Wait GPU transfer");
-      while (!Readback->IsReady())
-      {
-        std::this_thread::yield();
-      }
-    }
-    FPixelFormatInfo PixelFormat = GPixelFormats[BackBufferPixelFormat];
-    int32 Count = (BackBufferSize.Y * (PixelFormat.BlockBytes * BackBufferSize.X));
-    int32 TotalPixels = (BackBufferSize.Y * BackBufferSize.X);
-    void* LockedData = Readback->Lock(Count);
-    if (LockedData)
-    {
-      TArray<float> IntermediateBuffer;
-      FFloat16Color *Data = reinterpret_cast<FFloat16Color *>(LockedData);
-      IntermediateBuffer.Reserve(TotalPixels * 2);
-      for (int i=0; i<TotalPixels; ++i)
-      {
-        float x = (Data->R.GetFloat() - 0.5f) * 4.f;
-        float y = (Data->G.GetFloat() - 0.5f) * 4.f;
-        IntermediateBuffer.Add(x);
-        IntermediateBuffer.Add(y);
-        ++Data;
-      }
-      FuncForSending(reinterpret_cast<void *>(IntermediateBuffer.GetData()), TotalPixels * sizeof(float) * 2 , Offset);
-    }
-    Readback->Unlock();
-    Readback.reset();
-  });
-}
 // =============================================================================
 // -- FPixelReader -------------------------------------------------------------
 // =============================================================================
+void FPixelReader::WritePixelsToBuffer(
+    const UTextureRenderTarget2D &RenderTarget,
+    uint32 Offset,
+    FRHICommandListImmediate &RHICmdList,
+    FPixelReader::Payload FuncForSending)
+{
+  TRACE_CPUPROFILER_EVENT_SCOPE_STR("WritePixelsToBuffer");
+  check(IsInRenderingThread());
+  auto RenderResource =
+      static_cast<const FTextureRenderTarget2DResource *>(RenderTarget.Resource);
+  FTexture2DRHIRef Texture = RenderResource->GetRenderTargetTexture();
+  if (!Texture)
+  {
+    return;
+  }
+  auto BackBufferReadback = std::make_unique<FRHIGPUTextureReadback>(TEXT("CameraBufferReadback"));
+  FIntPoint BackBufferSize = Texture->GetSizeXY();
+  EPixelFormat BackBufferPixelFormat = Texture->GetFormat();
+  {
+    TRACE_CPUPROFILER_EVENT_SCOPE_STR("EnqueueCopy");
+    BackBufferReadback->EnqueueCopy(RHICmdList, Texture, FResolveRect(0, 0, BackBufferSize.X, BackBufferSize.Y));
+  }
+  // workaround to force RHI with Vulkan to refresh the fences state in the middle of frame
+  {
+    FRenderQueryRHIRef Query = RHICreateRenderQuery(RQT_AbsoluteTime);
+    TRACE_CPUPROFILER_EVENT_SCOPE_STR("create query");
+    RHICmdList.EndRenderQuery(Query);
+    TRACE_CPUPROFILER_EVENT_SCOPE_STR("Flush");
+    RHICmdList.ImmediateFlush(EImmediateFlushType::FlushRHIThread);
+    TRACE_CPUPROFILER_EVENT_SCOPE_STR("query result");
+    uint64 OldAbsTime = 0;
+    RHICmdList.GetRenderQueryResult(Query, OldAbsTime, true);
+  }
+  AsyncTask(ENamedThreads::GameThread, [=, Readback=std::move(BackBufferReadback)]() mutable {
+    {
+      TRACE_CPUPROFILER_EVENT_SCOPE_STR("Wait GPU transfer");
+      while (!Readback->IsReady())
+      {
+        std::this_thread::yield();
+      }
+    }
+    FPixelFormatInfo PixelFormat = GPixelFormats[BackBufferPixelFormat];
+    int32 Size = (BackBufferSize.Y * (PixelFormat.BlockBytes * BackBufferSize.X));
+    void* LockedData = Readback->Lock(Size);
+    if (LockedData)
+    {
+      FuncForSending(LockedData, Size, Offset);
+    }
+    Readback->Unlock();
+    Readback.reset();
+  });
+}
 bool FPixelReader::WritePixelsToArray(
     UTextureRenderTarget2D &RenderTarget,
     TArray<FColor> &BitMap)
@@ -226,70 +148,19 @@ TFuture<bool> FPixelReader::SavePixelsToDisk(
   return HighResScreenshotConfig.ImageWriteQueue->Enqueue(MoveTemp(ImageTask));
 }
-void FPixelReader::WritePixelsToBuffer(
-    UTextureRenderTarget2D &RenderTarget,
-    uint32 Offset,
-    FRHICommandListImmediate &InRHICmdList,
-    FPixelReader::Payload FuncForSending,
-    bool use16BitFormat)
-{
-  TRACE_CPUPROFILER_EVENT_SCOPE_STR("WritePixelsToBuffer");
-  check(IsInRenderingThread());
-
-  if (IsVulkanPlatform(GMaxRHIShaderPlatform) || IsD3DPlatform(GMaxRHIShaderPlatform, false))
-  {
-    if (use16BitFormat)
-    {
-      WriteFloatPixelsToBuffer_Vulkan(RenderTarget, Offset, InRHICmdList, std::move(FuncForSending));
-    }
-    else
-    {
-      WritePixelsToBuffer_Vulkan(RenderTarget, Offset, InRHICmdList, std::move(FuncForSending));
-    }
-    return;
-  }
-
-  /*
-  FTextureRenderTargetResource* RenderTargetResource = RenderTarget.GetRenderTargetResource();
-  if(!RenderTargetResource)
-  {
-    return;
-  }
-  FRHITexture2D *Texture = RenderTargetResource->GetRenderTargetTexture();
-  checkf(Texture != nullptr, TEXT("FPixelReader: UTextureRenderTarget2D missing render target texture"));
-
-  const uint32 BytesPerPixel = use16BitFormat ? 8u : 4u; // PF_R8G8B8A8 or PF_FloatRGBA
-  const uint32 Width = Texture->GetSizeX();
-  const uint32 Height = Texture->GetSizeY();
-  const uint32 ExpectedStride = Width * BytesPerPixel;
-
-  uint32 SrcStride;
-  LockTexture Lock(Texture, SrcStride);
-
-#ifdef PLATFORM_WINDOWS
-  // JB: Direct 3D uses additional rows in the buffer, so we need check the
-  // result stride from the lock:
-  if (IsD3DPlatform(GMaxRHIShaderPlatform, false) && (ExpectedStride != SrcStride))
-  {
-    Buffer.reset(Offset + ExpectedStride * Height);
-    auto DstRow = Buffer.begin() + Offset;
-    const uint8 *SrcRow = Lock.Source;
-    for (uint32 Row = 0u; Row < Height; ++Row)
-    {
-      FMemory::Memcpy(DstRow, SrcRow, ExpectedStride);
-      DstRow += ExpectedStride;
-      SrcRow += SrcStride;
-    }
-  }
-  else
-#endif // PLATFORM_WINDOWS
-  {
-    check(ExpectedStride == SrcStride);
-    const uint8 *Source = Lock.Source;
-    if(Source)
-    {
-      Buffer.copy_from(Offset, Source, ExpectedStride * Height);
-    }
-  }
-  */
-}
+// void FPixelReader::WritePixelsToBuffer(
+//   UTextureRenderTarget2D &RenderTarget,
+//   uint32 Offset,
+//   FRHICommandListImmediate &InRHICmdList,
+//   FPixelReader::Payload FuncForSending,
+//   bool use16BitFormat)
+// {
+//   TRACE_CPUPROFILER_EVENT_SCOPE_STR("WritePixelsToBuffer");
+//   check(IsInRenderingThread());
+//   if (IsVulkanPlatform(GMaxRHIShaderPlatform) || IsD3DPlatform(GMaxRHIShaderPlatform, false))
+//   {
+//     WritePixelsToBuffer(RenderTarget, Offset, InRHICmdList, std::move(FuncForSending));
+//     return;
+//   }
+// }
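Note (not part of the commit): with the two Vulkan helpers gone, the single WritePixelsToBuffer above sizes the readback from the render target's pixel format, Height * (BlockBytes * Width). A small worked example, assuming a hypothetical 800x600 target:

    // BGRA8, 4 bytes per pixel:     600 * (4 * 800) = 1,920,000 bytes
    // FloatRGBA, 8 bytes per pixel: 600 * (8 * 800) = 3,840,000 bytes
    int32 Size = BackBufferSize.Y * (PixelFormat.BlockBytes * BackBufferSize.X);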

Changed file 10 of 12

@@ -64,8 +64,8 @@
   /// allocated in front of the buffer.
   ///
   /// @pre To be called from game-thread.
-  template <typename TSensor>
-  static void SendPixelsInRenderThread(TSensor &Sensor, bool use16BitFormat = false);
+  template <typename TSensor, typename TPixel>
+  static void SendPixelsInRenderThread(TSensor &Sensor, bool use16BitFormat = false, std::function<TArray<TPixel>(void *, uint32)> Conversor = {});

 private:
@@ -73,11 +73,10 @@
   ///
   /// @pre To be called from render-thread.
   static void WritePixelsToBuffer(
-      UTextureRenderTarget2D &RenderTarget,
+      const UTextureRenderTarget2D &RenderTarget,
       uint32 Offset,
       FRHICommandListImmediate &InRHICmdList,
-      FPixelReader::Payload FuncForSending,
-      bool use16BitFormat = false);
+      FPixelReader::Payload FuncForSending);

 };
@@ -85,8 +84,8 @@ private:
 // -- FPixelReader::SendPixelsInRenderThread -----------------------------------
 // =============================================================================

-template <typename TSensor>
-void FPixelReader::SendPixelsInRenderThread(TSensor &Sensor, bool use16BitFormat)
+template <typename TSensor, typename TPixel>
+void FPixelReader::SendPixelsInRenderThread(TSensor &Sensor, bool use16BitFormat, std::function<TArray<TPixel>(void *, uint32)> Conversor)
 {
   TRACE_CPUPROFILER_EVENT_SCOPE(FPixelReader::SendPixelsInRenderThread);
   check(Sensor.CaptureRenderTarget != nullptr);
@@ -104,24 +103,34 @@ void FPixelReader::SendPixelsInRenderThread(TSensor &Sensor, bool use16BitFormat
   // game-thread.
   ENQUEUE_RENDER_COMMAND(FWritePixels_SendPixelsInRenderThread)
   (
-    [&Sensor, use16BitFormat](auto &InRHICmdList) mutable
+    [&Sensor, use16BitFormat, Conversor = std::move(Conversor)](auto &InRHICmdList) mutable
    {
      TRACE_CPUPROFILER_EVENT_SCOPE_STR("FWritePixels_SendPixelsInRenderThread");
      /// @todo Can we make sure the sensor is not going to be destroyed?
      if (!Sensor.IsPendingKill())
      {
-        FPixelReader::Payload FuncForSending = [&Sensor, Frame = FCarlaEngine::GetFrameCounter()](void *LockedData, uint32 Count, uint32 Offset)
+        FPixelReader::Payload FuncForSending = [&Sensor, Conversor = std::move(Conversor)](void *LockedData, uint32 Size, uint32 Offset)
        {
          if (Sensor.IsPendingKill()) return;
+
+          TArray<TPixel> Converted;
+
+          // optional conversion of data
+          if (Conversor)
+          {
+            TRACE_CPUPROFILER_EVENT_SCOPE_STR("Data conversion");
+            Converted = Conversor(LockedData, Size);
+            LockedData = reinterpret_cast<void *>(Converted.GetData());
+            Size = Converted.Num() * Converted.GetTypeSize();
+          }
+
          auto Stream = Sensor.GetDataStream(Sensor);
-          // Stream.SetFrameNumber(Frame);
          auto Buffer = Stream.PopBufferFromPool();
          {
            TRACE_CPUPROFILER_EVENT_SCOPE_STR("Buffer Copy");
-            Buffer.copy_from(Offset, boost::asio::buffer(LockedData, Count));
+            Buffer.copy_from(Offset, boost::asio::buffer(LockedData, Size));
          }
          {
            // send
@@ -139,8 +148,7 @@ void FPixelReader::SendPixelsInRenderThread(TSensor &Sensor, bool use16BitFormat
            *Sensor.CaptureRenderTarget,
            carla::sensor::SensorRegistry::get<TSensor *>::type::header_offset,
            InRHICmdList,
-            std::move(FuncForSending),
-            use16BitFormat);
+            std::move(FuncForSending));
      }
    }
  );
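Note (not part of the commit): after this header change every camera names both its own type and the pixel type it streams, and a 16-bit sensor can pass a conversion lambda that runs before the buffer copy. A sketch of the two calling patterns, lifted from the call sites elsewhere in this commit; DepthCamera, FlowCamera and Conversor are placeholder names for a sensor reference and the lambda defined in AOpticalFlowCamera above:

    // 8-bit color sensor, no conversion step:
    FPixelReader::SendPixelsInRenderThread<ADepthCamera, FColor>(DepthCamera);

    // 16-bit sensor whose FFloat16Color readback is converted to plain floats:
    FPixelReader::SendPixelsInRenderThread<AOpticalFlowCamera, float>(FlowCamera, true, Conversor);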

Changed file 11 of 12

@@ -60,5 +60,5 @@ void ASceneCaptureCamera::PostPhysTick(UWorld *World, ELevelTick TickType, float
       TRACE_CPUPROFILER_EVENT_SCOPE_TEXT(*ProfilerText);
     }
   );
-  FPixelReader::SendPixelsInRenderThread(*this);
+  FPixelReader::SendPixelsInRenderThread<ASceneCaptureCamera, FColor>(*this);
 }

Changed file 12 of 12

@@ -27,5 +27,5 @@ ASemanticSegmentationCamera::ASemanticSegmentationCamera(
 void ASemanticSegmentationCamera::PostPhysTick(UWorld *World, ELevelTick TickType, float DeltaSeconds)
 {
   TRACE_CPUPROFILER_EVENT_SCOPE(ASemanticSegmentationCamera::PostPhysTick);
-  FPixelReader::SendPixelsInRenderThread(*this);
+  FPixelReader::SendPixelsInRenderThread<ASemanticSegmentationCamera, FColor>(*this);
 }