// Copyright (c) 2012-2021 Wojciech Figat. All rights reserved.
#pragma once
#include "Engine/Platform/Platform.h"
#include "Engine/Core/NonCopyable.h"
#include "Engine/Core/Types/String.h"
#include "Engine/Core/Collections/Array.h"
#include "Engine/Core/Math/Math.h"
#include "Engine/Scripting/ScriptingType.h"
#if COMPILE_WITH_PROFILER
// Profiler events buffers capacity (tweaked manually)
#define PROFILER_CPU_EVENTS_FRAMES 10
#define PROFILER_CPU_EVENTS_PER_FRAME 1000
/// <summary>
/// Provides CPU performance measuring methods.
/// </summary>
API_CLASS(Static) class FLAXENGINE_API ProfilerCPU
{
    DECLARE_SCRIPTING_TYPE_NO_SPAWN(ProfilerCPU);
public:

    /// <summary>
    /// Represents single CPU profiling event data.
    /// </summary>
    API_STRUCT() struct Event
    {
        DECLARE_SCRIPTING_TYPE_MINIMAL(Event);

        /// <summary>
        /// The start time (in milliseconds).
        /// </summary>
        API_FIELD() double Start;

        /// <summary>
        /// The end time (in milliseconds).
        /// </summary>
        API_FIELD() double End;

        /// <summary>
        /// The event depth. Value 0 is used for the root event.
        /// </summary>
        API_FIELD() int32 Depth;

        /// <summary>
        /// The native dynamic memory allocation size during this event (excluding the child events). Given value is in bytes.
        /// </summary>
        API_FIELD() int32 NativeMemoryAllocation;

        /// <summary>
        /// The managed memory allocation size during this event (excluding the child events). Given value is in bytes.
        /// </summary>
        API_FIELD() int32 ManagedMemoryAllocation;

        /// <summary>
        /// The name of the event.
        /// </summary>
        API_FIELD() const Char* Name;
    };

    /// <summary>
    /// Implements simple profiling events ring-buffer.
    /// </summary>
    class EventBuffer : public NonCopyable
    {
    private:
        Event* _data;
        int32 _capacity;
        int32 _capacityMask;
        int32 _head;
        int32 _count;

    public:
        EventBuffer()
        {
            // Round capacity up to a power of 2 so ring wrap-around can use a cheap bitmask instead of modulo
            _capacity = Math::RoundUpToPowerOf2(PROFILER_CPU_EVENTS_FRAMES * PROFILER_CPU_EVENTS_PER_FRAME);
            _capacityMask = _capacity - 1;
            _data = NewArray<Event>(_capacity);
            _head = 0;
            _count = 0;
        }

        ~EventBuffer()
        {
            DeleteArray(_data, _capacity);
        }

    public:
        /// <summary>
        /// Gets the amount of the events in the buffer.
        /// </summary>
        /// <returns>The events count.</returns>
        FORCE_INLINE int32 GetCount() const
        {
            return _count;
        }

        /// <summary>
        /// Gets the event at the specified index.
        /// </summary>
        /// <param name="index">The index.</param>
        /// <returns>The event</returns>
        Event& Get(int32 index) const
        {
            ASSERT(index >= 0 && index < _capacity);
            return _data[index];
        }

        /// <summary>
        /// Adds new event to the buffer.
        /// </summary>
        /// <returns>The event index.</returns>
        int32 Add()
        {
            const int32 index = _head;
            _head = (_head + 1) & _capacityMask;
            // Count saturates at capacity; when the ring is full the oldest slot gets reused
            _count = Math::Min(_count + 1, _capacity);
            return index;
        }

        /// <summary>
        /// Extracts the buffer data (only ended events starting from the root level with depth=0).
        /// </summary>
        /// <param name="data">The output data.</param>
        /// <param name="withRemove">True if also remove extracted events to prevent double-gather, false if don't modify the buffer data.</param>
        void Extract(Array<Event>& data, bool withRemove);

    public:
        /// <summary>
        /// Ring buffer iterator
        /// </summary>
        struct Iterator
        {
            friend EventBuffer;

        private:
            EventBuffer* _buffer;
            int32 _index;

            Iterator(EventBuffer* buffer, const int32 index)
                : _buffer(buffer)
                , _index(index)
            {
            }

            Iterator(const Iterator& i) = default;

        public:
            FORCE_INLINE int32 Index() const
            {
                return _index;
            }

            FORCE_INLINE Event& Event() const
            {
                ASSERT(_buffer && _index >= 0 && _index < _buffer->_capacity);
                return _buffer->Get(_index);
            }

        public:
            /// <summary>
            /// Checks if iterator is in the end of the collection
            /// </summary>
            /// <returns>True if is in the end, otherwise false</returns>
            bool IsEnd() const
            {
                ASSERT(_buffer);
                return _index == _buffer->_head;
            }

            /// <summary>
            /// Checks if iterator is not in the end of the collection
            /// </summary>
            /// <returns>True if is not in the end, otherwise false</returns>
            bool IsNotEnd() const
            {
                ASSERT(_buffer);
                return _index != _buffer->_head;
            }

        public:
            FORCE_INLINE bool operator==(const Iterator& v) const
            {
                return _buffer == v._buffer && _index == v._index;
            }

            FORCE_INLINE bool operator!=(const Iterator& v) const
            {
                return _buffer != v._buffer || _index != v._index;
            }

        public:
            Iterator& operator++()
            {
                ASSERT(_buffer);
                // Wrap-around via the capacity mask (capacity is a power of 2)
                _index = (_index + 1) & _buffer->_capacityMask;
                return *this;
            }

            Iterator operator++(int)
            {
                ASSERT(_buffer);
                Iterator temp = *this;
                _index = (_index + 1) & _buffer->_capacityMask;
                return temp;
            }

            Iterator& operator--()
            {
                ASSERT(_buffer);
                _index = (_index - 1) & _buffer->_capacityMask;
                return *this;
            }

            Iterator operator--(int)
            {
                ASSERT(_buffer);
                Iterator temp = *this;
                _index = (_index - 1) & _buffer->_capacityMask;
                return temp;
            }
        };

    public:
        FORCE_INLINE Iterator Begin()
        {
            // Oldest event still stored in the ring
            return Iterator(this, (_head - _count) & _capacityMask);
        }

        FORCE_INLINE Iterator Last()
        {
            ASSERT(_count > 0);
            return Iterator(this, (_head - 1) & _capacityMask);
        }

        FORCE_INLINE Iterator End()
        {
            return Iterator(this, _head);
        }
    };

    /// <summary>
    /// Thread registered for profiling. Holds events data with read/write support.
    /// </summary>
    class Thread
    {
    private:
        String _name;
        int32 _depth = 0;

    public:
        Thread(const Char* name)
        {
            _name = name;
        }

        Thread(const String& name)
        {
            _name = name;
        }

    public:
        /// <summary>
        /// The current thread.
        /// </summary>
        static THREADLOCAL Thread* Current;

    public:
        /// <summary>
        /// Gets the name.
        /// </summary>
        /// <returns>The name.</returns>
        FORCE_INLINE const String& GetName() const
        {
            return _name;
        }

        /// <summary>
        /// The events buffer.
        /// </summary>
        EventBuffer Buffer;

    public:
        /// <summary>
        /// Begins the event running on a this thread. Call EndEvent with index parameter equal to the returned value by BeginEvent function.
        /// </summary>
        /// <param name="name">The event name.</param>
        /// <returns>The event token.</returns>
        int32 BeginEvent(const Char* name);

        /// <summary>
        /// Ends the event running on a this thread.
        /// </summary>
        /// <param name="index">The event index returned by the BeginEvent method.</param>
        void EndEvent(int32 index);
    };

public:
    /// <summary>
    /// The registered threads.
    /// </summary>
    static Array<Thread*> Threads;

    /// <summary>
    /// The profiling tools usage flag. Can be used to disable profiler. Engine turns it down before the exit and before platform startup.
    /// </summary>
    static bool Enabled;

public:
    /// <summary>
    /// Determines whether the current (calling) thread is being profiled by the service (it may has no active profile block but is registered).
    /// </summary>
    /// <returns>true if service is profiling the current thread; otherwise, false.</returns>
    static bool IsProfilingCurrentThread();

    /// <summary>
    /// Gets the current thread (profiler service shadow object).
    /// </summary>
    /// <returns>The current thread object or null if not profiled yet.</returns>
    static Thread* GetCurrentThread();

    /// <summary>
    /// Begins the event. Call EndEvent with index parameter equal to the returned value by BeginEvent function.
    /// </summary>
    /// <param name="name">The event name.</param>
    /// <returns>The event token.</returns>
    static int32 BeginEvent(const Char* name);

    /// <summary>
    /// Ends the event.
    /// </summary>
    /// <param name="index">The event index returned by the BeginEvent method.</param>
    static void EndEvent(int32 index);

    /// <summary>
    /// Releases resources. Calls to the profiling API after Dispose are not valid
    /// </summary>
    static void Dispose();
};
///
/// Helper structure used to call BeginEvent/EndEvent within single code block.
///
struct ScopeProfileBlockCPU
{
///
/// The event token index.
///
int32 Index;
///
/// Initializes a new instance of the struct.
///
/// The event name.
ScopeProfileBlockCPU(const Char* name)
{
Index = ProfilerCPU::BeginEvent(name);
}
///
/// Finalizes an instance of the class.
///
~ScopeProfileBlockCPU()
{
ProfilerCPU::EndEvent(Index);
}
};
template<>
struct TIsPODType
{
enum { Value = true };
};
// Shortcut macros for profiling a single code block execution on CPU
#define PROFILE_CPU_NAMED(name) ScopeProfileBlockCPU ProfileBlockCPU(TEXT(name))
#if defined(_MSC_VER)
// MSVC path: __FUNCTION__ behaves as a string literal so TEXT() can widen it directly at compile-time
#define PROFILE_CPU() ScopeProfileBlockCPU ProfileBlockCPU(TEXT(__FUNCTION__))
#else
// Non-MSVC path: __FUNCTION__ is a char array (not a literal), so the function name is
// converted to UTF-16 into a stack buffer at runtime before opening the scope block.
// NOTE(review): ARRAY_COUNT(__FUNCTION__) already includes the terminating NUL, so the
// extra +1 slot and the explicit [_functionNameLength] = 0 look redundant but harmless -
// confirm against StringUtils::ConvertANSI2UTF16 semantics before changing.
#define PROFILE_CPU() \
const char* _functionName = __FUNCTION__; \
const int32 _functionNameLength = ARRAY_COUNT(__FUNCTION__); \
Char _functionNameBuffer[_functionNameLength + 1]; \
StringUtils::ConvertANSI2UTF16(_functionName, _functionNameBuffer, _functionNameLength); \
_functionNameBuffer[_functionNameLength] = 0; \
ScopeProfileBlockCPU ProfileBlockCPU(_functionNameBuffer)
#endif
#else
// Empty macros used when the profiler is compiled out (COMPILE_WITH_PROFILER disabled)
#define PROFILE_CPU_NAMED(name)
#define PROFILE_CPU()
#endif