Bug 1476757 - Add methods to change the capacity of the ProfileBuffer.
MozReview-Commit-ID: 1sHsJz0oBvU
--- a/tools/profiler/core/ProfileBuffer.cpp
+++ b/tools/profiler/core/ProfileBuffer.cpp
@@ -11,38 +11,113 @@
#include "ProfilerMarker.h"
#include "jsfriendapi.h"
#include "nsScriptSecurityManager.h"
#include "nsJSPrincipals.h"
using namespace mozilla;
ProfileBuffer::ProfileBuffer(uint32_t aEntrySize)
-  : mEntryIndexMask(0)
+  : mEntries(nullptr)
+  , mEntryIndexMask(0)
  , mRangeStart(0)
  , mRangeEnd(0)
  , mEntrySize(0)
{
-  // Round aEntrySize up to the nearest power of two, so that we can index
-  // mEntries with a simple mask and don't need to do a slow modulo operation.
-  const uint32_t UINT32_MAX_POWER_OF_TWO = 1 << 31;
-  MOZ_RELEASE_ASSERT(aEntrySize <= UINT32_MAX_POWER_OF_TWO,
-                     "aEntrySize is larger than what we support");
-  mEntrySize = RoundUpPow2(aEntrySize);
-  mEntryIndexMask = mEntrySize - 1;
-  mEntries = MakeUnique<ProfileBufferEntry[]>(mEntrySize);
+  bool succeeded = SetMinCapacity(aEntrySize); // rounds up to a power of two and allocates
+  MOZ_RELEASE_ASSERT(succeeded, "Couldn't allocate initial ProfileBuffer storage");
}
ProfileBuffer::~ProfileBuffer()
{
while (mStoredMarkers.peek()) {
delete mStoredMarkers.popHead();
}
}
+bool
+ProfileBuffer::SetMinCapacity(uint32_t aMinCapacity)
+{
+  // Round aMinCapacity up to the nearest power of two, so that we can index
+  // mEntries with a simple mask and don't need to do a slow modulo operation.
+  const uint32_t UINT32_MAX_POWER_OF_TWO = 1u << 31; // 1u: don't shift into the sign bit of int
+  MOZ_RELEASE_ASSERT(aMinCapacity <= UINT32_MAX_POWER_OF_TWO,
+                     "aMinCapacity is larger than what we support");
+  return SetCapacityPow2(RoundUpPow2(aMinCapacity));
+}
+
+static uint64_t
+RoundDownToMultipleOfPow2(uint64_t aNumber, uint64_t aMultiplier)
+{
+  return aNumber & ~(aMultiplier - 1); // mask trick requires aMultiplier to be a power of two
+}
+
+bool
+ProfileBuffer::SetCapacityPow2(uint32_t aCapacity)
+{
+  MOZ_RELEASE_ASSERT(aCapacity != 0, "can't set ProfileBuffer capacity to zero");
+  MOZ_RELEASE_ASSERT((aCapacity & (aCapacity - 1)) == 0, "aCapacity needs to be a power of two");
+
+  if (aCapacity == mEntrySize) { // already at the requested capacity, nothing to do
+    return true;
+  }
+
+  MOZ_RELEASE_ASSERT(UsedSize() <= aCapacity, "can't make the capacity smaller than the used size");
+
+  auto newStorage = MakeUniqueFallible<ProfileBufferEntry[]>(aCapacity); // fallible: OOM reports false below
+  if (!newStorage) {
+    return false;
+  }
+
+  uint32_t newIndexMask = aCapacity - 1;
+
+  if (mEntrySize != 0 && mRangeStart != mRangeEnd) { // only copy when there is existing data
+    // Copy existing data from mEntries into newStorage. Make sure that every
+    // entry preserves its position in buffer space.
+    // If the range wraps around in the old or in the new buffer, we need to
+    // copy the data in two chunks: [start, wrapIndex), [wrapIndex, end)
+    // The range between mRangeStart and mRangeEnd will only wrap at most once,
+    // because both the old and the new capacity are large enough to contain it.
+    // If the range wraps in both the old and the new buffer, the wrap index
+    // will be the same in both buffers.
+    //                           +- mRangeStart
+    //                           |     +- wrapIndex
+    //                           |     |  +- mRangeEnd
+    //                           v     v  v
+    // ...-+-----+-----+-----+-----+-----+-----+-----+-----+-...
+    //     |     |     |     |   [---+-] |     |     |     |
+    // ...-+-----+-----+-----+-----+-----+-----+-----+-----+-...
+    //                       |   [---+-]                   |
+    // ...-+-----+-----+-----+-----+-----+-----+-----+-----+-...
+    //                       ^^^^^^^ smaller capacity
+    //     ^^^^^^^^^^^^^^^^^^^^^^^^^ larger capacity
+    uint64_t wrapIndex =
+      RoundDownToMultipleOfPow2(mRangeEnd, std::min(aCapacity, mEntrySize));
+    if (wrapIndex <= mRangeStart) {
+      // There is no wrapping. Copy the entire range as one chunk.
+      PodCopy(&newStorage[mRangeStart & newIndexMask],
+              &mEntries[mRangeStart & mEntryIndexMask],
+              mRangeEnd - mRangeStart);
+    } else {
+      // Copy the range in two separate chunks.
+      PodCopy(&newStorage[mRangeStart & newIndexMask],
+              &mEntries[mRangeStart & mEntryIndexMask],
+              wrapIndex - mRangeStart);
+      PodCopy(&newStorage[wrapIndex & newIndexMask],
+              &mEntries[wrapIndex & mEntryIndexMask],
+              mRangeEnd - wrapIndex);
+    }
+  }
+
+  mEntrySize = aCapacity;
+  mEntryIndexMask = newIndexMask;
+  mEntries = std::move(newStorage);
+  return true;
+}
+
// Called from signal, call only reentrant functions
void
ProfileBuffer::AddEntry(const ProfileBufferEntry& aEntry)
{
GetEntry(mRangeEnd++) = aEntry;
// The distance between mRangeStart and mRangeEnd must never exceed
// mEntrySize, so advance mRangeStart if necessary.
--- a/tools/profiler/core/ProfileBuffer.h
+++ b/tools/profiler/core/ProfileBuffer.h
@@ -9,18 +9,19 @@
#include "platform.h"
#include "ProfileBufferEntry.h"
#include "ProfilerMarker.h"
#include "ProfileJSONWriter.h"
#include "mozilla/RefPtr.h"
#include "mozilla/RefCounted.h"
// A fixed-capacity circular buffer.
-// This class is used as a queue of entries which, after construction, never
-// allocates. This makes it safe to use in the profiler's "critical section".
+// This class is used as a queue of entries which, outside of construction and
+// calls to SetMinCapacity/SetCapacityPow2, never allocates. This makes it safe
+// to use in the profiler's "critical section".
// Entries are appended at the end. Once the queue capacity has been reached,
// adding a new entry will evict an old entry from the start of the queue.
// Positions in the queue are represented as 64-bit unsigned integers which
// only increase and never wrap around.
// mRangeStart and mRangeEnd describe the range in that uint64_t space which is
// covered by the queue contents.
// Internally, the buffer uses a fixed-size storage and applies a modulo
// operation when accessing entries in that storage buffer. "Evicting" an entry
@@ -31,16 +32,28 @@ class ProfileBuffer final
public:
// ProfileBuffer constructor
// @param aEntrySize The minimum capacity of the buffer. The actual buffer
// capacity will be rounded up to the next power of two.
explicit ProfileBuffer(uint32_t aEntrySize);
~ProfileBuffer();
+  uint32_t UsedSize() const { return static_cast<uint32_t>(mRangeEnd - mRangeStart); } // range never exceeds mEntrySize, so this fits
+
+  // Set the buffer capacity to at least aMinCapacity. aMinCapacity must be
+  // non-zero and at least UsedSize(). This method allocates.
+ // The allocation is fallible and the return value indicates success.
+ bool SetMinCapacity(uint32_t aMinCapacity);
+
+ // Set the buffer capacity to exactly aCapacity. aCapacity must be a power of
+ // two, non-zero, and at least UsedSize(). This method allocates.
+ // The allocation is fallible and the return value indicates success.
+ bool SetCapacityPow2(uint32_t aCapacity);
+
// Add |aEntry| to the buffer, ignoring what kind of entry it is.
void AddEntry(const ProfileBufferEntry& aEntry);
// Add to the buffer a sample start (ThreadId) entry for aThreadId.
// Returns the position of the entry.
uint64_t AddThreadIdEntry(int aThreadId);
void CollectCodeLocation(