Fixed HashSet compact rehash under heavy collisions

- Compact now iterates over the old bucket array using the saved oldSize, and frees it with that same size, avoiding out-of-bounds access when _size changes during the rebuild.
- If reinsertion finds no free slot during compaction (pathological collisions), the table grows once and the probe is retried, preventing access violations (AVs). A standalone sketch of this reinsertion pattern follows the diff below.
- This fix addresses problems with weak hash keys (see #3824).
Michael Herzog
2025-11-25 21:20:20 +01:00
parent 2a55cda583
commit d9a18b1d31


@@ -409,26 +409,33 @@ protected:
         else
         {
             // Rebuild entire table completely
+            const int32 oldSize = _size;
             AllocationData oldAllocation;
-            AllocationUtils::MoveToEmpty<BucketType, AllocationType>(oldAllocation, _allocation, _size, _size);
+            AllocationUtils::MoveToEmpty<BucketType, AllocationType>(oldAllocation, _allocation, oldSize, oldSize);
             _allocation.Allocate(_size);
             BucketType* data = _allocation.Get();
             for (int32 i = 0; i < _size; ++i)
                 data[i]._state = HashSetBucketState::Empty;
             BucketType* oldData = oldAllocation.Get();
             FindPositionResult pos;
-            for (int32 i = 0; i < _size; ++i)
+            for (int32 i = 0; i < oldSize; ++i)
             {
                 BucketType& oldBucket = oldData[i];
                 if (oldBucket.IsOccupied())
                 {
                     FindPosition(oldBucket.GetKey(), pos);
+                    if (pos.FreeSlotIndex == -1)
+                    {
+                        // Grow and retry to handle pathological cases (eg. heavy collisions)
+                        EnsureCapacity(_size + 1, true);
+                        FindPosition(oldBucket.GetKey(), pos);
+                    }
                     ASSERT(pos.FreeSlotIndex != -1);
                     BucketType& bucket = _allocation.Get()[pos.FreeSlotIndex];
                     bucket = MoveTemp(oldBucket);
                 }
             }
-            for (int32 i = 0; i < _size; ++i)
+            for (int32 i = 0; i < oldSize; ++i)
                 oldData[i].Free();
         }
         _deletedCount = 0;
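
Below is a minimal, self-contained sketch of the reinsertion pattern the diff implements: rebuild from the saved old bucket array, and if probing finds no free slot, grow the new table and retry the probe. It is not the engine's HashSet; the Sketch type, the std::optional buckets, and the explicit kMaxProbes limit (used to model why FindPosition can fail to return a free slot under heavy collisions) are assumptions made to keep the example runnable.

```cpp
// Minimal sketch (assumed names, not the engine's HashSet) of "rebuild from the
// saved old bucket array, grow and retry when probing finds no free slot".
#include <cassert>
#include <cstdint>
#include <optional>
#include <vector>

struct Sketch
{
    // Assumed probe limit; it models a FindPosition that can give up under heavy collisions.
    static constexpr int kMaxProbes = 8;

    std::vector<std::optional<uint64_t>> buckets;

    explicit Sketch(size_t capacity)
        : buckets(capacity)
    {
    }

    // Linear probing limited to kMaxProbes slots; returns a free slot index or -1.
    int FindFreeSlot(uint64_t key) const
    {
        for (int p = 0; p < kMaxProbes; ++p)
        {
            const size_t i = (key + static_cast<uint64_t>(p)) % buckets.size();
            if (!buckets[i].has_value())
                return static_cast<int>(i);
        }
        return -1; // pathological collisions: probe sequence exhausted
    }

    // Rebuild this table from the saved old bucket array, growing on demand.
    void RehashFrom(const std::vector<std::optional<uint64_t>>& oldBuckets)
    {
        const size_t oldSize = oldBuckets.size(); // iterate the old size, not the current one
        for (size_t i = 0; i < oldSize; ++i)
        {
            if (!oldBuckets[i].has_value())
                continue;
            const uint64_t key = *oldBuckets[i];
            int slot = FindFreeSlot(key);
            if (slot == -1)
            {
                // Grow and retry: move what we already placed into a larger table, then reprobe.
                Sketch bigger(buckets.size() * 2 + 1);
                bigger.RehashFrom(buckets);
                buckets.swap(bigger.buckets);
                slot = FindFreeSlot(key);
            }
            assert(slot != -1);
            buckets[static_cast<size_t>(slot)] = key;
        }
    }
};

int main()
{
    // Keys that all collide in a 4-slot table, forcing the grow-and-retry path.
    std::vector<std::optional<uint64_t>> old(16);
    for (uint64_t k = 0; k < 12; ++k)
        old[static_cast<size_t>(k)] = k * 16;
    Sketch set(4);
    set.RehashFrom(old);
    return 0;
}
```

In the actual container, EnsureCapacity(_size + 1, true) plays the role of the grow step and FindPosition is re-run on the same key, which is what the new if (pos.FreeSlotIndex == -1) branch in the diff does.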