Move SOO processing inside of InitializeSlots and move it once.
This reduces binary size by moving more code into the type-erased InitializeSlots.

It also ensures the SOO element is moved only once, which can be useful for expensive-to-move types.

PiperOrigin-RevId: 721938625
Change-Id: I449753440fe91cb1bb5a1569e23d1986f5b0a642
goldvitaly authored and copybara-github committed Feb 1, 2025
1 parent 0f102ad commit 7253ff8
Showing 3 changed files with 200 additions and 155 deletions.
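For context, a minimal sketch of the type-erasure idea the commit message refers to (hypothetical names, a simplification rather than Abseil's actual PolicyFunctions layout): a per-type table of plain function pointers lets one non-templated routine perform the move for every element type, so code that used to be stamped out per instantiation is emitted only once.

```cpp
#include <cstddef>
#include <new>
#include <utility>

// Hypothetical stand-in for a per-type table of type-erased operations.
struct ErasedPolicy {
  size_t slot_size;
  void (*transfer)(void* dst, void* src);  // move-constructs dst from src
};

// Emitted once in the binary, regardless of how many element types exist;
// the SOO element is handed off with a single call to transfer().
void MoveSooSlotOnce(void* dst, void* soo_src, const ErasedPolicy& policy) {
  policy.transfer(dst, soo_src);
}

// The only per-type code is this small shim.
template <class T>
ErasedPolicy MakePolicy() {
  return {sizeof(T), [](void* dst, void* src) {
            ::new (dst) T(std::move(*static_cast<T*>(src)));
            static_cast<T*>(src)->~T();
          }};
}
```

Only MakePolicy<T> is instantiated per element type; MoveSooSlotOnce exists once, which is the binary-size effect the message describes.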
66 changes: 16 additions & 50 deletions absl/container/internal/raw_hash_set.cc
@@ -145,7 +145,7 @@ size_t PrepareInsertAfterSoo(size_t hash, size_t slot_size,
assert(common.capacity() == NextCapacity(SooCapacity()));
// After resize from capacity 1 to 3, we always have exactly the slot with
// index 1 occupied, so we need to insert either at index 0 or index 2.
assert(HashSetResizeHelper::SooSlotIndex() == 1);
static_assert(SooSlotIndex() == 1, "");
PrepareInsertCommon(common);
const size_t offset = SingleGroupTableH1(hash, common.control()) & 2;
common.growth_info().OverwriteEmptyAsFull();
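The `& 2` above is the whole branch-free slot choice: with slot 1 always holding the old SOO element after the resize to capacity 3, one hash bit picks between the two remaining empty slots. A tiny standalone illustration of the mapping (not the real SingleGroupTableH1):

```cpp
#include <cstddef>
#include <cstdint>

// After the SOO -> capacity-3 resize, index 1 is occupied, so the new
// element must land at index 0 or index 2. Masking a hash-derived value
// with 2 keeps only bit 1, yielding exactly 0 or 2 and never 1.
size_t PickInsertOffset(uint64_t h1) {
  return static_cast<size_t>(h1 & 2);
}
```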
@@ -478,49 +478,6 @@ void HashSetResizeHelper::GrowIntoSingleGroupShuffleControlBytes(
// new_ctrl after 2nd store = E0123456EEEEEEESE0123456EEEEEEE
}

void HashSetResizeHelper::InitControlBytesAfterSoo(ctrl_t* new_ctrl, ctrl_t h2,
size_t new_capacity) {
assert(is_single_group(new_capacity));
static_assert(HashSetResizeHelper::SooSlotIndex() == 1, "");
// This allows us to avoid branching on had_soo_slot_.
assert(had_soo_slot_ || h2 == ctrl_t::kEmpty);

if (Group::kWidth == 16) {
// Initialize the second 8 bytes in the original and mirrored control bytes.
// The ranges can overlap.
absl::little_endian::Store64(new_ctrl + 8, kMsbs8Bytes);
absl::little_endian::Store64(new_ctrl + new_capacity + 8, kMsbs8Bytes);
}
static constexpr uint64_t kAllEmptyExceptSoo =
kMsbs8Bytes ^ (static_cast<uint64_t>(static_cast<uint8_t>(ctrl_t::kEmpty))
<< (8 * HashSetResizeHelper::SooSlotIndex()));
// Initialize the first 8 bytes in the original control bytes.
// The first 8 bytes are all empty except the SOO slot.
// The range may overlap with the mirrored control bytes. These bytes will be
// overwritten later.
uint64_t first_ctrl_bytes =
kAllEmptyExceptSoo ^ (static_cast<uint64_t>(static_cast<uint8_t>(h2))
<< (8 * HashSetResizeHelper::SooSlotIndex()));
absl::little_endian::Store64(new_ctrl, first_ctrl_bytes);
// Initialize Sentinel byte and the first 7 bytes in the mirrored control
// bytes.
// We are adding kSentinel as the first byte of the mirrored control bytes.
uint64_t mirrored_ctrl_bytes =
(first_ctrl_bytes << 8) ^
static_cast<uint64_t>(static_cast<uint8_t>(ctrl_t::kSentinel));
absl::little_endian::Store64(new_ctrl + new_capacity, mirrored_ctrl_bytes);

// Example for capacity 3:
// new_ctrl after 2 stores = ????????EEEEEEEEEEE
// new_ctrl after 3rd store = E0EEEEEEEEEEEEEEEEE
// new_ctrl after 4th store = E0ESE0EEEEEEEEEEEEE

// Example for capacity 15:
// new_ctrl after 2 stores = ????????EEEEEEEE???????EEEEEEEE
// new_ctrl after 3rd store = E0EEEEEEEEEEEEEE???????EEEEEEEE
// new_ctrl after 4th store = E0EEEEEEEEEEEEESE0EEEEEEEEEEEEE
}
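The kAllEmptyExceptSoo computation in the deleted function boils down to placing a single byte inside a 64-bit word with two XORs, so that one 8-byte store initializes the whole group. A standalone illustration of that trick (simplified, not Abseil code; `all_empty` plays the role of kMsbs8Bytes, a word whose every byte is the empty marker):

```cpp
#include <cstdint>

// Two-XOR byte placement: the first XOR clears the byte at `index`
// (empty ^ empty == 0), the second writes `value` there; the caller can
// then emit the whole group with a single 8-byte store.
uint64_t PlaceByteLE(uint64_t all_empty, uint8_t empty_marker, uint8_t value,
                     int index) {
  const uint64_t cleared = all_empty ^ (uint64_t{empty_marker} << (8 * index));
  return cleared ^ (uint64_t{value} << (8 * index));
}
```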

void HashSetResizeHelper::GrowIntoSingleGroupShuffleTransferableSlots(
void* new_slots, size_t slot_size) const {
ABSL_ASSUME(old_capacity_ > 0);
@@ -543,14 +500,23 @@ void HashSetResizeHelper::GrowSizeIntoSingleGroupTransferable(
PoisonSingleGroupEmptySlots(c, slot_size);
}

void HashSetResizeHelper::TransferSlotAfterSoo(CommonFields& c,
size_t slot_size) {
void HashSetResizeHelper::InsertOldSooSlotAndInitializeControlBytesLarge(
CommonFields& c, size_t hash, ctrl_t* new_ctrl, void* new_slots,
const PolicyFunctions& policy) {
assert(was_soo_);
assert(had_soo_slot_);
assert(is_single_group(c.capacity()));
std::memcpy(SlotAddress(c.slot_array(), SooSlotIndex(), slot_size),
old_soo_data(), slot_size);
PoisonSingleGroupEmptySlots(c, slot_size);
size_t new_capacity = c.capacity();

size_t offset = probe(new_ctrl, new_capacity, hash).offset();
offset = offset == new_capacity ? 0 : offset;
SanitizerPoisonMemoryRegion(new_slots, policy.slot_size * new_capacity);
void* target_slot = SlotAddress(new_slots, offset, policy.slot_size);
SanitizerUnpoisonMemoryRegion(target_slot, policy.slot_size);
policy.transfer(&c, target_slot, c.soo_data());
c.set_control(new_ctrl);
c.set_slots(new_slots);
ResetCtrl(c, policy.slot_size);
SetCtrl(c, offset, H2(hash), policy.slot_size);
}
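As a reading aid for the poison/unpoison pair in the new function: the whole new slot array is marked inaccessible first, then only the chosen target slot is carved back out before the single transfer of the SOO element. A minimal sketch of that ordering, with hypothetical no-op stand-ins for the sanitizer wrappers:

```cpp
#include <cstddef>

// Hypothetical no-op stand-ins; under ASan the real wrappers mark memory
// as inaccessible/accessible.
inline void PoisonRegion(void*, size_t) {}
inline void UnpoisonRegion(void*, size_t) {}

// Poison everything, unpoison just the target slot, then move the element
// exactly once via the type-erased transfer function.
void TransferOldSooElement(void* new_slots, size_t slot_size, size_t capacity,
                           size_t offset, void* soo_data,
                           void (*transfer)(void* dst, void* src)) {
  PoisonRegion(new_slots, slot_size * capacity);
  void* target = static_cast<char*>(new_slots) + offset * slot_size;
  UnpoisonRegion(target, slot_size);
  transfer(target, soo_data);
}
```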

namespace {
