From b310c8c3dcc8af643b285e68ea29ca2e4c33d9b9 Mon Sep 17 00:00:00 2001
From: Robbin Ehn
Date: Mon, 4 Dec 2023 09:41:00 +0100
Subject: [PATCH] Fixed size

---
 .../cpu/riscv/macroAssembler_riscv.cpp | 30 +++++++++----------
 .../cpu/riscv/macroAssembler_riscv.hpp |  2 +-
 src/hotspot/cpu/riscv/riscv.ad         | 21 ++++++-------
 3 files changed, 27 insertions(+), 26 deletions(-)

diff --git a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
index fe530affa8210..ee7bd64f1268e 100644
--- a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
@@ -3329,41 +3329,41 @@ address MacroAssembler::ic_call(address entry, jint method_index) {
 }
 
 int MacroAssembler::ic_check_size() {
-  return NativeInstruction::instruction_size * 9;
+  // No compressed instructions
+  return (NativeInstruction::instruction_size * (2 /* 2 loads */ + 1 /* branch */)) +
+         far_branch_size();
 }
 
 int MacroAssembler::ic_check(int end_alignment) {
   IncompressibleRegion ir(this);
   Register receiver = j_rarg0;
   Register data = t1;
-  Register tmp1 = t0;
-  Register tmp2 = t2;
-  int start_offset = offset();
+  Register tmp1 = t0; // t0 always scratch
+  // t2 is saved on call, thus should have been saved before this check.
+  // Hence we can clobber it.
+  Register tmp2 = t2;
 
   align(end_alignment, offset() + ic_check_size());
-
   int uep_offset = offset();
 
   if (UseCompressedClassPointers) {
-    lw(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
-    lw(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
+    lwu(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
+    lwu(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
   } else {
     ld(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
     ld(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
   }
 
-  Label dont;
-  beq(tmp1, tmp2, dont);
+  Label ic_hit;
+  beq(tmp1, tmp2, ic_hit);
+  // Note, far_jump is not fixed size.
+  // If this ever generates a movptr, alignment/size will be off.
   far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 
-  bind(dont);
-
-  int beoffs = offset();
-  align(end_alignment, 0);
+  bind(ic_hit);
 
   int offs = offset();
-  assert((offs % end_alignment) == 0, "Misaligned verified entry point: %d %d %d %d %d %d",
-         start_offset, uep_offset, beoffs, offs, ic_check_size(), end_alignment);
+  assert((offs % end_alignment) == 0, "Misaligned verified entry point");
 
   return uep_offset;
 }
diff --git a/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp b/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp
index 8c8f6d180906e..6ee13bee0aedd 100644
--- a/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp
+++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp
@@ -1071,7 +1071,7 @@ class MacroAssembler: public Assembler {
 
   static int far_branch_size() {
     if (far_branches()) {
-      return 2 * 4; // auipc + jalr, see far_call() & far_jump()
+      return 2 * NativeInstruction::instruction_size; // auipc + jalr, see far_call() & far_jump()
     } else {
       return 4;
     }
diff --git a/src/hotspot/cpu/riscv/riscv.ad b/src/hotspot/cpu/riscv/riscv.ad
index c1581f27a61d2..56efecc7fdeda 100644
--- a/src/hotspot/cpu/riscv/riscv.ad
+++ b/src/hotspot/cpu/riscv/riscv.ad
@@ -1807,14 +1807,13 @@ void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
   assert_cond(st != NULL);
   st->print_cr("# MachUEPNode");
   if (UseCompressedClassPointers) {
-    st->print_cr("\tlwu t0, [j_rarg0, oopDesc::klass_offset_in_bytes()]\t# compressed klass");
-    if (CompressedKlassPointers::shift() != 0) {
-      st->print_cr("\tdecode_klass_not_null t0, t0");
-    }
+    st->print_cr("\tlwu t0, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
+    st->print_cr("\tlwu t2, [t1 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
   } else {
-    st->print_cr("\tld t0, [j_rarg0, oopDesc::klass_offset_in_bytes()]\t# compressed klass");
+    st->print_cr("\tld t0, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
+    st->print_cr("\tld t2, [t1 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
   }
-  st->print_cr("\tbeq t0, t1, ic_hit");
+  st->print_cr("\tbeq t0, t2, ic_hit");
   st->print_cr("\tj, SharedRuntime::_ic_miss_stub\t # Inline cache check");
   st->print_cr("\tic_hit:");
 }
@@ -1825,10 +1824,12 @@ void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
   // This is the unverified entry point.
   C2_MacroAssembler _masm(&cbuf);
   __ ic_check(CodeEntryAlignment);
-
-  // These NOPs are critical so that verified entry point is properly
-  // 4 bytes aligned for patching by NativeJump::patch_verified_entry()
-  __ align(NativeInstruction::instruction_size);
+
+  // The verified entry point must be properly 4-byte aligned for
+  // patching by NativeJump::patch_verified_entry().
+  // ic_check() aligns to CodeEntryAlignment,
+  // which is >= InteriorEntryAlignment (min 16) > NativeInstruction::instruction_size (4).
+  assert(((__ offset()) % CodeEntryAlignment) == 0, "Misaligned verified entry point");
 }
 
 uint MachUEPNode::size(PhaseRegAlloc* ra_) const
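
Note on the size computation above: the sketch below is illustrative only and is not
part of the patch. It mirrors the arithmetic that the new ic_check_size() and
far_branch_size() perform, using assumed values (4-byte instructions, a sample
CodeEntryAlignment of 64, and an arbitrary starting offset); the real values come
from the HotSpot runtime.

// Standalone C++ sketch of the fixed-size inline-cache check layout.
#include <cassert>
#include <cstdio>

constexpr int kInstrSize = 4;            // NativeInstruction::instruction_size on RISC-V
constexpr int kCodeEntryAlignment = 64;  // assumed example value

// far_branch_size(): auipc + jalr when the code cache needs far branches, else one jal.
int far_branch_size(bool far) { return far ? 2 * kInstrSize : kInstrSize; }

// ic_check_size(): two klass loads + one branch + the far jump to the miss stub.
int ic_check_size(bool far) {
  return kInstrSize * (2 /* loads */ + 1 /* branch */) + far_branch_size(far);
}

int main() {
  const bool cases[] = {false, true};
  for (bool far : cases) {
    int size = ic_check_size(far);       // 16 or 20 bytes
    // ic_check() calls align(end_alignment, offset() + ic_check_size()):
    // it pads *before* the unverified entry point (UEP) so that the code
    // right after the check (the verified entry point, VEP) is aligned.
    int offset = 100;                    // arbitrary current code offset
    while ((offset + size) % kCodeEntryAlignment != 0) {
      offset += kInstrSize;              // padding emitted before the UEP
    }
    int uep = offset;
    int vep = uep + size;
    assert(vep % kCodeEntryAlignment == 0 && "Misaligned verified entry point");
    std::printf("far_branches=%d ic_check_size=%d uep=%d vep=%d\n", far, size, uep, vep);
  }
  return 0;
}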