From d81501a437a42719ea9050ba3f2bf5a76566498d Mon Sep 17 00:00:00 2001
From: Robbin Ehn
Date: Mon, 23 Sep 2024 11:33:03 +0200
Subject: [PATCH] Draft

---
 src/hotspot/cpu/riscv/assembler_riscv.hpp     |  3 +-
 src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp  |  6 +-
 .../cpu/riscv/c1_LIRAssembler_riscv.cpp       |  4 +-
 .../gc/g1/g1BarrierSetAssembler_riscv.cpp     |  4 +-
 .../riscv/gc/x/xBarrierSetAssembler_riscv.cpp |  4 +-
 .../riscv/gc/z/zBarrierSetAssembler_riscv.cpp | 12 ++--
 src/hotspot/cpu/riscv/interp_masm_riscv.cpp   |  8 +--
 .../cpu/riscv/macroAssembler_riscv.cpp        | 56 +++++++++++--------
 .../cpu/riscv/macroAssembler_riscv.hpp        | 18 +++---
 src/hotspot/cpu/riscv/methodHandles_riscv.cpp | 12 ++--
 src/hotspot/cpu/riscv/nativeInst_riscv.cpp    | 20 +++----
 src/hotspot/cpu/riscv/riscv.ad                | 22 ++++----
 src/hotspot/cpu/riscv/runtime_riscv.cpp       |  6 +-
 src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp | 21 ++++---
 src/hotspot/cpu/riscv/stubGenerator_riscv.cpp | 26 ++++-----
 .../templateInterpreterGenerator_riscv.cpp    |  3 +-
 src/hotspot/cpu/riscv/templateTable_riscv.cpp | 34 +++++------
 src/hotspot/cpu/riscv/upcallLinker_riscv.cpp  |  4 +-
 src/hotspot/cpu/riscv/vtableStubs_riscv.cpp   | 12 ++--
 19 files changed, 145 insertions(+), 130 deletions(-)

diff --git a/src/hotspot/cpu/riscv/assembler_riscv.hpp b/src/hotspot/cpu/riscv/assembler_riscv.hpp
index a8675907907e7..0e1fb6207ceb2 100644
--- a/src/hotspot/cpu/riscv/assembler_riscv.hpp
+++ b/src/hotspot/cpu/riscv/assembler_riscv.hpp
@@ -2892,6 +2892,7 @@ enum Nf {
 protected:
   // All calls and jumps must go via MASM.
   void jalr(Register Rd, Register Rs, const int32_t offset) {
+    assert(Rd != x5 && Rs != x5, "Register x5 not used for calls/jumps.");
     /* jalr -> c.jr/c.jalr */
     if (do_compress() && (offset == 0 && Rs != x0)) {
       if (Rd == x1) {
@@ -2906,6 +2907,7 @@ enum Nf {
   }

   void jal(Register Rd, const int32_t offset) {
+    assert(Rd != x5, "Register x5 not used for calls/jumps.");
     /* jal -> c.j, note c.jal is RV32C only */
     if (do_compress() &&
         Rd == x0 &&
@@ -2913,7 +2915,6 @@ enum Nf {
       c_j(offset);
       return;
     }
-
     _jal(Rd, offset);
   }

diff --git a/src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp b/src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp
index fb81082072610..e737daf6ddfe7 100644
--- a/src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp
+++ b/src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp
@@ -93,8 +93,8 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
     stub_id = C1StubId::throw_range_check_failed_id;
   }
   // t0 and t1 are used as args in generate_exception_throw,
-  // so use ra as the tmp register for rt_call.
-  __ rt_call(Runtime1::entry_for(stub_id), ra);
+  // so use x18 as the tmp register for rt_call.
+  __ rt_call(Runtime1::entry_for(stub_id), x18);
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
   debug_only(__ should_not_reach_here());
@@ -275,7 +275,7 @@ void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
   if (_obj->is_cpu_register()) {
     __ mv(t0, _obj->as_register());
   }
-  __ far_call(RuntimeAddress(Runtime1::entry_for(_stub)), t1);
+  __ far_call(RuntimeAddress(Runtime1::entry_for(_stub)));
   ce->add_call_info_here(_info);
   debug_only(__ should_not_reach_here());
 }

diff --git a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp
index 828f70e4decee..db6e9e7c03b11 100644
--- a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp
@@ -96,8 +96,8 @@ void LIR_Assembler::clinit_barrier(ciMethod* method) {
   Label L_skip_barrier;

-  __ mov_metadata(t1, method->holder()->constant_encoding());
-  __ clinit_barrier(t1, t0, &L_skip_barrier /* L_fast_path */);
+  __ mov_metadata(t0, method->holder()->constant_encoding());
+  __ clinit_barrier(t0, t1, &L_skip_barrier /* L_fast_path */);
   __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
   __ bind(L_skip_barrier);
 }

diff --git a/src/hotspot/cpu/riscv/gc/g1/g1BarrierSetAssembler_riscv.cpp b/src/hotspot/cpu/riscv/gc/g1/g1BarrierSetAssembler_riscv.cpp
index 7036c44d99dc9..5493882be7264 100644
--- a/src/hotspot/cpu/riscv/gc/g1/g1BarrierSetAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/gc/g1/g1BarrierSetAssembler_riscv.cpp
@@ -271,8 +271,8 @@ static void generate_c2_barrier_runtime_call(MacroAssembler* masm, G1BarrierStub
     __ mv(c_rarg0, arg);
   }
   __ mv(c_rarg1, xthread);
-  __ mv(t0, runtime_path);
-  __ jalr(t0);
+  __ mv(t1, runtime_path);
+  __ jalr(t1);
 }

 void G1BarrierSetAssembler::g1_write_barrier_pre_c2(MacroAssembler* masm,

diff --git a/src/hotspot/cpu/riscv/gc/x/xBarrierSetAssembler_riscv.cpp b/src/hotspot/cpu/riscv/gc/x/xBarrierSetAssembler_riscv.cpp
index 7306492970b81..eb8d4c44b88a1 100644
--- a/src/hotspot/cpu/riscv/gc/x/xBarrierSetAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/gc/x/xBarrierSetAssembler_riscv.cpp
@@ -339,8 +339,8 @@ void XBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, X
     XSaveLiveRegisters save_live_registers(masm, stub);
     XSetupArguments setup_arguments(masm, stub);
-    __ mv(t0, stub->slow_path());
-    __ jalr(t0);
+    __ mv(t1, stub->slow_path());
+    __ jalr(t1);
   }

   // Stub exit

diff --git a/src/hotspot/cpu/riscv/gc/z/zBarrierSetAssembler_riscv.cpp b/src/hotspot/cpu/riscv/gc/z/zBarrierSetAssembler_riscv.cpp
index cbb918ade00fe..f07f850e795c4 100644
--- a/src/hotspot/cpu/riscv/gc/z/zBarrierSetAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/gc/z/zBarrierSetAssembler_riscv.cpp
@@ -724,8 +724,8 @@ void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, Z
   {
     SaveLiveRegisters save_live_registers(masm, stub);
     ZSetupArguments setup_arguments(masm, stub);
-    __ mv(t0, stub->slow_path());
-    __ jalr(t0);
+    __ mv(t1, stub->slow_path());
+    __ jalr(t1);
   }

   // Stub exit
@@ -758,13 +758,13 @@ void ZBarrierSetAssembler::generate_c2_store_barrier_stub(MacroAssembler* masm,
     __ la(c_rarg0, stub->ref_addr());
     if (stub->is_native()) {
-      __ la(t0, RuntimeAddress(ZBarrierSetRuntime::store_barrier_on_native_oop_field_without_healing_addr()));
+      __ la(t1, RuntimeAddress(ZBarrierSetRuntime::store_barrier_on_native_oop_field_without_healing_addr()));
     } else if (stub->is_atomic()) {
-      __ la(t0, RuntimeAddress(ZBarrierSetRuntime::store_barrier_on_oop_field_with_healing_addr()));
+      __ la(t1, RuntimeAddress(ZBarrierSetRuntime::store_barrier_on_oop_field_with_healing_addr()));
     } else {
-      __ la(t0, RuntimeAddress(ZBarrierSetRuntime::store_barrier_on_oop_field_without_healing_addr()));
+      __ la(t1, RuntimeAddress(ZBarrierSetRuntime::store_barrier_on_oop_field_without_healing_addr()));
     }
-    __ jalr(t0);
+    __ jalr(t1);
   }

   // Stub exit

diff --git a/src/hotspot/cpu/riscv/interp_masm_riscv.cpp b/src/hotspot/cpu/riscv/interp_masm_riscv.cpp
index 3eb7abb5ee3b9..fd75bde7655e3 100644
--- a/src/hotspot/cpu/riscv/interp_masm_riscv.cpp
+++ b/src/hotspot/cpu/riscv/interp_masm_riscv.cpp
@@ -421,13 +421,13 @@ void InterpreterMacroAssembler::jump_from_interpreted(Register method) {
     // interp_only_mode if these events CAN be enabled.
     lwu(t0, Address(xthread, JavaThread::interp_only_mode_offset()));
     beqz(t0, run_compiled_code);
-    ld(t0, Address(method, Method::interpreter_entry_offset()));
-    jr(t0);
+    ld(t1, Address(method, Method::interpreter_entry_offset()));
+    jr(t1);
     bind(run_compiled_code);
   }

-  ld(t0, Address(method, Method::from_interpreted_offset()));
-  jr(t0);
+  ld(t1, Address(method, Method::from_interpreted_offset()));
+  jr(t1);
 }

 // The following two routines provide a hook so that an implementation

diff --git a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
index 46701b6ede387..b39a39be16f0a 100644
--- a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
@@ -457,8 +457,8 @@ void MacroAssembler::call_VM_base(Register oop_result,
     RuntimeAddress target(StubRoutines::forward_exception_entry());
     relocate(target.rspec(), [&] {
       int32_t offset;
-      la(t0, target.target(), offset);
-      jr(t0, offset);
+      la(t1, target.target(), offset);
+      jr(t1, offset);
     });
     bind(ok);
   }
@@ -760,21 +760,21 @@ void MacroAssembler::emit_static_call_stub() {

   // Jump to the entry point of the c2i stub.
   int32_t offset = 0;
-  movptr(t0, 0, offset, t1); // lui + lui + slli + add
-  jr(t0, offset);
+  movptr(t1, 0, offset, t0); // lui + lui + slli + add
+  jr(t1, offset);
 }

 void MacroAssembler::call_VM_leaf_base(address entry_point,
                                        int number_of_arguments,
                                        Label *retaddr) {
   int32_t offset = 0;
-  push_reg(RegSet::of(t0, xmethod), sp);   // push << t0 & xmethod >> to sp
-  mv(t0, entry_point, offset);
-  jalr(t0, offset);
+  push_reg(RegSet::of(t1, xmethod), sp);   // push << t1 & xmethod >> to sp
+  mv(t1, entry_point, offset);
+  jalr(t1, offset);
   if (retaddr != nullptr) {
     bind(*retaddr);
   }
-  pop_reg(RegSet::of(t0, xmethod), sp);   // pop << t0 & xmethod >> from sp
+  pop_reg(RegSet::of(t1, xmethod), sp);   // pop << t1 & xmethod >> from sp
 }

 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
@@ -941,6 +941,8 @@ void MacroAssembler::li(Register Rd, int64_t imm) {

 void MacroAssembler::load_link_jump(const address source, Register temp) {
   assert(temp != noreg && temp != x0, "expecting a register");
+  assert(temp != x5, "Register x5 not used for calls.");
+  assert(temp != x1, "Register x1 not used for calls.");
   assert_cond(source != nullptr);
   int64_t distance = source - pc();
   assert(is_simm32(distance), "Must be");
@@ -968,7 +970,8 @@ void MacroAssembler::j(const address dest, Register temp) {
   if (is_simm21(distance) && ((distance % 2) == 0)) {
     Assembler::jal(x0, distance);
   } else {
-    assert(temp != noreg && temp != x0, "expecting a register");
+    assert(temp != x5 && temp != x1, "Register x5/x1 not used for jumps.");
+    assert(temp != noreg && temp != x0, "expecting a register");
     int32_t offset = 0;
     la(temp, dest, offset);
     jr(temp, offset);
@@ -1006,12 +1009,16 @@ void MacroAssembler::j(Label &lab, Register temp) {

 void MacroAssembler::jr(Register Rd, int32_t offset) {
   assert(Rd != noreg, "expecting a register");
+  assert(Rd != x5, "Register x5 not used for jumps.");
+  assert(Rd != x1, "Register x1 not used for jumps.");
   Assembler::jalr(x0, Rd, offset);
 }

 void MacroAssembler::call(const address dest, Register temp) {
   assert_cond(dest != nullptr);
   assert(temp != noreg, "expecting a register");
+  assert(temp != x5, "Register x5 not used for calls.");
+  assert(temp != x1, "Register x1 not used for calls.");
   int32_t offset = 0;
   la(temp, dest, offset);
   jalr(temp, offset);
@@ -1019,10 +1026,14 @@ void MacroAssembler::call(const address dest, Register temp) {

 void MacroAssembler::jalr(Register Rs, int32_t offset) {
   assert(Rs != noreg, "expecting a register");
+  assert(Rs != x1, "Register x1 not used for calls.");
+  assert(Rs != x5, "Register x5 not used for calls.");
   Assembler::jalr(x1, Rs, offset);
 }

 void MacroAssembler::rt_call(address dest, Register tmp) {
+  assert(tmp != x5, "Register x5 not used for calls.");
+  assert(tmp != x1, "Register x1 not used for calls.");
   CodeBlob *cb = CodeCache::find_blob(dest);
   RuntimeAddress target(dest);
   if (cb) {
@@ -4072,7 +4083,7 @@ void MacroAssembler::verify_secondary_supers_table(Register r_sub_klass,
                                                    Register tmp1,
                                                    Register tmp2,
                                                    Register tmp3) {
-  assert_different_registers(r_sub_klass, r_super_klass, tmp1, tmp2, tmp3, result, t0);
+  assert_different_registers(r_sub_klass, r_super_klass, tmp1, tmp2, tmp3, result, t0, t1);

   const Register
     r_array_base = tmp1, // X11
@@ -4139,8 +4150,8 @@ void MacroAssembler::get_thread(Register thread) {
                             RegSet::range(x28, x31) + ra - thread;
   push_reg(saved_regs, sp);

-  mv(ra, CAST_FROM_FN_PTR(address, Thread::current));
-  jalr(ra);
+  mv(t1, CAST_FROM_FN_PTR(address, Thread::current));
+  jalr(t1);
   if (thread != c_rarg0) {
     mv(thread, c_rarg0);
   }
@@ -4187,8 +4198,8 @@ void MacroAssembler::reserved_stack_check() {
   // We have already removed our own frame.
   // throw_delayed_StackOverflowError will think that it's been
   // called by our caller.
-  la(t0, RuntimeAddress(SharedRuntime::throw_delayed_StackOverflowError_entry()));
-  jr(t0);
+  la(t1, RuntimeAddress(SharedRuntime::throw_delayed_StackOverflowError_entry()));
+  jr(t1);
   should_not_reach_here();

   bind(no_reserved_zone_enabling);
@@ -4299,7 +4310,7 @@ address MacroAssembler::load_and_call(Address entry) {
   }
 #endif
   relocate(entry.rspec(), [&] {
-    load_link_jump(target);
+    load_link_jump(target, t1);
   });

   postcond(pc() != badAddress);
@@ -4309,7 +4320,7 @@ address MacroAssembler::load_and_call(Address entry) {
 address MacroAssembler::ic_call(address entry, jint method_index) {
   RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
   IncompressibleRegion ir(this);  // relocations
-  movptr(t1, (address)Universe::non_oop_word(), t0);
+  movptr(t0, (address)Universe::non_oop_word(), t1);
   assert_cond(entry != nullptr);
   return reloc_call(Address(entry, rh));
 }
@@ -4323,9 +4334,9 @@ int MacroAssembler::ic_check_size() {
 int MacroAssembler::ic_check(int end_alignment) {
   IncompressibleRegion ir(this);
   Register receiver = j_rarg0;
-  Register data = t1;
+  Register data = t0;
-  Register tmp1 = t0; // t0 always scratch
+  Register tmp1 = t1; // scratch
   // t2 is saved on call, thus should have been saved before this check.
   // Hence we can clobber it.
   Register tmp2 = t2;
@@ -4423,8 +4434,8 @@ address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
   // - load the call
   // - call
   Label target;
-  ld(t0, target);  // auipc + ld
-  jr(t0);          // jalr
+  ld(t1, target);  // auipc + ld
+  jr(t1);          // jalr
   bind(target);
   assert(offset() - stub_start_offset == MacroAssembler::NativeShortCall::trampoline_data_offset, "should be");
@@ -5148,11 +5159,11 @@ const int MacroAssembler::zero_words_block_size = 8;
 // ptr:   Address of a buffer to be zeroed.
 // cnt:   Count in HeapWords.
 //
-// ptr, cnt, and t0 are clobbered.
+// ptr, cnt, t1, and t0 are clobbered.
 address MacroAssembler::zero_words(Register ptr, Register cnt) {
   assert(is_power_of_2(zero_words_block_size), "adjust this");
   assert(ptr == x28 && cnt == x29, "mismatch in register usage");
-  assert_different_registers(cnt, t0);
+  assert_different_registers(cnt, t0, t1);

   BLOCK_COMMENT("zero_words {");
@@ -5170,6 +5181,7 @@ address MacroAssembler::zero_words(Register ptr, Register cnt) {
       return nullptr;
     }
   } else {
+    // Clobbers t1
     rt_call(zero_blocks.target());
   }
 }

diff --git a/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp b/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp
index fd174f241eb0b..3cbf4ef1d0a3a 100644
--- a/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp
+++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp
@@ -627,7 +627,7 @@ class MacroAssembler: public Assembler {
   void bgtz(Register Rs, const address dest);

 private:
-  void load_link_jump(const address source, Register temp = t0);
+  void load_link_jump(const address source, Register temp);
   void jump_link(const address dest, Register temp);

 public:
   // We try to follow risc-v asm menomics.
@@ -638,15 +638,15 @@ class MacroAssembler: public Assembler {
   // jump: jal x0, offset
   // For long reach uses temp register for:
   //   la + jr
-  void j(const address dest, Register temp = t0);
-  void j(const Address &adr, Register temp = t0);
-  void j(Label &l, Register temp = t0);
+  void j(const address dest, Register temp = t1);
+  void j(const Address &adr, Register temp = t1);
+  void j(Label &l, Register temp = noreg);

   // jump register: jalr x0, offset(rs)
   void jr(Register Rd, int32_t offset = 0);

   // call: la + jalr x1
-  void call(const address dest, Register temp = t0);
+  void call(const address dest, Register temp = t1);

   // jalr: jalr x1, offset(rs)
   void jalr(Register Rs, int32_t offset = 0);
@@ -654,7 +654,8 @@ class MacroAssembler: public Assembler {
   // Emit a runtime call. Only invalidates the tmp register which
   // is used to keep the entry address for jalr/movptr.
   // Uses call() for intra code cache, else movptr + jalr.
-  void rt_call(address dest, Register tmp = t0);
+  // Clobbers t1.
+  void rt_call(address dest, Register tmp = t1);

   // ret: jalr x0, 0(x1)
   inline void ret() {
@@ -1165,8 +1166,9 @@ class MacroAssembler: public Assembler {
   // - relocInfo::external_word_type
   // - relocInfo::runtime_call_type
   // - relocInfo::none
-  void far_call(const Address &entry, Register tmp = t0);
-  void far_jump(const Address &entry, Register tmp = t0);
+  // Clobbers t1 by default.
+  void far_call(const Address &entry, Register tmp = t1);
+  void far_jump(const Address &entry, Register tmp = t1);

   static int far_branch_size() {
     return 2 * 4;  // auipc + jalr, see far_call() & far_jump()

diff --git a/src/hotspot/cpu/riscv/methodHandles_riscv.cpp b/src/hotspot/cpu/riscv/methodHandles_riscv.cpp
index f638db9f0bfe4..8ed4b93ad4de9 100644
--- a/src/hotspot/cpu/riscv/methodHandles_riscv.cpp
+++ b/src/hotspot/cpu/riscv/methodHandles_riscv.cpp
@@ -109,17 +109,17 @@ void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register meth
     // compiled code in threads for which the event is enabled. Check here for
     // interp_only_mode if these events CAN be enabled.

-    __ lwu(t0, Address(xthread, JavaThread::interp_only_mode_offset()));
-    __ beqz(t0, run_compiled_code);
-    __ ld(t0, Address(method, Method::interpreter_entry_offset()));
-    __ jr(t0);
+    __ lwu(t1, Address(xthread, JavaThread::interp_only_mode_offset()));
+    __ beqz(t1, run_compiled_code);
+    __ ld(t1, Address(method, Method::interpreter_entry_offset()));
+    __ jr(t1);
     __ BIND(run_compiled_code);
   }

   const ByteSize entry_offset = for_compiler_entry ?
                                 Method::from_compiled_offset() : Method::from_interpreted_offset();
-  __ ld(t0,Address(method, entry_offset));
-  __ jr(t0);
+  __ ld(t1, Address(method, entry_offset));
+  __ jr(t1);

   __ bind(L_no_such_method);
   __ far_jump(RuntimeAddress(SharedRuntime::throw_AbstractMethodError_entry()));
 }

diff --git a/src/hotspot/cpu/riscv/nativeInst_riscv.cpp b/src/hotspot/cpu/riscv/nativeInst_riscv.cpp
index 6c9e0986869b6..05b40d238a051 100644
--- a/src/hotspot/cpu/riscv/nativeInst_riscv.cpp
+++ b/src/hotspot/cpu/riscv/nativeInst_riscv.cpp
@@ -91,10 +91,10 @@ bool NativeShortCallTrampolineStub::is_at(address addr) {
   if (MacroAssembler::is_auipc_at(addr) &&
       MacroAssembler::is_ld_at(addr + instr_size) &&
       MacroAssembler::is_jalr_at(addr + 2 * instr_size) &&
-      (MacroAssembler::extract_rd(addr) == x5) &&
-      (MacroAssembler::extract_rd(addr + instr_size) == x5) &&
-      (MacroAssembler::extract_rs1(addr + instr_size) == x5) &&
-      (MacroAssembler::extract_rs1(addr + 2 * instr_size) == x5) &&
+      (MacroAssembler::extract_rd(addr) == x6) &&
+      (MacroAssembler::extract_rd(addr + instr_size) == x6) &&
+      (MacroAssembler::extract_rs1(addr + instr_size) == x6) &&
+      (MacroAssembler::extract_rs1(addr + 2 * instr_size) == x6) &&
       (Assembler::extract(Assembler::ld_instr(addr + 4), 31, 20) == trampoline_data_offset)) {
     return true;
   }
@@ -460,10 +460,10 @@ bool NativeFarCall::is_at(address addr) {
   if (MacroAssembler::is_auipc_at(addr) &&
       MacroAssembler::is_ld_at(addr + instr_size) &&
       MacroAssembler::is_jalr_at(addr + 2 * instr_size) &&
-      (MacroAssembler::extract_rd(addr) == x5) &&
-      (MacroAssembler::extract_rd(addr + instr_size) == x5) &&
-      (MacroAssembler::extract_rs1(addr + instr_size) == x5) &&
-      (MacroAssembler::extract_rs1(addr + 2 * instr_size) == x5) &&
+      (MacroAssembler::extract_rd(addr) == x6) &&
+      (MacroAssembler::extract_rd(addr + instr_size) == x6) &&
+      (MacroAssembler::extract_rs1(addr + instr_size) == x6) &&
+      (MacroAssembler::extract_rs1(addr + 2 * instr_size) == x6) &&
       (MacroAssembler::extract_rd(addr + 2 * instr_size) == x1)) {
     return true;
   }
@@ -789,8 +789,8 @@ void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
   Assembler::IncompressibleRegion ir(&a); // Fixed length: see NativeGeneralJump::get_instruction_size()
   int32_t offset = 0;
-  a.movptr(t0, entry, offset, t1); // lui, lui, slli, add
-  a.jr(t0, offset); // jalr
+  a.movptr(t1, entry, offset, t0); // lui, lui, slli, add
+  a.jr(t1, offset); // jalr

   ICache::invalidate_range(code_pos, instruction_size);
 }

diff --git a/src/hotspot/cpu/riscv/riscv.ad b/src/hotspot/cpu/riscv/riscv.ad
index 563dfd4cde972..59f9ce1e73d89 100644
--- a/src/hotspot/cpu/riscv/riscv.ad
+++ b/src/hotspot/cpu/riscv/riscv.ad
@@ -1261,11 +1261,11 @@ int MachCallRuntimeNode::ret_addr_offset() {
   // jal(trampoline_stub)
   // for real runtime callouts it will be 11 instructions
   // see riscv_enc_java_to_runtime
-  // la(t1, retaddr) -> auipc + addi
-  // la(t0, RuntimeAddress(addr)) -> lui + addi + slli + addi + slli + addi
+  // la(t0, retaddr) -> auipc + addi
+  // la(t1, RuntimeAddress(addr)) -> lui + addi + slli + addi + slli + addi
   // addi(sp, sp, -2 * wordSize) -> addi
-  // sd(t1, Address(sp, wordSize)) -> sd
-  // jalr(t0) -> jalr
+  // sd(t0, Address(sp, wordSize)) -> sd
+  // jalr(t1) -> jalr
   CodeBlob *cb = CodeCache::find_blob(_entry_point);
   if (cb != nullptr) {
     if (UseTrampolines) {
@@ -1418,8 +1418,8 @@ void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
     Label L_skip_barrier;

-    __ mov_metadata(t1, C->method()->holder()->constant_encoding());
-    __ clinit_barrier(t1, t0, &L_skip_barrier);
+    __ mov_metadata(t0, C->method()->holder()->constant_encoding());
+    __ clinit_barrier(t0, t1, &L_skip_barrier);
     __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
     __ bind(L_skip_barrier);
   }
@@ -1857,8 +1857,8 @@ uint MachUEPNode::size(PhaseRegAlloc* ra_) const

 // Emit exception handler code.
 int HandlerImpl::emit_exception_handler(C2_MacroAssembler* masm)
 {
-  // auipc t0, #exception_blob_entry_point
-  // jr (offset)t0
+  // auipc t1, #exception_blob_entry_point
+  // jr (offset)t1
   // Note that the code buffer's insts_mark is always relative to insts.
   // That's why we must use the macroassembler to generate a handler.
   address base = __ start_a_stub(size_exception_handler());
@@ -2504,12 +2504,12 @@ encode %{
       __ post_call_nop();
     } else {
       Label retaddr;
-      __ la(t1, retaddr);
-      __ la(t0, RuntimeAddress(entry));
+      __ la(t0, retaddr);
+      __ la(t1, RuntimeAddress(entry));
       // Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc()
       __ addi(sp, sp, -2 * wordSize);
-      __ sd(t1, Address(sp, wordSize));
-      __ jalr(t0);
+      __ sd(t0, Address(sp, wordSize));
+      __ jalr(t1);
       __ bind(retaddr);
       __ post_call_nop();
       __ addi(sp, sp, 2 * wordSize);

diff --git a/src/hotspot/cpu/riscv/runtime_riscv.cpp b/src/hotspot/cpu/riscv/runtime_riscv.cpp
index 9e16278c3b547..2f879b07e2694 100644
--- a/src/hotspot/cpu/riscv/runtime_riscv.cpp
+++ b/src/hotspot/cpu/riscv/runtime_riscv.cpp
@@ -351,7 +351,7 @@ void OptoRuntime::generate_exception_blob() {
   // x10: exception handler

   // We have a handler in x10 (could be deopt blob).
-  __ mv(t0, x10);
+  __ mv(t1, x10);

   // Get the exception oop
   __ ld(x10, Address(xthread, JavaThread::exception_oop_offset()));
@@ -365,11 +365,11 @@ void OptoRuntime::generate_exception_blob() {
   __ sd(zr, Address(xthread, JavaThread::exception_oop_offset()));

   // x10: exception oop
-  // t0: exception handler
+  // t1: exception handler
   // x14: exception pc
   // Jump to handler
-  __ jr(t0);
+  __ jr(t1);

   // Make sure all code is generated
   masm->flush();

diff --git a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
index 27da26d404cc0..fb836abcfc763 100644
--- a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
+++ b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
@@ -468,8 +468,8 @@ static void gen_c2i_adapter(MacroAssembler *masm,
   __ mv(esp, sp); // Interp expects args on caller's expression stack

-  __ ld(t0, Address(xmethod, in_bytes(Method::interpreter_entry_offset())));
-  __ jr(t0);
+  __ ld(t1, Address(xmethod, in_bytes(Method::interpreter_entry_offset())));
+  __ jr(t1);
 }

 void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
@@ -610,7 +610,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
   Label skip_fixup;

   const Register receiver = j_rarg0;
-  const Register data = t1;
+  const Register data = t0;
   const Register tmp = t2;  // A call-clobbered register not used for arg passing

   // -------------------------------------------------------------------------
@@ -1127,8 +1127,8 @@ static void gen_continuation_yield(MacroAssembler* masm,
     Label ok;
     __ beqz(t0, ok);
     __ leave();
-    __ la(t0, RuntimeAddress(StubRoutines::forward_exception_entry()));
-    __ jr(t0);
+    __ la(t1, RuntimeAddress(StubRoutines::forward_exception_entry()));
+    __ jr(t1);
     __ bind(ok);

     __ leave();
@@ -1439,8 +1439,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   // restoring them except fp. fp is the only callee save register
   // as far as the interpreter and the compiler(s) are concerned.
-
-  const Register ic_reg = t1;
   const Register receiver = j_rarg0;

   __ verify_oop(receiver);
@@ -1460,8 +1458,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

   if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
     Label L_skip_barrier;
-    __ mov_metadata(t1, method->method_holder()); // InstanceKlass*
-    __ clinit_barrier(t1, t0, &L_skip_barrier);
+    __ mov_metadata(t0, method->method_holder()); // InstanceKlass*
+    __ clinit_barrier(t0, t1, &L_skip_barrier);
     __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

     __ bind(L_skip_barrier);
@@ -1724,6 +1722,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
     __ sw(t0, Address(t1));

+  // Clobbers t1
   __ rt_call(native_func);

   __ bind(native_return);
@@ -2618,8 +2617,8 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(SharedStubId id, address desti
   reg_saver.restore_live_registers(masm);

   // We are back to the original state on entry and ready to go.
-
-  __ jr(t0);
+  __ mv(t1, t0);
+  __ jr(t1);

   // Pending exception after the safepoint

diff --git a/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp b/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp
index bc2816147a0d0..6fb6059b108be 100644
--- a/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp
+++ b/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp
@@ -1189,7 +1189,7 @@ class StubGenerator: public StubCodeGenerator {
     __ slli(t1, count, exact_log2(size));
     Label L_continue;
     __ bltu(t0, t1, L_continue);
-    __ j(nooverlap_target);
+    __ j(nooverlap_target, t1);
     __ bind(L_continue);

     DecoratorSet decorators = IN_HEAP | IS_ARRAY;
@@ -1689,17 +1689,17 @@ class StubGenerator: public StubCodeGenerator {
     __ beqz(t0, L_int_aligned);
     __ test_bit(t0, t0, 0);
     __ beqz(t0, L_short_aligned);
-    __ j(RuntimeAddress(byte_copy_entry));
+    __ j(RuntimeAddress(byte_copy_entry), t1);

     __ BIND(L_short_aligned);
     __ srli(count, count, LogBytesPerShort);  // size => short_count
-    __ j(RuntimeAddress(short_copy_entry));
+    __ j(RuntimeAddress(short_copy_entry), t1);

     __ BIND(L_int_aligned);
     __ srli(count, count, LogBytesPerInt);    // size => int_count
-    __ j(RuntimeAddress(int_copy_entry));
+    __ j(RuntimeAddress(int_copy_entry), t1);

     __ BIND(L_long_aligned);
     __ srli(count, count, LogBytesPerLong);   // size => long_count
-    __ j(RuntimeAddress(long_copy_entry));
+    __ j(RuntimeAddress(long_copy_entry), t1);

     return start;
   }
@@ -1880,13 +1880,13 @@ class StubGenerator: public StubCodeGenerator {
     __ add(from, src, src_pos); // src_addr
     __ add(to, dst, dst_pos);   // dst_addr
     __ sign_extend(count, scratch_length, 32); // length
-    __ j(RuntimeAddress(byte_copy_entry));
+    __ j(RuntimeAddress(byte_copy_entry), t1);

   __ BIND(L_copy_shorts);
     __ shadd(from, src_pos, src, t0, 1); // src_addr
     __ shadd(to, dst_pos, dst, t0, 1);   // dst_addr
     __ sign_extend(count, scratch_length, 32); // length
-    __ j(RuntimeAddress(short_copy_entry));
+    __ j(RuntimeAddress(short_copy_entry), t1);

   __ BIND(L_copy_ints);
     __ test_bit(t0, x30_elsize, 0);
     __ shadd(from, src_pos, src, t0, 2); // src_addr
     __ shadd(to, dst_pos, dst, t0, 2);   // dst_addr
     __ sign_extend(count, scratch_length, 32); // length
-    __ j(RuntimeAddress(int_copy_entry));
+    __ j(RuntimeAddress(int_copy_entry), t1);

   __ BIND(L_copy_longs);
 #ifdef ASSERT
     __ shadd(from, src_pos, src, t0, 3); // src_addr
     __ shadd(to, dst_pos, dst, t0, 3);   // dst_addr
     __ sign_extend(count, scratch_length, 32); // length
-    __ j(RuntimeAddress(long_copy_entry));
+    __ j(RuntimeAddress(long_copy_entry), t1);

   // ObjArrayKlass
   __ BIND(L_objArray);
     // live at this point: scratch_src_klass, scratch_length, src[_pos], dst[_pos]

     __ add(to, to, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
     __ sign_extend(count, scratch_length, 32); // length
   __ BIND(L_plain_copy);
-    __ j(RuntimeAddress(oop_copy_entry));
+    __ j(RuntimeAddress(oop_copy_entry), t1);

   __ BIND(L_checkcast_copy);
     // live at this point: scratch_src_klass, scratch_length, t2 (dst_klass)
     {
       assert(c_rarg3 == sco_temp, "#3 already in place");
       // Set up arguments for checkcast_copy_entry.
       __ mv(c_rarg4, dst_klass); // dst.klass.element_klass
-      __ j(RuntimeAddress(checkcast_copy_entry));
+      __ j(RuntimeAddress(checkcast_copy_entry), t1);
     }

   __ BIND(L_failed);
@@ -3782,8 +3782,8 @@ class StubGenerator: public StubCodeGenerator {
     Label thaw_success;
     // t1 contains the size of the frames to thaw, 0 if overflow or no more frames
     __ bnez(t1, thaw_success);
-    __ la(t0, RuntimeAddress(SharedRuntime::throw_StackOverflowError_entry()));
-    __ jr(t0);
+    __ la(t1, RuntimeAddress(SharedRuntime::throw_StackOverflowError_entry()));
+    __ jr(t1);
     __ bind(thaw_success);

     // make room for the thawed frames

diff --git a/src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp b/src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp
index 7c811aa3a0c26..41f3b40db6cd9 100644
--- a/src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp
+++ b/src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp
@@ -296,7 +296,8 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
       ;
   }
   if (entry_point != nullptr) {
-    __ jr(continuation);
+    __ mv(t1, continuation);
+    __ jr(t1);
   }

   return entry_point;

diff --git a/src/hotspot/cpu/riscv/templateTable_riscv.cpp b/src/hotspot/cpu/riscv/templateTable_riscv.cpp
index 2fede262057ce..2520bd9b565ae 100644
--- a/src/hotspot/cpu/riscv/templateTable_riscv.cpp
+++ b/src/hotspot/cpu/riscv/templateTable_riscv.cpp
@@ -718,8 +718,8 @@ void TemplateTable::index_check(Register array, Register index) {
   __ sign_extend(index, index, 32);
   __ bltu(index, length, ok);
   __ mv(x13, array);
-  __ mv(t0, Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
-  __ jr(t0);
+  __ mv(t1, Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
+  __ jr(t1);
   __ bind(ok);
 }
@@ -1085,7 +1085,7 @@ void TemplateTable::aastore() {
   // Come here on failure
   // object is at TOS
-  __ j(Interpreter::_throw_ArrayStoreException_entry);
+  __ j(Interpreter::_throw_ArrayStoreException_entry, t1);

   // Come here on success
   __ bind(ok_is_subtype);
@@ -1313,8 +1313,8 @@ void TemplateTable::idiv() {
   // explicitly check for div0
   Label no_div0;
   __ bnez(x10, no_div0);
-  __ mv(t0, Interpreter::_throw_ArithmeticException_entry);
-  __ jr(t0);
+  __ mv(t1, Interpreter::_throw_ArithmeticException_entry);
+  __ jr(t1);
   __ bind(no_div0);
   __ pop_i(x11);
   // x10 <== x11 idiv x10
@@ -1326,8 +1326,8 @@ void TemplateTable::irem() {
   // explicitly check for div0
   Label no_div0;
   __ bnez(x10, no_div0);
-  __ mv(t0, Interpreter::_throw_ArithmeticException_entry);
-  __ jr(t0);
+  __ mv(t1, Interpreter::_throw_ArithmeticException_entry);
+  __ jr(t1);
   __ bind(no_div0);
   __ pop_i(x11);
   // x10 <== x11 irem x10
@@ -1345,8 +1345,8 @@ void TemplateTable::ldiv() {
   // explicitly check for div0
   Label no_div0;
   __ bnez(x10, no_div0);
-  __ mv(t0, Interpreter::_throw_ArithmeticException_entry);
-  __ jr(t0);
+  __ mv(t1, Interpreter::_throw_ArithmeticException_entry);
+  __ jr(t1);
   __ bind(no_div0);
   __ pop_l(x11);
   // x10 <== x11 ldiv x10
@@ -1358,8 +1358,8 @@ void TemplateTable::lrem() {
   // explicitly check for div0
   Label no_div0;
   __ bnez(x10, no_div0);
-  __ mv(t0, Interpreter::_throw_ArithmeticException_entry);
-  __ jr(t0);
+  __ mv(t1, Interpreter::_throw_ArithmeticException_entry);
+  __ jr(t1);
   __ bind(no_div0);
   __ pop_l(x11);
   // x10 <== x11 lrem x10
@@ -1768,8 +1768,8 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
     __ andi(sp, esp, -16);

     // and begin the OSR nmethod
-    __ ld(t0, Address(x9, nmethod::osr_entry_point_offset()));
-    __ jr(t0);
+    __ ld(t1, Address(x9, nmethod::osr_entry_point_offset()));
+    __ jr(t1);
   }
 }
@@ -2171,7 +2171,7 @@ void TemplateTable::_return(TosState state) {
 void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
                                                        Register Rcache,
                                                        Register index) {
-  const Register temp = x9;
+  const Register temp = x9; // s1
   assert_different_registers(Rcache, index, temp);
   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
@@ -3672,7 +3672,7 @@ void TemplateTable::checkcast() {
   // Come here on failure
   __ push_reg(x13);
   // object is at TOS
-  __ j(Interpreter::_throw_ClassCastException_entry);
+  __ j(Interpreter::_throw_ClassCastException_entry, t1);

   // Come here on success
   __ bind(ok_is_subtype);
@@ -3962,8 +3962,8 @@ void TemplateTable::wide() {
   __ load_unsigned_byte(x9, at_bcp(1));
   __ mv(t0, (address)Interpreter::_wentry_point);
   __ shadd(t0, x9, t0, t1, 3);
-  __ ld(t0, Address(t0));
-  __ jr(t0);
+  __ ld(t1, Address(t0));
+  __ jr(t1);
 }

 // Multi arrays

diff --git a/src/hotspot/cpu/riscv/upcallLinker_riscv.cpp b/src/hotspot/cpu/riscv/upcallLinker_riscv.cpp
index 55160be99d0d8..3b4f26b6dc333 100644
--- a/src/hotspot/cpu/riscv/upcallLinker_riscv.cpp
+++ b/src/hotspot/cpu/riscv/upcallLinker_riscv.cpp
@@ -267,8 +267,8 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Symbol* signature,
   __ push_cont_fastpath(xthread);

-  __ ld(t0, Address(xmethod, Method::from_compiled_offset()));
-  __ jalr(t0);
+  __ ld(t1, Address(xmethod, Method::from_compiled_offset()));
+  __ jalr(t1);

   __ pop_cont_fastpath(xthread);

diff --git a/src/hotspot/cpu/riscv/vtableStubs_riscv.cpp b/src/hotspot/cpu/riscv/vtableStubs_riscv.cpp
index 5d945dbc32309..10e7deee7151a 100644
--- a/src/hotspot/cpu/riscv/vtableStubs_riscv.cpp
+++ b/src/hotspot/cpu/riscv/vtableStubs_riscv.cpp
@@ -131,8 +131,8 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
   // xmethod: Method*
   // x12: receiver
   address ame_addr = __ pc();
-  __ ld(t0, Address(xmethod, Method::from_compiled_offset()));
-  __ jr(t0);
+  __ ld(t1, Address(xmethod, Method::from_compiled_offset()));
+  __ jr(t1);

   masm->flush();
   bookkeeping(masm, tty, s, npe_addr, ame_addr, true, vtable_index, slop_bytes, 0);
@@ -171,7 +171,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
   assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");

   // Entry arguments:
-  // t1: CompiledICData
+  // t0: CompiledICData
   // j_rarg0: Receiver

   // This stub is called from compiled code which has no callee-saved registers,
   const Register resolved_klass_reg = x30; // resolved interface klass (REFC)
   const Register temp_reg = x28;
   const Register temp_reg2 = x29;
-  const Register icdata_reg = t1;
+  const Register icdata_reg = t0;
   Label L_no_such_interface;
@@ -220,8 +220,8 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
   // xmethod: Method*
   // j_rarg0: receiver
   address ame_addr = __ pc();
-  __ ld(t0, Address(xmethod, Method::from_compiled_offset()));
-  __ jr(t0);
+  __ ld(t1, Address(xmethod, Method::from_compiled_offset()));
+  __ jr(t1);

   __ bind(L_no_such_interface);
   // Handle IncompatibleClassChangeError in itable stubs.
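-- 

Not part of the patch, but a summary of the convention it introduces, for review:
x1 (ra) stays reserved for return addresses, x5 (t0) is retired as a call/jump
destination, and x6 (t1) becomes the default scratch register for far_call,
far_jump and rt_call. The instruction sequence NativeShortCallTrampolineStub::is_at
and NativeFarCall::is_at now recognize is:

    auipc x6, hi(target)        ; extract_rd(addr) == x6
    ld    x6, lo(target)(x6)    ; extract_rd/rs1(addr + instr_size) == x6
    jalr  x1, 0(x6)             ; extract_rs1(addr + 2 * instr_size) == x6, rd == x1

Below is a minimal, self-contained C++ sketch of the guard pattern the patch adds
to MacroAssembler::jr/jalr/rt_call. The Register enum and the guard_jump_temp
helper are illustrative stand-ins, not HotSpot code:

    #include <cassert>

    enum Register { x0 = 0, x1 = 1, x5 = 5, x6 = 6, x18 = 18 };

    // Mirrors the asserts added in macroAssembler_riscv.cpp: neither x1 (ra)
    // nor x5 (t0) may carry a call/jump target; x6 (t1) is the new default.
    static void guard_jump_temp(Register temp) {
      assert(temp != x0 && "expecting a real scratch register");
      assert(temp != x1 && temp != x5 && "x1/x5 are not used as call/jump temps");
    }

    int main() {
      guard_jump_temp(x6);  // ok: t1, the new default far-call/jump temp
      guard_jump_temp(x18); // ok: an explicitly chosen register, as in RangeCheckStub
      return 0;
    }

Note that callers must treat t1 as clobbered across any far_call, far_jump or
rt_call that uses the default temp, which is why several hunks above add
"Clobbers t1" comments at the call sites.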