
Commit

robehn committed Oct 8, 2024
1 parent 57c859e commit d81501a
Showing 19 changed files with 145 additions and 130 deletions.
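For orientation while reading the hunks below, a short register-alias key (background, assumed from the RISC-V integer calling convention; not part of the commit):

    // x1  = ra  (return address, the primary link register)
    // x5  = t0  (the alternate link register; the new asserts keep it out of generic calls/jumps)
    // x6  = t1  (the scratch register these hunks switch to)
    // x7  = t2
    // x18 = s2  (callee-saved; used below where both t0 and t1 are already live)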
3 changes: 2 additions & 1 deletion src/hotspot/cpu/riscv/assembler_riscv.hpp
@@ -2892,6 +2892,7 @@ enum Nf {
protected:
// All calls and jumps must go via MASM.
void jalr(Register Rd, Register Rs, const int32_t offset) {
assert(Rd != x5 && Rs != x5, "Register x5 not used for calls/jumps.");
/* jalr -> c.jr/c.jalr */
if (do_compress() && (offset == 0 && Rs != x0)) {
if (Rd == x1) {
@@ -2906,14 +2907,14 @@ enum Nf {
}

void jal(Register Rd, const int32_t offset) {
assert(Rd != x5, "Register x5 not used for calls/jumps.");
/* jal -> c.j, note c.jal is RV32C only */
if (do_compress() &&
Rd == x0 &&
is_simm12(offset) && ((offset % 2) == 0)) {
c_j(offset);
return;
}

_jal(Rd, offset);
}

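The x1/x5 asserts added above match the return-address-stack (RAS) hint rules in the RISC-V unprivileged spec; a condensed sketch of those rules for background (not part of the diff):

    // jal  rd, imm       : pushes the RAS when rd is a link register (x1 or x5)
    // jalr rd, rs1, imm  : rd !link, rs1 !link -> no RAS action
    //                      rd !link, rs1  link -> pop  (predicted as a return)
    //                      rd  link, rs1 !link -> push (predicted as a call)
    //                      rd  link, rs1  link -> pop then push (rs1 != rd), else push
    // A plain scratch jump through x5 would therefore disturb return prediction,
    // which is why t1 (x6) is used as the call/jump temp from here on.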
6 changes: 3 additions & 3 deletions src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp
@@ -93,8 +93,8 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
stub_id = C1StubId::throw_range_check_failed_id;
}
// t0 and t1 are used as args in generate_exception_throw,
// so use ra as the tmp register for rt_call.
__ rt_call(Runtime1::entry_for(stub_id), ra);
// so use x18 as the tmp register for rt_call.
__ rt_call(Runtime1::entry_for(stub_id), x18);
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here());
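For context, the switch from ra to x18 here follows from the asserts added to MacroAssembler::rt_call later in this diff (a sketch of the constraint, not new code in the commit):

    // rt_call(dest, tmp) now rejects x1 (ra) and x5 (t0) as tmp, and t0/t1 already
    // carry the arguments prepared for generate_exception_throw, so a callee-saved
    // register is passed instead:
    //   __ rt_call(Runtime1::entry_for(stub_id), x18);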
@@ -275,7 +275,7 @@ void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
if (_obj->is_cpu_register()) {
__ mv(t0, _obj->as_register());
}
__ far_call(RuntimeAddress(Runtime1::entry_for(_stub)), t1);
__ far_call(RuntimeAddress(Runtime1::entry_for(_stub)));
ce->add_call_info_here(_info);
debug_only(__ should_not_reach_here());
}
4 changes: 2 additions & 2 deletions src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp
@@ -96,8 +96,8 @@ void LIR_Assembler::clinit_barrier(ciMethod* method) {

Label L_skip_barrier;

__ mov_metadata(t1, method->holder()->constant_encoding());
__ clinit_barrier(t1, t0, &L_skip_barrier /* L_fast_path */);
__ mov_metadata(t0, method->holder()->constant_encoding());
__ clinit_barrier(t0, t1, &L_skip_barrier /* L_fast_path */);
__ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
__ bind(L_skip_barrier);
}
4 changes: 2 additions & 2 deletions src/hotspot/cpu/riscv/gc/g1/g1BarrierSetAssembler_riscv.cpp
@@ -271,8 +271,8 @@ static void generate_c2_barrier_runtime_call(MacroAssembler* masm, G1BarrierStub
__ mv(c_rarg0, arg);
}
__ mv(c_rarg1, xthread);
__ mv(t0, runtime_path);
__ jalr(t0);
__ mv(t1, runtime_path);
__ jalr(t1);
}

void G1BarrierSetAssembler::g1_write_barrier_pre_c2(MacroAssembler* masm,
4 changes: 2 additions & 2 deletions src/hotspot/cpu/riscv/gc/x/xBarrierSetAssembler_riscv.cpp
@@ -339,8 +339,8 @@ void XBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, X
XSaveLiveRegisters save_live_registers(masm, stub);
XSetupArguments setup_arguments(masm, stub);

__ mv(t0, stub->slow_path());
__ jalr(t0);
__ mv(t1, stub->slow_path());
__ jalr(t1);
}

// Stub exit
12 changes: 6 additions & 6 deletions src/hotspot/cpu/riscv/gc/z/zBarrierSetAssembler_riscv.cpp
@@ -724,8 +724,8 @@ void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, Z
{
SaveLiveRegisters save_live_registers(masm, stub);
ZSetupArguments setup_arguments(masm, stub);
__ mv(t0, stub->slow_path());
__ jalr(t0);
__ mv(t1, stub->slow_path());
__ jalr(t1);
}

// Stub exit
@@ -758,13 +758,13 @@ void ZBarrierSetAssembler::generate_c2_store_barrier_stub(MacroAssembler* masm,
__ la(c_rarg0, stub->ref_addr());

if (stub->is_native()) {
__ la(t0, RuntimeAddress(ZBarrierSetRuntime::store_barrier_on_native_oop_field_without_healing_addr()));
__ la(t1, RuntimeAddress(ZBarrierSetRuntime::store_barrier_on_native_oop_field_without_healing_addr()));
} else if (stub->is_atomic()) {
__ la(t0, RuntimeAddress(ZBarrierSetRuntime::store_barrier_on_oop_field_with_healing_addr()));
__ la(t1, RuntimeAddress(ZBarrierSetRuntime::store_barrier_on_oop_field_with_healing_addr()));
} else {
__ la(t0, RuntimeAddress(ZBarrierSetRuntime::store_barrier_on_oop_field_without_healing_addr()));
__ la(t1, RuntimeAddress(ZBarrierSetRuntime::store_barrier_on_oop_field_without_healing_addr()));
}
__ jalr(t0);
__ jalr(t1);
}

// Stub exit
8 changes: 4 additions & 4 deletions src/hotspot/cpu/riscv/interp_masm_riscv.cpp
@@ -421,13 +421,13 @@ void InterpreterMacroAssembler::jump_from_interpreted(Register method) {
// interp_only_mode if these events CAN be enabled.
lwu(t0, Address(xthread, JavaThread::interp_only_mode_offset()));
beqz(t0, run_compiled_code);
ld(t0, Address(method, Method::interpreter_entry_offset()));
jr(t0);
ld(t1, Address(method, Method::interpreter_entry_offset()));
jr(t1);
bind(run_compiled_code);
}

ld(t0, Address(method, Method::from_interpreted_offset()));
jr(t0);
ld(t1, Address(method, Method::from_interpreted_offset()));
jr(t1);
}

// The following two routines provide a hook so that an implementation
56 changes: 34 additions & 22 deletions src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
@@ -457,8 +457,8 @@ void MacroAssembler::call_VM_base(Register oop_result,
RuntimeAddress target(StubRoutines::forward_exception_entry());
relocate(target.rspec(), [&] {
int32_t offset;
la(t0, target.target(), offset);
jr(t0, offset);
la(t1, target.target(), offset);
jr(t1, offset);
});
bind(ok);
}
@@ -760,21 +760,21 @@ void MacroAssembler::emit_static_call_stub() {

// Jump to the entry point of the c2i stub.
int32_t offset = 0;
movptr(t0, 0, offset, t1); // lui + lui + slli + add
jr(t0, offset);
movptr(t1, 0, offset, t0); // lui + lui + slli + add
jr(t1, offset);
}

void MacroAssembler::call_VM_leaf_base(address entry_point,
int number_of_arguments,
Label *retaddr) {
int32_t offset = 0;
push_reg(RegSet::of(t0, xmethod), sp); // push << t0 & xmethod >> to sp
mv(t0, entry_point, offset);
jalr(t0, offset);
push_reg(RegSet::of(t1, xmethod), sp); // push << t1 & xmethod >> to sp
mv(t1, entry_point, offset);
jalr(t1, offset);
if (retaddr != nullptr) {
bind(*retaddr);
}
pop_reg(RegSet::of(t0, xmethod), sp); // pop << t0 & xmethod >> from sp
pop_reg(RegSet::of(t1, xmethod), sp); // pop << t1 & xmethod >> from sp
}

void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
@@ -941,6 +941,8 @@ void MacroAssembler::li(Register Rd, int64_t imm) {

void MacroAssembler::load_link_jump(const address source, Register temp) {
assert(temp != noreg && temp != x0, "expecting a register");
assert(temp != x5, "Register x5 not used for calls.");
assert(temp != x1, "Register x1 not used for calls.");
assert_cond(source != nullptr);
int64_t distance = source - pc();
assert(is_simm32(distance), "Must be");
@@ -968,7 +970,8 @@ void MacroAssembler::j(const address dest, Register temp) {
if (is_simm21(distance) && ((distance % 2) == 0)) {
Assembler::jal(x0, distance);
} else {
assert(temp != noreg && temp != x0, "expecting a register");
assert(temp != x5 && temp != x1, "Register x5/x1 not used for jumps.");
assert(temp != noreg && temp != x0, "Expecting a register");
int32_t offset = 0;
la(temp, dest, offset);
jr(temp, offset);
@@ -1006,23 +1009,31 @@ void MacroAssembler::j(Label &lab, Register temp) {

void MacroAssembler::jr(Register Rd, int32_t offset) {
assert(Rd != noreg, "expecting a register");
assert(Rd != x5, "Register x5 not used for jumps.");
assert(Rd != x1, "Register x1 not used for jumps.");
Assembler::jalr(x0, Rd, offset);
}

void MacroAssembler::call(const address dest, Register temp) {
assert_cond(dest != nullptr);
assert(temp != noreg, "expecting a register");
assert(temp != x5, "Register x5 not used for jumps.");
assert(temp != x1, "Register x1 not used for jumps.");
int32_t offset = 0;
la(temp, dest, offset);
jalr(temp, offset);
}

void MacroAssembler::jalr(Register Rs, int32_t offset) {
assert(Rs != noreg, "expecting a register");
assert(Rs != x1, "Register x1 not used for calls.");
assert(Rs != x5, "Register x5 not used for calls.");
Assembler::jalr(x1, Rs, offset);
}

void MacroAssembler::rt_call(address dest, Register tmp) {
assert(tmp != x5, "Register x5 not used for jumps.");
assert(tmp != x1, "Register x1 not used for jumps.");
CodeBlob *cb = CodeCache::find_blob(dest);
RuntimeAddress target(dest);
if (cb) {
@@ -4072,7 +4083,7 @@ void MacroAssembler::verify_secondary_supers_table(Register r_sub_klass,
Register tmp1,
Register tmp2,
Register tmp3) {
assert_different_registers(r_sub_klass, r_super_klass, tmp1, tmp2, tmp3, result, t0);
assert_different_registers(r_sub_klass, r_super_klass, tmp1, tmp2, tmp3, result, t0, t1);

const Register
r_array_base = tmp1, // X11
@@ -4139,8 +4150,8 @@ void MacroAssembler::get_thread(Register thread) {
RegSet::range(x28, x31) + ra - thread;
push_reg(saved_regs, sp);

mv(ra, CAST_FROM_FN_PTR(address, Thread::current));
jalr(ra);
mv(t1, CAST_FROM_FN_PTR(address, Thread::current));
jalr(t1);
if (thread != c_rarg0) {
mv(thread, c_rarg0);
}
@@ -4187,8 +4198,8 @@ void MacroAssembler::reserved_stack_check() {
// We have already removed our own frame.
// throw_delayed_StackOverflowError will think that it's been
// called by our caller.
la(t0, RuntimeAddress(SharedRuntime::throw_delayed_StackOverflowError_entry()));
jr(t0);
la(t1, RuntimeAddress(SharedRuntime::throw_delayed_StackOverflowError_entry()));
jr(t1);
should_not_reach_here();

bind(no_reserved_zone_enabling);
@@ -4299,7 +4310,7 @@ address MacroAssembler::load_and_call(Address entry) {
}
#endif
relocate(entry.rspec(), [&] {
load_link_jump(target);
load_link_jump(target, t1);
});

postcond(pc() != badAddress);
@@ -4309,7 +4320,7 @@ address MacroAssembler::load_and_call(Address entry) {
address MacroAssembler::ic_call(address entry, jint method_index) {
RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
IncompressibleRegion ir(this); // relocations
movptr(t1, (address)Universe::non_oop_word(), t0);
movptr(t0, (address)Universe::non_oop_word(), t1);
assert_cond(entry != nullptr);
return reloc_call(Address(entry, rh));
}
@@ -4323,9 +4334,9 @@ int MacroAssembler::ic_check_size() {
int MacroAssembler::ic_check(int end_alignment) {
IncompressibleRegion ir(this);
Register receiver = j_rarg0;
Register data = t1;
Register data = t0;

Register tmp1 = t0; // t0 always scratch
Register tmp1 = t1; // scratch
// t2 is saved on call, thus should have been saved before this check.
// Hence we can clobber it.
Register tmp2 = t2;
@@ -4423,8 +4434,8 @@ address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
// - load the call
// - call
Label target;
ld(t0, target); // auipc + ld
jr(t0); // jalr
ld(t1, target); // auipc + ld
jr(t1); // jalr
bind(target);
assert(offset() - stub_start_offset == MacroAssembler::NativeShortCall::trampoline_data_offset,
"should be");
@@ -5148,11 +5159,11 @@ const int MacroAssembler::zero_words_block_size = 8;
// ptr: Address of a buffer to be zeroed.
// cnt: Count in HeapWords.
//
// ptr, cnt, and t0 are clobbered.
// ptr, cnt, t1, and t0 are clobbered.
address MacroAssembler::zero_words(Register ptr, Register cnt) {
assert(is_power_of_2(zero_words_block_size), "adjust this");
assert(ptr == x28 && cnt == x29, "mismatch in register usage");
assert_different_registers(cnt, t0);
assert_different_registers(cnt, t0, t1);

BLOCK_COMMENT("zero_words {");

@@ -5170,6 +5181,7 @@ address MacroAssembler::zero_words(Register ptr, Register cnt) {
return nullptr;
}
} else {
// Clobbers t1
rt_call(zero_blocks.target());
}
}
18 changes: 10 additions & 8 deletions src/hotspot/cpu/riscv/macroAssembler_riscv.hpp
@@ -627,7 +627,7 @@ class MacroAssembler: public Assembler {
void bgtz(Register Rs, const address dest);

private:
void load_link_jump(const address source, Register temp = t0);
void load_link_jump(const address source, Register temp);
void jump_link(const address dest, Register temp);
public:
// We try to follow risc-v asm mnemonics.
@@ -638,23 +638,24 @@ class MacroAssembler: public Assembler {
// jump: jal x0, offset
// For long reach uses temp register for:
// la + jr
void j(const address dest, Register temp = t0);
void j(const Address &adr, Register temp = t0);
void j(Label &l, Register temp = t0);
void j(const address dest, Register temp = t1);
void j(const Address &adr, Register temp = t1);
void j(Label &l, Register temp = noreg);

// jump register: jalr x0, offset(rs)
void jr(Register Rd, int32_t offset = 0);

// call: la + jalr x1
void call(const address dest, Register temp = t0);
void call(const address dest, Register temp = t1);

// jalr: jalr x1, offset(rs)
void jalr(Register Rs, int32_t offset = 0);

// Emit a runtime call. Only invalidates the tmp register which
// is used to keep the entry address for jalr/movptr.
// Uses call() for intra code cache, else movptr + jalr.
void rt_call(address dest, Register tmp = t0);
// Clobbers t1.
void rt_call(address dest, Register tmp = t1);
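A minimal usage sketch of the updated default (illustrative only; it reuses the forward_exception_entry stub that appears elsewhere in this diff):

    // __ rt_call(StubRoutines::forward_exception_entry());      // tmp defaults to t1, which is clobbered
    // __ rt_call(StubRoutines::forward_exception_entry(), x18); // any non-reserved register; x1/x5 trip the asserts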

// ret: jalr x0, 0(x1)
inline void ret() {
@@ -1165,8 +1166,9 @@ class MacroAssembler: public Assembler {
// - relocInfo::external_word_type
// - relocInfo::runtime_call_type
// - relocInfo::none
void far_call(const Address &entry, Register tmp = t0);
void far_jump(const Address &entry, Register tmp = t0);
// Clobbers t1 by default.
void far_call(const Address &entry, Register tmp = t1);
void far_jump(const Address &entry, Register tmp = t1);

static int far_branch_size() {
return 2 * 4; // auipc + jalr, see far_call() & far_jump()
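A sketch of the two-instruction sequences far_branch_size() accounts for (assumed from the auipc + jalr comment above; relocation handling omitted):

    // far_call(entry, tmp = t1):  auipc t1, %pcrel_hi(entry)
    //                             jalr  ra, %pcrel_lo(entry)(t1)   // links through x1
    // far_jump(entry, tmp = t1):  auipc t1, %pcrel_hi(entry)
    //                             jalr  x0, %pcrel_lo(entry)(t1)   // no link register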
12 changes: 6 additions & 6 deletions src/hotspot/cpu/riscv/methodHandles_riscv.cpp
@@ -109,17 +109,17 @@ void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register meth
// compiled code in threads for which the event is enabled. Check here for
// interp_only_mode if these events CAN be enabled.

__ lwu(t0, Address(xthread, JavaThread::interp_only_mode_offset()));
__ beqz(t0, run_compiled_code);
__ ld(t0, Address(method, Method::interpreter_entry_offset()));
__ jr(t0);
__ lwu(t1, Address(xthread, JavaThread::interp_only_mode_offset()));
__ beqz(t1, run_compiled_code);
__ ld(t1, Address(method, Method::interpreter_entry_offset()));
__ jr(t1);
__ BIND(run_compiled_code);
}

const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() :
Method::from_interpreted_offset();
__ ld(t0,Address(method, entry_offset));
__ jr(t0);
__ ld(t1, Address(method, entry_offset));
__ jr(t1);
__ bind(L_no_such_method);
__ far_jump(RuntimeAddress(SharedRuntime::throw_AbstractMethodError_entry()));
}
Loading
