diff --git a/src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp b/src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp
index 4e297d0017238..1a24f78ad12a6 100644
--- a/src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp
+++ b/src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp
@@ -65,7 +65,7 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
   __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
-  __ jump(_continuation);
+  __ j(_continuation);
 }
 
 void RangeCheckStub::emit_code(LIR_Assembler* ce) {
@@ -147,7 +147,7 @@ void NewInstanceStub::emit_code(LIR_Assembler* ce) {
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
   assert(_result->as_register() == x10, "result must in x10");
-  __ jump(_continuation);
+  __ j(_continuation);
 }
 
 // Implementation of NewTypeArrayStub
@@ -167,7 +167,7 @@ void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
   assert(_result->as_register() == x10, "result must in x10");
-  __ jump(_continuation);
+  __ j(_continuation);
 }
 
 // Implementation of NewObjectArrayStub
@@ -187,7 +187,7 @@ void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
   assert(_result->as_register() == x10, "result must in x10");
-  __ jump(_continuation);
+  __ j(_continuation);
 }
 
 void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
@@ -204,7 +204,7 @@ void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
   __ far_call(RuntimeAddress(Runtime1::entry_for(enter_id)));
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
-  __ jump(_continuation);
+  __ j(_continuation);
 }
 
 void MonitorExitStub::emit_code(LIR_Assembler* ce) {
@@ -332,7 +332,7 @@ void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
   }
 #endif
 
-  __ jump(_continuation);
+  __ j(_continuation);
 }
 
 #undef __
diff --git a/src/hotspot/cpu/riscv/c1_LIRAssembler_arraycopy_riscv.cpp b/src/hotspot/cpu/riscv/c1_LIRAssembler_arraycopy_riscv.cpp
index 04f64c30e13bf..4f407764f2028 100644
--- a/src/hotspot/cpu/riscv/c1_LIRAssembler_arraycopy_riscv.cpp
+++ b/src/hotspot/cpu/riscv/c1_LIRAssembler_arraycopy_riscv.cpp
@@ -72,7 +72,7 @@ void LIR_Assembler::generic_arraycopy(Register src, Register src_pos, Register l
   __ subw(length, length, t0);
   __ addw(src_pos, src_pos, t0);
   __ addw(dst_pos, dst_pos, t0);
-  __ jump(*stub->entry());
+  __ j(*stub->entry());
 
   __ bind(*stub->continuation());
 }
@@ -235,7 +235,7 @@ void LIR_Assembler::arraycopy_type_check(Register src, Register src_pos, Registe
     arraycopy_checkcast(src, src_pos, length, dst, dst_pos, tmp, stub, basic_type, copyfunc_addr, flags);
   }
 
-  __ jump(*stub->entry());
+  __ j(*stub->entry());
   __ bind(cont);
   POP(src, dst);
 }
diff --git a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp
index df2a77e2e45de..e3ec023aef260 100644
--- a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp
@@ -341,7 +341,7 @@ int LIR_Assembler::emit_unwind_handler() {
     monitor_address(0, FrameMap::r10_opr);
     stub = new MonitorExitStub(FrameMap::r10_opr, true, 0);
     if (LockingMode == LM_MONITOR) {
-      __ jump(*stub->entry());
+      __ j(*stub->entry());
     } else {
       __ unlock_object(x15, x14, x10, x16, *stub->entry());
     }
@@ -884,7 +884,7 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L
   move_op(opr2, result, type, lir_patch_none, nullptr, false, // pop_fpu_stack
           false); // wide
-  __ jump(done);
+  __ j(done);
   __ bind(label);
   move_op(opr1, result, type, lir_patch_none, nullptr, false, // pop_fpu_stack
           false); // wide
@@ -909,7 +909,7 @@ void LIR_Assembler::emit_branch(LIR_Condition cmp_flag, LIR_Opr cmp1, LIR_Opr cm
                                 bool is_far, bool is_unordered) {
 
   if (cmp_flag == lir_cond_always) {
-    __ jump(label);
+    __ j(label);
     return;
   }
@@ -1002,7 +1002,7 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
   if (UseSlowPath ||
       (!UseFastNewObjectArray && is_reference_type(op->type())) ||
       (!UseFastNewTypeArray && !is_reference_type(op->type()))) {
-    __ jump(*op->stub()->entry());
+    __ j(*op->stub()->entry());
   } else {
     Register tmp1 = op->tmp1()->as_register();
     Register tmp2 = op->tmp2()->as_register();
@@ -1037,7 +1037,7 @@ void LIR_Assembler::type_profile_helper(Register mdo, ciMethodData *md, ciProfil
     __ bne(recv, t1, next_test);
     Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
     __ increment(data_addr, DataLayout::counter_increment);
-    __ jump(*update_done);
+    __ j(*update_done);
     __ bind(next_test);
   }
@@ -1050,7 +1050,7 @@ void LIR_Assembler::type_profile_helper(Register mdo, ciMethodData *md, ciProfil
     __ sd(recv, recv_addr);
     __ mv(t1, DataLayout::counter_increment);
     __ sd(t1, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))));
-    __ jump(*update_done);
+    __ j(*update_done);
     __ bind(next_test);
   }
 }
@@ -1123,7 +1123,7 @@ void LIR_Assembler::profile_object(ciMethodData* md, ciProfileData* data, Regist
     __ lbu(t0, data_addr);
     __ ori(t0, t0, BitData::null_seen_byte_constant());
     __ sb(t0, data_addr);
-    __ jump(*obj_is_null);
+    __ j(*obj_is_null);
     __ bind(not_null);
 
   Label update_done;
@@ -1196,7 +1196,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
     typecheck_helper_slowcheck(k, obj, Rtmp1, k_RInfo, klass_RInfo, failure_target, success_target);
   }
 
-  __ jump(*success);
+  __ j(*success);
 }
 
 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
@@ -1221,7 +1221,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
     emit_typecheck_helper(op, &success, &failure, &failure);
     __ bind(failure);
     __ mv(dst, zr);
-    __ jump(done);
+    __ j(done);
     __ bind(success);
     __ mv(dst, 1);
     __ bind(done);
@@ -1423,7 +1423,7 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit
 
 void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) {
   assert(exceptionOop->as_register() == x10, "must match");
-  __ jump(_unwind_handler_entry);
+  __ j(_unwind_handler_entry);
 }
 
 void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
@@ -1497,7 +1497,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
       add_debug_info_for_null_check_here(op->info());
       __ null_check(obj, -1);
     }
-    __ jump(*op->stub()->entry());
+    __ j(*op->stub()->entry());
   } else if (op->code() == lir_lock) {
     assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
     // add debug info for NullPointerException only if one is possible
@@ -1659,7 +1659,7 @@ void LIR_Assembler::check_conflict(ciKlass* exact_klass, intptr_t current_klass,
       __ sd(t1, mdo_addr);
 
       if (TypeEntries::is_type_none(current_klass)) {
-        __ jump(next);
+        __ j(next);
 
        __ bind(none);
        // first time here. Set profile type.
@@ -1729,7 +1729,7 @@ void LIR_Assembler::check_null(Register tmp, Label &update, intptr_t current_kla
     __ sd(t1, mdo_addr);
   }
 
   if (do_update) {
-    __ jump(next);
+    __ j(next);
   }
 }
diff --git a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp
index e2e2c31627e94..2961b1a91ceab 100644
--- a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp
@@ -162,7 +162,7 @@ void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, i
   if (UseTLAB) {
     tlab_allocate(obj, var_size_in_bytes, con_size_in_bytes, tmp1, tmp2, slow_case, /* is_far */ true);
   } else {
-    jump(slow_case);
+    j(slow_case);
   }
 }
@@ -252,7 +252,7 @@ void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register
     la(t0, Address(obj, hdr_size_in_bytes + remainder * BytesPerWord));
     Label entry_point, loop;
-    jump(entry_point);
+    j(entry_point);
 
     bind(loop);
     sub(index, index, 1);
diff --git a/src/hotspot/cpu/riscv/c2_CodeStubs_riscv.cpp b/src/hotspot/cpu/riscv/c2_CodeStubs_riscv.cpp
index 426405daa7823..7995750aba96b 100644
--- a/src/hotspot/cpu/riscv/c2_CodeStubs_riscv.cpp
+++ b/src/hotspot/cpu/riscv/c2_CodeStubs_riscv.cpp
@@ -62,7 +62,7 @@ void C2EntryBarrierStub::emit(C2_MacroAssembler& masm) {
   __ bind(entry());
   __ rt_call(StubRoutines::method_entry_barrier());
 
-  __ jump(continuation());
+  __ j(continuation());
 
   // make guard value 4-byte aligned so that it can be accessed by atomic instructions on RISC-V
   __ align(4);
@@ -96,7 +96,7 @@ void C2HandleAnonOMOwnerStub::emit(C2_MacroAssembler& masm) {
 #endif
   __ sw(t, Address(xthread, JavaThread::lock_stack_top_offset()));
-  __ jump(continuation());
+  __ j(continuation());
 }
 
 #undef __
diff --git a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp
index 5f3cf07ac77f9..9670bc987a304 100644
--- a/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/c2_MacroAssembler_riscv.cpp
@@ -73,7 +73,7 @@ void C2_MacroAssembler::fast_lock(Register objectReg, Register boxReg,
 
   if (LockingMode == LM_MONITOR) {
     mv(flag, 1); // Set non-zero flag to indicate 'failure' -> take slow-path
-    jump(cont);
+    j(cont);
   } else if (LockingMode == LM_LEGACY) {
     // Set tmp to be (markWord of object | UNLOCK_VALUE).
     ori(tmp, disp_hdr, markWord::unlocked_value);
@@ -105,7 +105,7 @@ void C2_MacroAssembler::fast_lock(Register objectReg, Register boxReg,
     andr(tmp/*==0?*/, disp_hdr, tmp);
     sd(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
     mv(flag, tmp); // we can use the value of tmp as the result here
-    jump(cont);
+    j(cont);
   } else {
     assert(LockingMode == LM_LIGHTWEIGHT, "");
     Label slow;
@@ -113,10 +113,10 @@ void C2_MacroAssembler::fast_lock(Register objectReg, Register boxReg,
 
     // Indicate success on completion.
     mv(flag, zr);
-    jump(count);
+    j(count);
     bind(slow);
     mv(flag, 1); // Set non-zero flag to indicate 'failure' -> take slow-path
-    jump(no_count);
+    j(no_count);
   }
 
   // Handle existing monitor.
@@ -187,7 +187,7 @@ void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg,
 
   if (LockingMode == LM_MONITOR) {
     mv(flag, 1); // Set non-zero flag to indicate 'failure' -> take slow path
-    jump(cont);
+    j(cont);
   } else if (LockingMode == LM_LEGACY) {
     // Check if it is still a light weight lock, this is true if we
     // see the stack address of the basicLock in the markWord of the
@@ -196,7 +196,7 @@ void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg,
     cmpxchg(/*memory address*/oop, /*expected value*/box, /*new value*/disp_hdr, Assembler::int64, Assembler::relaxed,
             Assembler::rl, /*result*/tmp);
     xorr(flag, box, tmp); // box == tmp if cas succeeds
-    jump(cont);
+    j(cont);
   } else {
     assert(LockingMode == LM_LIGHTWEIGHT, "");
     Label slow;
@@ -204,10 +204,10 @@ void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg,
 
     // Indicate success on completion.
     mv(flag, zr);
-    jump(count);
+    j(count);
     bind(slow);
     mv(flag, 1); // Set non-zero flag to indicate 'failure' -> take slow path
-    jump(no_count);
+    j(no_count);
   }
 
   assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
@@ -237,7 +237,7 @@ void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg,
     addi(disp_hdr, disp_hdr, -1);
     sd(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset()));
     mv(flag, zr);
-    jump(cont);
+    j(cont);
 
     bind(notRecursive);
     ld(flag, Address(tmp, ObjectMonitor::EntryList_offset()));
@@ -284,7 +284,7 @@ void C2_MacroAssembler::string_indexof_char_short(Register str1, Register cnt1,
     ble(t0, cnt1, LOOP8);
     addi(t0, index, 4);
     ble(t0, cnt1, LOOP4);
-    jump(LOOP1);
+    j(LOOP1);
 
     bind(LOOP8);
     isL ? lbu(ch1, Address(str1, 0)) : lhu(ch1, Address(str1, 0));
@@ -306,7 +306,7 @@ void C2_MacroAssembler::string_indexof_char_short(Register str1, Register cnt1,
     addi(index, index, 8);
     addi(str1, str1, isL ? 8 : 16);
     blt(index, cnt1, LOOP);
-    jump(NOMATCH);
+    j(NOMATCH);
 
     bind(LOOP4);
     isL ? lbu(ch1, Address(str1, 0)) : lhu(ch1, Address(str1, 0));
@@ -327,31 +327,31 @@ void C2_MacroAssembler::string_indexof_char_short(Register str1, Register cnt1,
     addi(index, index, 1);
     addi(str1, str1, isL ? 1 : 2);
     blt(index, cnt1, LOOP1);
-    jump(NOMATCH);
+    j(NOMATCH);
 
     bind(MATCH1);
     addi(index, index, 1);
-    jump(MATCH);
+    j(MATCH);
 
     bind(MATCH2);
     addi(index, index, 2);
-    jump(MATCH);
+    j(MATCH);
 
     bind(MATCH3);
    addi(index, index, 3);
-    jump(MATCH);
+    j(MATCH);
 
     bind(MATCH4);
     addi(index, index, 4);
-    jump(MATCH);
+    j(MATCH);
 
     bind(MATCH5);
     addi(index, index, 5);
-    jump(MATCH);
+    j(MATCH);
 
     bind(MATCH6);
     addi(index, index, 6);
-    jump(MATCH);
+    j(MATCH);
 
     bind(MATCH7);
     addi(index, index, 7);
@@ -385,7 +385,7 @@ void C2_MacroAssembler::string_indexof_char(Register str1, Register cnt1,
     addi(t0, cnt1, isL ? -32 : -16);
     bgtz(t0, DO_LONG);
     string_indexof_char_short(str1, cnt1, ch, result, isL);
-    jump(DONE);
+    j(DONE);
 
     bind(DO_LONG);
     mv(orig_cnt, cnt1);
@@ -434,7 +434,7 @@ void C2_MacroAssembler::string_indexof_char(Register str1, Register cnt1,
     compute_match_mask(ch1, ch, match_mask, mask1, mask2);
     bnez(match_mask, HIT);
     bgtz(cnt1, CH1_LOOP);
-    jump(NOMATCH);
+    j(NOMATCH);
 
     bind(HIT);
     ctzc_bit(trailing_char, match_mask, isL, ch1, result);
@@ -449,7 +449,7 @@ void C2_MacroAssembler::string_indexof_char(Register str1, Register cnt1,
     sub(result, orig_cnt, cnt1);
     add(result, result, trailing_char);
-    jump(DONE);
+    j(DONE);
 
     bind(NOMATCH);
     mv(result, -1);
@@ -713,10 +713,10 @@ void C2_MacroAssembler::string_indexof(Register haystack, Register needle,
     load_long_misaligned(ch2, Address(result), ch1, isLL ? 1 : 2); // can use ch1 as temp register here as it will be trashed by next mv anyway
     mv(ch1, tmp6);
     if (isLL) {
-      jump(BMLOOPSTR1_AFTER_LOAD);
+      j(BMLOOPSTR1_AFTER_LOAD);
     } else {
       sub(nlen_tmp, nlen_tmp, 1); // no need to branch for UU/UL case. cnt1 >= 8
-      jump(BMLOOPSTR1_CMP);
+      j(BMLOOPSTR1_CMP);
     }
 
     bind(BMLOOPSTR1);
@@ -753,7 +753,7 @@ void C2_MacroAssembler::string_indexof(Register haystack, Register needle,
     shadd(haystack, result_tmp, haystack, result, haystack_chr_shift);
     ble(haystack, haystack_end, BMLOOPSTR2);
     add(sp, sp, ASIZE);
-    jump(NOMATCH);
+    j(NOMATCH);
 
     bind(BMLOOPSTR1_LASTCMP);
     bne(ch1, ch2, BMSKIP);
@@ -764,7 +764,7 @@ void C2_MacroAssembler::string_indexof(Register haystack, Register needle,
       srli(result, result, 1);
     }
     add(sp, sp, ASIZE);
-    jump(DONE);
+    j(DONE);
 
     bind(LINEARSTUB);
     sub(t0, needle_len, 16); // small patterns still should be handled by simple algorithm
@@ -787,11 +787,11 @@ void C2_MacroAssembler::string_indexof(Register haystack, Register needle,
       ciEnv::current()->record_failure("CodeCache is full");
       return;
     }
-    jump(DONE);
+    j(DONE);
 
     bind(NOMATCH);
     mv(result, -1);
-    jump(DONE);
+    j(DONE);
 
     bind(LINEARSEARCH);
     string_indexof_linearscan(haystack, needle, haystack_len, needle_len, tmp1, tmp2, tmp3, tmp4, -1, result, ae);
@@ -865,7 +865,7 @@ void C2_MacroAssembler::string_indexof_linearscan(Register haystack, Register ne
       bind(STR2_NEXT);
       add(hlen_neg, hlen_neg, haystack_chr_size);
       blez(hlen_neg, FIRST_LOOP);
-      jump(NOMATCH);
+      j(NOMATCH);
 
       bind(STR1_LOOP);
       add(nlen_tmp, nlen_neg, needle_chr_size);
@@ -881,7 +881,7 @@ void C2_MacroAssembler::string_indexof_linearscan(Register haystack, Register ne
       add(nlen_tmp, nlen_tmp, needle_chr_size);
      add(hlen_tmp, hlen_tmp, haystack_chr_size);
       bltz(nlen_tmp, STR1_NEXT);
-      jump(MATCH);
+      j(MATCH);
 
       bind(DOSHORT);
       if (needle_isL == haystack_isL) {
@@ -925,7 +925,7 @@ void C2_MacroAssembler::string_indexof_linearscan(Register haystack, Register ne
         beq(ch1, ch2, MATCH);
         add(hlen_neg, hlen_neg, haystack_chr_size);
         blez(hlen_neg, CH1_LOOP);
-        jump(NOMATCH);
+        j(NOMATCH);
       }
 
       if ((needle_con_cnt == -1 && needle_isL == haystack_isL) || needle_con_cnt == 2) {
@@ -959,7 +959,7 @@ void C2_MacroAssembler::string_indexof_linearscan(Register haystack, Register ne
         beq(ch1, ch2, MATCH);
         add(hlen_neg, hlen_neg, haystack_chr_size);
         blez(hlen_neg, CH1_LOOP);
-        jump(NOMATCH);
+        j(NOMATCH);
 
         BLOCK_COMMENT("} string_indexof DO2");
       }
@@ -992,14 +992,14 @@ void C2_MacroAssembler::string_indexof_linearscan(Register haystack, Register ne
         bind(STR2_NEXT);
         add(hlen_neg, hlen_neg, haystack_chr_size);
         blez(hlen_neg, FIRST_LOOP);
-        jump(NOMATCH);
+        j(NOMATCH);
 
         bind(STR1_LOOP);
         add(hlen_tmp, hlen_neg, 2 * haystack_chr_size);
         add(ch2, haystack, hlen_tmp);
         (this->*haystack_load_1chr)(ch2, Address(ch2), noreg);
         bne(ch1, ch2, STR2_NEXT);
-        jump(MATCH);
+        j(MATCH);
 
         BLOCK_COMMENT("} string_indexof DO3");
       }
@@ -1025,7 +1025,7 @@ void C2_MacroAssembler::string_indexof_linearscan(Register haystack, Register ne
 
     bind(NOMATCH);
     mv(result, -1);
-    jump(DONE);
+    j(DONE);
 
     bind(MATCH);
     srai(t0, hlen_neg, haystack_chr_shift);
@@ -1197,7 +1197,7 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
       andi(tmp2, tmp2, 0xFFFF);
     }
     sub(result, tmp1, tmp2);
-    jump(DONE);
+    j(DONE);
   }
 
   bind(STUB);
@@ -1225,7 +1225,7 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
     ciEnv::current()->record_failure("CodeCache is full");
     return;
   }
-  jump(DONE);
+  j(DONE);
 
   bind(SHORT_STRING);
   // Is the minimum length zero?
@@ -1238,7 +1238,7 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
   beqz(cnt2, SHORT_LAST_INIT);
   (this->*str2_load_chr)(cnt1, Address(str2), t0);
   addi(str2, str2, str2_chr_size);
-  jump(SHORT_LOOP_START);
+  j(SHORT_LOOP_START);
 
   bind(SHORT_LOOP);
   addi(cnt2, cnt2, -1);
   beqz(cnt2, SHORT_LAST);
@@ -1256,15 +1256,15 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
   addi(str2, str2, str2_chr_size);
   beq(tmp2, t0, SHORT_LOOP);
   sub(result, tmp2, t0);
-  jump(DONE);
+  j(DONE);
 
   bind(SHORT_LOOP_TAIL);
   sub(result, tmp1, cnt1);
-  jump(DONE);
+  j(DONE);
 
   bind(SHORT_LAST2);
   beq(tmp2, t0, DONE);
   sub(result, tmp2, t0);
-  jump(DONE);
+  j(DONE);
 
   bind(SHORT_LAST_INIT);
   (this->*str2_load_chr)(cnt1, Address(str2), t0);
   addi(str2, str2, str2_chr_size);
@@ -1327,14 +1327,14 @@ void C2_MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3,
      add(a2, a2, 2 * wordSize);
       ble(cnt1, elem_per_word, TAIL2);
     }
     beq(tmp1, tmp2, NEXT_DWORD);
-    jump(DONE);
+    j(DONE);
 
     bind(TAIL);
     xorr(tmp4, tmp3, tmp4);
     xorr(tmp2, tmp1, tmp2);
     sll(tmp2, tmp2, tmp5);
     orr(tmp5, tmp4, tmp2);
-    jump(IS_TMP5_ZR);
+    j(IS_TMP5_ZR);
 
     bind(TAIL2);
     bne(tmp1, tmp2, DONE);
@@ -1404,7 +1404,7 @@ void C2_MacroAssembler::string_equals(Register a1, Register a2,
       add(tmp2, a2, cnt1);
       ld(tmp2, Address(tmp2, 0));
       bne(tmp1, tmp2, DONE);
-      jump(SAME);
+      j(SAME);
     } else {
       add(tmp1, cnt1, wordSize);
       beqz(tmp1, SAME);
@@ -1678,7 +1678,7 @@ void C2_MacroAssembler::minmax_fp(FloatRegister dst, FloatRegister src1, FloatRe
   beqz(t0, Compare);
   is_double ? fadd_d(dst, src1, src2)
             : fadd_s(dst, src1, src2);
-  jump(Done);
+  j(Done);
 
   bind(Compare);
   if (is_double) {
@@ -1747,7 +1747,7 @@ void C2_MacroAssembler::round_double_mode(FloatRegister dst, FloatRegister src,
   fcvt_d_l(dst, tmp1, rm);
   // Add sign of input value to result for +/- 0 cases
   fsgnj_d(dst, dst, src);
-  jump(done);
+  j(done);
 
   // If got conversion overflow return src
   bind(bad_val);
@@ -1797,7 +1797,7 @@ static void float16_to_float_slow_path(C2_MacroAssembler& masm, C2GeneralStub
   __ beqz(pre_val_reg, *stub->continuation(), /* is_far */ true);
   ce->store_parameter(stub->pre_val()->as_register(), 0);
   __ far_call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
-  __ jump(*stub->continuation());
+  __ j(*stub->continuation());
 }
 
 void G1BarrierSetAssembler::gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) {
@@ -345,7 +345,7 @@ void G1BarrierSetAssembler::gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarri
   __ beqz(new_val_reg, *stub->continuation(), /* is_far */ true);
   ce->store_parameter(stub->addr()->as_pointer_register(), 0);
   __ far_call(RuntimeAddress(bs->post_barrier_c1_runtime_code_blob()->code_begin()));
-  __ jump(*stub->continuation());
+  __ j(*stub->continuation());
 }
 
 #undef __
@@ -388,7 +388,7 @@ void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler*
   __ add(tmp, tmp, t1);
   __ load_parameter(0, t1);
   __ sd(t1, Address(tmp, 0));
-  __ jump(done);
+  __ j(done);
 
   __ bind(runtime);
   __ push_call_clobbered_registers();
@@ -458,7 +458,7 @@ void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler*
   __ ld(buffer_addr, buffer);
   __ add(t0, buffer_addr, t0);
   __ sd(card_addr, Address(t0, 0));
-  __ jump(done);
+  __ j(done);
 
   __ bind(runtime);
   __ push_call_clobbered_registers();
diff --git a/src/hotspot/cpu/riscv/gc/shared/barrierSetAssembler_riscv.cpp b/src/hotspot/cpu/riscv/gc/shared/barrierSetAssembler_riscv.cpp
index aba3902b80ab5..1ac1fc18f1ee0 100644
--- a/src/hotspot/cpu/riscv/gc/shared/barrierSetAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/gc/shared/barrierSetAssembler_riscv.cpp
@@ -310,7 +310,7 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slo
 
     __ rt_call(StubRoutines::method_entry_barrier());
 
-    __ jump(skip_barrier);
+    __ j(skip_barrier);
 
     __ bind(local_guard);
 
@@ -319,7 +319,7 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slo
     __ bind(skip_barrier);
   } else {
     __ beq(t0, t1, *continuation);
-    __ jump(*slow_path);
+    __ j(*slow_path);
     __ bind(*continuation);
   }
 }
diff --git a/src/hotspot/cpu/riscv/gc/shenandoah/shenandoahBarrierSetAssembler_riscv.cpp b/src/hotspot/cpu/riscv/gc/shenandoah/shenandoahBarrierSetAssembler_riscv.cpp
index 82264442c44cc..26d60441c2d2c 100644
--- a/src/hotspot/cpu/riscv/gc/shenandoah/shenandoahBarrierSetAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/gc/shenandoah/shenandoahBarrierSetAssembler_riscv.cpp
@@ -146,7 +146,7 @@ void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
 
   // Record the previous value
   __ sd(pre_val, Address(tmp1, 0));
-  __ jump(done);
+  __ j(done);
 
   __ bind(runtime);
   // save the live input values
@@ -527,7 +527,7 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm,
   } else {
     __ mv(result, 1);
   }
-  __ jump(done);
+  __ j(done);
 
   __ bind(fail);
   if (is_cae) {
@@ -563,7 +563,7 @@ void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, Shen
   __ beqz(pre_val_reg, *stub->continuation(), /* is_far */ true);
   ce->store_parameter(stub->pre_val()->as_register(), 0);
   __ far_call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
-  __ jump(*stub->continuation());
+  __ j(*stub->continuation());
 }
 
 void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce,
@@ -615,7 +615,7 @@ void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assemble
     __ far_call(RuntimeAddress(bs->load_reference_barrier_phantom_rt_code_blob()->code_begin()));
   }
 
-  __ jump(*stub->continuation());
+  __ j(*stub->continuation());
 }
 
 #undef __
@@ -655,7 +655,7 @@ void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAss
   __ add(tmp, tmp, t1);
   __ load_parameter(0, t1);
   __ sd(t1, Address(tmp, 0));
-  __ jump(done);
+  __ j(done);
 
   __ bind(runtime);
   __ push_call_clobbered_registers();
diff --git a/src/hotspot/cpu/riscv/gc/x/xBarrierSetAssembler_riscv.cpp b/src/hotspot/cpu/riscv/gc/x/xBarrierSetAssembler_riscv.cpp
index 5a54db43bc620..7306492970b81 100644
--- a/src/hotspot/cpu/riscv/gc/x/xBarrierSetAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/gc/x/xBarrierSetAssembler_riscv.cpp
@@ -344,7 +344,7 @@ void XBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, X
   }
 
   // Stub exit
-  __ jump(*stub->continuation());
+  __ j(*stub->continuation());
 }
 
 #endif // COMPILER2
@@ -413,7 +413,7 @@ void XBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce,
   }
 
   // Stub exit
-  __ jump(*stub->continuation());
+  __ j(*stub->continuation());
 }
 
 #undef __
diff --git a/src/hotspot/cpu/riscv/gc/x/x_riscv.ad b/src/hotspot/cpu/riscv/gc/x/x_riscv.ad
index 7b31e6f28f461..3d0273109ace3 100644
--- a/src/hotspot/cpu/riscv/gc/x/x_riscv.ad
+++ b/src/hotspot/cpu/riscv/gc/x/x_riscv.ad
@@ -45,7 +45,7 @@ static void x_load_barrier(MacroAssembler& _masm, const MachNode* node, Address
 
 static void x_load_barrier_slow_path(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register ref, Register tmp) {
   XLoadBarrierStubC2* const stub = XLoadBarrierStubC2::create(node, ref_addr, ref, tmp, XLoadBarrierStrong);
-  __ jump(*stub->entry());
+  __ j(*stub->entry());
   __ bind(*stub->continuation());
 }
diff --git a/src/hotspot/cpu/riscv/gc/z/zBarrierSetAssembler_riscv.cpp b/src/hotspot/cpu/riscv/gc/z/zBarrierSetAssembler_riscv.cpp
index 3aa3abc35c1d9..d0a281442250e 100644
--- a/src/hotspot/cpu/riscv/gc/z/zBarrierSetAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/gc/z/zBarrierSetAssembler_riscv.cpp
@@ -154,7 +154,7 @@ void ZBarrierSetAssembler::load_at(MacroAssembler* masm,
   }
 
   // Slow-path has already uncolored
-  __ jump(done);
+  __ j(done);
 
   __ bind(uncolor);
 
@@ -271,9 +271,9 @@ void ZBarrierSetAssembler::store_barrier_medium(MacroAssembler* masm,
   // The reason to end up in the medium path is that the pre-value was not 'good'.
   if (is_native) {
-    __ jump(slow_path);
+    __ j(slow_path);
     __ bind(slow_path_continuation);
-    __ jump(medium_path_continuation);
+    __ j(medium_path_continuation);
   } else if (is_atomic) {
     // Atomic accesses can get to the medium fast path because the value was a
     // raw null value. If it was not null, then there is no doubt we need to take a slow path.
@@ -292,7 +292,7 @@ void ZBarrierSetAssembler::store_barrier_medium(MacroAssembler* masm,
                              rtmp3);
     __ beqz(rtmp3, slow_path);
     __ bind(slow_path_continuation);
-    __ jump(medium_path_continuation);
+    __ j(medium_path_continuation);
   } else {
     // A non-atomic relocatable object wont't get to the medium fast path due to a
     // raw null in the young generation. We only get here because the field is bad.
@@ -304,7 +304,7 @@ void ZBarrierSetAssembler::store_barrier_medium(MacroAssembler* masm,
                             rtmp2,
                             slow_path);
     __ bind(slow_path_continuation);
-    __ jump(medium_path_continuation);
+    __ j(medium_path_continuation);
   }
 }
@@ -343,7 +343,7 @@ void ZBarrierSetAssembler::store_at(MacroAssembler* masm,
     Label slow_continuation;
     store_barrier_fast(masm, dst, val, tmp1, tmp2, false, false, medium, medium_continuation);
 
-    __ jump(done);
+    __ j(done);
 
     __ bind(medium);
     store_barrier_medium(masm,
                          dst,
@@ -364,7 +364,7 @@ void ZBarrierSetAssembler::store_at(MacroAssembler* masm,
      __ MacroAssembler::call_VM_leaf(ZBarrierSetRuntime::store_barrier_on_oop_field_without_healing_addr(), 1);
     }
 
-    __ jump(slow_continuation);
+    __ j(slow_continuation);
   }
 
   __ bind(done);
 }
@@ -468,7 +468,7 @@ static void copy_store_barrier(MacroAssembler* masm,
     __ beqz(tmp1, done);
 
     store_barrier_buffer_add(masm, src, tmp1, tmp2, slow);
-    __ jump(done);
+    __ j(done);
 
     __ bind(slow);
     {
@@ -568,7 +568,7 @@ void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm,
 
   // Resolve local handle
   __ ld(robj, robj);
-  __ jump(done);
+  __ j(done);
 
   __ bind(tagged);
 
@@ -582,7 +582,7 @@ void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm,
   __ ld(tmp, tmp);
   __ andr(tmp, robj, tmp);
   __ bnez(tmp, slowpath);
-  __ jump(uncolor);
+  __ j(uncolor);
 
   __ bind(weak_tagged);
 
@@ -788,7 +788,7 @@ void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, Z
   }
 
   // Stub exit
-  __ jump(*stub->continuation());
+  __ j(*stub->continuation());
 }
 
 void ZBarrierSetAssembler::generate_c2_store_barrier_stub(MacroAssembler* masm, ZStoreBarrierStubC2* stub) const {
@@ -827,7 +827,7 @@ void ZBarrierSetAssembler::generate_c2_store_barrier_stub(MacroAssembler* masm,
   }
 
   // Stub exit
-  __ jump(slow_continuation);
+  __ j(slow_continuation);
 }
 
 #undef __
@@ -876,7 +876,7 @@ void ZBarrierSetAssembler::generate_c1_load_barrier(LIR_Assembler* ce,
   Label good;
   check_color(ce, ref, on_non_strong);
   __ beqz(t0, good);
-  __ jump(*stub->entry());
+  __ j(*stub->entry());
 
   __ bind(good);
   z_uncolor(ce, ref);
@@ -935,7 +935,7 @@ void ZBarrierSetAssembler::generate_c1_load_barrier_stub(LIR_Assembler* ce,
   }
 
   // Stub exit
-  __ jump(*stub->continuation());
+  __ j(*stub->continuation());
 }
 
 #undef __
@@ -1029,7 +1029,7 @@ void ZBarrierSetAssembler::generate_c1_store_barrier_stub(LIR_Assembler* ce,
   __ addi(sp, sp, 16);
 
   // Stub exit
-  __ jump(slow_continuation);
+  __ j(slow_continuation);
 }
 
 #undef __
@@ -1067,7 +1067,7 @@ void ZBarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Registe
 
   // Uncolor presumed zpointer
   __ srli(obj, obj, ZPointerLoadShift);
-  __ jump(check_zaddress);
+  __ j(check_zaddress);
 
   __ bind(check_oop);
 
diff --git a/src/hotspot/cpu/riscv/gc/z/z_riscv.ad b/src/hotspot/cpu/riscv/gc/z/z_riscv.ad
index da8bd0214949e..29de823402980 100644
--- a/src/hotspot/cpu/riscv/gc/z/z_riscv.ad
+++ b/src/hotspot/cpu/riscv/gc/z/z_riscv.ad
@@ -70,7 +70,7 @@ static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address
   Label good;
   check_color(_masm, ref, on_non_strong, tmp);
   __ beqz(tmp, good);
-  __ jump(*stub->entry());
+  __ j(*stub->entry());
 
   __ bind(good);
   z_uncolor(_masm, node, ref);
diff --git a/src/hotspot/cpu/riscv/interp_masm_riscv.cpp b/src/hotspot/cpu/riscv/interp_masm_riscv.cpp
index 5e5744a6ce005..19d665bd421d0 100644
--- a/src/hotspot/cpu/riscv/interp_masm_riscv.cpp
+++ b/src/hotspot/cpu/riscv/interp_masm_riscv.cpp
@@ -65,19 +65,19 @@ void InterpreterMacroAssembler::narrow(Register result) {
   bne(t0, t1, notBool);
   andi(result, result, 0x1);
-  jump(done);
+  j(done);
 
   bind(notBool);
   mv(t1, T_BYTE);
   bne(t0, t1, notByte);
   sign_extend(result, result, 8);
-  jump(done);
+  j(done);
 
   bind(notByte);
   mv(t1, T_CHAR);
   bne(t0, t1, notChar);
   zero_extend(result, result, 16);
-  jump(done);
+  j(done);
 
   bind(notChar);
   sign_extend(result, result, 16);
@@ -88,7 +88,7 @@ void InterpreterMacroAssembler::narrow(Register result) {
 
 void InterpreterMacroAssembler::jump_to_entry(address entry) {
   assert(entry != nullptr, "Entry must have been generated by now");
-  jump(entry);
+  j(entry);
 }
 
 void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread) {
@@ -543,7 +543,7 @@ void InterpreterMacroAssembler::remove_activation(
   Label slow_path;
   Label fast_path;
   safepoint_poll(slow_path, true /* at_return */, false /* acquire */, false /* in_nmethod */);
-  jump(fast_path);
+  j(fast_path);
 
   bind(slow_path);
   push(state);
@@ -599,7 +599,7 @@ void InterpreterMacroAssembler::remove_activation(
      call_VM(noreg, CAST_FROM_FN_PTR(address,
                                      InterpreterRuntime::new_illegal_monitor_state_exception));
    }
-    jump(unlocked);
+    j(unlocked);
   }
 
   bind(unlock);
@@ -631,7 +631,7 @@ void InterpreterMacroAssembler::remove_activation(
     la(x9, monitor_block_bot);  // points to word before bottom of
                                 // monitor block
-    jump(entry);
+    j(entry);
 
     // Entry already locked, need to throw exception
     bind(exception);
@@ -658,7 +658,7 @@ void InterpreterMacroAssembler::remove_activation(
                               InterpreterRuntime::new_illegal_monitor_state_exception));
       }
 
-      jump(restart);
+      j(restart);
     }
 
     bind(loop);
@@ -765,7 +765,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
    if (LockingMode == LM_LIGHTWEIGHT) {
      ld(tmp, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
      lightweight_lock(obj_reg, tmp, tmp2, tmp3, slow_case);
-      jump(count);
+      j(count);
    } else if (LockingMode == LM_LEGACY) {
      // Load (object->mark() | 1) into swap_reg
      ld(t0, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
@@ -809,7 +809,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
              CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg);
    }
 
-    jump(done);
+    j(done);
 
    bind(count);
    increment(Address(xthread, JavaThread::held_monitor_count_offset()));
@@ -879,7 +879,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
      test_bit(t0, header_reg, exact_log2(markWord::monitor_value));
      bnez(t0, slow_case);
      lightweight_unlock(obj_reg, header_reg, swap_reg, tmp_reg, slow_case);
-      jump(count);
+      j(count);
 
      bind(slow_case);
    } else if (LockingMode == LM_LEGACY) {
@@ -898,7 +898,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
      sd(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset())); // restore obj
      call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
 
-      jump(done);
+      j(done);
 
      bind(count);
      decrement(Address(xthread, JavaThread::held_monitor_count_offset()));
@@ -1187,7 +1187,7 @@ void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
      Label not_null;
      // We are making a call.  Increment the count for null receiver.
      increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
-      jump(skip_receiver_profile);
+      j(skip_receiver_profile);
      bind(not_null);
    }
@@ -1251,7 +1251,7 @@ void InterpreterMacroAssembler::record_item_in_profile_helper(Register item, Reg
      // The item is item[n].  Increment count[n].
      int count_offset = in_bytes(item_count_offset_fn(row));
      increment_mdp_data_at(mdp, count_offset);
-      jump(done);
+      j(done);
      bind(next_test);
 
      if (test_for_null_also) {
@@ -1263,7 +1263,7 @@ void InterpreterMacroAssembler::record_item_in_profile_helper(Register item, Reg
        // Item did not match any saved item and there is no empty row for it.
        // Increment total counter to indicate polymorphic case.
        increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
-          jump(done);
+          j(done);
        bind(found_null);
        break;
      }
@@ -1289,7 +1289,7 @@ void InterpreterMacroAssembler::record_item_in_profile_helper(Register item, Reg
    mv(reg2, DataLayout::counter_increment);
    set_mdp_data_at(mdp, count_offset, reg2);
    if (start_row > 0) {
-      jump(done);
+      j(done);
    }
  }
@@ -1376,7 +1376,7 @@ void InterpreterMacroAssembler::profile_ret(Register return_bci, Register mdp) {
      // The method data pointer needs to be updated to reflect the new target.
      update_mdp_by_offset(mdp, in_bytes(RetData::bci_displacement_offset(row)));
-      jump(profile_continue);
+      j(profile_continue);
      bind(next_test);
    }
@@ -1557,7 +1557,7 @@ void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
   lwu(tmp2, mask);
   andr(tmp1, tmp1, tmp2);
   bnez(tmp1, done);
-  jump(*where); // offset is too large so we have to use j instead of beqz here
+  j(*where); // offset is too large so we have to use j instead of beqz here
   bind(done);
 }
@@ -1622,7 +1622,7 @@ void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& md
   bnez(obj, update);
   orptr(mdo_addr, TypeEntries::null_seen, t0, tmp);
-  jump(next);
+  j(next);
 
   bind(update);
   load_klass(obj, obj);
@@ -1652,7 +1652,7 @@ void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& md
   // different than before. Cannot keep accurate profile.
   orptr(mdo_addr, TypeEntries::type_unknown, t0, tmp);
-  jump(next);
+  j(next);
 
   bind(none);
   // first time here. Set profile type.
diff --git a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
index c1ad4d059deb9..71ce5c907c72a 100644
--- a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
@@ -569,7 +569,7 @@ void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp
   // Resolve local handle
   access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp1, tmp2);
   verify_oop(value);
-  jump(done);
+  j(done);
 
   bind(tagged);
   // Test for jweak tag.
@@ -581,7 +581,7 @@ void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp
   access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2);
   verify_oop(value);
-  jump(done);
+  j(done);
 
   bind(weak_tagged);
   // Resolve jweak.
@@ -901,7 +901,7 @@ void MacroAssembler::jump_link_relocate(const Address &adr, Register ra_reg, Reg
   }
 }
 
-void MacroAssembler::jump(const address dest, Register temp) {
+void MacroAssembler::j(const address dest, Register temp) {
   assert(CodeCache::contains(dest), "Must be");
   assert_cond(dest != nullptr);
   int64_t distance = dest - pc();
@@ -916,21 +916,21 @@
   }
 }
 
-void MacroAssembler::jump(Label &lab, Register temp) {
+void MacroAssembler::j(Label &lab, Register temp) {
   assert_different_registers(x0, temp);
   if (lab.is_bound()) {
-    MacroAssembler::jump(target(lab), temp);
+    MacroAssembler::j(target(lab), temp);
   } else {
     lab.add_patch_at(code(), locator());
-    MacroAssembler::jump(pc(), temp);
+    MacroAssembler::j(pc(), temp);
   }
 }
 
-void MacroAssembler::jump(const Address &adr, Register temp) {
+void MacroAssembler::j(const Address &adr, Register temp) {
   switch (adr.getMode()) {
     case Address::literal: {
       relocate(adr.rspec(), [&] {
-        jump(adr.target(), temp);
+        j(adr.target(), temp);
       });
       break;
     }
@@ -970,7 +970,7 @@ void MacroAssembler::wrap_label(Register r1, Register r2, Label &L,
   if (is_far) {
     Label done;
     (this->*neg_insn)(r1, r2, done, /* is_far */ false);
-    jump(L);
+    j(L);
     bind(done);
   } else {
     if (L.is_bound()) {
@@ -2612,7 +2612,7 @@ void MacroAssembler::lookup_interface_method_stub(Register recv_klass,
   beq(holder_klass, temp_itbl_klass, L_holder_found);
   bnez(temp_itbl_klass, L_search_holder);
-  jump(L_no_such_interface);
+  j(L_no_such_interface);
 
   // Loop: Look for resolved_class record in itable
   //   while (true) {
@@ -2637,7 +2637,7 @@ void MacroAssembler::lookup_interface_method_stub(Register recv_klass,
   beq(resolved_klass, temp_itbl_klass, L_resolved_found);
   bne(holder_klass, temp_itbl_klass, L_loop_search_resolved);
   mv(holder_offset, scan_temp);
-  jump(L_loop_search_resolved);
+  j(L_loop_search_resolved);
 
   // See if we already have a holder klass. If not, go and scan for it.
   bind(L_resolved_found);
@@ -2756,7 +2756,7 @@ void MacroAssembler::cmpxchgptr(Register oldv, Register newv, Register addr, Reg
     store_conditional(tmp, newv, addr, int64, Assembler::rl);
     beqz(tmp, succeed);
     // Retry only when the store conditional failed
-    jump(retry_load);
+    j(retry_load);
 
     bind(nope);
   }
@@ -2766,7 +2766,7 @@ void MacroAssembler::cmpxchgptr(Register oldv, Register newv, Register addr, Reg
   mv(oldv, tmp);
   if (fail != nullptr) {
-    jump(*fail);
+    j(*fail);
   }
 }
@@ -2888,7 +2888,7 @@ void MacroAssembler::cmpxchg_narrow_value(Register addr, Register expected,
 
   if (result_as_bool) {
     mv(result, 1);
-    jump(done);
+    j(done);
 
     bind(fail);
     mv(result, zr);
@@ -2949,7 +2949,7 @@ void MacroAssembler::weak_cmpxchg_narrow_value(Register addr, Register expected,
 
   // Success
   mv(result, 1);
-  jump(done);
+  j(done);
 
   // Fail
   bind(fail);
@@ -2994,7 +2994,7 @@ void MacroAssembler::cmpxchg(Register addr, Register expected,
   } else {
     mv(result, expected);
   }
-  jump(done);
+  j(done);
 
   // not equal, failed
   bind(ne_done);
@@ -3029,7 +3029,7 @@ void MacroAssembler::cmpxchg_weak(Register addr, Register expected,
 
   // Success
   mv(result, 1);
-  jump(done);
+  j(done);
 
   // Fail
   bind(fail);
@@ -3227,7 +3227,7 @@ void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
   // Hacked jmp, which may only be used just before L_fallthrough.
 #define final_jmp(label)                                                \
   if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
-  else jump(label)             /*omit semi*/
+  else j(label)                /*omit semi*/
 
   // If the pointers are equal, we are done (e.g., String[] elements).
   // This self-check enables sharing of secondary supertype arrays among
@@ -3373,7 +3373,7 @@ void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
   sd(super_klass, super_cache_addr);
 
   if (L_success != &L_fallthrough) {
-    jump(*L_success);
+    j(*L_success);
   }
 
 #undef IS_A_TEMP
@@ -3829,7 +3829,7 @@ void MacroAssembler::mul_add(Register out, Register in, Register offset,
   sw(t0, Address(offset, 0));
   srli(out, t0, 32);
   subw(len, len, 1);
-  jump(L_tail_loop);
+  j(L_tail_loop);
   bind(L_end);
 }
@@ -3967,15 +3967,15 @@ void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register
     shadd(t0, kdx, z, t0, LogBytesPerInt);
     sd(product, Address(t0, 0));
-    jump(L_first_loop);
+    j(L_first_loop);
 
   bind(L_one_y);
   lwu(y_idx, Address(y, 0));
-  jump(L_multiply);
+  j(L_multiply);
 
   bind(L_one_x);
   lwu(x_xstart, Address(x, 0));
-  jump(L_first_loop);
+  j(L_first_loop);
 
   bind(L_first_loop_exit);
 }
@@ -4052,7 +4052,7 @@ void MacroAssembler::multiply_128_x_128_loop(Register y, Register z,
     sd(tmp4, Address(tmp6, 0));
     sd(tmp3, Address(tmp6, wordSize));
-    jump(L_third_loop);
+    j(L_third_loop);
 
   bind(L_third_loop_exit);
@@ -4193,7 +4193,7 @@ void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Regi
     shadd(t0, xstart, z, t0, LogBytesPerInt);
     sw(carry, Address(t0, 0));
-    jump(L_second_loop_unaligned);
+    j(L_second_loop_unaligned);
   }
 
   bind(L_multiply_64_x_64_loop);
@@ -4274,12 +4274,12 @@ void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Regi
     srli(carry, carry, 32);
     shadd(t0, tmp3, z, t0, LogBytesPerInt);
     sw(carry, Address(t0, 0));
-    jump(L_second_loop_aligned);
+    j(L_second_loop_aligned);
 
   // Next infrequent code is moved outside loops.
   bind(L_last_x);
   lwu(product_hi, Address(x, 0));
-  jump(L_third_loop_prologue);
+  j(L_third_loop_prologue);
 
   bind(L_done);
 }
diff --git a/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp b/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp
index 3c64eeb1f52f8..4b5f6ea6dfe40 100644
--- a/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp
+++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp
@@ -594,9 +594,9 @@ class MacroAssembler: public Assembler {
   void jump_link_relocate(const Address &adr, Register ra_reg, Register temp);
 
  public:
-  void jump(const address dest, Register temp = t0);
-  void jump(Label &l, Register temp = t0);
-  void jump(const Address &adr, Register temp = t0);
+  void j(const address dest, Register temp = t0);
+  void j(Label &l, Register temp = t0);
+  void j(const Address &adr, Register temp = t0);
 
   void call(const address dest, Register temp = t0) {
     assert_cond(dest != nullptr);
diff --git a/src/hotspot/cpu/riscv/riscv.ad b/src/hotspot/cpu/riscv/riscv.ad
index c9f09b124e72b..10a80cd094024 100644
--- a/src/hotspot/cpu/riscv/riscv.ad
+++ b/src/hotspot/cpu/riscv/riscv.ad
@@ -2316,7 +2316,7 @@ encode %{
  enc_class riscv_enc_j(label lbl) %{
    C2_MacroAssembler _masm(&cbuf);
    Label* L = $lbl$$label;
-    __ jump(*L);
+    __ j(*L);
  %}
 
  enc_class riscv_enc_far_cmpULtGe_imm0_branch(cmpOpULtGe cmp, iRegIorL op1, label lbl) %{
@@ -2324,7 +2324,7 @@
    Label* L = $lbl$$label;
    switch ($cmp$$cmpcode) {
      case(BoolTest::ge):
-        __ jump(*L);
+        __ j(*L);
        break;
      case(BoolTest::lt):
        break;
@@ -2351,7 +2351,7 @@ encode %{
      __ mv(result_reg, zr);
    } else {
      __ mv(cr_reg, zr);
-      __ jump(done);
+      __ j(done);
    }
 
    __ bind(miss);
@@ -9014,7 +9014,7 @@ instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
    Label Lsrc1, Ldone;
    __ ble(as_Register($src1$$reg), as_Register($src2$$reg), Lsrc1);
    __ mv(as_Register($dst$$reg), as_Register($src2$$reg));
-    __ jump(Ldone);
+    __ j(Ldone);
    __ bind(Lsrc1);
    __ mv(as_Register($dst$$reg), as_Register($src1$$reg));
    __ bind(Ldone);
@@ -9043,7 +9043,7 @@ instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2)
    Label Lsrc1, Ldone;
    __ bge(as_Register($src1$$reg), as_Register($src2$$reg), Lsrc1);
    __ mv(as_Register($dst$$reg), as_Register($src2$$reg));
-    __ jump(Ldone);
+    __ j(Ldone);
    __ bind(Lsrc1);
    __ mv(as_Register($dst$$reg), as_Register($src1$$reg));
    __ bind(Ldone);
diff --git a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
index 12b3afd1f0b53..7435b552d15de 100644
--- a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
+++ b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
@@ -972,7 +972,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
     oop_maps->add_gc_map(__ pc() - start, map);
     __ post_call_nop();
 
-    __ jump(exit);
+    __ j(exit);
 
     CodeBuffer* cbuf = masm->code_section()->outer();
     address stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, tr_call);
@@ -1005,7 +1005,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
   oop_maps->add_gc_map(__ pc() - start, map);
   __ post_call_nop();
 
-  __ jump(exit);
+  __ j(exit);
 
   __ bind(call_thaw);
 
@@ -1651,7 +1651,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     __ ld(obj_reg, Address(oop_handle_reg, 0));
 
     if (LockingMode == LM_MONITOR) {
-      __ jump(slow_path_lock);
+      __ j(slow_path_lock);
     } else if (LockingMode == LM_LEGACY) {
       // Load (object->mark() | 1) into swap_reg % x10
       __ ld(t0, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
@@ -1781,7 +1781,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
      __ ld(t0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
      __ bnez(t0, not_recursive);
      __ decrement(Address(xthread, JavaThread::held_monitor_count_offset()));
-      __ jump(done);
+      __ j(done);
    }
 
    __ bind(not_recursive);
@@ -1792,7 +1792,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
    }
 
    if (LockingMode == LM_MONITOR) {
-      __ jump(slow_path_unlock);
+      __ j(slow_path_unlock);
    } else if (LockingMode == LM_LEGACY) {
      // get address of the stack lock
      __ la(x10, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
@@ -1896,7 +1896,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
      __ bind(L);
    }
 #endif
-    __ jump(lock_done);
+    __ j(lock_done);
 
    __ block_comment("} Slow path lock");
 
@@ -1933,7 +1933,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
    if (ret_type == T_FLOAT || ret_type == T_DOUBLE) {
      restore_native_result(masm, ret_type, stack_slots);
    }
-    __ jump(unlock_done);
+    __ j(unlock_done);
 
    __ block_comment("} Slow path unlock");
 
@@ -1946,7 +1946,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
    __ rt_call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
    restore_native_result(masm, ret_type, stack_slots);
    // and continue
-    __ jump(reguard_done);
+    __ j(reguard_done);
 
    // SLOW PATH safepoint
    {
@@ -1966,7 +1966,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
      // Restore any method result value
      restore_native_result(masm, ret_type, stack_slots);
 
-      __ jump(safepoint_in_progress_done);
+      __ j(safepoint_in_progress_done);
      __ block_comment("} safepoint");
    }
@@ -1984,7 +1984,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                            CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
                            xthread, c_rarg1);
    restore_args(masm, total_c_args, c_arg, out_regs);
-    __ jump(dtrace_method_entry_done);
+    __ j(dtrace_method_entry_done);
    __ block_comment("} dtrace entry");
  }
@@ -1997,7 +1997,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
                            CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
                            xthread, c_rarg1);
    restore_native_result(masm, ret_type, stack_slots);
-    __ jump(dtrace_method_exit_done);
+    __ j(dtrace_method_exit_done);
    __ block_comment("} dtrace exit");
  }
@@ -2088,7 +2088,7 @@ void SharedRuntime::generate_deopt_blob() {
 
  // Normal deoptimization.  Save exec mode for unpack_frames.
  __ mv(xcpool, Deoptimization::Unpack_deopt); // callee-saved
-  __ jump(cont);
+  __ j(cont);
 
  int reexecute_offset = __ pc() - start;
 #if INCLUDE_JVMCI && !defined(COMPILER1)
@@ -2105,7 +2105,7 @@ void SharedRuntime::generate_deopt_blob() {
  (void) reg_saver.save_live_registers(masm, 0, &frame_size_in_words);
 
  __ mv(xcpool, Deoptimization::Unpack_reexecute); // callee-saved
-  __ jump(cont);
+  __ j(cont);
 
 #if INCLUDE_JVMCI
  Label after_fetch_unroll_info_call;
@@ -2139,7 +2139,7 @@ void SharedRuntime::generate_deopt_blob() {
 
    __ reset_last_Java_frame(false);
 
-    __ jump(after_fetch_unroll_info_call);
+    __ j(after_fetch_unroll_info_call);
  } // EnableJVMCI
 #endif // INCLUDE_JVMCI
diff --git a/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp b/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp
index 7033fab4a4392..4bd33d08f8928 100644
--- a/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp
+++ b/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp
@@ -432,15 +432,15 @@ class StubGenerator: public StubCodeGenerator {
 
    __ BIND(is_long);
    __ sd(x10, Address(j_rarg2, 0));
-    __ jump(exit);
+    __ j(exit);
 
    __ BIND(is_float);
    __ fsw(j_farg0, Address(j_rarg2, 0), t0);
-    __ jump(exit);
+    __ j(exit);
 
    __ BIND(is_double);
    __ fsd(j_farg0, Address(j_rarg2, 0), t0);
-    __ jump(exit);
+    __ j(exit);
 
    return start;
  }
@@ -490,7 +490,7 @@ class StubGenerator: public StubCodeGenerator {
    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != nullptr,
           "_call_stub_return_address must have been generated before");
-    __ jump(StubRoutines::_call_stub_return_address);
+    __ j(StubRoutines::_call_stub_return_address);
 
    return start;
  }
@@ -886,7 +886,7 @@ class StubGenerator: public StubCodeGenerator {
      __ bnez(cnt, loop_forward);
 
      if (is_backward) {
-        __ jump(done);
+        __ j(done);
 
        __ bind(loop_backward);
        __ sub(t0, cnt, vl);
@@ -945,7 +945,7 @@ class StubGenerator: public StubCodeGenerator {
      __ bgez(t0, copy32_loop);
      __ addi(t0, cnt, -8);
      __ bgez(t0, copy8_loop, is_far);
-      __ jump(copy_small);
+      __ j(copy_small);
    } else {
      __ mv(t0, 16);
      __ blt(cnt, t0, copy_small, is_far);
@@ -969,7 +969,7 @@ class StubGenerator: public StubCodeGenerator {
      }
      __ addi(cnt, cnt, -granularity);
      __ beqz(cnt, done, is_far);
-      __ jump(same_aligned);
+      __ j(same_aligned);
 
      __ bind(copy_big);
      __ mv(t0, 32);
@@ -1060,7 +1060,7 @@ class StubGenerator: public StubCodeGenerator {
        __ decode_heap_oop(temp); // calls verify_oop
      }
      __ add(t1, t1, size);
-      __ jump(loop);
+      __ j(loop);
      __ bind(end);
    }
@@ -1171,7 +1171,7 @@ class StubGenerator: public StubCodeGenerator {
    __ slli(t1, count, exact_log2(size));
    Label L_continue;
    __ bltu(t0, t1, L_continue);
-    __ jump(nooverlap_target);
+    __ j(nooverlap_target);
    __ bind(L_continue);
 
    DecoratorSet decorators = IN_HEAP | IS_ARRAY;
@@ -1543,7 +1543,7 @@ class StubGenerator: public StubCodeGenerator {
    // Copy from low to high addresses
    __ mv(start_to, to);              // Save destination array start address
-    __ jump(L_load_element);
+    __ j(L_load_element);
 
    // ======== begin loop ========
    // (Loop is rotated; its entry is L_load_element.)
@@ -1671,17 +1671,17 @@ class StubGenerator: public StubCodeGenerator {
    __ beqz(t0, L_int_aligned);
    __ test_bit(t0, t0, 0);
    __ beqz(t0, L_short_aligned);
-    __ jump(RuntimeAddress(byte_copy_entry));
+    __ j(RuntimeAddress(byte_copy_entry));
 
    __ BIND(L_short_aligned);
    __ srli(count, count, LogBytesPerShort);  // size => short_count
-    __ jump(RuntimeAddress(short_copy_entry));
+    __ j(RuntimeAddress(short_copy_entry));
 
    __ BIND(L_int_aligned);
    __ srli(count, count, LogBytesPerInt);    // size => int_count
-    __ jump(RuntimeAddress(int_copy_entry));
+    __ j(RuntimeAddress(int_copy_entry));
 
    __ BIND(L_long_aligned);
    __ srli(count, count, LogBytesPerLong);   // size => long_count
-    __ jump(RuntimeAddress(long_copy_entry));
+    __ j(RuntimeAddress(long_copy_entry));
 
    return start;
  }
@@ -1862,13 +1862,13 @@ class StubGenerator: public StubCodeGenerator {
    __ add(from, src, src_pos); // src_addr
    __ add(to, dst, dst_pos); // dst_addr
    __ sign_extend(count, scratch_length, 32); // length
-    __ jump(RuntimeAddress(byte_copy_entry));
+    __ j(RuntimeAddress(byte_copy_entry));
 
  __ BIND(L_copy_shorts);
    __ shadd(from, src_pos, src, t0, 1); // src_addr
    __ shadd(to, dst_pos, dst, t0, 1); // dst_addr
    __ sign_extend(count, scratch_length, 32); // length
-    __ jump(RuntimeAddress(short_copy_entry));
+    __ j(RuntimeAddress(short_copy_entry));
 
  __ BIND(L_copy_ints);
    __ test_bit(t0, x30_elsize, 0);
@@ -1876,7 +1876,7 @@ class StubGenerator: public StubCodeGenerator {
    __ shadd(from, src_pos, src, t0, 2); // src_addr
    __ shadd(to, dst_pos, dst, t0, 2); // dst_addr
    __ sign_extend(count, scratch_length, 32); // length
-    __ jump(RuntimeAddress(int_copy_entry));
+    __ j(RuntimeAddress(int_copy_entry));
 
  __ BIND(L_copy_longs);
 #ifdef ASSERT
@@ -1895,7 +1895,7 @@ class StubGenerator: public StubCodeGenerator {
    __ shadd(from, src_pos, src, t0, 3); // src_addr
    __ shadd(to, dst_pos, dst, t0, 3); // dst_addr
    __ sign_extend(count, scratch_length, 32); // length
-    __ jump(RuntimeAddress(long_copy_entry));
+    __ j(RuntimeAddress(long_copy_entry));
 
    // ObjArrayKlass
  __ BIND(L_objArray);
@@ -1916,7 +1916,7 @@ class StubGenerator: public StubCodeGenerator {
    __ add(to, to, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
    __ sign_extend(count, scratch_length, 32); // length
  __ BIND(L_plain_copy);
-    __ jump(RuntimeAddress(oop_copy_entry));
+    __ j(RuntimeAddress(oop_copy_entry));
 
  __ BIND(L_checkcast_copy);
    // live at this point:  scratch_src_klass, scratch_length, t2 (dst_klass)
@@ -1958,7 +1958,7 @@ class StubGenerator: public StubCodeGenerator {
      assert(c_rarg3 == sco_temp, "#3 already in place");
      // Set up arguments for checkcast_copy_entry.
      __ mv(c_rarg4, dst_klass);  // dst.klass.element_klass
-      __ jump(RuntimeAddress(checkcast_copy_entry));
+      __ j(RuntimeAddress(checkcast_copy_entry));
    }
 
  __ BIND(L_failed);
@@ -2378,7 +2378,7 @@ class StubGenerator: public StubCodeGenerator {
      __ mv(tmpL, tmp3);
      __ xorr(tmp3, tmpU, tmpL);
      __ bnez(tmp3, CALCULATE_DIFFERENCE);
-      __ jump(DONE); // no character left
+      __ j(DONE); // no character left
 
      // Find the first different characters in the longwords and
      // compute their difference.
@@ -2521,7 +2521,7 @@ class StubGenerator: public StubCodeGenerator {
      __ andi(cnt1, cnt1, 0xFFFF);
    }
    __ sub(result, tmp5, cnt1);
-    __ jump(LENGTH_DIFF);
+    __ j(LENGTH_DIFF);
    __ bind(DIFF);
    __ ctzc_bit(tmp3, tmp4, isLL); // count zero from lsb to msb
    __ srl(tmp1, tmp1, tmp3);
@@ -2534,7 +2534,7 @@ class StubGenerator: public StubCodeGenerator {
      __ andi(tmp2, tmp2, 0xFFFF);
    }
    __ sub(result, tmp1, tmp2);
-    __ jump(LENGTH_DIFF);
+    __ j(LENGTH_DIFF);
    __ bind(LAST_CHECK_AND_LENGTH_DIFF);
    __ xorr(tmp4, tmp1, tmp2);
    __ bnez(tmp4, DIFF);
@@ -2646,7 +2646,7 @@ class StubGenerator: public StubCodeGenerator {
    __ sub(match_mask, ch2, mask1);
    __ orr(ch2, ch2, mask2);
    __ mv(trailing_zeros, -1); // all bits set
-    __ jump(L_SMALL_PROCEED);
+    __ j(L_SMALL_PROCEED);
 
    __ align(OptoLoopAlignment);
    __ bind(L_SMALL);
@@ -2691,18 +2691,18 @@ class StubGenerator: public StubCodeGenerator {
    __ addi(trailing_zeros, trailing_zeros, haystack_isL ? 7 : 15);
    __ add(result, result, 1);
    __ add(haystack, haystack, haystack_chr_size);
-    __ jump(L_SMALL_HAS_ZERO_LOOP);
+    __ j(L_SMALL_HAS_ZERO_LOOP);
 
    __ align(OptoLoopAlignment);
    __ bind(L_SMALL_CMP_LOOP_LAST_CMP);
    __ bne(first, ch2, L_SMALL_CMP_LOOP_NOMATCH);
-    __ jump(DONE);
+    __ j(DONE);
 
    __ align(OptoLoopAlignment);
    __ bind(L_SMALL_CMP_LOOP_LAST_CMP2);
    __ compute_index(haystack, trailing_zeros, match_mask, result, ch2, tmp, haystack_isL);
    __ bne(ch1, ch2, L_SMALL_CMP_LOOP_NOMATCH);
-    __ jump(DONE);
+    __ j(DONE);
 
    __ align(OptoLoopAlignment);
    __ bind(L_HAS_ZERO);
@@ -2738,19 +2738,19 @@ class StubGenerator: public StubCodeGenerator {
    __ ctzc_bit(trailing_zeros, match_mask, haystack_isL, needle_len, ch2); // find next "first" char index
    __ addi(trailing_zeros, trailing_zeros, haystack_isL ? 7 : 15);
    __ add(haystack, haystack, haystack_chr_size);
-    __ jump(L_HAS_ZERO_LOOP);
+    __ j(L_HAS_ZERO_LOOP);
 
    __ align(OptoLoopAlignment);
    __ bind(L_CMP_LOOP_LAST_CMP);
    __ bne(needle_len, ch2, L_CMP_LOOP_NOMATCH);
-    __ jump(DONE);
+    __ j(DONE);
 
    __ align(OptoLoopAlignment);
    __ bind(L_CMP_LOOP_LAST_CMP2);
    __ compute_index(haystack, trailing_zeros, match_mask, result, ch2, tmp, haystack_isL);
    __ add(result, result, 1);
    __ bne(ch1, ch2, L_CMP_LOOP_NOMATCH);
-    __ jump(DONE);
+    __ j(DONE);
 
    __ align(OptoLoopAlignment);
    __ bind(L_HAS_ZERO_LOOP_NOMATCH);
@@ -2770,7 +2770,7 @@ class StubGenerator: public StubCodeGenerator {
    __ slli(tmp, match_mask, haystack_chr_shift);
    __ sub(haystack, haystack, tmp);
    __ sign_extend(haystack_len, haystack_len, 32);
-    __ jump(L_LOOP_PROCEED);
+    __ j(L_LOOP_PROCEED);
 
    __ align(OptoLoopAlignment);
    __ bind(NOMATCH);
diff --git a/src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp b/src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp
index b90728917ab3c..2fa3e2a9953fe 100644
--- a/src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp
+++ b/src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp
@@ -121,7 +121,7 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
      __ test_bit(t0, c_rarg3, i);
      __ bnez(t0, d);
      __ flw(r, Address(sp, (10 + i) * wordSize));
-      __ jump(done);
+      __ j(done);
      __ bind(d);
      __ fld(r, Address(sp, (10 + i) * wordSize));
      __ bind(done);
@@ -421,7 +421,7 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(
               c_rarg1, c_rarg2);
  }
  // throw exception
-  __ jump(address(Interpreter::throw_exception_entry()));
+  __ j(address(Interpreter::throw_exception_entry()));
 
  return entry;
 }
@@ -564,7 +564,7 @@ void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) {
in_bytes(InvocationCounter::counter_offset())); const Address mask(x10, in_bytes(MethodData::invoke_mask_offset())); __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, t0, t1, false, overflow); - __ jump(done); + __ j(done); } __ bind(no_mdo); // Increment counter in MethodCounters @@ -581,7 +581,7 @@ void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) __ mv(c_rarg1, zr); __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), c_rarg1); - __ jump(do_continue); + __ j(do_continue); } // See if we've got enough room on the stack for locals plus overhead @@ -1745,10 +1745,10 @@ void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, address& vep) { assert(t != nullptr && t->is_valid() && t->tos_in() == vtos, "illegal template"); Label L; - aep = __ pc(); __ push_ptr(); __ jump(L); - fep = __ pc(); __ push_f(); __ jump(L); - dep = __ pc(); __ push_d(); __ jump(L); - lep = __ pc(); __ push_l(); __ jump(L); + aep = __ pc(); __ push_ptr(); __ j(L); + fep = __ pc(); __ push_f(); __ j(L); + dep = __ pc(); __ push_d(); __ j(L); + lep = __ pc(); __ push_l(); __ j(L); bep = cep = sep = iep = __ pc(); __ push_i(); vep = __ pc(); diff --git a/src/hotspot/cpu/riscv/templateTable_riscv.cpp b/src/hotspot/cpu/riscv/templateTable_riscv.cpp index 227fe36b0db69..58f57f32b2f65 100644 --- a/src/hotspot/cpu/riscv/templateTable_riscv.cpp +++ b/src/hotspot/cpu/riscv/templateTable_riscv.cpp @@ -201,7 +201,7 @@ void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg, __ bnez(temp_reg, L_fast_patch); // Let breakpoint table handling rewrite to quicker bytecode __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), xmethod, xbcp, bc_reg); - __ jump(L_patch_done); + __ j(L_patch_done); __ bind(L_fast_patch); } @@ -342,7 +342,7 @@ void TemplateTable::ldc(LdcType type) { call_VM(x10, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), c_rarg1); __ push_ptr(x10); __ verify_oop(x10); - __ jump(Done); + __ j(Done); __ bind(notClass); __ mv(t1, (u1)JVM_CONSTANT_Float); @@ -352,7 +352,7 @@ void TemplateTable::ldc(LdcType type) { __ shadd(x11, x11, x12, x11, 3); __ flw(f10, Address(x11, base_offset)); __ push_f(f10); - __ jump(Done); + __ j(Done); __ bind(notFloat); @@ -363,7 +363,7 @@ void TemplateTable::ldc(LdcType type) { __ shadd(x11, x11, x12, x11, 3); __ lw(x10, Address(x11, base_offset)); __ push_i(x10); - __ jump(Done); + __ j(Done); __ bind(notInt); condy_helper(Done); @@ -439,7 +439,7 @@ void TemplateTable::ldc2_w() { __ shadd(x12, x10, x11, x12, 3); __ fld(f10, Address(x12, base_offset)); __ push_d(f10); - __ jump(Done); + __ j(Done); __ bind(notDouble); __ mv(t1, (int)JVM_CONSTANT_Long); @@ -449,7 +449,7 @@ void TemplateTable::ldc2_w() { __ shadd(x10, x10, x11, x10, 3); __ ld(x10, Address(x10, base_offset)); __ push_l(x10); - __ jump(Done); + __ j(Done); __ bind(notLong); condy_helper(Done); @@ -491,7 +491,7 @@ void TemplateTable::condy_helper(Label& Done) { // itos __ lw(x10, field); __ push(itos); - __ jump(Done); + __ j(Done); __ bind(notInt); __ mv(t1, ftos); @@ -499,7 +499,7 @@ void TemplateTable::condy_helper(Label& Done) { // ftos __ load_float(field); __ push(ftos); - __ jump(Done); + __ j(Done); __ bind(notFloat); __ mv(t1, stos); @@ -507,7 +507,7 @@ void TemplateTable::condy_helper(Label& Done) { // stos __ load_signed_short(x10, field); __ push(stos); - __ jump(Done); + __ j(Done); __ bind(notShort); __ mv(t1, btos); @@ -515,7 +515,7 @@ void 
diff --git a/src/hotspot/cpu/riscv/templateTable_riscv.cpp b/src/hotspot/cpu/riscv/templateTable_riscv.cpp
index 227fe36b0db69..58f57f32b2f65 100644
--- a/src/hotspot/cpu/riscv/templateTable_riscv.cpp
+++ b/src/hotspot/cpu/riscv/templateTable_riscv.cpp
@@ -201,7 +201,7 @@ void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
     __ bnez(temp_reg, L_fast_patch);
     // Let breakpoint table handling rewrite to quicker bytecode
     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), xmethod, xbcp, bc_reg);
-    __ jump(L_patch_done);
+    __ j(L_patch_done);

     __ bind(L_fast_patch);
   }
@@ -342,7 +342,7 @@ void TemplateTable::ldc(LdcType type) {
   call_VM(x10, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), c_rarg1);
   __ push_ptr(x10);
   __ verify_oop(x10);
-  __ jump(Done);
+  __ j(Done);

   __ bind(notClass);
   __ mv(t1, (u1)JVM_CONSTANT_Float);
@@ -352,7 +352,7 @@
   __ shadd(x11, x11, x12, x11, 3);
   __ flw(f10, Address(x11, base_offset));
   __ push_f(f10);
-  __ jump(Done);
+  __ j(Done);

   __ bind(notFloat);

@@ -363,7 +363,7 @@
   __ shadd(x11, x11, x12, x11, 3);
   __ lw(x10, Address(x11, base_offset));
   __ push_i(x10);
-  __ jump(Done);
+  __ j(Done);

   __ bind(notInt);
   condy_helper(Done);
@@ -439,7 +439,7 @@ void TemplateTable::ldc2_w() {
   __ shadd(x12, x10, x11, x12, 3);
   __ fld(f10, Address(x12, base_offset));
   __ push_d(f10);
-  __ jump(Done);
+  __ j(Done);

   __ bind(notDouble);
   __ mv(t1, (int)JVM_CONSTANT_Long);
@@ -449,7 +449,7 @@
   __ shadd(x10, x10, x11, x10, 3);
   __ ld(x10, Address(x10, base_offset));
   __ push_l(x10);
-  __ jump(Done);
+  __ j(Done);

   __ bind(notLong);
   condy_helper(Done);
@@ -491,7 +491,7 @@ void TemplateTable::condy_helper(Label& Done) {
       // itos
       __ lw(x10, field);
       __ push(itos);
-      __ jump(Done);
+      __ j(Done);

       __ bind(notInt);
       __ mv(t1, ftos);
@@ -499,7 +499,7 @@ void TemplateTable::condy_helper(Label& Done) {
       // ftos
       __ load_float(field);
       __ push(ftos);
-      __ jump(Done);
+      __ j(Done);

       __ bind(notFloat);
       __ mv(t1, stos);
@@ -507,7 +507,7 @@ void TemplateTable::condy_helper(Label& Done) {
       // stos
       __ load_signed_short(x10, field);
       __ push(stos);
-      __ jump(Done);
+      __ j(Done);

       __ bind(notShort);
       __ mv(t1, btos);
@@ -515,7 +515,7 @@ void TemplateTable::condy_helper(Label& Done) {
       // btos
       __ load_signed_byte(x10, field);
       __ push(btos);
-      __ jump(Done);
+      __ j(Done);

       __ bind(notByte);
       __ mv(t1, ctos);
@@ -523,7 +523,7 @@ void TemplateTable::condy_helper(Label& Done) {
       // ctos
       __ load_unsigned_short(x10, field);
       __ push(ctos);
-      __ jump(Done);
+      __ j(Done);

       __ bind(notChar);
       __ mv(t1, ztos);
@@ -531,7 +531,7 @@ void TemplateTable::condy_helper(Label& Done) {
       // ztos
       __ load_signed_byte(x10, field);
       __ push(ztos);
-      __ jump(Done);
+      __ j(Done);

       __ bind(notBool);
       break;
@@ -544,7 +544,7 @@ void TemplateTable::condy_helper(Label& Done) {
       // ltos
       __ ld(x10, field);
       __ push(ltos);
-      __ jump(Done);
+      __ j(Done);

       __ bind(notLong);
       __ mv(t1, dtos);
@@ -552,7 +552,7 @@ void TemplateTable::condy_helper(Label& Done) {
       // dtos
       __ load_double(field);
       __ push(dtos);
-      __ jump(Done);
+      __ j(Done);

       __ bind(notDouble);
       break;
@@ -1086,7 +1086,7 @@ void TemplateTable::aastore() {

   // Come here on failure
   // object is at TOS
-  __ jump(Interpreter::_throw_ArrayStoreException_entry);
+  __ j(Interpreter::_throw_ArrayStoreException_entry);

   // Come here on success
   __ bind(ok_is_subtype);
@@ -1095,7 +1095,7 @@
   __ ld(x10, at_tos());
   // Now store using the appropriate barrier
   do_oop_store(_masm, element_address, x10, IS_ARRAY);
-  __ jump(done);
+  __ j(done);

   // Have a null in x10, x13=array, x12=index.  Store null at ary[idx]
   __ bind(is_null);
@@ -1705,7 +1705,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
       __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
                                  x10, t0, false,
                                  UseOnStackReplacement ? &backedge_counter_overflow : &dispatch);
-      __ jump(dispatch);
+      __ j(dispatch);
     }
     __ bind(no_mdo);
     // Increment backedge counter in MethodCounters*
@@ -1923,7 +1923,7 @@ void TemplateTable::tableswitch() {
   __ bind(default_case);
   __ profile_switch_default(x10);
   __ lwu(x13, Address(x11, 0));
-  __ jump(continue_execution);
+  __ j(continue_execution);
 }

 void TemplateTable::lookupswitch() {
@@ -1944,7 +1944,7 @@ void TemplateTable::fast_linearswitch() {
   // set counter
   __ lwu(x11, Address(x9, BytesPerInt));
   __ revb_w(x11, x11);
-  __ jump(loop_entry);
+  __ j(loop_entry);
   // table search
   __ bind(loop);
   __ shadd(t0, x11, x9, t0, 3);
@@ -1956,7 +1956,7 @@
   // default case
   __ profile_switch_default(x10);
   __ lwu(x13, Address(x9, 0));
-  __ jump(continue_execution);
+  __ j(continue_execution);
   // entry found -> get offset
   __ bind(found);
   __ shadd(t0, x11, x9, t0, 3);
@@ -2021,7 +2021,7 @@

   // And start
   Label entry;
-  __ jump(entry);
+  __ j(entry);

   // binary search loop
   {
@@ -2041,7 +2041,7 @@
     __ bge(key, temp, L_greater);
     // if [key < array[h].fast_match()] then j = h
     __ mv(j, h);
-    __ jump(L_done);
+    __ j(L_done);
     __ bind(L_greater);
     // if [key >= array[h].fast_match()] then i = h
     __ mv(i, h);
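Note: fast_binaryswitch above is a textbook lower-bound binary search over lookupswitch's sorted (match, offset) pairs; the renamed j(entry) is the loop-entry jump and j(L_done) is the early exit from the narrowing step. The same loop in plain C++ (the Pair layout is assumed for illustration):

    #include <cstdint>

    struct Pair { int32_t match; int32_t offset; };

    // i and j bracket the candidate range; h = (i + j) / 2 narrows it,
    // exactly as the comments in the template describe.
    int32_t lookup(const Pair* pairs, int n, int32_t key, int32_t default_offset) {
      int i = 0, j = n;
      while (i + 1 < j) {
        int h = (i + j) >> 1;   // i < h < j
        if (key < pairs[h].match) {
          j = h;                // if [key < array[h].fast_match()] then j = h
        } else {
          i = h;                // if [key >= array[h].fast_match()] then i = h
        }
      }
      return (n > 0 && pairs[i].match == key) ? pairs[i].offset : default_offset;
    }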
@@ -2351,11 +2351,11 @@ void TemplateTable::load_resolved_method_entry_interface(Register cache,
   __ test_bit(t0, flags, ResolvedMethodEntry::is_vfinal_shift);
   __ beqz(t0, NotVFinal);
   __ ld(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
-  __ jump(Done);
+  __ j(Done);

   __ bind(NotVFinal);
   __ load_unsigned_short(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::table_index_offset())));
-  __ jump(Done);
+  __ j(Done);

   __ bind(NotVirtual);
   __ ld(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
@@ -2379,7 +2379,7 @@ void TemplateTable::load_resolved_method_entry_virtual(Register cache,
   __ test_bit(t0, flags, ResolvedMethodEntry::is_vfinal_shift);
   __ beqz(t0, NotVFinal);
   __ ld(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::method_offset())));
-  __ jump(Done);
+  __ j(Done);

   __ bind(NotVFinal);
   __ load_unsigned_short(method_or_table_index, Address(cache, in_bytes(ResolvedMethodEntry::table_index_offset())));
@@ -2540,7 +2540,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
   if (rc == may_rewrite) {
     patch_bytecode(Bytecodes::_fast_bgetfield, bc, x11);
   }
-  __ jump(Done);
+  __ j(Done);

   __ bind(notByte);
   __ sub(t0, tos_state, (u1)ztos);
@@ -2554,7 +2554,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
     // uses btos rewriting, no truncating to t/f bit is needed for getfield
     patch_bytecode(Bytecodes::_fast_bgetfield, bc, x11);
   }
-  __ jump(Done);
+  __ j(Done);

   __ bind(notBool);
   __ sub(t0, tos_state, (u1)atos);
@@ -2565,7 +2565,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
   if (rc == may_rewrite) {
     patch_bytecode(Bytecodes::_fast_agetfield, bc, x11);
   }
-  __ jump(Done);
+  __ j(Done);

   __ bind(notObj);
   __ sub(t0, tos_state, (u1)itos);
@@ -2578,7 +2578,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
   if (rc == may_rewrite) {
     patch_bytecode(Bytecodes::_fast_igetfield, bc, x11);
   }
-  __ jump(Done);
+  __ j(Done);

   __ bind(notInt);
   __ sub(t0, tos_state, (u1)ctos);
@@ -2590,7 +2590,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
   if (rc == may_rewrite) {
     patch_bytecode(Bytecodes::_fast_cgetfield, bc, x11);
   }
-  __ jump(Done);
+  __ j(Done);

   __ bind(notChar);
   __ sub(t0, tos_state, (u1)stos);
@@ -2602,7 +2602,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
   if (rc == may_rewrite) {
     patch_bytecode(Bytecodes::_fast_sgetfield, bc, x11);
   }
-  __ jump(Done);
+  __ j(Done);

   __ bind(notShort);
   __ sub(t0, tos_state, (u1)ltos);
@@ -2614,7 +2614,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
   if (rc == may_rewrite) {
     patch_bytecode(Bytecodes::_fast_lgetfield, bc, x11);
   }
-  __ jump(Done);
+  __ j(Done);

   __ bind(notLong);
   __ sub(t0, tos_state, (u1)ftos);
@@ -2626,7 +2626,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
   if (rc == may_rewrite) {
     patch_bytecode(Bytecodes::_fast_fgetfield, bc, x11);
   }
-  __ jump(Done);
+  __ j(Done);

   __ bind(notFloat);
 #ifdef ASSERT
@@ -2641,7 +2641,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
     patch_bytecode(Bytecodes::_fast_dgetfield, bc, x11);
   }
 #ifdef ASSERT
-  __ jump(Done);
+  __ j(Done);

   __ bind(notDouble);
   __ stop("Bad state");
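Note: the trailing j(Done) in getfield_or_static exists only under #ifdef ASSERT: in product builds dtos is the last possible state, so the double load simply falls through to Done, while debug builds keep one more state check and branch over a stop("Bad state") trap. A sketch of the same control shape (ASSERT here stands in for HotSpot's debug-build macro; the intermediate states are elided):

    #include <cstdio>
    #include <cstdlib>

    enum TosState { btos, dtos /* other states elided */ };

    void getfield_dispatch(TosState tos) {
      if (tos != btos) goto notByte;
      std::puts("load byte");
      goto Done;                          // "__ j(Done)"
    notByte:
      // ... itos, ctos, stos, ltos, ftos handled the same way ...
    #ifdef ASSERT
      if (tos != dtos) goto notDouble;    // debug-only state check
    #endif
      std::puts("load double");           // product build falls through to Done
    #ifdef ASSERT
      goto Done;                          // the ASSERT-only "__ j(Done)" above
    notDouble:
      std::fputs("Bad state\n", stderr);  // "__ stop(\"Bad state\")"
      std::abort();
    #endif
    Done:
      return;
    }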
@@ -2772,7 +2772,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
     if (rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_bputfield, bc, x11, true, byte_no);
     }
-    __ jump(Done);
+    __ j(Done);
   }

   __ bind(notByte);
@@ -2792,7 +2792,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
     if (rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_zputfield, bc, x11, true, byte_no);
     }
-    __ jump(Done);
+    __ j(Done);
   }

   __ bind(notBool);
@@ -2813,7 +2813,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
     if (rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_aputfield, bc, x11, true, byte_no);
     }
-    __ jump(Done);
+    __ j(Done);
   }

   __ bind(notObj);
@@ -2833,7 +2833,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
     if (rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_iputfield, bc, x11, true, byte_no);
     }
-    __ jump(Done);
+    __ j(Done);
   }

   __ bind(notInt);
@@ -2853,7 +2853,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
     if (rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_cputfield, bc, x11, true, byte_no);
     }
-    __ jump(Done);
+    __ j(Done);
   }

   __ bind(notChar);
@@ -2873,7 +2873,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
     if (rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_sputfield, bc, x11, true, byte_no);
     }
-    __ jump(Done);
+    __ j(Done);
   }

   __ bind(notShort);
@@ -2893,7 +2893,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
     if (rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_lputfield, bc, x11, true, byte_no);
     }
-    __ jump(Done);
+    __ j(Done);
   }

   __ bind(notLong);
@@ -2913,7 +2913,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
     if (rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_fputfield, bc, x11, true, byte_no);
     }
-    __ jump(Done);
+    __ j(Done);
   }

   __ bind(notFloat);
@@ -2938,7 +2938,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
     }

 #ifdef ASSERT
-    __ jump(Done);
+    __ j(Done);

     __ bind(notDouble);
     __ stop("Bad state");
@@ -3391,7 +3391,7 @@ void TemplateTable::invokeinterface(int byte_no) {
   Label subtype;
   __ check_klass_subtype(x13, x10, x14, subtype);
   // If we get here the typecheck failed
-  __ jump(no_such_interface);
+  __ j(no_such_interface);

   __ bind(subtype);
   __ profile_final_call(x10);
@@ -3571,7 +3571,7 @@ void TemplateTable::_new() {

     if (ZeroTLAB) {
       // the fields have been already cleared
-      __ jump(initialize_header);
+      __ j(initialize_header);
     }

     // The object is initialized before the header.  If the object size is
@@ -3604,7 +3604,7 @@
       __ call_VM_leaf(CAST_FROM_FN_PTR(address, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc)), x10);
       __ pop(atos); // restore the return value
     }
-    __ jump(done);
+    __ j(done);
   }

   // slow case
@@ -3668,7 +3668,7 @@ void TemplateTable::checkcast() {
   // vm_result_2 has metadata result
   __ get_vm_result_2(x10, xthread);
   __ pop_reg(x13); // restore receiver
-  __ jump(resolved);
+  __ j(resolved);

   // Get superklass in x10 and subklass in x13
   __ bind(quicked);
@@ -3685,7 +3685,7 @@
   // Come here on failure
   __ push_reg(x13);
   // object is at TOS
-  __ jump(Interpreter::_throw_ClassCastException_entry);
+  __ j(Interpreter::_throw_ClassCastException_entry);

   // Come here on success
   __ bind(ok_is_subtype);
@@ -3693,7 +3693,7 @@

   // Collect counts on whether this test sees nulls a lot or not.
   if (ProfileInterpreter) {
-    __ jump(done);
+    __ j(done);
     __ bind(is_null);
     __ profile_null_seen(x12);
   } else {
@@ -3726,7 +3726,7 @@ void TemplateTable::instanceof() {
   __ pop_reg(x13); // restore receiver
   __ verify_oop(x13);
   __ load_klass(x13, x13);
-  __ jump(resolved);
+  __ j(resolved);

   // Get superklass in x10 and subklass in x13
   __ bind(quicked);
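Note: checkcast and instanceof share the same subtype test but materialize the outcome differently: checkcast passes nulls through and jumps to the interpreter's ClassCastException entry on failure, while instanceof turns the identical test into a 0 or 1 result (in x10). A self-contained sketch, with a toy klass chain standing in for the check_klass_subtype machinery (Obj and Klass here are illustrative, not HotSpot types):

    #include <stdexcept>

    struct Klass { const Klass* super; };
    struct Obj   { const Klass* klass; };

    static bool is_subtype(const Klass* sub, const Klass* sup) {
      for (; sub != nullptr; sub = sub->super)  // walk the superclass chain
        if (sub == sup) return true;
      return false;
    }

    // checkcast: null passes through; failure jumps to the exception entry
    const Obj* do_checkcast(const Obj* obj, const Klass* sup) {
      if (obj == nullptr) return obj;           // is_null path
      if (!is_subtype(obj->klass, sup))
        throw std::runtime_error("ClassCastException");
      return obj;                               // ok_is_subtype
    }

    // instanceof: same test, but the result is just 0 or 1
    int do_instanceof(const Obj* obj, const Klass* sup) {
      return (obj != nullptr && is_subtype(obj->klass, sup)) ? 1 : 0;
    }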
@@ -3741,14 +3741,14 @@
   // Come here on failure
   __ mv(x10, zr);
-  __ jump(done);
+  __ j(done);
   // Come here on success
   __ bind(ok_is_subtype);
   __ mv(x10, 1);

   // Collect counts on whether this test sees nulls a lot or not.
   if (ProfileInterpreter) {
-    __ jump(done);
+    __ j(done);
     __ bind(is_null);
     __ profile_null_seen(x12);
   } else {
@@ -3793,7 +3793,7 @@ void TemplateTable::_breakpoint() {
 void TemplateTable::athrow() {
   transition(atos, vtos);
   __ null_check(x10);
-  __ jump(Interpreter::throw_exception_entry());
+  __ j(Interpreter::throw_exception_entry());
 }

 //-----------------------------------------------------------------------------
@@ -3840,7 +3840,7 @@ void TemplateTable::monitorenter() {

     __ la(c_rarg2, monitor_block_bot); // points to word before bottom

-    __ jump(entry);
+    __ j(entry);

     __ bind(loop);
     // check if current entry is used
@@ -3886,7 +3886,7 @@
     __ srai(t0, t0, Interpreter::logStackElementSize);
     __ sd(t0, monitor_block_bot); // set new monitor block bottom

-    __ jump(entry);
+    __ j(entry);
     // 2. move expression stack contents
     __ bind(loop);
     __ ld(c_rarg2, Address(c_rarg3, entry_size)); // load expression stack
@@ -3944,7 +3944,7 @@ void TemplateTable::monitorexit() {
     __ la(c_rarg2, monitor_block_bot); // points to word before bottom
                                        // of monitor block

-    __ jump(entry);
+    __ j(entry);

     __ bind(loop);
     // check if current entry is for same object
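Note: the j(entry) at the top of the monitorenter/monitorexit scans is the usual rotated-loop shape: jump straight to the test at the bottom, with the body laid out first so the back-edge is a conditional branch. In C terms (the slot layout, BasicObjectLock stand-in, and scan direction are illustrative, not HotSpot's actual monitor block):

    struct BasicObjectLock { void* obj; };

    // Find a free slot (obj == nullptr) in [bottom, top), or return nullptr.
    BasicObjectLock* find_free_slot(BasicObjectLock* bottom, BasicObjectLock* top) {
      BasicObjectLock* cur = bottom;
      goto entry;                           // "__ j(entry)": run the test first
    loop:
      if (cur->obj == nullptr) return cur;  // check if current entry is used
      ++cur;
    entry:
      if (cur < top) goto loop;             // "__ bind(entry)" + bottom-of-loop branch
      return nullptr;
    }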