Commit 268ba3c (merge)

Rob McKenna committed Jan 24, 2025
2 parents: 44000a9 + 53aa9f2
Showing 318 changed files with 13,028 additions and 2,435 deletions.
14 changes: 13 additions & 1 deletion src/hotspot/cpu/aarch64/compressedKlass_aarch64.cpp
@@ -24,12 +24,14 @@
*/

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/macroAssembler.hpp"
#include "logging/log.hpp"
#include "oops/compressedKlass.hpp"
#include "memory/metaspace.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/formatBuffer.hpp"

// Helper function; reserve at an address that is compatible with EOR
static char* reserve_at_eor_compatible_address(size_t size, bool aslr) {
@@ -79,6 +81,7 @@ static char* reserve_at_eor_compatible_address(size_t size, bool aslr) {
}
return result;
}

char* CompressedKlassPointers::reserve_address_space_for_compressed_classes(size_t size, bool aslr, bool optimize_for_zero_base) {

char* result = nullptr;
@@ -117,3 +120,12 @@ char* CompressedKlassPointers::reserve_address_space_for_compressed_classes(size

return result;
}

bool CompressedKlassPointers::check_klass_decode_mode(address base, int shift, const size_t range) {
return MacroAssembler::check_klass_decode_mode(base, shift, range);
}

bool CompressedKlassPointers::set_klass_decode_mode() {
const size_t range = klass_range_end() - base();
return MacroAssembler::set_klass_decode_mode(_base, _shift, range);
}
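
The new platform hooks make mode selection a pure function of (base, shift, range), so callers such as CDS can probe a candidate encoding base before committing to it. Below is a minimal self-contained model of that decision, mirroring the selection logic in macroAssembler_aarch64.cpp further down; the encodable_as_logical_imm parameter is a stand-in for operand_valid_for_logical_immediate (which is stricter in reality), and all values in main are illustrative assumptions, not HotSpot defaults.

#include <cstdint>
#include <cstdio>

enum KlassDecodeMode { KlassDecodeNone, KlassDecodeZero, KlassDecodeXor, KlassDecodeMovk };

// Smallest all-ones mask covering 'range' bytes; models right_n_bits(log2i_ceil(range)).
static uint64_t range_mask(uint64_t range) {
  int bits = 0;
  while (bits < 64 && (uint64_t{1} << bits) < range) bits++;
  return (bits == 64) ? ~uint64_t{0} : (uint64_t{1} << bits) - 1;
}

// Pure-function model of the decode-mode choice; no global state is touched.
static KlassDecodeMode pick_mode(uint64_t base, int shift, uint64_t range,
                                 bool encodable_as_logical_imm) {
  if (base == 0) return KlassDecodeZero;                  // decode needs only a shift
  if (encodable_as_logical_imm && (base & range_mask(range)) == 0)
    return KlassDecodeXor;                                // base bits disjoint from offsets
  if (((base >> shift) & 0xffff0000ffffffffULL) == 0)
    return KlassDecodeMovk;                               // shifted base fits bits 32..47
  return KlassDecodeNone;                                 // caller must try another base
}

int main() {
  // With the logical-immediate test stubbed to false, selection falls through to
  // the Movk check: base 0x800000000 (bit 35 only), shift 0, 4 GiB range.
  printf("mode = %d (3 == KlassDecodeMovk)\n",
         pick_mode(0x800000000ULL, 0, uint64_t{4} << 30, false));
  return 0;
}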
43 changes: 29 additions & 14 deletions src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
@@ -5291,32 +5291,47 @@ void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
MacroAssembler::KlassDecodeMode MacroAssembler::_klass_decode_mode(KlassDecodeNone);

MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() {
assert(UseCompressedClassPointers, "not using compressed class pointers");
assert(Metaspace::initialized(), "metaspace not initialized yet");
assert(_klass_decode_mode != KlassDecodeNone, "should be initialized");
return _klass_decode_mode;
}

if (_klass_decode_mode != KlassDecodeNone) {
return _klass_decode_mode;
}
MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode(address base, int shift, const size_t range) {
assert(UseCompressedClassPointers, "not using compressed class pointers");

// KlassDecodeMode shouldn't be set already.
assert(_klass_decode_mode == KlassDecodeNone, "set once");

if (CompressedKlassPointers::base() == nullptr) {
return (_klass_decode_mode = KlassDecodeZero);
if (base == nullptr) {
return KlassDecodeZero;
}

if (operand_valid_for_logical_immediate(
/*is32*/false, (uint64_t)CompressedKlassPointers::base())) {
const size_t range = CompressedKlassPointers::klass_range_end() - CompressedKlassPointers::base();
/*is32*/false, (uint64_t)base)) {
const uint64_t range_mask = right_n_bits(log2i_ceil(range));
if (((uint64_t)CompressedKlassPointers::base() & range_mask) == 0) {
return (_klass_decode_mode = KlassDecodeXor);
if (((uint64_t)base & range_mask) == 0) {
return KlassDecodeXor;
}
}

const uint64_t shifted_base =
(uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift();
guarantee((shifted_base & 0xffff0000ffffffff) == 0,
"compressed class base bad alignment");
(uint64_t)base >> shift;
if ((shifted_base & 0xffff0000ffffffff) == 0) {
return KlassDecodeMovk;
}

// No valid encoding.
return KlassDecodeNone;
}

// Check if one of the above decoding modes will work for given base, shift and range.
bool MacroAssembler::check_klass_decode_mode(address base, int shift, const size_t range) {
return klass_decode_mode(base, shift, range) != KlassDecodeNone;
}

return (_klass_decode_mode = KlassDecodeMovk);
bool MacroAssembler::set_klass_decode_mode(address base, int shift, const size_t range) {
_klass_decode_mode = klass_decode_mode(base, shift, range);
return _klass_decode_mode != KlassDecodeNone;
}

void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
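
For context, each mode implies a different instruction sequence when a 32-bit narrow Klass id is decoded back into a pointer: a bare shift (Zero), lsl plus eor (Xor), or movk plus lsl (Movk). The standalone sketch below models the arithmetic of those sequences, not the emitted assembly; the base and narrow-id values are assumptions chosen to satisfy the Movk constraint checked above.

#include <cstdint>
#include <cstdio>

enum KlassDecodeMode { KlassDecodeZero = 1, KlassDecodeXor, KlassDecodeMovk };

// Arithmetic model of the decode sequences the modes select between.
static uint64_t decode(uint32_t narrow, uint64_t base, int shift, KlassDecodeMode mode) {
  switch (mode) {
    case KlassDecodeZero:                          // lsl only: no base to add back
      return (uint64_t)narrow << shift;
    case KlassDecodeXor:                           // lsl + eor: base bits are disjoint
      return ((uint64_t)narrow << shift) ^ base;   // from shifted offsets, so eor == add
    case KlassDecodeMovk: {                        // movk inserts bits 32..47, then lsl
      const uint64_t shifted_base = base >> shift; // only bits 32..47 set, by construction
      return ((uint64_t)narrow | (shifted_base & 0xffff00000000ULL)) << shift;
    }
  }
  return 0;
}

int main() {
  const uint64_t base = 0x800000000ULL;            // assumed class-space base (bit 35)
  const uint32_t narrow = 0x123456;                // assumed narrow Klass id
  // Prints 0x800123456, i.e. base + narrow, recovered without an add instruction.
  printf("movk-decoded: %#llx\n",
         (unsigned long long)decode(narrow, base, 0, KlassDecodeMovk));
  return 0;
}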
13 changes: 12 additions & 1 deletion src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
@@ -94,11 +94,22 @@ class MacroAssembler: public Assembler {
KlassDecodeMovk
};

KlassDecodeMode klass_decode_mode();
// Calculate decoding mode based on given parameters, used for checking then ultimately setting.
static KlassDecodeMode klass_decode_mode(address base, int shift, const size_t range);

private:
static KlassDecodeMode _klass_decode_mode;

// Returns above setting with asserts
static KlassDecodeMode klass_decode_mode();

public:
// Checks the decode mode and returns false if not compatible with preferred decoding mode.
static bool check_klass_decode_mode(address base, int shift, const size_t range);

// Sets the decode mode and returns false if cannot be set.
static bool set_klass_decode_mode(address base, int shift, const size_t range);

public:
MacroAssembler(CodeBuffer* code) : Assembler(code) {}

14 changes: 7 additions & 7 deletions src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp
@@ -189,7 +189,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
} else {
fn = CAST_FROM_FN_PTR(address, StubRoutines::dsin());
}
__ call(fn);
__ rt_call(fn);
__ mv(ra, x9);
break;
case Interpreter::java_lang_math_cos :
@@ -202,7 +202,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
} else {
fn = CAST_FROM_FN_PTR(address, StubRoutines::dcos());
}
__ call(fn);
__ rt_call(fn);
__ mv(ra, x9);
break;
case Interpreter::java_lang_math_tan :
@@ -215,7 +215,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
} else {
fn = CAST_FROM_FN_PTR(address, StubRoutines::dtan());
}
__ call(fn);
__ rt_call(fn);
__ mv(ra, x9);
break;
case Interpreter::java_lang_math_log :
@@ -228,7 +228,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
} else {
fn = CAST_FROM_FN_PTR(address, StubRoutines::dlog());
}
__ call(fn);
__ rt_call(fn);
__ mv(ra, x9);
break;
case Interpreter::java_lang_math_log10 :
@@ -241,7 +241,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
} else {
fn = CAST_FROM_FN_PTR(address, StubRoutines::dlog10());
}
__ call(fn);
__ rt_call(fn);
__ mv(ra, x9);
break;
case Interpreter::java_lang_math_exp :
@@ -254,7 +254,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
} else {
fn = CAST_FROM_FN_PTR(address, StubRoutines::dexp());
}
__ call(fn);
__ rt_call(fn);
__ mv(ra, x9);
break;
case Interpreter::java_lang_math_pow :
@@ -268,7 +268,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
} else {
fn = CAST_FROM_FN_PTR(address, StubRoutines::dpow());
}
__ call(fn);
__ rt_call(fn);
__ mv(ra, x9);
break;
case Interpreter::java_lang_math_fmaD :
8 changes: 4 additions & 4 deletions src/hotspot/cpu/riscv/vm_version_riscv.cpp
@@ -154,10 +154,6 @@ void VM_Version::common_initialize() {
unaligned_access.value() != MISALIGNED_FAST);
}

if (FLAG_IS_DEFAULT(AlignVector)) {
FLAG_SET_DEFAULT(AlignVector, AvoidUnalignedAccesses);
}

// See JDK-8026049
// This machine has fast unaligned memory accesses
if (FLAG_IS_DEFAULT(UseUnalignedAccesses)) {
@@ -440,6 +436,10 @@ void VM_Version::c2_initialize() {
warning("AES/CTR intrinsics are not available on this CPU");
FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
}

if (FLAG_IS_DEFAULT(AlignVector)) {
FLAG_SET_DEFAULT(AlignVector, AvoidUnalignedAccesses);
}
}

#endif // COMPILER2
12 changes: 6 additions & 6 deletions src/hotspot/cpu/s390/vm_version_s390.cpp
@@ -308,6 +308,12 @@ void VM_Version::initialize() {
if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
FLAG_SET_DEFAULT(UseMontgomerySquareIntrinsic, true);
}

// The OptoScheduling information is not maintained in s390.ad.
if (OptoScheduling) {
warning("OptoScheduling is not supported on this CPU.");
FLAG_SET_DEFAULT(OptoScheduling, false);
}
#endif
if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
FLAG_SET_DEFAULT(UsePopCountInstruction, true);
@@ -323,12 +329,6 @@ void VM_Version::initialize() {
if (FLAG_IS_DEFAULT(UseUnalignedAccesses)) {
FLAG_SET_DEFAULT(UseUnalignedAccesses, true);
}

// The OptoScheduling information is not maintained in s390.ad.
if (OptoScheduling) {
warning("OptoScheduling is not supported on this CPU.");
FLAG_SET_DEFAULT(OptoScheduling, false);
}
}


2 changes: 1 addition & 1 deletion src/hotspot/cpu/x86/stubRoutines_x86.hpp
@@ -38,7 +38,7 @@ enum platform_dependent_constants {
// AVX512 intrinsics add more code in 64-bit VM,
// Windows have more code to save/restore registers
_compiler_stubs_code_size = 20000 LP64_ONLY(+47000) WINDOWS_ONLY(+2000),
_final_stubs_code_size = 10000 LP64_ONLY(+20000) WINDOWS_ONLY(+2000) ZGC_ONLY(+20000)
_final_stubs_code_size = 10000 LP64_ONLY(+20000) WINDOWS_ONLY(+22000) ZGC_ONLY(+20000)
};

class x86 {
22 changes: 17 additions & 5 deletions src/hotspot/share/cds/metaspaceShared.cpp
@@ -145,15 +145,16 @@ size_t MetaspaceShared::core_region_alignment() {
}

static bool shared_base_valid(char* shared_base) {
// We check user input for SharedBaseAddress at dump time. We must weed out values
// we already know to be invalid later.
// We check user input for SharedBaseAddress at dump time.

// At CDS runtime, "shared_base" will be the (attempted) mapping start. It will also
be the encoding base, since the headers of archived base objects (and with Lilliput,
// the prototype mark words) carry pre-computed narrow Klass IDs that refer to the mapping
// start as base.
//
// Therefore, "shared_base" must be later usable as encoding base.
// On AARCH64, the "shared_base" may not be later usable as encoding base, depending on the
// total size of the reserved area and the precomputed_narrow_klass_shift. This is checked
// before reserving memory. Here we weed out values already known to be invalid later.
return AARCH64_ONLY(is_aligned(shared_base, 4 * G)) NOT_AARCH64(true);
}

@@ -985,8 +986,10 @@ bool MetaspaceShared::try_link_class(JavaThread* current, InstanceKlass* ik) {
ik->external_name());
CLEAR_PENDING_EXCEPTION;
SystemDictionaryShared::set_class_has_failed_verification(ik);
} else {
assert(!SystemDictionaryShared::has_class_failed_verification(ik), "sanity");
ik->compute_has_loops_flag_for_methods();
}
ik->compute_has_loops_flag_for_methods();
BytecodeVerificationLocal = saved;
return true;
} else {
@@ -1486,6 +1489,15 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
const size_t total_range_size =
archive_space_size + gap_size + class_space_size;

// Test that class space base address plus shift can be decoded by aarch64, when restored.
const int precomputed_narrow_klass_shift = ArchiveBuilder::precomputed_narrow_klass_shift();
if (!CompressedKlassPointers::check_klass_decode_mode(base_address, precomputed_narrow_klass_shift,
total_range_size)) {
log_info(cds)("CDS initialization: Cannot use SharedBaseAddress " PTR_FORMAT " with precomputed shift %d.",
p2i(base_address), precomputed_narrow_klass_shift);
use_archive_base_addr = false;
}

assert(total_range_size > ccs_begin_offset, "must be");
if (use_windows_memory_mapping() && use_archive_base_addr) {
if (base_address != nullptr) {
@@ -1525,7 +1537,7 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
}

// Paranoid checks:
assert(base_address == nullptr || (address)total_space_rs.base() == base_address,
assert(!use_archive_base_addr || (address)total_space_rs.base() == base_address,
"Sanity (" PTR_FORMAT " vs " PTR_FORMAT ")", p2i(base_address), p2i(total_space_rs.base()));
assert(is_aligned(total_space_rs.base(), base_address_alignment), "Sanity");
assert(total_space_rs.size() == total_range_size, "Sanity");
4 changes: 2 additions & 2 deletions src/hotspot/share/code/codeCache.cpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -205,7 +205,7 @@ void CodeCache::initialize_heaps() {
const bool cache_size_set = FLAG_IS_CMDLINE(ReservedCodeCacheSize);
const size_t ps = page_size(false, 8);
const size_t min_size = MAX2(os::vm_allocation_granularity(), ps);
const size_t min_cache_size = CompilerConfig::min_code_cache_size(); // Make sure we have enough space for VM internal code
const size_t min_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3); // Make sure we have enough space for VM internal code
size_t cache_size = align_up(ReservedCodeCacheSize, min_size);

// Prerequisites
4 changes: 2 additions & 2 deletions src/hotspot/share/compiler/compilationPolicy.cpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2010, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2010, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -455,7 +455,7 @@ void CompilationPolicy::initialize() {
c2_size = C2Compiler::initial_code_buffer_size();
#endif
size_t buffer_size = c1_only ? c1_size : (c1_size/3 + 2*c2_size/3);
int max_count = (ReservedCodeCacheSize - (int)CompilerConfig::min_code_cache_size()) / (int)buffer_size;
int max_count = (ReservedCodeCacheSize - (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3))) / (int)buffer_size;
if (count > max_count) {
// Lower the compiler count such that all buffers fit into the code cache
count = MAX2(max_count, c1_only ? 1 : 2);
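
The replaced bound changes what is subtracted before sizing the compiler-thread pool: the cap now reserves only CodeCacheMinimumUseSpace (tripled in debug builds, where the template interpreter is roughly 3x larger) instead of the old per-compiler sum. A small standalone sketch of the clamp with assumed numbers, not HotSpot defaults:

#include <algorithm>
#include <cstdio>

int main() {
  const long K = 1024, M = 1024 * K;
  // Illustrative values only; the real ones come from VM flags and build type.
  long reserved_code_cache = 48 * M;                 // ReservedCodeCacheSize
  long minimum_use = 400 * K;                        // CodeCacheMinimumUseSpace (x3 under DEBUG_ONLY)
  long c1_size = 64 * K, c2_size = 128 * K;          // assumed initial buffer sizes
  long buffer_size = c1_size / 3 + 2 * c2_size / 3;  // blended tiered estimate
  long count = 512;                                  // deliberately oversized thread count
  long max_count = (reserved_code_cache - minimum_use) / buffer_size;
  if (count > max_count) {
    count = std::max(max_count, 2L);                 // keep at least one C1 and one C2 thread
  }
  printf("buffer_size=%ld max_count=%ld count=%ld\n", buffer_size, max_count, count);
  return 0;
}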
5 changes: 3 additions & 2 deletions src/hotspot/share/compiler/compilerDefinitions.cpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -475,7 +475,8 @@ void CompilerConfig::set_jvmci_specific_flags() {

bool CompilerConfig::check_args_consistency(bool status) {
// Check lower bounds of the code cache
size_t min_code_cache_size = CompilerConfig::min_code_cache_size();
// Template Interpreter code is approximately 3X larger in debug builds.
uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
if (ReservedCodeCacheSize < InitialCodeCacheSize) {
jio_fprintf(defaultStream::error_stream(),
"Invalid ReservedCodeCacheSize: %dK. Must be at least InitialCodeCacheSize=%dK.\n",
4 changes: 1 addition & 3 deletions src/hotspot/share/compiler/compilerDefinitions.hpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -148,8 +148,6 @@ class CompilerConfig : public AllStatic {
inline static bool is_c2_or_jvmci_compiler_only();
inline static bool is_c2_or_jvmci_compiler_enabled();

inline static size_t min_code_cache_size();

private:
static bool is_compilation_mode_selected();
static void set_compilation_policy_flags();
17 changes: 1 addition & 16 deletions src/hotspot/share/compiler/compilerDefinitions.inline.hpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,12 +25,6 @@
#ifndef SHARE_COMPILER_COMPILERDEFINITIONS_INLINE_HPP
#define SHARE_COMPILER_COMPILERDEFINITIONS_INLINE_HPP

#ifdef COMPILER1
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#endif
#include "compiler/compilerDefinitions.hpp"

#include "compiler/compiler_globals.hpp"
@@ -138,13 +132,4 @@ inline bool CompilerConfig::is_c2_or_jvmci_compiler_enabled() {
return is_c2_enabled() || is_jvmci_compiler_enabled();
}

inline size_t CompilerConfig::min_code_cache_size() {
size_t min_code_cache_size = CodeCacheMinimumUseSpace;
// Template Interpreter code is approximately 3X larger in debug builds.
DEBUG_ONLY(min_code_cache_size *= 3);
COMPILER1_PRESENT(min_code_cache_size += Compiler::code_buffer_size());
COMPILER2_PRESENT(min_code_cache_size += C2Compiler::initial_code_buffer_size());
return min_code_cache_size;
}

#endif // SHARE_COMPILER_COMPILERDEFINITIONS_INLINE_HPP