From 3892078094735be9d8074d23ce3d70201cd60445 Mon Sep 17 00:00:00 2001
From: Aleksey Shipilev <shade@openjdk.org>
Date: Thu, 2 May 2024 08:27:12 +0000
Subject: [PATCH 01/19] 8324121: SIGFPE in PhaseIdealLoop::extract_long_range_checks

8329163: C2: possible overflow in PhaseIdealLoop::extract_long_range_checks()

Backport-of: cb2a6713596548d76c03912709656172b0bbcc76
---
 src/hotspot/share/opto/loopnode.cpp           | 17 +++---
 src/hotspot/share/opto/loopnode.hpp           |  4 +-
 .../TestLargeScaleInLongRCOverflow.java       | 59 +++++++++++++++++++
 3 files changed, 71 insertions(+), 9 deletions(-)
 create mode 100644 test/hotspot/jtreg/compiler/rangechecks/TestLargeScaleInLongRCOverflow.java

diff --git a/src/hotspot/share/opto/loopnode.cpp b/src/hotspot/share/opto/loopnode.cpp
index e102d9ac9e9..52467e75010 100644
--- a/src/hotspot/share/opto/loopnode.cpp
+++ b/src/hotspot/share/opto/loopnode.cpp
@@ -801,13 +801,14 @@ bool PhaseIdealLoop::create_loop_nest(IdealLoopTree* loop, Node_List &old_new) {
   }
 #endif
 
-  jlong stride_con = head->stride_con();
-  assert(stride_con != 0, "missed some peephole opt");
+  jlong stride_con_long = head->stride_con();
+  assert(stride_con_long != 0, "missed some peephole opt");
   // We can't iterate for more than max int at a time.
-  if (stride_con != (jint)stride_con) {
+  if (stride_con_long != (jint)stride_con_long || stride_con_long == min_jint) {
     assert(bt == T_LONG, "only for long loops");
     return false;
   }
+  jint stride_con = checked_cast<int>(stride_con_long);
   // The number of iterations for the integer count loop: guarantee no
   // overflow: max_jint - stride_con max. -1 so there's no need for a
   // loop limit check if the exit test is <= or >=.
@@ -945,7 +946,7 @@ bool PhaseIdealLoop::create_loop_nest(IdealLoopTree* loop, Node_List &old_new) {
   }
 
   // Clone the iv data nodes as an integer iv
-  Node* int_stride = _igvn.intcon(checked_cast<int>(stride_con));
+  Node* int_stride = _igvn.intcon(stride_con);
   set_ctrl(int_stride, C->root());
   Node* inner_phi = new PhiNode(x->in(0), TypeInt::INT);
   Node* inner_incr = new AddINode(inner_phi, int_stride);
@@ -1040,7 +1041,7 @@ bool PhaseIdealLoop::create_loop_nest(IdealLoopTree* loop, Node_List &old_new) {
     register_new_node(outer_phi, outer_head);
   }
 
-  transform_long_range_checks(checked_cast<int>(stride_con), range_checks, outer_phi, inner_iters_actual_int,
+  transform_long_range_checks(stride_con, range_checks, outer_phi, inner_iters_actual_int,
                               inner_phi, iv_add, inner_head);
   // Peel one iteration of the loop and use the safepoint at the end
   // of the peeled iteration to insert Parse Predicates.
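Some context for the guard rewritten in extract_long_range_checks() in the next hunk: with a range-check scale produced by Long.MIN_VALUE * i, the old expression ABS(scale * stride_con) wraps to zero and the division raises SIGFPE, and ABS(min_jlong) itself overflows. A standalone sketch of the failure and of the fixed guard's shape, not HotSpot code; iters_limit and min_iters values here are illustrative:

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    int main() {
      const int64_t iters_limit = 1000;   // stand-in for original_iters_limit
      const int64_t min_iters   = 2;      // as in extract_long_range_checks()
      int64_t scale  = INT64_MIN;         // scale from Long.MIN_VALUE * i
      int64_t stride = 2;                 // loop stride, i += 2

      // Two's-complement wrap: INT64_MIN * 2 == 0. Computed through uint64_t to
      // keep the sketch free of signed-overflow UB; jlong arithmetic wraps the
      // same way.
      int64_t product = (int64_t)((uint64_t)scale * (uint64_t)stride);
      printf("scale * stride wraps to %lld\n", (long long)product);
      if (product != 0) {
        // Pre-patch guard: divides by the wrapped product, SIGFPE when it is 0.
        printf("%lld\n", (long long)(iters_limit / llabs(product)));
      }

      // Post-patch shape: reject min_jlong first (its ABS would overflow),
      // divide by ABS(scale) alone, move the stride factor to the other side.
      if (scale != INT64_MIN && iters_limit / llabs(scale) >= min_iters * llabs(stride)) {
        printf("range check would be transformed\n");
      }
      return 0;
    }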
If no well @@ -1077,7 +1078,7 @@ bool PhaseIdealLoop::create_loop_nest(IdealLoopTree* loop, Node_List &old_new) { return true; } -int PhaseIdealLoop::extract_long_range_checks(const IdealLoopTree* loop, jlong stride_con, int iters_limit, PhiNode* phi, +int PhaseIdealLoop::extract_long_range_checks(const IdealLoopTree* loop, jint stride_con, int iters_limit, PhiNode* phi, Node_List& range_checks) { const jlong min_iters = 2; jlong reduced_iters_limit = iters_limit; @@ -1093,7 +1094,9 @@ int PhaseIdealLoop::extract_long_range_checks(const IdealLoopTree* loop, jlong s jlong scale = 0; if (loop->is_range_check_if(if_proj, this, T_LONG, phi, range, offset, scale) && loop->is_invariant(range) && loop->is_invariant(offset) && - original_iters_limit / ABS(scale * stride_con) >= min_iters) { + scale != min_jlong && + original_iters_limit / ABS(scale) >= min_iters * ABS(stride_con)) { + assert(scale == (jint)scale, "scale should be an int"); reduced_iters_limit = MIN2(reduced_iters_limit, original_iters_limit/ABS(scale)); range_checks.push(c); } diff --git a/src/hotspot/share/opto/loopnode.hpp b/src/hotspot/share/opto/loopnode.hpp index 6d44434d71e..868a4fd153c 100644 --- a/src/hotspot/share/opto/loopnode.hpp +++ b/src/hotspot/share/opto/loopnode.hpp @@ -1695,8 +1695,8 @@ class PhaseIdealLoop : public PhaseTransform { LoopNode* create_inner_head(IdealLoopTree* loop, BaseCountedLoopNode* head, IfNode* exit_test); - int extract_long_range_checks(const IdealLoopTree* loop, jlong stride_con, int iters_limit, PhiNode* phi, - Node_List &range_checks); + int extract_long_range_checks(const IdealLoopTree* loop, jint stride_con, int iters_limit, PhiNode* phi, + Node_List &range_checks); void transform_long_range_checks(int stride_con, const Node_List &range_checks, Node* outer_phi, Node* inner_iters_actual_int, Node* inner_phi, diff --git a/test/hotspot/jtreg/compiler/rangechecks/TestLargeScaleInLongRCOverflow.java b/test/hotspot/jtreg/compiler/rangechecks/TestLargeScaleInLongRCOverflow.java new file mode 100644 index 00000000000..c762b0e2fd7 --- /dev/null +++ b/test/hotspot/jtreg/compiler/rangechecks/TestLargeScaleInLongRCOverflow.java @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 8324121 + * @summary SIGFPE in PhaseIdealLoop::extract_long_range_checks + * @run main/othervm -Xcomp -XX:CompileCommand=compileonly,TestLargeScaleInLongRCOverflow::test* -XX:-TieredCompilation TestLargeScaleInLongRCOverflow + * + */ + +import java.util.Objects; + +public class TestLargeScaleInLongRCOverflow { + + public static void main(String args[]) { + Objects.checkIndex(0, 1); + try { + test1(); + } catch (java.lang.IndexOutOfBoundsException e) { } + try { + test2(); + } catch (java.lang.IndexOutOfBoundsException e) { } + } + + // SIGFPE in PhaseIdealLoop::extract_long_range_checks + public static void test1() { + for (long i = 1; i < 100; i += 2) { + Objects.checkIndex(Long.MIN_VALUE * i, 10); + } + } + + // "assert(static_cast(result) == thing) failed: must be" in PhaseIdealLoop::transform_long_range_checks + public static void test2() { + for (long i = 1; i < 100; i += 2) { + Objects.checkIndex((Long.MIN_VALUE + 2) * i, 10); + } + } +} From 87d5da4f4f61bde5125b2534cd059a5772f991b7 Mon Sep 17 00:00:00 2001 From: Aleksey Shipilev Date: Thu, 2 May 2024 12:27:14 +0000 Subject: [PATCH 02/19] 8328822: C2: "negative trip count?" assert failure in profile predicate code Backport-of: 2ceeb6c00135310b7bdabacb92d26d81de525240 --- src/hotspot/share/opto/loopPredicate.cpp | 2 +- .../TestCountedLoopMinJintStride.java | 64 +++++++++++++++++++ 2 files changed, 65 insertions(+), 1 deletion(-) create mode 100644 test/hotspot/jtreg/compiler/predicates/TestCountedLoopMinJintStride.java diff --git a/src/hotspot/share/opto/loopPredicate.cpp b/src/hotspot/share/opto/loopPredicate.cpp index 9ae24cb2055..fa39301cbef 100644 --- a/src/hotspot/share/opto/loopPredicate.cpp +++ b/src/hotspot/share/opto/loopPredicate.cpp @@ -1146,7 +1146,7 @@ bool PhaseIdealLoop::loop_predication_should_follow_branches(IdealLoopTree* loop CountedLoopNode* cl = head->as_CountedLoop(); if (cl->phi() != nullptr) { const TypeInt* t = _igvn.type(cl->phi())->is_int(); - float worst_case_trip_cnt = ((float)t->_hi - t->_lo) / ABS(cl->stride_con()); + float worst_case_trip_cnt = ((float)t->_hi - t->_lo) / ABS((float)cl->stride_con()); if (worst_case_trip_cnt < loop_trip_cnt) { loop_trip_cnt = worst_case_trip_cnt; } diff --git a/test/hotspot/jtreg/compiler/predicates/TestCountedLoopMinJintStride.java b/test/hotspot/jtreg/compiler/predicates/TestCountedLoopMinJintStride.java new file mode 100644 index 00000000000..62bfc60cc73 --- /dev/null +++ b/test/hotspot/jtreg/compiler/predicates/TestCountedLoopMinJintStride.java @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2024, Red Hat, Inc. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test + * @bug 8328822 + * @summary C2: "negative trip count?" assert failure in profile predicate code + * @run main/othervm -XX:-BackgroundCompilation TestCountedLoopMinJintStride + */ + +import java.util.Objects; + +public class TestCountedLoopMinJintStride { + public static void main(String[] args) { + for (int i = 0; i < 20_000; i++) { + test1(Integer.MAX_VALUE-1, Integer.MAX_VALUE, 0); + testHelper1(100, -1, Integer.MAX_VALUE, 0); + test2(Integer.MAX_VALUE-1, Integer.MAX_VALUE, 0); + testHelper2(100, -1, Integer.MAX_VALUE, 0); + } + } + + private static void test1(int stop, int range, int start) { + testHelper1(stop, Integer.MIN_VALUE, range, start); + } + + private static void testHelper1(int stop, int stride, int range, int start) { + for (int i = stop; i >= start; i += stride) { + Objects.checkIndex(i, range); + } + } + + private static void test2(int stop, int range, int start) { + testHelper1(stop, Integer.MIN_VALUE, range, start); + } + + private static void testHelper2(int stop, int stride, int range, int start) { + for (int i = stop; i >= start; i += stride) { + if (i < 0 || i >= range) { + throw new RuntimeException("out of bounds"); + } + } + } +} From a4d8d061a8a694502a085b444a480fa3211a07dc Mon Sep 17 00:00:00 2001 From: Zhengyu Gu Date: Thu, 2 May 2024 14:14:09 +0000 Subject: [PATCH 03/19] 8328744: Parallel: Parallel GC throws OOM before heap is fully expanded Backport-of: bf8146eac24ba8e00d1794ef7134ecf2476cf897 --- src/hotspot/share/gc/parallel/psScavenge.cpp | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/hotspot/share/gc/parallel/psScavenge.cpp b/src/hotspot/share/gc/parallel/psScavenge.cpp index 1bfba37735f..7da789261c0 100644 --- a/src/hotspot/share/gc/parallel/psScavenge.cpp +++ b/src/hotspot/share/gc/parallel/psScavenge.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2002, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -708,12 +708,14 @@ bool PSScavenge::should_attempt_scavenge() { size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes(); size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes()); - bool result = promotion_estimate < old_gen->free_in_bytes(); + // Total free size after possible old gen expansion + size_t free_in_old_gen = old_gen->max_gen_size() - old_gen->used_in_bytes(); + bool result = promotion_estimate < free_in_old_gen; log_trace(ergo)("%s scavenge: average_promoted " SIZE_FORMAT " padded_average_promoted " SIZE_FORMAT " free in old gen " SIZE_FORMAT, result ? 
"Do" : "Skip", (size_t) policy->average_promoted_in_bytes(), (size_t) policy->padded_average_promoted_in_bytes(), - old_gen->free_in_bytes()); + free_in_old_gen); if (young_gen->used_in_bytes() < (size_t) policy->padded_average_promoted_in_bytes()) { log_trace(ergo)(" padded_promoted_average is greater than maximum promotion = " SIZE_FORMAT, young_gen->used_in_bytes()); } From 3bb8eeed4817d490a1089aa567434af3f303c4f8 Mon Sep 17 00:00:00 2001 From: Goetz Lindenmaier Date: Fri, 3 May 2024 06:22:47 +0000 Subject: [PATCH 04/19] 8317809: Insertion of free code blobs into code cache can be very slow during class unloading Reviewed-by: phh, adinn Backport-of: 30817b742300f10f566e6aee3a8c1f8af4ab3083 --- .../share/classfile/classLoaderData.cpp | 2 +- .../share/classfile/classLoaderData.hpp | 14 +- .../share/classfile/classLoaderDataGraph.cpp | 26 +-- .../share/classfile/classLoaderDataGraph.hpp | 2 - src/hotspot/share/code/codeBlob.cpp | 4 +- src/hotspot/share/code/codeBlob.hpp | 2 +- src/hotspot/share/code/codeCache.cpp | 36 +--- src/hotspot/share/code/codeCache.hpp | 4 +- src/hotspot/share/code/compiledMethod.hpp | 2 +- src/hotspot/share/code/nmethod.cpp | 17 +- src/hotspot/share/code/nmethod.hpp | 8 +- src/hotspot/share/compiler/compileBroker.cpp | 2 +- src/hotspot/share/gc/g1/g1CollectedHeap.cpp | 34 +++- src/hotspot/share/gc/g1/g1CollectedHeap.hpp | 2 + src/hotspot/share/gc/g1/g1ConcurrentMark.cpp | 31 +--- src/hotspot/share/gc/g1/g1FullCollector.cpp | 13 +- .../share/gc/parallel/psParallelCompact.cpp | 26 ++- src/hotspot/share/gc/serial/genMarkSweep.cpp | 14 +- .../share/gc/shared/classUnloadingContext.cpp | 174 ++++++++++++++++++ .../share/gc/shared/classUnloadingContext.hpp | 77 ++++++++ .../share/gc/shared/genCollectedHeap.cpp | 6 +- .../gc/shenandoah/shenandoahCodeRoots.cpp | 3 +- .../share/gc/shenandoah/shenandoahHeap.cpp | 8 +- .../share/gc/shenandoah/shenandoahUnload.cpp | 6 +- src/hotspot/share/gc/x/xHeap.cpp | 4 + src/hotspot/share/gc/x/xNMethod.cpp | 3 +- src/hotspot/share/gc/z/zGeneration.cpp | 4 + src/hotspot/share/gc/z/zNMethod.cpp | 3 +- 28 files changed, 396 insertions(+), 131 deletions(-) create mode 100644 src/hotspot/share/gc/shared/classUnloadingContext.cpp create mode 100644 src/hotspot/share/gc/shared/classUnloadingContext.hpp diff --git a/src/hotspot/share/classfile/classLoaderData.cpp b/src/hotspot/share/classfile/classLoaderData.cpp index ca6b58d6530..be91f26bdef 100644 --- a/src/hotspot/share/classfile/classLoaderData.cpp +++ b/src/hotspot/share/classfile/classLoaderData.cpp @@ -599,7 +599,7 @@ void ClassLoaderData::unload() { free_deallocate_list_C_heap_structures(); // Clean up class dependencies and tell serviceability tools - // these classes are unloading. Must be called + // these classes are unloading. This must be called // after erroneous classes are released. 
classes_do(InstanceKlass::unload_class); diff --git a/src/hotspot/share/classfile/classLoaderData.hpp b/src/hotspot/share/classfile/classLoaderData.hpp index 6b5cfdc0664..c9d025aded1 100644 --- a/src/hotspot/share/classfile/classLoaderData.hpp +++ b/src/hotspot/share/classfile/classLoaderData.hpp @@ -186,12 +186,16 @@ class ClassLoaderData : public CHeapObj { ClassLoaderData* next() const; void unlink_next(); - void set_unloading_next(ClassLoaderData* unloading_next); - ClassLoaderData* unloading_next() const; - ClassLoaderData(Handle h_class_loader, bool has_class_mirror_holder); + +public: ~ClassLoaderData(); + void set_unloading_next(ClassLoaderData* unloading_next); + ClassLoaderData* unloading_next() const; + void unload(); + +private: // The CLD are not placed in the Heap, so the Card Table or // the Mod Union Table can't be used to mark when CLD have modified oops. // The CT and MUT bits saves this information for the whole class loader data. @@ -203,11 +207,11 @@ class ClassLoaderData : public CHeapObj { oop holder_no_keepalive() const; oop holder() const; + void classes_do(void f(Klass* const)); + private: - void unload(); bool keep_alive() const { return _keep_alive > 0; } - void classes_do(void f(Klass* const)); void loaded_classes_do(KlassClosure* klass_closure); void classes_do(void f(InstanceKlass*)); void methods_do(void f(Method*)); diff --git a/src/hotspot/share/classfile/classLoaderDataGraph.cpp b/src/hotspot/share/classfile/classLoaderDataGraph.cpp index ded67718929..2046286651e 100644 --- a/src/hotspot/share/classfile/classLoaderDataGraph.cpp +++ b/src/hotspot/share/classfile/classLoaderDataGraph.cpp @@ -31,6 +31,7 @@ #include "classfile/moduleEntry.hpp" #include "classfile/packageEntry.hpp" #include "code/dependencyContext.hpp" +#include "gc/shared/classUnloadingContext.hpp" #include "logging/log.hpp" #include "logging/logStream.hpp" #include "memory/allocation.inline.hpp" @@ -124,7 +125,6 @@ void ClassLoaderDataGraph::walk_metadata_and_clean_metaspaces() { // List head of all class loader data. ClassLoaderData* volatile ClassLoaderDataGraph::_head = nullptr; -ClassLoaderData* ClassLoaderDataGraph::_unloading_head = nullptr; bool ClassLoaderDataGraph::_should_clean_deallocate_lists = false; bool ClassLoaderDataGraph::_safepoint_cleanup_needed = false; @@ -342,11 +342,7 @@ void ClassLoaderDataGraph::loaded_classes_do(KlassClosure* klass_closure) { } void ClassLoaderDataGraph::classes_unloading_do(void f(Klass* const)) { - assert_locked_or_safepoint(ClassLoaderDataGraph_lock); - for (ClassLoaderData* cld = _unloading_head; cld != nullptr; cld = cld->unloading_next()) { - assert(cld->is_unloading(), "invariant"); - cld->classes_do(f); - } + ClassUnloadingContext::context()->classes_unloading_do(f); } void ClassLoaderDataGraph::verify_dictionary() { @@ -425,7 +421,8 @@ bool ClassLoaderDataGraph::do_unloading() { } else { // Found dead CLD. loaders_removed++; - data->unload(); + + ClassUnloadingContext::context()->register_unloading_class_loader_data(data); // Move dead CLD to unloading list. 
if (prev != nullptr) { @@ -435,8 +432,6 @@ bool ClassLoaderDataGraph::do_unloading() { // The GC might be walking this concurrently Atomic::store(&_head, data->next()); } - data->set_unloading_next(_unloading_head); - _unloading_head = data; } } @@ -469,16 +464,9 @@ void ClassLoaderDataGraph::clean_module_and_package_info() { } void ClassLoaderDataGraph::purge(bool at_safepoint) { - ClassLoaderData* list = _unloading_head; - _unloading_head = nullptr; - ClassLoaderData* next = list; - bool classes_unloaded = false; - while (next != nullptr) { - ClassLoaderData* purge_me = next; - next = purge_me->unloading_next(); - delete purge_me; - classes_unloaded = true; - } + ClassUnloadingContext::context()->purge_class_loader_data(); + + bool classes_unloaded = ClassUnloadingContext::context()->has_unloaded_classes(); Metaspace::purge(classes_unloaded); if (classes_unloaded) { diff --git a/src/hotspot/share/classfile/classLoaderDataGraph.hpp b/src/hotspot/share/classfile/classLoaderDataGraph.hpp index ae502b6d30d..3de2c10850e 100644 --- a/src/hotspot/share/classfile/classLoaderDataGraph.hpp +++ b/src/hotspot/share/classfile/classLoaderDataGraph.hpp @@ -43,8 +43,6 @@ class ClassLoaderDataGraph : public AllStatic { private: // All CLDs (except unlinked CLDs) can be reached by walking _head->_next->... static ClassLoaderData* volatile _head; - // All unlinked CLDs - static ClassLoaderData* _unloading_head; // Set if there's anything to purge in the deallocate lists or previous versions // during a safepoint after class unloading in a full GC. diff --git a/src/hotspot/share/code/codeBlob.cpp b/src/hotspot/share/code/codeBlob.cpp index a384edc936f..058ac24e167 100644 --- a/src/hotspot/share/code/codeBlob.cpp +++ b/src/hotspot/share/code/codeBlob.cpp @@ -164,7 +164,7 @@ RuntimeBlob::RuntimeBlob( void RuntimeBlob::free(RuntimeBlob* blob) { assert(blob != nullptr, "caller must check for nullptr"); ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock - blob->flush(); + blob->purge(); { MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); CodeCache::free(blob); @@ -173,7 +173,7 @@ void RuntimeBlob::free(RuntimeBlob* blob) { MemoryService::track_code_cache_memory_usage(); } -void CodeBlob::flush() { +void CodeBlob::purge(bool free_code_cache_data) { if (_oop_maps != nullptr) { delete _oop_maps; _oop_maps = nullptr; diff --git a/src/hotspot/share/code/codeBlob.hpp b/src/hotspot/share/code/codeBlob.hpp index 6c40fe992ae..719ca57b30f 100644 --- a/src/hotspot/share/code/codeBlob.hpp +++ b/src/hotspot/share/code/codeBlob.hpp @@ -143,7 +143,7 @@ class CodeBlob { static unsigned int align_code_offset(int offset); // Deletion - virtual void flush(); + virtual void purge(bool free_code_cache_data = true); // Typing virtual bool is_buffer_blob() const { return false; } diff --git a/src/hotspot/share/code/codeCache.cpp b/src/hotspot/share/code/codeCache.cpp index d2ee5c36317..9b0bdc3643d 100644 --- a/src/hotspot/share/code/codeCache.cpp +++ b/src/hotspot/share/code/codeCache.cpp @@ -37,6 +37,7 @@ #include "compiler/compilerDefinitions.inline.hpp" #include "compiler/oopMap.hpp" #include "gc/shared/barrierSetNMethod.hpp" +#include "gc/shared/classUnloadingContext.hpp" #include "gc/shared/collectedHeap.hpp" #include "jfr/jfrEvents.hpp" #include "jvm_io.h" @@ -606,7 +607,7 @@ void CodeCache::free(CodeBlob* cb) { cb->~CodeBlob(); // Get heap for given CodeBlob and deallocate - get_code_heap(cb)->deallocate(cb); + heap->deallocate(cb); assert(heap->blob_count() >= 0, "sanity check"); 
} @@ -970,36 +971,8 @@ void CodeCache::purge_exception_caches() { _exception_cache_purge_list = nullptr; } -// Register an is_unloading nmethod to be flushed after unlinking -void CodeCache::register_unlinked(nmethod* nm) { - assert(nm->unlinked_next() == nullptr, "Only register for unloading once"); - for (;;) { - // Only need acquire when reading the head, when the next - // pointer is walked, which it is not here. - nmethod* head = Atomic::load(&_unlinked_head); - nmethod* next = head != nullptr ? head : nm; // Self looped means end of list - nm->set_unlinked_next(next); - if (Atomic::cmpxchg(&_unlinked_head, head, nm) == head) { - break; - } - } -} - -// Flush all the nmethods the GC unlinked -void CodeCache::flush_unlinked_nmethods() { - nmethod* nm = _unlinked_head; - _unlinked_head = nullptr; - size_t freed_memory = 0; - while (nm != nullptr) { - nmethod* next = nm->unlinked_next(); - freed_memory += nm->total_size(); - nm->flush(); - if (next == nm) { - // Self looped means end of list - break; - } - nm = next; - } +// Restart compiler if possible and required.. +void CodeCache::maybe_restart_compiler(size_t freed_memory) { // Try to start the compiler again if we freed any memory if (!CompileBroker::should_compile_new_jobs() && freed_memory != 0) { @@ -1013,7 +986,6 @@ void CodeCache::flush_unlinked_nmethods() { } uint8_t CodeCache::_unloading_cycle = 1; -nmethod* volatile CodeCache::_unlinked_head = nullptr; void CodeCache::increment_unloading_cycle() { // 2-bit value (see IsUnloadingState in nmethod.cpp for details) diff --git a/src/hotspot/share/code/codeCache.hpp b/src/hotspot/share/code/codeCache.hpp index 403eacdfab3..fbcaefd2a62 100644 --- a/src/hotspot/share/code/codeCache.hpp +++ b/src/hotspot/share/code/codeCache.hpp @@ -106,7 +106,6 @@ class CodeCache : AllStatic { static TruncatedSeq _unloading_gc_intervals; static TruncatedSeq _unloading_allocation_rates; static volatile bool _unloading_threshold_gc_requested; - static nmethod* volatile _unlinked_head; static ExceptionCache* volatile _exception_cache_purge_list; @@ -213,8 +212,7 @@ class CodeCache : AllStatic { // nmethod::is_cold. 
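For readers puzzled by the deleted "Self looped means end of list" comments above: the removed registration list was a lock-free Treiber-style stack in which the last node points to itself, so next == node marks the end and a null next stays reserved to mean "never registered". A compilable sketch of that idiom, with Node standing in for nmethod and std::atomic for HotSpot's Atomic; the patch replaces it with per-worker arrays owned by ClassUnloadingContext, which need no CAS and can be sorted before freeing:

    #include <atomic>

    struct Node {
      std::atomic<Node*> next{nullptr};   // nullptr == "never registered"
    };

    std::atomic<Node*> head{nullptr};

    void push(Node* n) {
      for (;;) {
        Node* h = head.load(std::memory_order_relaxed);
        // The last element points to itself, so walkers detect the end via
        // next == node, keeping nullptr free for the "not on the list" state.
        n->next.store(h != nullptr ? h : n, std::memory_order_relaxed);
        if (head.compare_exchange_strong(h, n)) {
          break;
        }
      }
    }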
static void arm_all_nmethods(); - static void flush_unlinked_nmethods(); - static void register_unlinked(nmethod* nm); + static void maybe_restart_compiler(size_t freed_memory); static void do_unloading(bool unloading_occurred); static uint8_t unloading_cycle() { return _unloading_cycle; } diff --git a/src/hotspot/share/code/compiledMethod.hpp b/src/hotspot/share/code/compiledMethod.hpp index 912ca1b3f88..4bcda08ec99 100644 --- a/src/hotspot/share/code/compiledMethod.hpp +++ b/src/hotspot/share/code/compiledMethod.hpp @@ -174,7 +174,7 @@ class CompiledMethod : public CodeBlob { void* _gc_data; - virtual void flush() = 0; + virtual void purge(bool free_code_cache_data = true) = 0; private: DeoptimizationStatus deoptimization_status() const { diff --git a/src/hotspot/share/code/nmethod.cpp b/src/hotspot/share/code/nmethod.cpp index ce516b8aef1..1b328597b7f 100644 --- a/src/hotspot/share/code/nmethod.cpp +++ b/src/hotspot/share/code/nmethod.cpp @@ -42,6 +42,7 @@ #include "compiler/oopMap.inline.hpp" #include "gc/shared/barrierSet.hpp" #include "gc/shared/barrierSetNMethod.hpp" +#include "gc/shared/classUnloadingContext.hpp" #include "gc/shared/collectedHeap.hpp" #include "interpreter/bytecode.hpp" #include "jvm.h" @@ -639,7 +640,7 @@ nmethod::nmethod( ByteSize basic_lock_sp_offset, OopMapSet* oop_maps ) : CompiledMethod(method, "native nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, true), - _unlinked_next(nullptr), + _is_unlinked(false), _native_receiver_sp_offset(basic_lock_owner_sp_offset), _native_basic_lock_sp_offset(basic_lock_sp_offset), _is_unloading_state(0) @@ -783,7 +784,7 @@ nmethod::nmethod( #endif ) : CompiledMethod(method, "nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false, true), - _unlinked_next(nullptr), + _is_unlinked(false), _native_receiver_sp_offset(in_ByteSize(-1)), _native_basic_lock_sp_offset(in_ByteSize(-1)), _is_unloading_state(0) @@ -1406,7 +1407,7 @@ bool nmethod::make_not_entrant() { // For concurrent GCs, there must be a handshake between unlink and flush void nmethod::unlink() { - if (_unlinked_next != nullptr) { + if (_is_unlinked) { // Already unlinked. It can be invoked twice because concurrent code cache // unloading might need to restart when inline cache cleaning fails due to // running out of ICStubs, which can only be refilled at safepoints @@ -1440,10 +1441,10 @@ void nmethod::unlink() { // Register for flushing when it is safe. For concurrent class unloading, // that would be after the unloading handshake, and for STW class unloading // that would be when getting back to the VM thread. 
- CodeCache::register_unlinked(this); + ClassUnloadingContext::context()->register_unlinked_nmethod(this); } -void nmethod::flush() { +void nmethod::purge(bool free_code_cache_data) { MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag); // completely deallocate this method @@ -1466,8 +1467,10 @@ void nmethod::flush() { Universe::heap()->unregister_nmethod(this); CodeCache::unregister_old_nmethod(this); - CodeBlob::flush(); - CodeCache::free(this); + CodeBlob::purge(); + if (free_code_cache_data) { + CodeCache::free(this); + } } oop nmethod::oop_at(int index) const { diff --git a/src/hotspot/share/code/nmethod.hpp b/src/hotspot/share/code/nmethod.hpp index cbe2bbb65a9..3be50e28e15 100644 --- a/src/hotspot/share/code/nmethod.hpp +++ b/src/hotspot/share/code/nmethod.hpp @@ -196,7 +196,7 @@ class nmethod : public CompiledMethod { address _verified_entry_point; // entry point without class check address _osr_entry_point; // entry point for on stack replacement - nmethod* _unlinked_next; + bool _is_unlinked; // Shared fields for all nmethod's int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method @@ -441,8 +441,8 @@ class nmethod : public CompiledMethod { virtual bool is_unloading(); virtual void do_unloading(bool unloading_occurred); - nmethod* unlinked_next() const { return _unlinked_next; } - void set_unlinked_next(nmethod* next) { _unlinked_next = next; } + bool is_unlinked() const { return _is_unlinked; } + void set_is_unlinked() { assert(!_is_unlinked, "already unlinked"); _is_unlinked = true; } #if INCLUDE_RTM_OPT // rtm state accessing and manipulating @@ -522,7 +522,7 @@ class nmethod : public CompiledMethod { void unlink(); // Deallocate this nmethod - called by the GC - void flush(); + void purge(bool free_code_cache_data = true); // See comment at definition of _last_seen_on_stack void mark_as_maybe_on_stack(); diff --git a/src/hotspot/share/compiler/compileBroker.cpp b/src/hotspot/share/compiler/compileBroker.cpp index 7bfff15b316..c8a5df23b49 100644 --- a/src/hotspot/share/compiler/compileBroker.cpp +++ b/src/hotspot/share/compiler/compileBroker.cpp @@ -1775,7 +1775,7 @@ bool CompileBroker::init_compiler_runtime() { void CompileBroker::free_buffer_blob_if_allocated(CompilerThread* thread) { BufferBlob* blob = thread->get_buffer_blob(); if (blob != nullptr) { - blob->flush(); + blob->purge(); MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); CodeCache::free(blob); } diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp index 1f95f2a9ea9..6fc044939b8 100644 --- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp @@ -25,7 +25,7 @@ #include "precompiled.hpp" #include "classfile/classLoaderDataGraph.hpp" #include "classfile/metadataOnStackMark.hpp" -#include "classfile/stringTable.hpp" +#include "classfile/systemDictionary.hpp" #include "code/codeCache.hpp" #include "code/icBuffer.hpp" #include "compiler/oopMap.hpp" @@ -74,6 +74,7 @@ #include "gc/g1/heapRegion.inline.hpp" #include "gc/g1/heapRegionRemSet.inline.hpp" #include "gc/g1/heapRegionSet.inline.hpp" +#include "gc/shared/classUnloadingContext.hpp" #include "gc/shared/concurrentGCBreakpoints.hpp" #include "gc/shared/gcBehaviours.hpp" #include "gc/shared/gcHeapSummary.hpp" @@ -855,10 +856,6 @@ void G1CollectedHeap::verify_before_full_collection() { } void G1CollectedHeap::prepare_for_mutator_after_full_collection() { - // Delete metaspaces for unloaded class loaders and clean up 
loader_data graph - ClassLoaderDataGraph::purge(/*at_safepoint*/true); - DEBUG_ONLY(MetaspaceUtils::verify();) - // Prepare heap for normal collections. assert(num_free_regions() == 0, "we should not have added any free regions"); rebuild_region_sets(false /* free_list_only */); @@ -2596,6 +2593,33 @@ void G1CollectedHeap::complete_cleaning(bool class_unloading_occurred) { workers()->run_task(&unlink_task); } +void G1CollectedHeap::unload_classes_and_code(const char* description, BoolObjectClosure* is_alive, GCTimer* timer) { + GCTraceTime(Debug, gc, phases) debug(description, timer); + + ClassUnloadingContext ctx(workers()->active_workers(), + false /* lock_codeblob_free_separately */); + { + CodeCache::UnlinkingScope scope(is_alive); + bool unloading_occurred = SystemDictionary::do_unloading(timer); + GCTraceTime(Debug, gc, phases) t("G1 Complete Cleaning", timer); + complete_cleaning(unloading_occurred); + } + { + GCTraceTime(Debug, gc, phases) t("Purge Unlinked NMethods", timer); + ctx.purge_nmethods(); + } + { + GCTraceTime(Debug, gc, phases) t("Free Code Blobs", timer); + ctx.free_code_blobs(); + } + { + GCTraceTime(Debug, gc, phases) t("Purge Class Loader Data", timer); + ClassLoaderDataGraph::purge(true /* at_safepoint */); + DEBUG_ONLY(MetaspaceUtils::verify();) + } +} + + bool G1STWSubjectToDiscoveryClosure::do_object_b(oop obj) { assert(obj != nullptr, "must not be null"); assert(_g1h->is_in_reserved(obj), "Trying to discover obj " PTR_FORMAT " not in heap", p2i(obj)); diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp index e1b95bf616d..9d4566916ff 100644 --- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp @@ -1265,6 +1265,8 @@ class G1CollectedHeap : public CollectedHeap { // Performs cleaning of data structures after class unloading. void complete_cleaning(bool class_unloading_occurred); + void unload_classes_and_code(const char* description, BoolObjectClosure* cl, GCTimer* timer); + // Verification // Perform any cleanup actions necessary before allowing a verification. diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp index b040c1f4656..fb773a025e7 100644 --- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp +++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp @@ -25,8 +25,6 @@ #include "precompiled.hpp" #include "classfile/classLoaderData.hpp" #include "classfile/classLoaderDataGraph.hpp" -#include "classfile/systemDictionary.hpp" -#include "code/codeCache.hpp" #include "gc/g1/g1BarrierSet.hpp" #include "gc/g1/g1BatchedTask.hpp" #include "gc/g1/g1CardSetMemory.hpp" @@ -1245,6 +1243,12 @@ void G1ConcurrentMark::remark() { if (mark_finished) { weak_refs_work(); + // Unload Klasses, String, Code Cache, etc. + if (ClassUnloadingWithConcurrentMark) { + G1CMIsAliveClosure is_alive(_g1h); + _g1h->unload_classes_and_code("Class Unloading", &is_alive, _gc_timer_cm); + } + SATBMarkQueueSet& satb_mq_set = G1BarrierSet::satb_mark_queue_set(); // We're done with marking. 
// This is the end of the marking cycle, we're expected all @@ -1282,12 +1286,6 @@ void G1ConcurrentMark::remark() { reclaim_empty_regions(); } - // Clean out dead classes - if (ClassUnloadingWithConcurrentMark) { - GCTraceTime(Debug, gc, phases) debug("Purge Metaspace", _gc_timer_cm); - ClassLoaderDataGraph::purge(/*at_safepoint*/true); - } - _g1h->resize_heap_if_necessary(); _g1h->uncommit_regions_if_necessary(); @@ -1612,9 +1610,6 @@ class G1CMRefProcProxyTask : public RefProcProxyTask { void G1ConcurrentMark::weak_refs_work() { ResourceMark rm; - // Is alive closure. - G1CMIsAliveClosure g1_is_alive(_g1h); - { GCTraceTime(Debug, gc, phases) debug("Reference Processing", _gc_timer_cm); @@ -1671,18 +1666,8 @@ void G1ConcurrentMark::weak_refs_work() { { GCTraceTime(Debug, gc, phases) debug("Weak Processing", _gc_timer_cm); - WeakProcessor::weak_oops_do(_g1h->workers(), &g1_is_alive, &do_nothing_cl, 1); - } - - // Unload Klasses, String, Code Cache, etc. - if (ClassUnloadingWithConcurrentMark) { - GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm); - { - CodeCache::UnlinkingScope scope(&g1_is_alive); - bool unloading_occurred = SystemDictionary::do_unloading(_gc_timer_cm); - _g1h->complete_cleaning(unloading_occurred); - } - CodeCache::flush_unlinked_nmethods(); + G1CMIsAliveClosure is_alive(_g1h); + WeakProcessor::weak_oops_do(_g1h->workers(), &is_alive, &do_nothing_cl, 1); } } diff --git a/src/hotspot/share/gc/g1/g1FullCollector.cpp b/src/hotspot/share/gc/g1/g1FullCollector.cpp index 62e7cdfabd4..df1afe0d3e9 100644 --- a/src/hotspot/share/gc/g1/g1FullCollector.cpp +++ b/src/hotspot/share/gc/g1/g1FullCollector.cpp @@ -24,9 +24,6 @@ #include "precompiled.hpp" #include "classfile/classLoaderDataGraph.hpp" -#include "classfile/systemDictionary.hpp" -#include "code/codeCache.hpp" -#include "compiler/oopMap.hpp" #include "gc/g1/g1CollectedHeap.hpp" #include "gc/g1/g1FullCollector.inline.hpp" #include "gc/g1/g1FullGCAdjustTask.hpp" @@ -41,6 +38,7 @@ #include "gc/g1/g1RegionMarkStatsCache.inline.hpp" #include "gc/shared/gcTraceTime.inline.hpp" #include "gc/shared/preservedMarks.inline.hpp" +#include "gc/shared/classUnloadingContext.hpp" #include "gc/shared/referenceProcessor.hpp" #include "gc/shared/verifyOption.hpp" #include "gc/shared/weakProcessor.inline.hpp" @@ -319,14 +317,7 @@ void G1FullCollector::phase1_mark_live_objects() { // Class unloading and cleanup. if (ClassUnloading) { - GCTraceTime(Debug, gc, phases) debug("Phase 1: Class Unloading and Cleanup", scope()->timer()); - { - CodeCache::UnlinkingScope unloading_scope(&_is_alive); - // Unload classes and purge the SystemDictionary. 
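Taken together with the G1 changes above, every STW collector in this patch converges on the same control flow. A recap using only calls visible in this patch, with is_alive and timer as placeholders for the collector's closure and phase timer:

    // ClassUnloadingContext lifecycle, as wired up for the STW collectors:
    //
    //   ClassUnloadingContext ctx(1 /* num_nmethod_unlink_workers */,
    //                             false /* lock_codeblob_free_separately */);
    //   {
    //     CodeCache::UnlinkingScope scope(is_alive);
    //     bool unloading_occurred = SystemDictionary::do_unloading(timer);
    //     // unlinking registers dead CLDs and nmethods with ctx
    //     CodeCache::do_unloading(unloading_occurred);
    //   }
    //   ctx.purge_nmethods();    // release nmethod memory, maybe restart compiler
    //   ctx.free_code_blobs();   // hand storage back to the code heap
    //   ClassLoaderDataGraph::purge(true /* at_safepoint */);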
- bool unloading_occurred = SystemDictionary::do_unloading(scope()->timer()); - _heap->complete_cleaning(unloading_occurred); - } - CodeCache::flush_unlinked_nmethods(); + _heap->unload_classes_and_code("Phase 1: Class Unloading and Cleanup", &_is_alive, scope()->timer()); } { diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.cpp b/src/hotspot/share/gc/parallel/psParallelCompact.cpp index 532031a2102..9baf0c21db6 100644 --- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp +++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp @@ -42,6 +42,7 @@ #include "gc/parallel/psScavenge.hpp" #include "gc/parallel/psStringDedup.hpp" #include "gc/parallel/psYoungGen.hpp" +#include "gc/shared/classUnloadingContext.hpp" #include "gc/shared/gcCause.hpp" #include "gc/shared/gcHeapSummary.hpp" #include "gc/shared/gcId.hpp" @@ -1024,9 +1025,12 @@ void PSParallelCompact::post_compact() ct->dirty_MemRegion(old_mr); } - // Delete metaspaces for unloaded class loaders and clean up loader_data graph - ClassLoaderDataGraph::purge(/*at_safepoint*/true); - DEBUG_ONLY(MetaspaceUtils::verify();) + { + // Delete metaspaces for unloaded class loaders and clean up loader_data graph + GCTraceTime(Debug, gc, phases) t("Purge Class Loader Data", gc_timer()); + ClassLoaderDataGraph::purge(true /* at_safepoint */); + DEBUG_ONLY(MetaspaceUtils::verify();) + } // Need to clear claim bits for the next mark. ClassLoaderDataGraph::clear_claimed_marks(); @@ -1764,6 +1768,9 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) { ref_processor()->start_discovery(maximum_heap_compaction); + ClassUnloadingContext ctx(1 /* num_nmethod_unlink_workers */, + false /* lock_codeblob_free_separately */); + marking_phase(&_gc_tracer); bool max_on_system_gc = UseMaximumCompactionOnSystemGC @@ -2053,6 +2060,8 @@ void PSParallelCompact::marking_phase(ParallelOldTracer *gc_tracer) { { GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", &_gc_timer); + ClassUnloadingContext* ctx = ClassUnloadingContext::context(); + bool unloading_occurred; { CodeCache::UnlinkingScope scope(is_alive_closure()); @@ -2064,8 +2073,15 @@ void PSParallelCompact::marking_phase(ParallelOldTracer *gc_tracer) { CodeCache::do_unloading(unloading_occurred); } - // Release unloaded nmethods's memory. - CodeCache::flush_unlinked_nmethods(); + { + GCTraceTime(Debug, gc, phases) t("Purge Unlinked NMethods", gc_timer()); + // Release unloaded nmethod's memory. + ctx->purge_nmethods(); + } + { + GCTraceTime(Debug, gc, phases) t("Free Code Blobs", gc_timer()); + ctx->free_code_blobs(); + } // Prune dead klasses from subklass/sibling/implementor lists. 
Klass::clean_weak_klass_links(unloading_occurred); diff --git a/src/hotspot/share/gc/serial/genMarkSweep.cpp b/src/hotspot/share/gc/serial/genMarkSweep.cpp index 995ebc337f0..82e0181dbc3 100644 --- a/src/hotspot/share/gc/serial/genMarkSweep.cpp +++ b/src/hotspot/share/gc/serial/genMarkSweep.cpp @@ -35,6 +35,7 @@ #include "gc/serial/cardTableRS.hpp" #include "gc/serial/genMarkSweep.hpp" #include "gc/serial/serialGcRefProcProxyTask.hpp" +#include "gc/shared/classUnloadingContext.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "gc/shared/gcHeapSummary.hpp" #include "gc/shared/gcTimer.hpp" @@ -196,6 +197,8 @@ void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) { { GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", gc_timer()); + ClassUnloadingContext* ctx = ClassUnloadingContext::context(); + bool unloading_occurred; { CodeCache::UnlinkingScope scope(&is_alive); @@ -207,8 +210,15 @@ void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) { CodeCache::do_unloading(unloading_occurred); } - // Release unloaded nmethod's memory. - CodeCache::flush_unlinked_nmethods(); + { + GCTraceTime(Debug, gc, phases) t("Purge Unlinked NMethods", gc_timer()); + // Release unloaded nmethod's memory. + ctx->purge_nmethods(); + } + { + GCTraceTime(Debug, gc, phases) t("Free Code Blobs", gc_timer()); + ctx->free_code_blobs(); + } // Prune dead klasses from subklass/sibling/implementor lists. Klass::clean_weak_klass_links(unloading_occurred); diff --git a/src/hotspot/share/gc/shared/classUnloadingContext.cpp b/src/hotspot/share/gc/shared/classUnloadingContext.cpp new file mode 100644 index 00000000000..f624eaa6e44 --- /dev/null +++ b/src/hotspot/share/gc/shared/classUnloadingContext.cpp @@ -0,0 +1,174 @@ +/* + * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#include "precompiled.hpp" + +#include "classfile/classLoaderData.inline.hpp" +#include "code/nmethod.hpp" +#include "gc/shared/classUnloadingContext.hpp" +#include "runtime/mutexLocker.hpp" +#include "utilities/growableArray.hpp" + +ClassUnloadingContext* ClassUnloadingContext::_context = nullptr; + +ClassUnloadingContext::ClassUnloadingContext(uint num_workers, bool lock_codeblob_free_separately) : + _cld_head(nullptr), + _num_nmethod_unlink_workers(num_workers), + _unlinked_nmethods(nullptr), + _lock_codeblob_free_separately(lock_codeblob_free_separately) { + + assert(_context == nullptr, "context already set"); + _context = this; + + assert(num_workers > 0, "must be"); + + _unlinked_nmethods = NEW_C_HEAP_ARRAY(NMethodSet*, num_workers, mtGC); + for (uint i = 0; i < num_workers; ++i) { + _unlinked_nmethods[i] = new NMethodSet(); + } +} + +ClassUnloadingContext::~ClassUnloadingContext() { + for (uint i = 0; i < _num_nmethod_unlink_workers; ++i) { + delete _unlinked_nmethods[i]; + } + FREE_C_HEAP_ARRAY(NMethodSet*, _unlinked_nmethods); + + assert(_context == this, "context not set correctly"); + _context = nullptr; +} + +bool ClassUnloadingContext::has_unloaded_classes() const { + return _cld_head != nullptr; +} + +void ClassUnloadingContext::register_unloading_class_loader_data(ClassLoaderData* cld) { + assert_locked_or_safepoint(ClassLoaderDataGraph_lock); + + cld->unload(); + + cld->set_unloading_next(_cld_head); + _cld_head = cld; +} + +void ClassUnloadingContext::purge_class_loader_data() { + for (ClassLoaderData* cld = _cld_head; cld != nullptr;) { + assert(cld->is_unloading(), "invariant"); + + ClassLoaderData* next = cld->unloading_next(); + delete cld; + cld = next; + } +} + +void ClassUnloadingContext::classes_unloading_do(void f(Klass* const)) { + assert_locked_or_safepoint(ClassLoaderDataGraph_lock); + for (ClassLoaderData* cld = _cld_head; cld != nullptr; cld = cld->unloading_next()) { + assert(cld->is_unloading(), "invariant"); + cld->classes_do(f); + } +} + +void ClassUnloadingContext::register_unlinked_nmethod(nmethod* nm) { + assert(_context != nullptr, "no context set"); + + assert(!nm->is_unlinked(), "Only register for unloading once"); + assert(_num_nmethod_unlink_workers == 1 || Thread::current()->is_Worker_thread(), "must be worker thread if parallel"); + + uint worker_id = _num_nmethod_unlink_workers == 1 ? 0 : WorkerThread::worker_id(); + assert(worker_id < _num_nmethod_unlink_workers, "larger than expected worker id %u", worker_id); + + _unlinked_nmethods[worker_id]->append(nm); + + nm->set_is_unlinked(); +} + +void ClassUnloadingContext::purge_nmethods() { + assert(_context != nullptr, "no context set"); + + size_t freed_memory = 0; + + for (uint i = 0; i < _num_nmethod_unlink_workers; ++i) { + NMethodSet* set = _unlinked_nmethods[i]; + for (nmethod* nm : *set) { + freed_memory += nm->size(); + nm->purge(false /* free_code_cache_data */); + } + } + + CodeCache::maybe_restart_compiler(freed_memory); +} + +void ClassUnloadingContext::free_code_blobs() { + assert(_context != nullptr, "no context set"); + + // Sort nmethods before freeing to benefit from optimizations. If Nmethods were + // collected in parallel, use a new temporary buffer for the result, otherwise + // sort in-place. + NMethodSet* nmethod_set = nullptr; + + bool is_parallel = _num_nmethod_unlink_workers > 1; + + // Merge all collected nmethods into a huge array. 
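One property of the registration path above deserves a note: unlike the deleted CodeCache list, register_unlinked_nmethod() needs no atomics because each unlink worker appends only to its own slot, indexed by WorkerThread::worker_id(), and the merge below runs after unlinking is done. A reduced model of that pattern, with std::vector standing in for GrowableArrayCHeap and void* for nmethod*:

    #include <vector>

    // Per-worker buffers: worker i only touches buffers[i], so appends are
    // data-race free without CAS; a single thread merges afterwards.
    struct PerWorkerBuffers {
      std::vector<std::vector<void*>> buffers;
      explicit PerWorkerBuffers(unsigned workers) : buffers(workers) {}

      void register_item(unsigned worker_id, void* item) {
        buffers[worker_id].push_back(item);  // no synchronization needed
      }

      std::vector<void*> merge() {
        std::vector<void*> all;
        for (auto& b : buffers) {
          all.insert(all.end(), b.begin(), b.end());
        }
        return all;
      }
    };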
+  if (is_parallel) {
+    int num_nmethods = 0;
+
+    for (uint i = 0; i < _num_nmethod_unlink_workers; ++i) {
+      num_nmethods += _unlinked_nmethods[i]->length();
+    }
+    nmethod_set = new NMethodSet(num_nmethods);
+    for (uint i = 0; i < _num_nmethod_unlink_workers; ++i) {
+      nmethod_set->appendAll(_unlinked_nmethods[i]);
+    }
+  } else {
+    nmethod_set = _unlinked_nmethods[0];
+  }
+
+  // Sort by ascending address.
+  auto sort_nmethods = [] (nmethod** a, nmethod** b) -> int {
+    uintptr_t u_a = (uintptr_t)*a;
+    uintptr_t u_b = (uintptr_t)*b;
+    if (u_a == u_b) return 0;
+    if (u_a < u_b) return -1;
+    return 1;
+  };
+  nmethod_set->sort(sort_nmethods);
+
+  // And free. Duplicate loop for clarity depending on where we want the locking.
+  if (_lock_codeblob_free_separately) {
+    for (nmethod* nm : *nmethod_set) {
+      MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+      CodeCache::free(nm);
+    }
+  } else {
+    MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    for (nmethod* nm : *nmethod_set) {
+      CodeCache::free(nm);
+    }
+  }
+
+  if (is_parallel) {
+    delete nmethod_set;
+  }
+}
diff --git a/src/hotspot/share/gc/shared/classUnloadingContext.hpp b/src/hotspot/share/gc/shared/classUnloadingContext.hpp
new file mode 100644
index 00000000000..42a8f764fa4
--- /dev/null
+++ b/src/hotspot/share/gc/shared/classUnloadingContext.hpp
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHARED_CLASSUNLOADINGCONTEXT_HPP
+#define SHARE_GC_SHARED_CLASSUNLOADINGCONTEXT_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/growableArray.hpp"
+
+class ClassLoaderData;
+class Klass;
+class nmethod;
+
+class ClassUnloadingContext : public CHeapObj<mtGC> {
+  static ClassUnloadingContext* _context;
+
+  ClassLoaderData* volatile _cld_head;
+
+  const uint _num_nmethod_unlink_workers;
+
+  using NMethodSet = GrowableArrayCHeap<nmethod*, mtGC>;
+  NMethodSet** _unlinked_nmethods;
+
+  bool _lock_codeblob_free_separately;
+
+public:
+  static ClassUnloadingContext* context() { assert(_context != nullptr, "context not set"); return _context; }
+
+  // Num_nmethod_unlink_workers configures the maximum numbers of threads unlinking
+  // nmethods.
+  // lock_codeblob_free_separately determines whether freeing the code blobs takes
+  // the CodeCache_lock during the whole operation (=false) or per code blob
+  // free operation (=true).
+ ClassUnloadingContext(uint num_nmethod_unlink_workers, + bool lock_codeblob_free_separately); + ~ClassUnloadingContext(); + + bool has_unloaded_classes() const; + + void register_unloading_class_loader_data(ClassLoaderData* cld); + void purge_class_loader_data(); + + void classes_unloading_do(void f(Klass* const)); + + // Register unloading nmethods, potentially in parallel. + void register_unlinked_nmethod(nmethod* nm); + void purge_nmethods(); + void free_code_blobs(); + + void purge_and_free_nmethods() { + purge_nmethods(); + free_code_blobs(); + } +}; + +#endif // SHARE_GC_SHARED_CLASSUNLOADINGCONTEXT_HPP diff --git a/src/hotspot/share/gc/shared/genCollectedHeap.cpp b/src/hotspot/share/gc/shared/genCollectedHeap.cpp index 36f229ab817..0854a4776f0 100644 --- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp +++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp @@ -36,6 +36,7 @@ #include "gc/serial/markSweep.hpp" #include "gc/shared/adaptiveSizePolicy.hpp" #include "gc/shared/cardTableBarrierSet.hpp" +#include "gc/shared/classUnloadingContext.hpp" #include "gc/shared/collectedHeap.inline.hpp" #include "gc/shared/collectorCounters.hpp" #include "gc/shared/continuationGCSupport.inline.hpp" @@ -555,6 +556,9 @@ void GenCollectedHeap::do_collection(bool full, CodeCache::on_gc_marking_cycle_start(); + ClassUnloadingContext ctx(1 /* num_nmethod_unlink_workers */, + false /* lock_codeblob_free_separately */); + collect_generation(_old_gen, full, size, @@ -570,7 +574,7 @@ void GenCollectedHeap::do_collection(bool full, _young_gen->compute_new_size(); // Delete metaspaces for unloaded class loaders and clean up loader_data graph - ClassLoaderDataGraph::purge(/*at_safepoint*/true); + ClassLoaderDataGraph::purge(true /* at_safepoint */); DEBUG_ONLY(MetaspaceUtils::verify();) // Need to clear claim bits for the next mark. 
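The boolean the collectors pass to the context is a latency/throughput knob: the serial and parallel STW collectors hold CodeCache_lock once around the whole batch (false), while the concurrent collectors in the following diffs (Shenandoah, X/Z) free blob by blob (true) so lock hold times stay bounded during concurrent unloading. The duplicated loop in ClassUnloadingContext::free_code_blobs() above implements exactly this choice; a reduced model, where Blob, code_cache_mutex and release are stand-ins rather than HotSpot names:

    #include <mutex>
    #include <vector>

    struct Blob { /* ... */ };
    std::mutex code_cache_mutex;
    void release(Blob*) { /* return storage to the code heap */ }

    void free_all(std::vector<Blob*>& blobs, bool lock_separately) {
      if (lock_separately) {
        // Concurrent unloading: bounded lock hold time per blob.
        for (Blob* b : blobs) {
          std::lock_guard<std::mutex> g(code_cache_mutex);
          release(b);
        }
      } else {
        // Safepoint unloading: one acquisition, less locking overhead.
        std::lock_guard<std::mutex> g(code_cache_mutex);
        for (Blob* b : blobs) {
          release(b);
        }
      }
    }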
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp b/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp index 92d447258f2..c5b6d787b95 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahCodeRoots.cpp @@ -26,6 +26,7 @@ #include "code/codeCache.hpp" #include "code/icBuffer.hpp" #include "code/nmethod.hpp" +#include "gc/shared/classUnloadingContext.hpp" #include "gc/shenandoah/shenandoahClosures.inline.hpp" #include "gc/shenandoah/shenandoahEvacOOMHandler.inline.hpp" #include "gc/shenandoah/shenandoahHeap.inline.hpp" @@ -235,7 +236,7 @@ void ShenandoahCodeRoots::unlink(WorkerThreads* workers, bool unloading_occurred void ShenandoahCodeRoots::purge() { assert(ShenandoahHeap::heap()->unload_classes(), "Only when running concurrent class unloading"); - CodeCache::flush_unlinked_nmethods(); + ClassUnloadingContext::context()->purge_and_free_nmethods(); } ShenandoahCodeRootsIterator::ShenandoahCodeRootsIterator() : diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index 2333e4e21bd..2e6ac1c356a 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -27,6 +27,7 @@ #include "memory/allocation.hpp" #include "memory/universe.hpp" +#include "gc/shared/classUnloadingContext.hpp" #include "gc/shared/gcArguments.hpp" #include "gc/shared/gcTimer.hpp" #include "gc/shared/gcTraceTime.inline.hpp" @@ -1767,6 +1768,9 @@ void ShenandoahHeap::stop() { void ShenandoahHeap::stw_unload_classes(bool full_gc) { if (!unload_classes()) return; + ClassUnloadingContext ctx(_workers->active_workers(), + false /* lock_codeblob_free_separately */); + // Unload classes and purge SystemDictionary. { ShenandoahPhaseTimings::Phase phase = full_gc ? @@ -1784,14 +1788,14 @@ void ShenandoahHeap::stw_unload_classes(bool full_gc) { _workers->run_task(&unlink_task); } // Release unloaded nmethods's memory. - CodeCache::flush_unlinked_nmethods(); + ClassUnloadingContext::context()->purge_and_free_nmethods(); } { ShenandoahGCPhase phase(full_gc ? 
ShenandoahPhaseTimings::full_gc_purge_cldg : ShenandoahPhaseTimings::degen_gc_purge_cldg); - ClassLoaderDataGraph::purge(/*at_safepoint*/true); + ClassLoaderDataGraph::purge(true /* at_safepoint */); } // Resize and verify metaspace MetaspaceGC::compute_new_size(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahUnload.cpp b/src/hotspot/share/gc/shenandoah/shenandoahUnload.cpp index afd10efdfdd..81eee97f25f 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahUnload.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahUnload.cpp @@ -30,6 +30,7 @@ #include "code/codeCache.hpp" #include "code/dependencyContext.hpp" #include "gc/shared/gcBehaviours.hpp" +#include "gc/shared/classUnloadingContext.hpp" #include "gc/shared/suspendibleThreadSet.hpp" #include "gc/shenandoah/shenandoahNMethod.inline.hpp" #include "gc/shenandoah/shenandoahLock.hpp" @@ -138,6 +139,9 @@ void ShenandoahUnload::unload() { assert(ClassUnloading, "Filtered by caller"); assert(heap->is_concurrent_weak_root_in_progress(), "Filtered by caller"); + ClassUnloadingContext ctx(heap->workers()->active_workers(), + true /* lock_codeblob_free_separately */); + // Unlink stale metadata and nmethods { ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_class_unload_unlink); @@ -181,7 +185,7 @@ void ShenandoahUnload::unload() { { ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_class_unload_purge_cldg); - ClassLoaderDataGraph::purge(/*at_safepoint*/false); + ClassLoaderDataGraph::purge(false /* at_safepoint */); } { diff --git a/src/hotspot/share/gc/x/xHeap.cpp b/src/hotspot/share/gc/x/xHeap.cpp index a242a8063be..65da4e1c983 100644 --- a/src/hotspot/share/gc/x/xHeap.cpp +++ b/src/hotspot/share/gc/x/xHeap.cpp @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "classfile/classLoaderDataGraph.hpp" #include "gc/shared/gc_globals.hpp" +#include "gc/shared/classUnloadingContext.hpp" #include "gc/shared/locationPrinter.hpp" #include "gc/shared/tlab_globals.hpp" #include "gc/x/xAddress.inline.hpp" @@ -320,6 +321,9 @@ void XHeap::process_non_strong_references() { // Process weak roots _weak_roots_processor.process_weak_roots(); + ClassUnloadingContext ctx(_workers.active_workers(), + true /* lock_codeblob_free_separately */); + // Unlink stale metadata and nmethods _unload.unlink(); diff --git a/src/hotspot/share/gc/x/xNMethod.cpp b/src/hotspot/share/gc/x/xNMethod.cpp index d86828aa847..613e1908502 100644 --- a/src/hotspot/share/gc/x/xNMethod.cpp +++ b/src/hotspot/share/gc/x/xNMethod.cpp @@ -27,6 +27,7 @@ #include "code/icBuffer.hpp" #include "gc/shared/barrierSet.hpp" #include "gc/shared/barrierSetNMethod.hpp" +#include "gc/shared/classUnloadingContext.hpp" #include "gc/shared/suspendibleThreadSet.hpp" #include "gc/x/xBarrier.inline.hpp" #include "gc/x/xGlobals.hpp" @@ -362,5 +363,5 @@ void XNMethod::unlink(XWorkers* workers, bool unloading_occurred) { } void XNMethod::purge() { - CodeCache::flush_unlinked_nmethods(); + ClassUnloadingContext::context()->purge_and_free_nmethods(); } diff --git a/src/hotspot/share/gc/z/zGeneration.cpp b/src/hotspot/share/gc/z/zGeneration.cpp index 1b4afd4eefb..bb0653f72ea 100644 --- a/src/hotspot/share/gc/z/zGeneration.cpp +++ b/src/hotspot/share/gc/z/zGeneration.cpp @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "classfile/classLoaderDataGraph.hpp" #include "code/nmethod.hpp" +#include "gc/shared/classUnloadingContext.hpp" #include "gc/shared/gcLocker.hpp" #include "gc/shared/gcVMOperations.hpp" #include "gc/shared/isGCActiveMark.hpp" @@ -1320,6 +1321,9 @@ void 
ZGenerationOld::process_non_strong_references() { // Process weak roots _weak_roots_processor.process_weak_roots(); + ClassUnloadingContext ctx(_workers.active_workers(), + true /* lock_codeblob_free_separately */); + // Unlink stale metadata and nmethods _unload.unlink(); diff --git a/src/hotspot/share/gc/z/zNMethod.cpp b/src/hotspot/share/gc/z/zNMethod.cpp index e4c24660365..71d514face1 100644 --- a/src/hotspot/share/gc/z/zNMethod.cpp +++ b/src/hotspot/share/gc/z/zNMethod.cpp @@ -28,6 +28,7 @@ #include "code/icBuffer.hpp" #include "gc/shared/barrierSet.hpp" #include "gc/shared/barrierSetNMethod.hpp" +#include "gc/shared/classUnloadingContext.hpp" #include "gc/shared/suspendibleThreadSet.hpp" #include "gc/z/zAddress.hpp" #include "gc/z/zArray.inline.hpp" @@ -443,5 +444,5 @@ void ZNMethod::unlink(ZWorkers* workers, bool unloading_occurred) { } void ZNMethod::purge() { - CodeCache::flush_unlinked_nmethods(); + ClassUnloadingContext::context()->purge_and_free_nmethods(); } From 835d016ade123a053c283589da472a96a4afae1e Mon Sep 17 00:00:00 2001 From: Gui Cao Date: Sun, 5 May 2024 04:27:07 +0000 Subject: [PATCH 05/19] 8330094: RISC-V: Save and restore FRM in the call stub Reviewed-by: fyang Backport-of: b0496096dc8d7dc7acf28aa006141a3ecea446de --- src/hotspot/cpu/riscv/frame_riscv.hpp | 2 +- src/hotspot/cpu/riscv/riscv_v.ad | 9 ------- src/hotspot/cpu/riscv/stubGenerator_riscv.cpp | 27 ++++++++++++++++--- 3 files changed, 25 insertions(+), 13 deletions(-) diff --git a/src/hotspot/cpu/riscv/frame_riscv.hpp b/src/hotspot/cpu/riscv/frame_riscv.hpp index ae6242a75bd..74425d23d7b 100644 --- a/src/hotspot/cpu/riscv/frame_riscv.hpp +++ b/src/hotspot/cpu/riscv/frame_riscv.hpp @@ -131,7 +131,7 @@ // Entry frames // n.b. these values are determined by the layout defined in // stubGenerator for the Java call stub - entry_frame_after_call_words = 34, + entry_frame_after_call_words = 35, entry_frame_call_wrapper_offset = -10, // we don't need a save area diff --git a/src/hotspot/cpu/riscv/riscv_v.ad b/src/hotspot/cpu/riscv/riscv_v.ad index 06826ecf046..145453bb538 100644 --- a/src/hotspot/cpu/riscv/riscv_v.ad +++ b/src/hotspot/cpu/riscv/riscv_v.ad @@ -2942,7 +2942,6 @@ instruct vloadcon(vReg dst, immI0 src) %{ __ vsetvli_helper(bt, Matcher::vector_length(this)); __ vid_v(as_VectorRegister($dst$$reg)); if (is_floating_point_type(bt)) { - __ csrwi(CSR_FRM, C2_MacroAssembler::rne); __ vfcvt_f_x_v(as_VectorRegister($dst$$reg), as_VectorRegister($dst$$reg)); } %} @@ -3156,7 +3155,6 @@ instruct vcvtBtoX(vReg dst, vReg src) %{ if (is_floating_point_type(bt)) { __ integer_extend_v(as_VectorRegister($dst$$reg), bt == T_FLOAT ? T_INT : T_LONG, Matcher::vector_length(this), as_VectorRegister($src$$reg), T_BYTE); - __ csrwi(CSR_FRM, C2_MacroAssembler::rne); __ vfcvt_f_x_v(as_VectorRegister($dst$$reg), as_VectorRegister($dst$$reg)); } else { __ integer_extend_v(as_VectorRegister($dst$$reg), bt, @@ -3203,7 +3201,6 @@ instruct vcvtStoX_fp_extend(vReg dst, vReg src) %{ __ integer_extend_v(as_VectorRegister($dst$$reg), (bt == T_FLOAT ? 
T_INT : T_LONG), Matcher::vector_length(this), as_VectorRegister($src$$reg), T_SHORT); __ vsetvli_helper(bt, Matcher::vector_length(this)); - __ csrwi(CSR_FRM, C2_MacroAssembler::rne); __ vfcvt_f_x_v(as_VectorRegister($dst$$reg), as_VectorRegister($dst$$reg)); %} ins_pipe(pipe_slow); @@ -3242,7 +3239,6 @@ instruct vcvtItoF(vReg dst, vReg src) %{ format %{ "vcvtItoF $dst, $src" %} ins_encode %{ __ vsetvli_helper(T_FLOAT, Matcher::vector_length(this)); - __ csrwi(CSR_FRM, C2_MacroAssembler::rne); __ vfcvt_f_x_v(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg)); %} ins_pipe(pipe_slow); @@ -3255,7 +3251,6 @@ instruct vcvtItoD(vReg dst, vReg src) %{ format %{ "vcvtItoD $dst, $src" %} ins_encode %{ __ vsetvli_helper(T_INT, Matcher::vector_length(this), Assembler::mf2); - __ csrwi(CSR_FRM, C2_MacroAssembler::rne); __ vfwcvt_f_x_v(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg)); %} ins_pipe(pipe_slow); @@ -3283,7 +3278,6 @@ instruct vcvtLtoF(vReg dst, vReg src) %{ format %{ "vcvtLtoF $dst, $src" %} ins_encode %{ __ vsetvli_helper(T_FLOAT, Matcher::vector_length(this), Assembler::mf2); - __ csrwi(CSR_FRM, C2_MacroAssembler::rne); __ vfncvt_f_x_w(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg)); %} ins_pipe(pipe_slow); @@ -3295,7 +3289,6 @@ instruct vcvtLtoD(vReg dst, vReg src) %{ format %{ "vcvtLtoD $dst, $src" %} ins_encode %{ __ vsetvli_helper(T_DOUBLE, Matcher::vector_length(this)); - __ csrwi(CSR_FRM, C2_MacroAssembler::rne); __ vfcvt_f_x_v(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg)); %} ins_pipe(pipe_slow); @@ -3353,7 +3346,6 @@ instruct vcvtFtoD(vReg dst, vReg src) %{ format %{ "vcvtFtoD $dst, $src" %} ins_encode %{ __ vsetvli_helper(T_FLOAT, Matcher::vector_length(this), Assembler::mf2); - __ csrwi(CSR_FRM, C2_MacroAssembler::rne); __ vfwcvt_f_f_v(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg)); %} ins_pipe(pipe_slow); @@ -3401,7 +3393,6 @@ instruct vcvtDtoF(vReg dst, vReg src) %{ format %{ "vcvtDtoF $dst, $src" %} ins_encode %{ __ vsetvli_helper(T_FLOAT, Matcher::vector_length(this), Assembler::mf2); - __ csrwi(CSR_FRM, C2_MacroAssembler::rne); __ vfncvt_f_f_w(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg)); %} ins_pipe(pipe_slow); diff --git a/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp b/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp index aab65019619..8c5e1c097ef 100644 --- a/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp +++ b/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp @@ -126,8 +126,9 @@ class StubGenerator: public StubCodeGenerator { // [ return_from_Java ] <--- sp // [ argument word n ] // ... 
- // -34 [ argument word 1 ] - // -33 [ saved f27 ] <--- sp_after_call + // -35 [ argument word 1 ] + // -34 [ saved FRM in Floating-point Control and Status Register ] <--- sp_after_call + // -33 [ saved f27 ] // -32 [ saved f26 ] // -31 [ saved f25 ] // -30 [ saved f24 ] @@ -164,8 +165,9 @@ class StubGenerator: public StubCodeGenerator { // Call stub stack layout word offsets from fp enum call_stub_layout { - sp_after_call_off = -33, + sp_after_call_off = -34, + frm_off = sp_after_call_off, f27_off = -33, f26_off = -32, f25_off = -31, @@ -213,6 +215,7 @@ class StubGenerator: public StubCodeGenerator { const Address sp_after_call (fp, sp_after_call_off * wordSize); + const Address frm_save (fp, frm_off * wordSize); const Address call_wrapper (fp, call_wrapper_off * wordSize); const Address result (fp, result_off * wordSize); const Address result_type (fp, result_type_off * wordSize); @@ -295,6 +298,16 @@ class StubGenerator: public StubCodeGenerator { __ fsd(f26, f26_save); __ fsd(f27, f27_save); + __ frrm(t0); + __ sd(t0, frm_save); + // Set frm to the state we need. We do want Round to Nearest. We + // don't want non-IEEE rounding modes. + Label skip_fsrmi; + guarantee(__ RoundingMode::rne == 0, "must be"); + __ beqz(t0, skip_fsrmi); + __ fsrmi(__ RoundingMode::rne); + __ bind(skip_fsrmi); + // install Java thread in global register now we have saved // whatever value it held __ mv(xthread, c_rarg7); @@ -414,6 +427,14 @@ class StubGenerator: public StubCodeGenerator { __ ld(x9, x9_save); + // restore frm + Label skip_fsrm; + __ ld(t0, frm_save); + __ frrm(t1); + __ beq(t0, t1, skip_fsrm); + __ fsrm(t0); + __ bind(skip_fsrm); + __ ld(c_rarg0, call_wrapper); __ ld(c_rarg1, result); __ ld(c_rarg2, result_type); From d459ae9dc922d045ea38837f260f60e685e3b368 Mon Sep 17 00:00:00 2001 From: Joachim Kern Date: Mon, 6 May 2024 06:30:01 +0000 Subject: [PATCH 06/19] 8329850: [AIX] Allow loading of different members of same shared library archive Backport-of: cfd19f017681a7aded67937c5132263bbcc7be6f --- src/hotspot/os/aix/porting_aix.cpp | 31 +++++++++++++++++++++++------- 1 file changed, 24 insertions(+), 7 deletions(-) diff --git a/src/hotspot/os/aix/porting_aix.cpp b/src/hotspot/os/aix/porting_aix.cpp index 68233097b49..630bdf22c44 100644 --- a/src/hotspot/os/aix/porting_aix.cpp +++ b/src/hotspot/os/aix/porting_aix.cpp @@ -906,10 +906,11 @@ struct TableLocker { ~TableLocker() { pthread_mutex_unlock(&g_handletable_mutex); } }; struct handletableentry{ - void* handle; - ino64_t inode; - dev64_t devid; - uint refcount; + void* handle; + ino64_t inode; + dev64_t devid; + char* member; + uint refcount; }; constexpr unsigned init_num_handles = 128; static unsigned max_handletable = 0; @@ -1049,6 +1050,14 @@ void* Aix_dlopen(const char* filename, int Flags, const char** error_report) { return nullptr; } else { + // extract member string if exist duplicate it and store pointer of it + // if member does not exist store nullptr + char* member = nullptr; + const char* substr; + if (filename[strlen(filename) - 1] == ')' && (substr = strrchr(filename, '('))) { + member = os::strdup(substr); + } + unsigned i = 0; TableLocker lock; // check if library belonging to filename is already loaded. 
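
For context on the hunk above (an editorial aside, not part of the patch): on AIX a dlopen path may name a member inside a shared-library archive, e.g. libfoo.a(shr_64.o), and the change keys the handle table on that member suffix so different members of the same archive get distinct handles. Below is a minimal standalone C++ sketch of the suffix extraction; extract_member and the sample paths are invented for this illustration, and the patch itself uses os::strdup and stores the copy in the handle table entry:

    #include <cstdio>
    #include <cstring>
    #include <cstdlib>

    // Return a copy (POSIX strdup) of the trailing "(member)" part of an AIX
    // library path, or nullptr when the path has no archive-member notation.
    // Mirrors the condition used in the hunk above.
    static char* extract_member(const char* filename) {
      size_t len = strlen(filename);
      const char* open_paren;
      if (len > 0 && filename[len - 1] == ')' &&
          (open_paren = strrchr(filename, '(')) != nullptr) {
        return strdup(open_paren);   // e.g. "(shr_64.o)"
      }
      return nullptr;
    }

    int main() {
      const char* paths[] = { "/usr/lib/libfoo.a(shr_64.o)", "/usr/lib/libbar.so" };
      for (const char* p : paths) {
        char* member = extract_member(p);
        std::printf("%s -> %s\n", p, member != nullptr ? member : "(no member)");
        free(member);
      }
      return 0;
    }
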
@@ -1056,7 +1065,10 @@ void* Aix_dlopen(const char* filename, int Flags, const char** error_report) { for (i = 0; i < g_handletable_used; i++) { if ((p_handletable + i)->handle && (p_handletable + i)->inode == libstat.st_ino && - (p_handletable + i)->devid == libstat.st_dev) { + (p_handletable + i)->devid == libstat.st_dev && + (((p_handletable + i)->member == nullptr && member == nullptr) || + ((p_handletable + i)->member != nullptr && member != nullptr && + strcmp((p_handletable + i)->member, member) == 0))) { (p_handletable + i)->refcount++; result = (p_handletable + i)->handle; break; @@ -1084,6 +1096,7 @@ void* Aix_dlopen(const char* filename, int Flags, const char** error_report) { (p_handletable + i)->handle = result; (p_handletable + i)->inode = libstat.st_ino; (p_handletable + i)->devid = libstat.st_dev; + (p_handletable + i)->member = member; (p_handletable + i)->refcount = 1; } else { @@ -1131,7 +1144,7 @@ bool os::pd_dll_unload(void* libhandle, char* ebuf, int ebuflen) { // while in the second case we simply have to nag. res = (0 == ::dlclose(libhandle)); if (!res) { - // error analysis when dlopen fails + // error analysis when dlclose fails const char* error_report = ::dlerror(); if (error_report == nullptr) { error_report = "dlerror returned no error description"; @@ -1145,7 +1158,11 @@ bool os::pd_dll_unload(void* libhandle, char* ebuf, int ebuflen) { if (i < g_handletable_used) { if (res) { // First case: libhandle was found (with refcount == 0) and ::dlclose successful, - // so delete entry from array + // so delete entry from array (do not forget to free member-string space if member exists) + if ((p_handletable + i)->member) { + os::free((p_handletable + i)->member); + (p_handletable + i)->member = nullptr; + } g_handletable_used--; // If the entry was the last one of the array, the previous g_handletable_used-- // is sufficient to remove the entry from the array, otherwise we move the last From 021372c76d583e29cf2abf4b932e31321adf4295 Mon Sep 17 00:00:00 2001 From: Aleksey Shipilev Date: Mon, 6 May 2024 08:07:34 +0000 Subject: [PATCH 07/19] 8328703: Illegal accesses in Java_jdk_internal_org_jline_terminal_impl_jna_linux_CLibraryImpl_ioctl0 Backport-of: 87e864bf21d71daae4e001ec4edbb4ef1f60c36d --- .../linux/native/lible/CLibrary.cpp | 16 ++++++++-------- .../macosx/native/lible/CLibrary.cpp | 16 ++++++++-------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/src/jdk.internal.le/linux/native/lible/CLibrary.cpp b/src/jdk.internal.le/linux/native/lible/CLibrary.cpp index 7815fe0cd2e..3215e1f5013 100644 --- a/src/jdk.internal.le/linux/native/lible/CLibrary.cpp +++ b/src/jdk.internal.le/linux/native/lible/CLibrary.cpp @@ -150,20 +150,20 @@ JNIEXPORT void JNICALL Java_jdk_internal_org_jline_terminal_impl_jna_linux_CLibr (JNIEnv *env, jobject, jint fd, jint cmd, jobject data) { winsize ws; - ws.ws_row = env->GetIntField(data, ws_row); - ws.ws_col = env->GetIntField(data, ws_col); - ws.ws_xpixel = env->GetIntField(data, ws_xpixel); - ws.ws_ypixel = env->GetIntField(data, ws_ypixel); + ws.ws_row = env->GetShortField(data, ws_row); + ws.ws_col = env->GetShortField(data, ws_col); + ws.ws_xpixel = env->GetShortField(data, ws_xpixel); + ws.ws_ypixel = env->GetShortField(data, ws_ypixel); if (ioctl(fd, cmd, &ws) != 0) { throw_errno(env); return ; } - env->SetIntField(data, ws_row, ws.ws_row); - env->SetIntField(data, ws_col, ws.ws_col); - env->SetIntField(data, ws_xpixel, ws.ws_xpixel); - env->SetIntField(data, ws_ypixel, ws.ws_ypixel); + env->SetShortField(data, 
ws_row, ws.ws_row); + env->SetShortField(data, ws_col, ws.ws_col); + env->SetShortField(data, ws_xpixel, ws.ws_xpixel); + env->SetShortField(data, ws_ypixel, ws.ws_ypixel); } /* diff --git a/src/jdk.internal.le/macosx/native/lible/CLibrary.cpp b/src/jdk.internal.le/macosx/native/lible/CLibrary.cpp index 760e090f5e8..3bc481f4afa 100644 --- a/src/jdk.internal.le/macosx/native/lible/CLibrary.cpp +++ b/src/jdk.internal.le/macosx/native/lible/CLibrary.cpp @@ -154,20 +154,20 @@ JNIEXPORT void JNICALL Java_jdk_internal_org_jline_terminal_impl_jna_osx_CLibrar (JNIEnv *env, jobject, jint fd, jlong cmd, jobject data) { winsize ws; - ws.ws_row = env->GetIntField(data, ws_row); - ws.ws_col = env->GetIntField(data, ws_col); - ws.ws_xpixel = env->GetIntField(data, ws_xpixel); - ws.ws_ypixel = env->GetIntField(data, ws_ypixel); + ws.ws_row = env->GetShortField(data, ws_row); + ws.ws_col = env->GetShortField(data, ws_col); + ws.ws_xpixel = env->GetShortField(data, ws_xpixel); + ws.ws_ypixel = env->GetShortField(data, ws_ypixel); if (ioctl(fd, cmd, &ws) != 0) { throw_errno(env); return ; } - env->SetIntField(data, ws_row, ws.ws_row); - env->SetIntField(data, ws_col, ws.ws_col); - env->SetIntField(data, ws_xpixel, ws.ws_xpixel); - env->SetIntField(data, ws_ypixel, ws.ws_ypixel); + env->SetShortField(data, ws_row, ws.ws_row); + env->SetShortField(data, ws_col, ws.ws_col); + env->SetShortField(data, ws_xpixel, ws.ws_xpixel); + env->SetShortField(data, ws_ypixel, ws.ws_ypixel); } /* From 3770c28c05f279ed1a3854c6f70e7e1bbee6ff0e Mon Sep 17 00:00:00 2001 From: SendaoYan Date: Mon, 6 May 2024 08:13:04 +0000 Subject: [PATCH 08/19] 8331331: :tier1 target explanation in doc/testing.md is incorrect Backport-of: 04271dfe7a262379944e2a2cf83a98a3a1b78a74 --- doc/testing.html | 5 +++-- doc/testing.md | 10 +++++----- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/doc/testing.html b/doc/testing.html index 19d937df1ea..f13400920c6 100644 --- a/doc/testing.html +++ b/doc/testing.html @@ -179,8 +179,9 @@

Test selection

 The test specifications given in TEST is parsed into fully qualified
 test descriptors, which clearly and unambigously show which tests
 will be run. As an example, :tier1 will expand
-to
-jtreg:$(TOPDIR)/test/hotspot/jtreg:tier1 jtreg:$(TOPDIR)/test/jdk:tier1 jtreg:$(TOPDIR)/test/langtools:tier1 jtreg:$(TOPDIR)/test/nashorn:tier1 jtreg:$(TOPDIR)/test/jaxp:tier1.
+to include all subcomponent test directories that define `tier1`,
+for example:
+jtreg:$(TOPDIR)/test/hotspot/jtreg:tier1 jtreg:$(TOPDIR)/test/jdk:tier1 jtreg:$(TOPDIR)/test/langtools:tier1 ....
 You can always submit a list of fully qualified test descriptors in
 the TEST variable if you want to shortcut the parser.

Common Test Groups

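As a concrete usage illustration of the reworded paragraph above (the command form is the one this document already uses; the concrete selections are examples, not part of the patch): the first invocation relies on the parser to expand the group across every subcomponent that defines tier1, while the second names a single fully qualified descriptor and so shortcuts the parser:

    make test TEST=":tier1"
    make test TEST="jtreg:$(TOPDIR)/test/hotspot/jtreg:tier1"
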
diff --git a/doc/testing.md b/doc/testing.md index 9756a691a8c..bc154e40ae7 100644 --- a/doc/testing.md +++ b/doc/testing.md @@ -102,11 +102,11 @@ test runs, the `test TEST="x"` solution needs to be used. The test specifications given in `TEST` is parsed into fully qualified test descriptors, which clearly and unambigously show which tests will be run. As an -example, `:tier1` will expand to `jtreg:$(TOPDIR)/test/hotspot/jtreg:tier1 -jtreg:$(TOPDIR)/test/jdk:tier1 jtreg:$(TOPDIR)/test/langtools:tier1 -jtreg:$(TOPDIR)/test/nashorn:tier1 jtreg:$(TOPDIR)/test/jaxp:tier1`. You can -always submit a list of fully qualified test descriptors in the `TEST` variable -if you want to shortcut the parser. +example, `:tier1` will expand to include all subcomponent test directories +that define `tier1`, for example: `jtreg:$(TOPDIR)/test/hotspot/jtreg:tier1 +jtreg:$(TOPDIR)/test/jdk:tier1 jtreg:$(TOPDIR)/test/langtools:tier1 ...`. You +can always submit a list of fully qualified test descriptors in the `TEST` +variable if you want to shortcut the parser. ### Common Test Groups From abbad928945d0679745e0997d804c44fe394d3db Mon Sep 17 00:00:00 2001 From: Amit Kumar Date: Mon, 6 May 2024 08:15:30 +0000 Subject: [PATCH 09/19] 8326201: [S390] Need to bailout cleanly if creation of stubs fails when code cache is out of space Backport-of: d5f3d5c8cc347ae384dea25b1a55ed57204d1af3 --- src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp | 5 +++-- src/hotspot/cpu/s390/s390.ad | 4 +++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp b/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp index 200f7ee978d..b7f1d360568 100644 --- a/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp +++ b/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp @@ -1,6 +1,6 @@ /* - * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2016, 2018 SAP SE. All rights reserved. + * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2016, 2024 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -428,6 +428,7 @@ void ArrayCopyStub::emit_code(LIR_Assembler* ce) { "must be aligned"); ce->emit_static_call_stub(); + CHECK_BAILOUT(); // Prepend each BRASL with a nop. __ relocate(relocInfo::static_call_type); diff --git a/src/hotspot/cpu/s390/s390.ad b/src/hotspot/cpu/s390/s390.ad index e1d3df97edf..32e5323b6b2 100644 --- a/src/hotspot/cpu/s390/s390.ad +++ b/src/hotspot/cpu/s390/s390.ad @@ -1,6 +1,6 @@ // // Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. -// Copyright (c) 2017, 2022 SAP SE. All rights reserved. +// Copyright (c) 2017, 2024 SAP SE. All rights reserved. // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
// // This code is free software; you can redistribute it and/or modify it @@ -1447,6 +1447,7 @@ int HandlerImpl::emit_exception_handler(CodeBuffer &cbuf) { address base = __ start_a_stub(size_exception_handler()); if (base == NULL) { + ciEnv::current()->record_failure("CodeCache is full"); return 0; // CodeBuffer::expand failed } @@ -1468,6 +1469,7 @@ int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) { address base = __ start_a_stub(size_deopt_handler()); if (base == NULL) { + ciEnv::current()->record_failure("CodeCache is full"); return 0; // CodeBuffer::expand failed } From 3ff535969372b5d01517eff3d03528df13bf300e Mon Sep 17 00:00:00 2001 From: Amit Kumar Date: Mon, 6 May 2024 08:17:00 +0000 Subject: [PATCH 10/19] 8330011: [s390x] update block-comments to make code consistent Reviewed-by: lucy Backport-of: 01bda278d6a498ca89c0bc5218680cd51a04e9d3 --- src/hotspot/cpu/s390/downcallLinker_s390.cpp | 34 +++++++++++--------- src/hotspot/cpu/s390/upcallLinker_s390.cpp | 24 +++++++------- 2 files changed, 30 insertions(+), 28 deletions(-) diff --git a/src/hotspot/cpu/s390/downcallLinker_s390.cpp b/src/hotspot/cpu/s390/downcallLinker_s390.cpp index f831da90755..dc443666bae 100644 --- a/src/hotspot/cpu/s390/downcallLinker_s390.cpp +++ b/src/hotspot/cpu/s390/downcallLinker_s390.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2020, Red Hat, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -166,10 +166,10 @@ void DowncallStubGenerator::generate() { locs.set(StubLocations::TARGET_ADDRESS, _abi._scratch2); if (_captured_state_mask != 0) { - __ block_comment("{ _captured_state_mask is set"); + __ block_comment("_captured_state_mask_is_set {"); locs.set_frame_data(StubLocations::CAPTURED_STATE_BUFFER, allocated_frame_size); allocated_frame_size += BytesPerWord; - __ block_comment("} _captured_state_mask is set"); + __ block_comment("} _captured_state_mask_is_set"); } allocated_frame_size = align_up(allocated_frame_size, StackAlignmentInBytes); @@ -184,7 +184,7 @@ void DowncallStubGenerator::generate() { _frame_complete = __ pc() - start; // frame build complete. 
if (_needs_transition) { - __ block_comment("{ thread java2native"); + __ block_comment("thread_java2native {"); __ get_PC(Z_R1_scratch); address the_pc = __ pc(); __ set_last_Java_frame(Z_SP, Z_R1_scratch); @@ -194,18 +194,18 @@ void DowncallStubGenerator::generate() { // State transition __ set_thread_state(_thread_in_native); - __ block_comment("} thread java2native"); + __ block_comment("} thread_java2native"); } - __ block_comment("{ argument shuffle"); + __ block_comment("argument_shuffle {"); arg_shuffle.generate(_masm, shuffle_reg, frame::z_jit_out_preserve_size, _abi._shadow_space_bytes, locs); - __ block_comment("} argument shuffle"); + __ block_comment("} argument_shuffle"); __ call(as_Register(locs.get(StubLocations::TARGET_ADDRESS))); ////////////////////////////////////////////////////////////////////////////// if (_captured_state_mask != 0) { - __ block_comment("{ save thread local"); + __ block_comment("save_thread_local {"); out_reg_spiller.generate_spill(_masm, spill_offset); @@ -216,7 +216,7 @@ void DowncallStubGenerator::generate() { out_reg_spiller.generate_fill(_masm, spill_offset); - __ block_comment("} save thread local"); + __ block_comment("} save_thread_local"); } ////////////////////////////////////////////////////////////////////////////// @@ -227,7 +227,7 @@ void DowncallStubGenerator::generate() { Label L_after_reguard; if (_needs_transition) { - __ block_comment("{ thread native2java"); + __ block_comment("thread_native2java {"); __ set_thread_state(_thread_in_native_trans); if (!UseSystemMemoryBarrier) { @@ -244,14 +244,16 @@ void DowncallStubGenerator::generate() { // change thread state __ set_thread_state(_thread_in_Java); - __ block_comment("reguard stack check"); - __ z_cli(Address(Z_thread, JavaThread::stack_guard_state_offset() + in_ByteSize(sizeof(StackOverflow::StackGuardState) - 1)), - StackOverflow::stack_guard_yellow_reserved_disabled); + __ block_comment("reguard_stack_check {"); + __ z_cli(Address(Z_thread, + JavaThread::stack_guard_state_offset() + in_ByteSize(sizeof(StackOverflow::StackGuardState) - 1)), + StackOverflow::stack_guard_yellow_reserved_disabled); __ z_bre(L_reguard); + __ block_comment("} reguard_stack_check"); __ bind(L_after_reguard); __ reset_last_Java_frame(); - __ block_comment("} thread native2java"); + __ block_comment("} thread_native2java"); } __ pop_frame(); @@ -261,7 +263,7 @@ void DowncallStubGenerator::generate() { ////////////////////////////////////////////////////////////////////////////// if (_needs_transition) { - __ block_comment("{ L_safepoint_poll_slow_path"); + __ block_comment("L_safepoint_poll_slow_path {"); __ bind(L_safepoint_poll_slow_path); // Need to save the native result registers around any runtime calls. @@ -277,7 +279,7 @@ void DowncallStubGenerator::generate() { __ block_comment("} L_safepoint_poll_slow_path"); ////////////////////////////////////////////////////////////////////////////// - __ block_comment("{ L_reguard"); + __ block_comment("L_reguard {"); __ bind(L_reguard); // Need to save the native result registers around any runtime calls. 
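
A note on the convention enforced above (editorial illustration, not part of the patch): opening markers are spelled name { and closing markers } name, so the comments in the emitted disassembly pair up like braces and can be matched mechanically. A self-contained C++ sketch of that pairing, where emit_comment and BlockScope are invented stand-ins for HotSpot's block_comment machinery:

    #include <cstdio>
    #include <string>
    #include <utility>

    // Stand-in for MacroAssembler::block_comment(); prints the marker
    // instead of attaching it to a code buffer.
    static void emit_comment(const std::string& s) {
      std::printf("; %s\n", s.c_str());
    }

    // Emits "name {" on construction and "} name" on destruction, mirroring
    // the brace-style pairing the patch applies to the s390 stubs.
    class BlockScope {
      std::string _name;
     public:
      explicit BlockScope(std::string name) : _name(std::move(name)) {
        emit_comment(_name + " {");
      }
      ~BlockScope() { emit_comment("} " + _name); }
    };

    int main() {
      BlockScope shuffle("argument_shuffle");   // prints "; argument_shuffle {"
      {
        BlockScope receiver("receiver");        // prints "; receiver {"
      }                                         // prints "; } receiver"
      return 0;
    }                                           // prints "; } argument_shuffle"
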
diff --git a/src/hotspot/cpu/s390/upcallLinker_s390.cpp b/src/hotspot/cpu/s390/upcallLinker_s390.cpp index b748ec547cc..93f21ab10b6 100644 --- a/src/hotspot/cpu/s390/upcallLinker_s390.cpp +++ b/src/hotspot/cpu/s390/upcallLinker_s390.cpp @@ -63,7 +63,7 @@ static void preserve_callee_saved_registers(MacroAssembler* _masm, const ABIDesc int offset = reg_save_area_offset; - __ block_comment("{ preserve_callee_saved_regs "); + __ block_comment("preserve_callee_saved_regs {"); for (int i = 0; i < Register::number_of_registers; i++) { Register reg = as_Register(i); // Z_SP saved/restored by prologue/epilogue @@ -82,7 +82,7 @@ static void preserve_callee_saved_registers(MacroAssembler* _masm, const ABIDesc } } - __ block_comment("} preserve_callee_saved_regs "); + __ block_comment("} preserve_callee_saved_regs"); } static void restore_callee_saved_registers(MacroAssembler* _masm, const ABIDescriptor& abi, int reg_save_area_offset) { @@ -92,7 +92,7 @@ static void restore_callee_saved_registers(MacroAssembler* _masm, const ABIDescr int offset = reg_save_area_offset; - __ block_comment("{ restore_callee_saved_regs "); + __ block_comment("restore_callee_saved_regs {"); for (int i = 0; i < Register::number_of_registers; i++) { Register reg = as_Register(i); // Z_SP saved/restored by prologue/epilogue @@ -111,7 +111,7 @@ static void restore_callee_saved_registers(MacroAssembler* _masm, const ABIDescr } } - __ block_comment("} restore_callee_saved_regs "); + __ block_comment("} restore_callee_saved_regs"); } static const int upcall_stub_code_base_size = 1024; // depends on GC (resolve_jobject) @@ -199,7 +199,7 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry, // Java methods won't preserve them, so save them here: preserve_callee_saved_registers(_masm, abi, reg_save_area_offset); - __ block_comment("{ on_entry"); + __ block_comment("on_entry {"); __ load_const_optimized(call_target_address, CAST_FROM_FN_PTR(uint64_t, UpcallLinker::on_entry)); __ z_aghik(Z_ARG1, Z_SP, frame_data_offset); __ call(call_target_address); @@ -207,14 +207,14 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry, __ block_comment("} on_entry"); arg_spiller.generate_fill(_masm, arg_save_area_offset); - __ block_comment("{ argument shuffle"); + __ block_comment("argument_shuffle {"); arg_shuffle.generate(_masm, shuffle_reg, abi._shadow_space_bytes, frame::z_jit_out_preserve_size, locs); - __ block_comment("} argument shuffle"); + __ block_comment("} argument_shuffle"); - __ block_comment("{ receiver "); + __ block_comment("receiver {"); __ load_const_optimized(Z_ARG1, (intptr_t)receiver); __ resolve_jobject(Z_ARG1, Z_tmp_1, Z_tmp_2); - __ block_comment("} receiver "); + __ block_comment("} receiver"); __ load_const_optimized(Z_method, (intptr_t)entry); __ z_stg(Z_method, Address(Z_thread, in_bytes(JavaThread::callee_target_offset()))); @@ -250,7 +250,7 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry, result_spiller.generate_spill(_masm, res_save_area_offset); - __ block_comment("{ on_exit"); + __ block_comment("on_exit {"); __ load_const_optimized(call_target_address, CAST_FROM_FN_PTR(uint64_t, UpcallLinker::on_exit)); __ z_aghik(Z_ARG1, Z_SP, frame_data_offset); __ call(call_target_address); @@ -266,7 +266,7 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry, ////////////////////////////////////////////////////////////////////////////// - __ block_comment("{ exception handler"); + __ block_comment("exception_handler {"); intptr_t 
exception_handler_offset = __ pc() - start; @@ -277,7 +277,7 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry, __ call_c(call_target_address); __ should_not_reach_here(); - __ block_comment("} exception handler"); + __ block_comment("} exception_handler"); _masm->flush(); From 915988281c47bd03d334d98701fcbabda67318a7 Mon Sep 17 00:00:00 2001 From: Amit Kumar Date: Mon, 6 May 2024 08:27:43 +0000 Subject: [PATCH 11/19] 8310513: [s390x] Intrinsify recursive ObjectMonitor locking Reviewed-by: lucy Backport-of: 47df14590c003ccb1607ec0edfe999fcf2aebd86 --- src/hotspot/cpu/s390/macroAssembler_s390.cpp | 110 +++++++++++-------- 1 file changed, 64 insertions(+), 46 deletions(-) diff --git a/src/hotspot/cpu/s390/macroAssembler_s390.cpp b/src/hotspot/cpu/s390/macroAssembler_s390.cpp index 8a56f3e4c2b..2dc3167dea6 100644 --- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp +++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp @@ -1,6 +1,6 @@ /* * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved. - * Copyright (c) 2016, 2023 SAP SE. All rights reserved. + * Copyright (c) 2016, 2024 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -3149,28 +3149,32 @@ void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Regis Register temp = temp2; NearLabel done, object_has_monitor; + const int hdr_offset = oopDesc::mark_offset_in_bytes(); + + assert_different_registers(temp1, temp2, oop, box); + BLOCK_COMMENT("compiler_fast_lock_object {"); // Load markWord from oop into mark. - z_lg(displacedHeader, 0, oop); + z_lg(displacedHeader, hdr_offset, oop); if (DiagnoseSyncOnValueBasedClasses != 0) { - load_klass(Z_R1_scratch, oop); - z_l(Z_R1_scratch, Address(Z_R1_scratch, Klass::access_flags_offset())); + load_klass(temp, oop); + z_l(temp, Address(temp, Klass::access_flags_offset())); assert((JVM_ACC_IS_VALUE_BASED_CLASS & 0xFFFF) == 0, "or change following instruction"); - z_nilh(Z_R1_scratch, JVM_ACC_IS_VALUE_BASED_CLASS >> 16); + z_nilh(temp, JVM_ACC_IS_VALUE_BASED_CLASS >> 16); z_brne(done); } // Handle existing monitor. // The object has an existing monitor iff (mark & monitor_value) != 0. guarantee(Immediate::is_uimm16(markWord::monitor_value), "must be half-word"); - z_lgr(temp, displacedHeader); - z_nill(temp, markWord::monitor_value); - z_brne(object_has_monitor); + z_tmll(displacedHeader, markWord::monitor_value); + z_brnaz(object_has_monitor); if (LockingMode == LM_MONITOR) { // Set NE to indicate 'failure' -> take slow-path + // From loading the markWord, we know that oop != nullptr z_ltgr(oop, oop); z_bru(done); } else if (LockingMode == LM_LEGACY) { @@ -3182,23 +3186,24 @@ void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Regis // Initialize the box (must happen before we update the object mark). z_stg(displacedHeader, BasicLock::displaced_header_offset_in_bytes(), box); - // Memory Fence (in cmpxchgd) - // Compare object markWord with mark and if equal exchange scratch1 with object markWord. - - // If the compare-and-swap succeeded, then we found an unlocked object and we - // have now locked it. - z_csg(displacedHeader, box, 0, oop); + // Compare object markWord with mark and if equal, exchange box with object markWork. + // If the compare-and-swap succeeds, then we found an unlocked object and have now locked it. 
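// (Illustrative aside, not part of this patch: in rough C++ pseudocode the
//  LM_LEGACY fast path emitted below is
//      if (cas(&obj->mark(), displacedHeader, box)) goto done;     // first lock
//      if (((mark - SP) & (~(page_size - 1) | lock_mask)) == 0) {  // own frame,
//        box->displaced_header = 0; goto done;                     // recursive
//      }
//      // otherwise fall through to the slow path with the condition code set
//  z_csg performs the compare-and-swap; z_sgr/z_ngr compute the masked stack
//  distance whose zero/non-zero result z_stg records in the box.)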
+ z_csg(displacedHeader, box, hdr_offset, oop); assert(currentHeader == displacedHeader, "must be same register"); // Identified two registers from z/Architecture. z_bre(done); - // We did not see an unlocked object so try the fast recursive case. - + // We did not see an unlocked object + // currentHeader contains what is currently stored in the oop's markWord. + // We might have a recursive case. Verify by checking if the owner is self. + // To do so, compare the value in the markWord (currentHeader) with the stack pointer. z_sgr(currentHeader, Z_SP); load_const_optimized(temp, (~(os::vm_page_size() - 1) | markWord::lock_mask_in_place)); z_ngr(currentHeader, temp); - // z_brne(done); - // z_release(); + + // result zero: owner is self -> recursive lock. Indicate that by storing 0 in the box. + // result not-zero: attempt failed. We don't hold the lock -> go for slow case. + z_stg(currentHeader/*==0 or not 0*/, BasicLock::displaced_header_offset_in_bytes(), box); z_bru(done); @@ -3208,28 +3213,34 @@ void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Regis z_bru(done); } + bind(object_has_monitor); + Register zero = temp; Register monitor_tagged = displacedHeader; // Tagged with markWord::monitor_value. - bind(object_has_monitor); // The object's monitor m is unlocked iff m->owner is null, // otherwise m->owner may contain a thread or a stack address. - // + // Try to CAS m->owner from null to current thread. - z_lghi(zero, 0); // If m->owner is null, then csg succeeds and sets m->owner=THREAD and CR=EQ. + // Otherwise, register zero is filled with the current owner. + z_lghi(zero, 0); z_csg(zero, Z_thread, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), monitor_tagged); if (LockingMode != LM_LIGHTWEIGHT) { // Store a non-null value into the box. z_stg(box, BasicLock::displaced_header_offset_in_bytes(), box); } -#ifdef ASSERT - z_brne(done); - // We've acquired the monitor, check some invariants. - // Invariant 1: _recursions should be 0. - asm_assert_mem8_is_zero(OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions), monitor_tagged, - "monitor->_recursions should be 0", -1); - z_ltgr(zero, zero); // Set CR=EQ. -#endif + + z_bre(done); // acquired the lock for the first time. + + BLOCK_COMMENT("fast_path_recursive_lock {"); + // Check if we are already the owner (recursive lock) + z_cgr(Z_thread, zero); // owner is stored in zero by "z_csg" above + z_brne(done); // not a recursive lock + + // Current thread already owns the lock. Just increment recursion count. + z_agsi(Address(monitor_tagged, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), 1ll); + z_cgr(zero, zero); // set the CC to EQUAL + BLOCK_COMMENT("} fast_path_recursive_lock"); bind(done); BLOCK_COMMENT("} compiler_fast_lock_object"); @@ -3242,11 +3253,12 @@ void MacroAssembler::compiler_fast_unlock_object(Register oop, Register box, Reg Register displacedHeader = temp1; Register currentHeader = temp2; Register temp = temp1; - Register monitor = temp2; const int hdr_offset = oopDesc::mark_offset_in_bytes(); - Label done, object_has_monitor; + assert_different_registers(temp1, temp2, oop, box); + + Label done, object_has_monitor, not_recursive; BLOCK_COMMENT("compiler_fast_unlock_object {"); @@ -3261,30 +3273,25 @@ void MacroAssembler::compiler_fast_unlock_object(Register oop, Register box, Reg // The object has an existing monitor iff (mark & monitor_value) != 0. 
z_lg(currentHeader, hdr_offset, oop); guarantee(Immediate::is_uimm16(markWord::monitor_value), "must be half-word"); - if (LockingMode == LM_LIGHTWEIGHT) { - z_lgr(temp, currentHeader); - } - z_nill(currentHeader, markWord::monitor_value); - z_brne(object_has_monitor); + + z_tmll(currentHeader, markWord::monitor_value); + z_brnaz(object_has_monitor); if (LockingMode == LM_MONITOR) { // Set NE to indicate 'failure' -> take slow-path z_ltgr(oop, oop); z_bru(done); } else if (LockingMode == LM_LEGACY) { - // Check if it is still a light weight lock, this is true if we see + // Check if it is still a lightweight lock, this is true if we see // the stack address of the basicLock in the markWord of the object // copy box to currentHeader such that csg does not kill it. z_lgr(currentHeader, box); - z_csg(currentHeader, displacedHeader, 0, oop); + z_csg(currentHeader, displacedHeader, hdr_offset, oop); z_bru(done); // csg sets CR as desired. } else { assert(LockingMode == LM_LIGHTWEIGHT, "must be"); - // don't load currentHead again from stack-top after monitor check, as it is possible - // some other thread modified it. - // currentHeader is altered, but it's contents are copied in temp as well - lightweight_unlock(oop, temp, currentHeader, done); + lightweight_unlock(oop, currentHeader, displacedHeader, done); z_bru(done); } @@ -3293,11 +3300,22 @@ void MacroAssembler::compiler_fast_unlock_object(Register oop, Register box, Reg // Handle existing monitor. bind(object_has_monitor); - z_lg(currentHeader, hdr_offset, oop); // CurrentHeader is tagged with monitor_value set. - load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions))); - z_brne(done); - load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); + + z_cg(Z_thread, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner))); z_brne(done); + + BLOCK_COMMENT("fast_path_recursive_unlock {"); + load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions))); + z_bre(not_recursive); // if 0 then jump, it's not recursive locking + + // Recursive inflated unlock + z_agsi(Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), -1ll); + z_cgr(currentHeader, currentHeader); // set the CC to EQUAL + BLOCK_COMMENT("} fast_path_recursive_unlock"); + z_bru(done); + + bind(not_recursive); + load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList))); z_brne(done); load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq))); From 2b858f5bdee5bcb29ef6ecf995400b390a2cb91f Mon Sep 17 00:00:00 2001 From: Aleksey Shipilev Date: Mon, 6 May 2024 08:37:55 +0000 Subject: [PATCH 12/19] 8328938: C2 SuperWord: disable vectorization for large stride and scale Reviewed-by: epeter, simonis Backport-of: 2931458711244e20eb7845a1aefcf6ed4206bce1 --- src/hotspot/share/opto/superword.cpp | 19 ++ .../superword/TestLargeScaleAndStride.java | 253 ++++++++++++++++++ 2 files changed, 272 insertions(+) create mode 100644 test/hotspot/jtreg/compiler/loopopts/superword/TestLargeScaleAndStride.java diff --git a/src/hotspot/share/opto/superword.cpp b/src/hotspot/share/opto/superword.cpp index 6a641a44b32..cb1a2a769fc 100644 --- a/src/hotspot/share/opto/superword.cpp +++ b/src/hotspot/share/opto/superword.cpp @@ -4200,6 +4200,25 @@ SWPointer::SWPointer(MemNode* mem, SuperWord* slp, Node_Stack *nstack, bool anal NOT_PRODUCT(if(_slp->is_trace_alignment()) _tracer.restore_depth();) NOT_PRODUCT(_tracer.ctor_6(mem);) + // 
In the pointer analysis, and especially the AlignVector, analysis we assume that + // stride and scale are not too large. For example, we multiply "scale * stride", + // and assume that this does not overflow the int range. We also take "abs(scale)" + // and "abs(stride)", which would overflow for min_int = -(2^31). Still, we want + // to at least allow small and moderately large stride and scale. Therefore, we + // allow values up to 2^30, which is only a factor 2 smaller than the max/min int. + // Normal performance relevant code will have much lower values. And the restriction + // allows us to keep the rest of the autovectorization code much simpler, since we + // do not have to deal with overflows. + jlong long_scale = _scale; + jlong long_stride = slp->lp()->stride_is_con() ? slp->iv_stride() : 0; + jlong max_val = 1 << 30; + if (abs(long_scale) >= max_val || + abs(long_stride) >= max_val || + abs(long_scale * long_stride) >= max_val) { + assert(!valid(), "adr stride*scale is too large"); + return; + } + _base = base; _adr = adr; assert(valid(), "Usable"); diff --git a/test/hotspot/jtreg/compiler/loopopts/superword/TestLargeScaleAndStride.java b/test/hotspot/jtreg/compiler/loopopts/superword/TestLargeScaleAndStride.java new file mode 100644 index 00000000000..cfb2931d928 --- /dev/null +++ b/test/hotspot/jtreg/compiler/loopopts/superword/TestLargeScaleAndStride.java @@ -0,0 +1,253 @@ +/* + * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test id=vanilla + * @bug 8328938 + * @summary Test autovectorization with large scale and stride + * @modules java.base/jdk.internal.misc + * @library /test/lib / + * @run main compiler.loopopts.superword.TestLargeScaleAndStride + */ + +/* + * @test id=AlignVector + * @bug 8328938 + * @modules java.base/jdk.internal.misc + * @library /test/lib / + * @requires vm.compiler2.enabled + * @run main/othervm -XX:+AlignVector compiler.loopopts.superword.TestLargeScaleAndStride + */ + +package compiler.loopopts.superword; + +import jdk.internal.misc.Unsafe; + +public class TestLargeScaleAndStride { + static final Unsafe UNSAFE = Unsafe.getUnsafe(); + static int RANGE = 100_000; + + public static void main(String[] args) { + byte[] a = new byte[100]; + fill(a); + + byte[] gold1a = a.clone(); + byte[] gold1b = a.clone(); + byte[] gold2a = a.clone(); + byte[] gold2b = a.clone(); + byte[] gold2c = a.clone(); + byte[] gold2d = a.clone(); + byte[] gold3 = a.clone(); + test1a(gold1a); + test1b(gold1b); + test2a(gold2a); + test2b(gold2b); + test2c(gold2c); + test2d(gold2d); + test3(gold3); + + for (int i = 0; i < 100; i++) { + byte[] c = a.clone(); + test1a(c); + verify(c, gold1a); + } + + for (int i = 0; i < 100; i++) { + byte[] c = a.clone(); + test1b(c); + verify(c, gold1b); + } + + for (int i = 0; i < 100; i++) { + byte[] c = a.clone(); + test2a(c); + verify(c, gold2a); + } + + for (int i = 0; i < 100; i++) { + byte[] c = a.clone(); + test2b(c); + verify(c, gold2b); + } + + for (int i = 0; i < 100; i++) { + byte[] c = a.clone(); + test2c(c); + verify(c, gold2c); + } + + for (int i = 0; i < 100; i++) { + byte[] c = a.clone(); + test2d(c); + verify(c, gold2d); + } + + for (int i = 0; i < 100; i++) { + byte[] c = a.clone(); + test3(c); + verify(c, gold3); + } + } + + static void fill(byte[] a) { + for (int i = 0; i < a.length; i++) { + a[i] = (byte)i; + } + } + + static void verify(byte[] a, byte[] b) { + for (int i = 0; i < a.length; i++) { + if (a[i] != b[i]) { + throw new RuntimeException("wrong value: " + i + ": " + a[i] + " != " + b[i]); + } + } + } + + static void test1a(byte[] a) { + int scale = 1 << 31; + for (int i = 0; i < RANGE; i+=2) { + long base = UNSAFE.ARRAY_BYTE_BASE_OFFSET; + // i is a multiple of 2 + // 2 * (1 >> 31) -> overflow to zero + int j = scale * i; // always zero + byte v0 = UNSAFE.getByte(a, base + (int)(j + 0)); + byte v1 = UNSAFE.getByte(a, base + (int)(j + 1)); + byte v2 = UNSAFE.getByte(a, base + (int)(j + 2)); + byte v3 = UNSAFE.getByte(a, base + (int)(j + 3)); + UNSAFE.putByte(a, base + (int)(j + 0), (byte)(v0 + 1)); + UNSAFE.putByte(a, base + (int)(j + 1), (byte)(v1 + 1)); + UNSAFE.putByte(a, base + (int)(j + 2), (byte)(v2 + 1)); + UNSAFE.putByte(a, base + (int)(j + 3), (byte)(v3 + 1)); + } + } + + static void test1b(byte[] a) { + int scale = 1 << 31; + for (int i = RANGE-2; i >= 0; i-=2) { + long base = UNSAFE.ARRAY_BYTE_BASE_OFFSET; + // i is a multiple of 2 + // 2 * (1 >> 31) -> overflow to zero + int j = scale * i; // always zero + byte v0 = UNSAFE.getByte(a, base + (int)(j + 0)); + byte v1 = UNSAFE.getByte(a, base + (int)(j + 1)); + byte v2 = UNSAFE.getByte(a, base + (int)(j + 2)); + byte v3 = UNSAFE.getByte(a, base + (int)(j + 3)); + UNSAFE.putByte(a, base + (int)(j + 0), (byte)(v0 + 1)); + UNSAFE.putByte(a, base + (int)(j + 1), (byte)(v1 + 1)); + UNSAFE.putByte(a, base + (int)(j + 2), (byte)(v2 + 1)); + UNSAFE.putByte(a, base + (int)(j + 3), (byte)(v3 + 1)); + } + } + + static void test2a(byte[] a) { + int scale = 1 << 30; + for (int i = 0; i < 
RANGE; i+=4) { + long base = UNSAFE.ARRAY_BYTE_BASE_OFFSET; + // i is a multiple of 4 + // 4 * (1 >> 30) -> overflow to zero + int j = scale * i; // always zero + byte v0 = UNSAFE.getByte(a, base + (int)(j + 0)); + byte v1 = UNSAFE.getByte(a, base + (int)(j + 1)); + byte v2 = UNSAFE.getByte(a, base + (int)(j + 2)); + byte v3 = UNSAFE.getByte(a, base + (int)(j + 3)); + UNSAFE.putByte(a, base + (int)(j + 0), (byte)(v0 + 1)); + UNSAFE.putByte(a, base + (int)(j + 1), (byte)(v1 + 1)); + UNSAFE.putByte(a, base + (int)(j + 2), (byte)(v2 + 1)); + UNSAFE.putByte(a, base + (int)(j + 3), (byte)(v3 + 1)); + } + } + + + static void test2b(byte[] a) { + int scale = 1 << 30; + for (int i = RANGE-4; i >= 0; i-=4) { + long base = UNSAFE.ARRAY_BYTE_BASE_OFFSET; + // i is a multiple of 4 + // 4 * (1 >> 30) -> overflow to zero + int j = scale * i; // always zero + byte v0 = UNSAFE.getByte(a, base + (int)(j + 0)); + byte v1 = UNSAFE.getByte(a, base + (int)(j + 1)); + byte v2 = UNSAFE.getByte(a, base + (int)(j + 2)); + byte v3 = UNSAFE.getByte(a, base + (int)(j + 3)); + UNSAFE.putByte(a, base + (int)(j + 0), (byte)(v0 + 1)); + UNSAFE.putByte(a, base + (int)(j + 1), (byte)(v1 + 1)); + UNSAFE.putByte(a, base + (int)(j + 2), (byte)(v2 + 1)); + UNSAFE.putByte(a, base + (int)(j + 3), (byte)(v3 + 1)); + } + } + + static void test2c(byte[] a) { + int scale = -(1 << 30); + for (int i = 0; i < RANGE; i+=4) { + long base = UNSAFE.ARRAY_BYTE_BASE_OFFSET; + // i is a multiple of 4 + // 4 * (1 >> 30) -> overflow to zero + int j = scale * i; // always zero + byte v0 = UNSAFE.getByte(a, base + (int)(j + 0)); + byte v1 = UNSAFE.getByte(a, base + (int)(j + 1)); + byte v2 = UNSAFE.getByte(a, base + (int)(j + 2)); + byte v3 = UNSAFE.getByte(a, base + (int)(j + 3)); + UNSAFE.putByte(a, base + (int)(j + 0), (byte)(v0 + 1)); + UNSAFE.putByte(a, base + (int)(j + 1), (byte)(v1 + 1)); + UNSAFE.putByte(a, base + (int)(j + 2), (byte)(v2 + 1)); + UNSAFE.putByte(a, base + (int)(j + 3), (byte)(v3 + 1)); + } + } + + static void test2d(byte[] a) { + int scale = -(1 << 30); + for (int i = RANGE-4; i >= 0; i-=4) { + long base = UNSAFE.ARRAY_BYTE_BASE_OFFSET; + // i is a multiple of 4 + // 4 * (1 >> 30) -> overflow to zero + int j = scale * i; // always zero + byte v0 = UNSAFE.getByte(a, base + (int)(j + 0)); + byte v1 = UNSAFE.getByte(a, base + (int)(j + 1)); + byte v2 = UNSAFE.getByte(a, base + (int)(j + 2)); + byte v3 = UNSAFE.getByte(a, base + (int)(j + 3)); + UNSAFE.putByte(a, base + (int)(j + 0), (byte)(v0 + 1)); + UNSAFE.putByte(a, base + (int)(j + 1), (byte)(v1 + 1)); + UNSAFE.putByte(a, base + (int)(j + 2), (byte)(v2 + 1)); + UNSAFE.putByte(a, base + (int)(j + 3), (byte)(v3 + 1)); + } + } + + static void test3(byte[] a) { + int scale = 1 << 28; + int stride = 1 << 4; + int start = -(1 << 30); + int end = 1 << 30; + for (int i = start; i < end; i+=stride) { + long base = UNSAFE.ARRAY_BYTE_BASE_OFFSET; + int j = scale * i; // always zero + byte v0 = UNSAFE.getByte(a, base + (int)(j + 0)); + byte v1 = UNSAFE.getByte(a, base + (int)(j + 1)); + byte v2 = UNSAFE.getByte(a, base + (int)(j + 2)); + byte v3 = UNSAFE.getByte(a, base + (int)(j + 3)); + UNSAFE.putByte(a, base + (int)(j + 0), (byte)(v0 + 1)); + UNSAFE.putByte(a, base + (int)(j + 1), (byte)(v1 + 1)); + UNSAFE.putByte(a, base + (int)(j + 2), (byte)(v2 + 1)); + UNSAFE.putByte(a, base + (int)(j + 3), (byte)(v3 + 1)); + } + } +} From 16ba673e14038ed19b95e345b823316ff58a2182 Mon Sep 17 00:00:00 2001 From: Christoph Langer Date: Mon, 6 May 2024 08:39:15 +0000 Subject: [PATCH 13/19] 
8331639: [21u]: Bump GHA bootstrap JDK to 21.0.3 Reviewed-by: sgehwolf --- make/conf/github-actions.conf | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/make/conf/github-actions.conf b/make/conf/github-actions.conf index 3a08380a8b6..0a9f21e97c4 100644 --- a/make/conf/github-actions.conf +++ b/make/conf/github-actions.conf @@ -29,17 +29,17 @@ GTEST_VERSION=1.13.0 JTREG_VERSION=7.3.1+1 LINUX_X64_BOOT_JDK_EXT=tar.gz -LINUX_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk20/bdc68b4b9cbc4ebcb30745c85038d91d/36/GPL/openjdk-20_linux-x64_bin.tar.gz -LINUX_X64_BOOT_JDK_SHA256=bb863b2d542976d1ae4b7b81af3e78b1e4247a64644350b552d298d8dc5980dc +LINUX_X64_BOOT_JDK_URL=https://github.com/adoptium/temurin21-binaries/releases/download/jdk-21.0.3%2B9/OpenJDK21U-jdk_x64_linux_hotspot_21.0.3_9.tar.gz +LINUX_X64_BOOT_JDK_SHA256=fffa52c22d797b715a962e6c8d11ec7d79b90dd819b5bc51d62137ea4b22a340 MACOS_X64_BOOT_JDK_EXT=tar.gz -MACOS_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk20/bdc68b4b9cbc4ebcb30745c85038d91d/36/GPL/openjdk-20_macos-x64_bin.tar.gz -MACOS_X64_BOOT_JDK_SHA256=47cf960d9bb89dbe987535a389f7e26c42de7c984ef5108612d77c81aa8cc6a4 +MACOS_X64_BOOT_JDK_URL=https://github.com/adoptium/temurin21-binaries/releases/download/jdk-21.0.3%2B9/OpenJDK21U-jdk_x64_mac_hotspot_21.0.3_9.tar.gz +MACOS_X64_BOOT_JDK_SHA256=f777103aab94330d14a29bd99f3a26d60abbab8e2c375cec9602746096721a7c MACOS_AARCH64_BOOT_JDK_EXT=tar.gz -MACOS_AARCH64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk20/bdc68b4b9cbc4ebcb30745c85038d91d/36/GPL/openjdk-20_macos-aarch64_bin.tar.gz -MACOS_AARCH64_BOOT_JDK_SHA256=d020f5c512c043cfb7119a591bc7e599a5bfd76d866d939f5562891d9db7c9b3 +MACOS_AARCH64_BOOT_JDK_URL=https://github.com/adoptium/temurin21-binaries/releases/download/jdk-21.0.3%2B9/OpenJDK21U-jdk_aarch64_mac_hotspot_21.0.3_9.tar.gz +MACOS_AARCH64_BOOT_JDK_SHA256=b6be6a9568be83695ec6b7cb977f4902f7be47d74494c290bc2a5c3c951e254f WINDOWS_X64_BOOT_JDK_EXT=zip -WINDOWS_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk20/bdc68b4b9cbc4ebcb30745c85038d91d/36/GPL/openjdk-20_windows-x64_bin.zip -WINDOWS_X64_BOOT_JDK_SHA256=c92fae5e42b9aecf444a66c8ec563c652f60b1e231dfdd33a4f5a3e3603058fb +WINDOWS_X64_BOOT_JDK_URL=https://github.com/adoptium/temurin21-binaries/releases/download/jdk-21.0.3%2B9/OpenJDK21U-jdk_x64_windows_hotspot_21.0.3_9.zip +WINDOWS_X64_BOOT_JDK_SHA256=c43a66cff7a403d56c5c5e1ff10d3d5f95961abf80f97f0e35380594909f0e4d From 7a400f2a5945139efaa9323231df83cc592c68fb Mon Sep 17 00:00:00 2001 From: "Alexandru C. 
Moraru" Date: Mon, 6 May 2024 11:49:36 +0000 Subject: [PATCH 14/19] 8309890: TestStringDeduplicationInterned.java waits for the wrong condition Backport-of: 63fe413d93861c79af5587859f01822980969c24 --- .../jtreg/gc/stringdedup/TestStringDeduplicationTools.java | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/test/hotspot/jtreg/gc/stringdedup/TestStringDeduplicationTools.java b/test/hotspot/jtreg/gc/stringdedup/TestStringDeduplicationTools.java index b5441b4cbcc..31c11b66dfc 100644 --- a/test/hotspot/jtreg/gc/stringdedup/TestStringDeduplicationTools.java +++ b/test/hotspot/jtreg/gc/stringdedup/TestStringDeduplicationTools.java @@ -399,10 +399,8 @@ public static void main(String[] args) { forceDeduplication(ageThreshold, FullGC); - if (!waitForDeduplication(dupString3, baseString)) { - if (getValue(dupString3) != getValue(internedString)) { - throw new RuntimeException("String 3 doesn't match either"); - } + if (!waitForDeduplication(dupString3, internedString)) { + throw new RuntimeException("Deduplication has not occurred for string 3"); } if (afterInternedValue != getValue(dupString2)) { From ed2f5a8d6497e1a32e382b32a686f70be2e3a9fe Mon Sep 17 00:00:00 2001 From: Goetz Lindenmaier Date: Tue, 7 May 2024 08:19:02 +0000 Subject: [PATCH 15/19] 8310228: Improve error reporting for uncaught native exceptions on Windows Reviewed-by: stuefe Backport-of: 38bf1192b637cf3513cb25ac21f513bfb51cb55b --- make/test/JtregNativeHotspot.gmk | 2 + src/hotspot/os/windows/os_windows.cpp | 57 +++++++++------ .../UncaughtNativeExceptionTest.java | 71 +++++++++++++++++++ .../ErrorHandling/libNativeException.c | 32 +++++++++ 4 files changed, 140 insertions(+), 22 deletions(-) create mode 100644 test/hotspot/jtreg/runtime/ErrorHandling/UncaughtNativeExceptionTest.java create mode 100644 test/hotspot/jtreg/runtime/ErrorHandling/libNativeException.c diff --git a/make/test/JtregNativeHotspot.gmk b/make/test/JtregNativeHotspot.gmk index 219537f7412..c73783e3b0a 100644 --- a/make/test/JtregNativeHotspot.gmk +++ b/make/test/JtregNativeHotspot.gmk @@ -1511,6 +1511,8 @@ else BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libMonitorWithDeadObjectTest += -lpthread BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libnativeStack += -lpthread BUILD_HOTSPOT_JTREG_EXECUTABLES_LIBS_exeGetCreatedJavaVMs := -ljvm -lpthread + + BUILD_HOTSPOT_JTREG_EXCLUDE += libNativeException.c endif ifeq ($(ASAN_ENABLED), true) diff --git a/src/hotspot/os/windows/os_windows.cpp b/src/hotspot/os/windows/os_windows.cpp index a6aa82012e8..3359bd0c63e 100644 --- a/src/hotspot/os/windows/os_windows.cpp +++ b/src/hotspot/os/windows/os_windows.cpp @@ -280,10 +280,12 @@ void os::run_periodic_checks(outputStream* st) { return; } +#ifndef _WIN64 // previous UnhandledExceptionFilter, if there is one static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = nullptr; +#endif -LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo); +static LONG WINAPI Uncaught_Exception_Handler(struct _EXCEPTION_POINTERS* exceptionInfo); void os::init_system_properties_values() { // sysclasspath, java_home, dll_dir @@ -397,7 +399,7 @@ void os::init_system_properties_values() { #ifndef _WIN64 // set our UnhandledExceptionFilter and save any previous one - prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception); + prev_uef_handler = SetUnhandledExceptionFilter(Uncaught_Exception_Handler); #endif // Done @@ -2491,9 +2493,7 @@ LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) { #if defined(_M_AMD64) || defined(_M_IX86) 
//----------------------------------------------------------------------------- -LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) { - PCONTEXT ctx = exceptionInfo->ContextRecord; -#ifndef _WIN64 +static bool handle_FLT_exception(struct _EXCEPTION_POINTERS* exceptionInfo) { // handle exception caused by native method modifying control word DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; @@ -2504,34 +2504,48 @@ LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) { case EXCEPTION_FLT_INVALID_OPERATION: case EXCEPTION_FLT_OVERFLOW: case EXCEPTION_FLT_STACK_CHECK: - case EXCEPTION_FLT_UNDERFLOW: + case EXCEPTION_FLT_UNDERFLOW: { + PCONTEXT ctx = exceptionInfo->ContextRecord; +#ifndef _WIN64 jint fp_control_word = (* (jint*) StubRoutines::x86::addr_fpu_cntrl_wrd_std()); if (fp_control_word != ctx->FloatSave.ControlWord) { // Restore FPCW and mask out FLT exceptions ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0; // Mask out pending FLT exceptions ctx->FloatSave.StatusWord &= 0xffffff00; - return EXCEPTION_CONTINUE_EXECUTION; + return true; } +#else // !_WIN64 + // On Windows, the mxcsr control bits are non-volatile across calls + // See also CR 6192333 + // + jint MxCsr = INITIAL_MXCSR; + // we can't use StubRoutines::x86::addr_mxcsr_std() + // because in Win64 mxcsr is not saved there + if (MxCsr != ctx->MxCsr) { + ctx->MxCsr = MxCsr; + return true; + } +#endif // !_WIN64 + } } + return false; +} +#endif + +#ifndef _WIN64 +static LONG WINAPI Uncaught_Exception_Handler(struct _EXCEPTION_POINTERS* exceptionInfo) { + if (handle_FLT_exception(exceptionInfo)) { + return EXCEPTION_CONTINUE_EXECUTION; + } + + // we only override this on 32 bits, so only check it there if (prev_uef_handler != nullptr) { // We didn't handle this exception so pass it to the previous // UnhandledExceptionFilter. return (prev_uef_handler)(exceptionInfo); } -#else // !_WIN64 - // On Windows, the mxcsr control bits are non-volatile across calls - // See also CR 6192333 - // - jint MxCsr = INITIAL_MXCSR; - // we can't use StubRoutines::x86::addr_mxcsr_std() - // because in Win64 mxcsr is not saved there - if (MxCsr != ctx->MxCsr) { - ctx->MxCsr = MxCsr; - return EXCEPTION_CONTINUE_EXECUTION; - } -#endif // !_WIN64 return EXCEPTION_CONTINUE_SEARCH; } @@ -2788,9 +2802,8 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { } #if defined(_M_AMD64) || defined(_M_IX86) - if ((in_java || in_native) && exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) { - LONG result=Handle_FLT_Exception(exceptionInfo); - if (result==EXCEPTION_CONTINUE_EXECUTION) return result; + if ((in_java || in_native) && handle_FLT_exception(exceptionInfo)) { + return EXCEPTION_CONTINUE_EXECUTION; } #endif diff --git a/test/hotspot/jtreg/runtime/ErrorHandling/UncaughtNativeExceptionTest.java b/test/hotspot/jtreg/runtime/ErrorHandling/UncaughtNativeExceptionTest.java new file mode 100644 index 00000000000..260db1eef8b --- /dev/null +++ b/test/hotspot/jtreg/runtime/ErrorHandling/UncaughtNativeExceptionTest.java @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test id + * @enablePreview + * @requires os.family == "windows" + * @library /test/lib + * @run testng UncaughtNativeExceptionTest + */ + +import jdk.test.lib.process.OutputAnalyzer; +import jdk.test.lib.process.ProcessTools; +import org.testng.annotations.Test; + +import java.io.File; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.regex.Pattern; + +import static org.testng.Assert.assertTrue; + +public class UncaughtNativeExceptionTest { + private static class Crasher { + public static void main(String[] args) throws Throwable { + System.loadLibrary("NativeException"); + throwException(); + } + + static native void throwException(); + } + + // check that we actually report the native exception, + // and don't terminate abruptly due to stack overflow error + @Test + public void testNativeExceptionReporting() throws Exception { + OutputAnalyzer output = ProcessTools.executeTestJava( + // executeTestJvm doesn't seem to forward 'java.library.path' + "-Djava.library.path=" + System.getProperty("java.library.path"), + Crasher.class.getName()); + + File hsErrFile = HsErrFileUtils.openHsErrFileFromOutput(output); + Path hsErrPath = hsErrFile.toPath(); + assertTrue(Files.exists(hsErrPath)); + + Pattern[] positivePatterns = { + Pattern.compile(".*Internal Error \\(0x2a\\).*") + }; + HsErrFileUtils.checkHsErrFileContent(hsErrFile, positivePatterns, null, true /* check end marker */, false /* verbose */); + } +} diff --git a/test/hotspot/jtreg/runtime/ErrorHandling/libNativeException.c b/test/hotspot/jtreg/runtime/ErrorHandling/libNativeException.c new file mode 100644 index 00000000000..3bf71cf9c67 --- /dev/null +++ b/test/hotspot/jtreg/runtime/ErrorHandling/libNativeException.c @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include <windows.h> + +#include <jni.h> + +const DWORD EX_CODE = 42; + +JNIEXPORT void JNICALL Java_UncaughtNativeExceptionTest_00024Crasher_throwException(JNIEnv* env, jclass cls) { + RaiseException(EX_CODE, EXCEPTION_NONCONTINUABLE, 0, NULL); +} From 92b43c570eb3a83dff23f757d2805d9b2a44a738 Mon Sep 17 00:00:00 2001 From: Goetz Lindenmaier Date: Tue, 7 May 2024 08:20:13 +0000 Subject: [PATCH 16/19] 8317007: Add bulk removal of dead nmethods during class unloading Backport-of: f55381950266088cc0284754b16663675867ac87 --- src/hotspot/share/code/codeBlob.cpp | 4 +- src/hotspot/share/code/codeBlob.hpp | 2 +- src/hotspot/share/code/compiledMethod.hpp | 2 +- src/hotspot/share/code/nmethod.cpp | 14 +++-- src/hotspot/share/code/nmethod.hpp | 2 +- src/hotspot/share/compiler/compileBroker.cpp | 2 +- src/hotspot/share/gc/g1/g1CodeRootSet.cpp | 14 +++++ src/hotspot/share/gc/g1/g1CodeRootSet.hpp | 1 + src/hotspot/share/gc/g1/g1CollectedHeap.cpp | 62 ++++++++++--------- src/hotspot/share/gc/g1/g1CollectedHeap.hpp | 2 + src/hotspot/share/gc/g1/heapRegionRemSet.cpp | 4 ++ src/hotspot/share/gc/g1/heapRegionRemSet.hpp | 1 + .../gc/parallel/parallelScavengeHeap.cpp | 12 ++-- .../gc/parallel/parallelScavengeHeap.hpp | 1 + .../share/gc/parallel/psParallelCompact.cpp | 5 ++ src/hotspot/share/gc/serial/genMarkSweep.cpp | 4 ++ src/hotspot/share/gc/serial/serialHeap.cpp | 1 + .../share/gc/shared/classUnloadingContext.cpp | 7 ++- .../share/gc/shared/classUnloadingContext.hpp | 5 ++ .../share/gc/shared/genCollectedHeap.cpp | 7 ++- .../share/gc/shared/genCollectedHeap.hpp | 1 + .../share/gc/shared/scavengableNMethods.cpp | 51 ++++++++++----- .../share/gc/shared/scavengableNMethods.hpp | 8 ++- .../share/gc/shenandoah/shenandoahHeap.cpp | 1 + .../share/gc/shenandoah/shenandoahUnload.cpp | 1 + src/hotspot/share/gc/x/xHeap.cpp | 1 + src/hotspot/share/gc/z/zGeneration.cpp | 1 + 27 files changed, 151 insertions(+), 65 deletions(-) diff --git a/src/hotspot/share/code/codeBlob.cpp index 058ac24e167..40d63419e7c 100644 --- a/src/hotspot/share/code/codeBlob.cpp +++ b/src/hotspot/share/code/codeBlob.cpp @@ -164,7 +164,7 @@ RuntimeBlob::RuntimeBlob( void RuntimeBlob::free(RuntimeBlob* blob) { assert(blob != nullptr, "caller must check for nullptr"); ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock - blob->purge(); + blob->purge(true /* free_code_cache_data */, true /* unregister_nmethod */); { MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); CodeCache::free(blob); @@ -173,7 +173,7 @@ void RuntimeBlob::free(RuntimeBlob* blob) { MemoryService::track_code_cache_memory_usage(); } -void CodeBlob::purge(bool free_code_cache_data) { +void CodeBlob::purge(bool free_code_cache_data, bool unregister_nmethod) { if (_oop_maps != nullptr) { delete _oop_maps; _oop_maps = nullptr; diff --git a/src/hotspot/share/code/codeBlob.hpp index 719ca57b30f..c1c34a06c75 100644 --- a/src/hotspot/share/code/codeBlob.hpp +++ b/src/hotspot/share/code/codeBlob.hpp @@ -143,7 +143,7 @@ class CodeBlob { static unsigned int align_code_offset(int offset); // Deletion - virtual void purge(bool free_code_cache_data = true); + virtual void purge(bool free_code_cache_data, bool unregister_nmethod); // Typing virtual bool is_buffer_blob() const { return false; } diff --git a/src/hotspot/share/code/compiledMethod.hpp index 4bcda08ec99..ca441d9ae64 100644 --- a/src/hotspot/share/code/compiledMethod.hpp +++ 
b/src/hotspot/share/code/compiledMethod.hpp @@ -174,7 +174,7 @@ class CompiledMethod : public CodeBlob { void* _gc_data; - virtual void purge(bool free_code_cache_data = true) = 0; + virtual void purge(bool free_code_cache_data, bool unregister_nmethod) = 0; private: DeoptimizationStatus deoptimization_status() const { diff --git a/src/hotspot/share/code/nmethod.cpp index 1b328597b7f..0a82827bd33 100644 --- a/src/hotspot/share/code/nmethod.cpp +++ b/src/hotspot/share/code/nmethod.cpp @@ -1444,7 +1444,9 @@ void nmethod::unlink() { ClassUnloadingContext::context()->register_unlinked_nmethod(this); } -void nmethod::purge(bool free_code_cache_data) { +void nmethod::purge(bool free_code_cache_data, bool unregister_nmethod) { + assert(!free_code_cache_data, "must only call not freeing code cache data"); + MutexLocker ml(CodeCache_lock, Mutex::_no_safepoint_check_flag); // completely deallocate this method @@ -1464,13 +1466,13 @@ void nmethod::purge(bool free_code_cache_data) { ec = next; } - Universe::heap()->unregister_nmethod(this); + if (unregister_nmethod) { + Universe::heap()->unregister_nmethod(this); + } + CodeCache::unregister_old_nmethod(this); - CodeBlob::purge(); - if (free_code_cache_data) { - CodeCache::free(this); - } + CodeBlob::purge(free_code_cache_data, unregister_nmethod); } oop nmethod::oop_at(int index) const { diff --git a/src/hotspot/share/code/nmethod.hpp index 3be50e28e15..f428aa4ef3d 100644 --- a/src/hotspot/share/code/nmethod.hpp +++ b/src/hotspot/share/code/nmethod.hpp @@ -522,7 +522,7 @@ class nmethod : public CompiledMethod { void unlink(); // Deallocate this nmethod - called by the GC - void purge(bool free_code_cache_data = true); + void purge(bool free_code_cache_data, bool unregister_nmethod); // See comment at definition of _last_seen_on_stack void mark_as_maybe_on_stack(); diff --git a/src/hotspot/share/compiler/compileBroker.cpp index c8a5df23b49..9cf5d2ab7bb 100644 --- a/src/hotspot/share/compiler/compileBroker.cpp +++ b/src/hotspot/share/compiler/compileBroker.cpp @@ -1775,7 +1775,7 @@ bool CompileBroker::init_compiler_runtime() { void CompileBroker::free_buffer_blob_if_allocated(CompilerThread* thread) { BufferBlob* blob = thread->get_buffer_blob(); if (blob != nullptr) { - blob->purge(); + blob->purge(true /* free_code_cache_data */, true /* unregister_nmethod */); MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); CodeCache::free(blob); } diff --git a/src/hotspot/share/gc/g1/g1CodeRootSet.cpp index af369e68088..a1eb7b8543a 100644 --- a/src/hotspot/share/gc/g1/g1CodeRootSet.cpp +++ b/src/hotspot/share/gc/g1/g1CodeRootSet.cpp @@ -189,6 +189,15 @@ class G1CodeRootSetHashTable : public CHeapObj<mtGC> { } } + // Removes dead/unlinked entries. + void bulk_remove() { + auto delete_check = [&] (nmethod** value) { + return (*value)->is_unlinked(); + }; + + clean(delete_check); + } + // Calculate the log2 of the table size we want to shrink to. size_t log2_target_shrink_size(size_t current_size) const { // A table with the new size should be at most filled by this factor.
Otherwise @@ -257,6 +266,11 @@ bool G1CodeRootSet::remove(nmethod* method) { return _table->remove(method); } +void G1CodeRootSet::bulk_remove() { + assert(!_is_iterating, "should not mutate while iterating the table"); + _table->bulk_remove(); +} + bool G1CodeRootSet::contains(nmethod* method) { return _table->contains(method); } diff --git a/src/hotspot/share/gc/g1/g1CodeRootSet.hpp b/src/hotspot/share/gc/g1/g1CodeRootSet.hpp index d1051e6e42e..9c5ccdd1202 100644 --- a/src/hotspot/share/gc/g1/g1CodeRootSet.hpp +++ b/src/hotspot/share/gc/g1/g1CodeRootSet.hpp @@ -44,6 +44,7 @@ class G1CodeRootSet { void add(nmethod* method); bool remove(nmethod* method); + void bulk_remove(); bool contains(nmethod* method); void clear(); diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp index 6fc044939b8..f59de9b44e3 100644 --- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp @@ -2597,6 +2597,7 @@ void G1CollectedHeap::unload_classes_and_code(const char* description, BoolObjec GCTraceTime(Debug, gc, phases) debug(description, timer); ClassUnloadingContext ctx(workers()->active_workers(), + false /* unregister_nmethods_during_purge */, false /* lock_codeblob_free_separately */); { CodeCache::UnlinkingScope scope(is_alive); @@ -2608,6 +2609,10 @@ void G1CollectedHeap::unload_classes_and_code(const char* description, BoolObjec GCTraceTime(Debug, gc, phases) t("Purge Unlinked NMethods", timer); ctx.purge_nmethods(); } + { + GCTraceTime(Debug, gc, phases) ur("Unregister NMethods", timer); + G1CollectedHeap::heap()->bulk_unregister_nmethods(); + } { GCTraceTime(Debug, gc, phases) t("Free Code Blobs", timer); ctx.free_code_blobs(); @@ -2619,6 +2624,33 @@ void G1CollectedHeap::unload_classes_and_code(const char* description, BoolObjec } } +class G1BulkUnregisterNMethodTask : public WorkerTask { + HeapRegionClaimer _hrclaimer; + + class UnregisterNMethodsHeapRegionClosure : public HeapRegionClosure { + public: + + bool do_heap_region(HeapRegion* hr) { + hr->rem_set()->bulk_remove_code_roots(); + return false; + } + } _cl; + +public: + G1BulkUnregisterNMethodTask(uint num_workers) + : WorkerTask("G1 Remove Unlinked NMethods From Code Root Set Task"), + _hrclaimer(num_workers) { } + + void work(uint worker_id) { + G1CollectedHeap::heap()->heap_region_par_iterate_from_worker_offset(&_cl, &_hrclaimer, worker_id); + } +}; + +void G1CollectedHeap::bulk_unregister_nmethods() { + uint num_workers = workers()->active_workers(); + G1BulkUnregisterNMethodTask t(num_workers); + workers()->run_task(&t); +} bool G1STWSubjectToDiscoveryClosure::do_object_b(oop obj) { assert(obj != nullptr, "must not be null"); @@ -3039,31 +3071,6 @@ class RegisterNMethodOopClosure: public OopClosure { void do_oop(narrowOop* p) { ShouldNotReachHere(); } }; -class UnregisterNMethodOopClosure: public OopClosure { - G1CollectedHeap* _g1h; - nmethod* _nm; - -public: - UnregisterNMethodOopClosure(G1CollectedHeap* g1h, nmethod* nm) : - _g1h(g1h), _nm(nm) {} - - void do_oop(oop* p) { - oop heap_oop = RawAccess<>::oop_load(p); - if (!CompressedOops::is_null(heap_oop)) { - oop obj = CompressedOops::decode_not_null(heap_oop); - HeapRegion* hr = _g1h->heap_region_containing(obj); - assert(!hr->is_continues_humongous(), - "trying to remove code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT - " starting at " HR_FORMAT, - p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())); - - hr->remove_code_root(_nm); - } - } - 
- void do_oop(narrowOop* p) { ShouldNotReachHere(); } -}; - void G1CollectedHeap::register_nmethod(nmethod* nm) { guarantee(nm != nullptr, "sanity"); RegisterNMethodOopClosure reg_cl(this, nm); @@ -3071,9 +3078,8 @@ void G1CollectedHeap::register_nmethod(nmethod* nm) { } void G1CollectedHeap::unregister_nmethod(nmethod* nm) { - guarantee(nm != nullptr, "sanity"); - UnregisterNMethodOopClosure reg_cl(this, nm); - nm->oops_do(&reg_cl, true); + // We always unregister nmethods in bulk during code unloading only. + ShouldNotReachHere(); } void G1CollectedHeap::update_used_after_gc(bool evacuation_failed) { diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp index 9d4566916ff..447535f2f86 100644 --- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp @@ -1267,6 +1267,8 @@ class G1CollectedHeap : public CollectedHeap { void unload_classes_and_code(const char* description, BoolObjectClosure* cl, GCTimer* timer); + void bulk_unregister_nmethods(); + // Verification // Perform any cleanup actions necessary before allowing a verification. diff --git a/src/hotspot/share/gc/g1/heapRegionRemSet.cpp index a805108537a..4a124c20749 100644 --- a/src/hotspot/share/gc/g1/heapRegionRemSet.cpp +++ b/src/hotspot/share/gc/g1/heapRegionRemSet.cpp @@ -110,6 +110,10 @@ void HeapRegionRemSet::remove_code_root(nmethod* nm) { guarantee(!_code_roots.contains(nm), "duplicate entry found"); } +void HeapRegionRemSet::bulk_remove_code_roots() { + _code_roots.bulk_remove(); +} + void HeapRegionRemSet::code_roots_do(CodeBlobClosure* blk) const { _code_roots.nmethods_do(blk); } diff --git a/src/hotspot/share/gc/g1/heapRegionRemSet.hpp index 15c472d0ac9..facefde918f 100644 --- a/src/hotspot/share/gc/g1/heapRegionRemSet.hpp +++ b/src/hotspot/share/gc/g1/heapRegionRemSet.hpp @@ -151,6 +151,7 @@ class HeapRegionRemSet : public CHeapObj<mtGC> { void add_code_root(nmethod* nm); void add_code_root_locked(nmethod* nm); void remove_code_root(nmethod* nm); + void bulk_remove_code_roots(); // Applies blk->do_code_blob() to each of the entries in _code_roots void code_roots_do(CodeBlobClosure* blk) const; diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp index 45e734231a5..40488093f7d 100644 --- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp +++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp @@ -527,6 +527,14 @@ void ParallelScavengeHeap::resize_all_tlabs() { CollectedHeap::resize_all_tlabs(); } +void ParallelScavengeHeap::prune_scavengable_nmethods() { + ScavengableNMethods::prune_nmethods_not_into_young(); +} + +void ParallelScavengeHeap::prune_unlinked_nmethods() { + ScavengableNMethods::prune_unlinked_nmethods(); +} + // This method is used by System.gc() and JVMTI.
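The core idea of this backport, reduced to a hedged Java sketch (hypothetical types, not HotSpot code): during unlinking each dead nmethod is only flagged, and the code-root sets are swept once later with a predicate, replacing the per-nmethod oop walk that unregister_nmethod() used to perform.

    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;

    class CodeRootSetSketch {
        // Hypothetical stand-in for nmethod; isUnlinked() is set during class unloading.
        interface NMethod { boolean isUnlinked(); }

        private final Set<NMethod> roots = ConcurrentHashMap.newKeySet();

        void register(NMethod nm) { roots.add(nm); }

        // Analogue of G1CodeRootSet::bulk_remove(): one pass over the set with a
        // predicate, instead of one removal (with an oop walk) per dying nmethod.
        void bulkRemove() { roots.removeIf(NMethod::isUnlinked); }
    }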
void ParallelScavengeHeap::collect(GCCause::Cause cause) { assert(!Heap_lock->owned_by_self(), @@ -858,10 +866,6 @@ void ParallelScavengeHeap::verify_nmethod(nmethod* nm) { ScavengableNMethods::verify_nmethod(nm); } -void ParallelScavengeHeap::prune_scavengable_nmethods() { - ScavengableNMethods::prune_nmethods(); -} - GrowableArray<GCMemoryManager*> ParallelScavengeHeap::memory_managers() { GrowableArray<GCMemoryManager*> memory_managers(2); memory_managers.append(_young_manager); diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp index abf87b0e019..e71dc9515aa 100644 --- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp +++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp @@ -174,6 +174,7 @@ class ParallelScavengeHeap : public CollectedHeap { void verify_nmethod(nmethod* nm) override; void prune_scavengable_nmethods(); + void prune_unlinked_nmethods(); size_t max_capacity() const override; diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.cpp index 9baf0c21db6..741d415fe91 100644 --- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp +++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp @@ -1769,6 +1769,7 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) { ref_processor()->start_discovery(maximum_heap_compaction); ClassUnloadingContext ctx(1 /* num_nmethod_unlink_workers */, + false /* unregister_nmethods_during_purge */, false /* lock_codeblob_free_separately */); marking_phase(&_gc_tracer); @@ -2078,6 +2079,10 @@ void PSParallelCompact::marking_phase(ParallelOldTracer *gc_tracer) { // Release unloaded nmethod's memory. ctx->purge_nmethods(); } + { + GCTraceTime(Debug, gc, phases) ur("Unregister NMethods", &_gc_timer); + ParallelScavengeHeap::heap()->prune_unlinked_nmethods(); + } { GCTraceTime(Debug, gc, phases) t("Free Code Blobs", gc_timer()); ctx->free_code_blobs(); diff --git a/src/hotspot/share/gc/serial/genMarkSweep.cpp index 82e0181dbc3..7d06fe588ac 100644 --- a/src/hotspot/share/gc/serial/genMarkSweep.cpp +++ b/src/hotspot/share/gc/serial/genMarkSweep.cpp @@ -215,6 +215,10 @@ void GenMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) { // Release unloaded nmethod's memory.
ctx->purge_nmethods(); } + { + GCTraceTime(Debug, gc, phases) ur("Unregister NMethods", gc_timer()); + gch->prune_unlinked_nmethods(); + } { GCTraceTime(Debug, gc, phases) t("Free Code Blobs", gc_timer()); ctx->free_code_blobs(); diff --git a/src/hotspot/share/gc/serial/serialHeap.cpp index 8f126c3129e..9361a4e5eb9 100644 --- a/src/hotspot/share/gc/serial/serialHeap.cpp +++ b/src/hotspot/share/gc/serial/serialHeap.cpp @@ -28,6 +28,7 @@ #include "gc/serial/tenuredGeneration.inline.hpp" #include "gc/shared/gcLocker.inline.hpp" #include "gc/shared/genMemoryPools.hpp" +#include "gc/shared/scavengableNMethods.hpp" #include "gc/shared/strongRootsScope.hpp" #include "gc/shared/suspendibleThreadSet.hpp" #include "memory/universe.hpp" diff --git a/src/hotspot/share/gc/shared/classUnloadingContext.cpp index f624eaa6e44..6d9674ef801 100644 --- a/src/hotspot/share/gc/shared/classUnloadingContext.cpp +++ b/src/hotspot/share/gc/shared/classUnloadingContext.cpp @@ -32,10 +32,13 @@ ClassUnloadingContext* ClassUnloadingContext::_context = nullptr; -ClassUnloadingContext::ClassUnloadingContext(uint num_workers, bool lock_codeblob_free_separately) : +ClassUnloadingContext::ClassUnloadingContext(uint num_workers, + bool unregister_nmethods_during_purge, + bool lock_codeblob_free_separately) : _cld_head(nullptr), _num_nmethod_unlink_workers(num_workers), _unlinked_nmethods(nullptr), + _unregister_nmethods_during_purge(unregister_nmethods_during_purge), _lock_codeblob_free_separately(lock_codeblob_free_separately) { assert(_context == nullptr, "context already set"); @@ -113,7 +116,7 @@ void ClassUnloadingContext::purge_nmethods() { NMethodSet* set = _unlinked_nmethods[i]; for (nmethod* nm : *set) { freed_memory += nm->size(); - nm->purge(false /* free_code_cache_data */); + nm->purge(false /* free_code_cache_data */, _unregister_nmethods_during_purge); } } diff --git a/src/hotspot/share/gc/shared/classUnloadingContext.hpp index 42a8f764fa4..30930967d38 100644 --- a/src/hotspot/share/gc/shared/classUnloadingContext.hpp +++ b/src/hotspot/share/gc/shared/classUnloadingContext.hpp @@ -42,6 +42,7 @@ class ClassUnloadingContext : public CHeapObj<mtGC> { using NMethodSet = GrowableArrayCHeap<nmethod*, mtGC>; NMethodSet** _unlinked_nmethods; + bool _unregister_nmethods_during_purge; bool _lock_codeblob_free_separately; public: @@ -49,10 +50,14 @@ class ClassUnloadingContext : public CHeapObj<mtGC> { // Num_nmethod_unlink_workers configures the maximum number of threads unlinking // nmethods. + // unregister_nmethods_during_purge determines whether unloaded nmethods should + // be unregistered from the garbage collector during purge. If not, the caller + // is responsible for doing that later. // lock_codeblob_free_separately determines whether freeing the code blobs takes // the CodeCache_lock during the whole operation (=false) or per code blob // free operation (=true).
ClassUnloadingContext(uint num_nmethod_unlink_workers, + bool unregister_nmethods_during_purge, bool lock_codeblob_free_separately); ~ClassUnloadingContext(); diff --git a/src/hotspot/share/gc/shared/genCollectedHeap.cpp b/src/hotspot/share/gc/shared/genCollectedHeap.cpp index 0854a4776f0..d67a4cb636b 100644 --- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp +++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp @@ -557,6 +557,7 @@ void GenCollectedHeap::do_collection(bool full, CodeCache::on_gc_marking_cycle_start(); ClassUnloadingContext ctx(1 /* num_nmethod_unlink_workers */, + false /* unregister_nmethods_during_purge */, false /* lock_codeblob_free_separately */); collect_generation(_old_gen, @@ -615,7 +616,11 @@ void GenCollectedHeap::verify_nmethod(nmethod* nm) { } void GenCollectedHeap::prune_scavengable_nmethods() { - ScavengableNMethods::prune_nmethods(); + ScavengableNMethods::prune_nmethods_not_into_young(); +} + +void GenCollectedHeap::prune_unlinked_nmethods() { + ScavengableNMethods::prune_unlinked_nmethods(); } HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) { diff --git a/src/hotspot/share/gc/shared/genCollectedHeap.hpp b/src/hotspot/share/gc/shared/genCollectedHeap.hpp index f279c7588de..271703b6dc0 100644 --- a/src/hotspot/share/gc/shared/genCollectedHeap.hpp +++ b/src/hotspot/share/gc/shared/genCollectedHeap.hpp @@ -205,6 +205,7 @@ class GenCollectedHeap : public CollectedHeap { void verify_nmethod(nmethod* nm) override; void prune_scavengable_nmethods(); + void prune_unlinked_nmethods(); // Iteration functions. void oop_iterate(OopIterateClosure* cl); diff --git a/src/hotspot/share/gc/shared/scavengableNMethods.cpp b/src/hotspot/share/gc/shared/scavengableNMethods.cpp index ec9983da4a9..9f961ff4bf8 100644 --- a/src/hotspot/share/gc/shared/scavengableNMethods.cpp +++ b/src/hotspot/share/gc/shared/scavengableNMethods.cpp @@ -59,18 +59,8 @@ void ScavengableNMethods::register_nmethod(nmethod* nm) { } void ScavengableNMethods::unregister_nmethod(nmethod* nm) { - assert_locked_or_safepoint(CodeCache_lock); - - if (gc_data(nm).on_list()) { - nmethod* prev = nullptr; - for (nmethod* cur = _head; cur != nullptr; cur = gc_data(cur).next()) { - if (cur == nm) { - unlist_nmethod(cur, prev); - return; - } - prev = cur; - } - } + // All users of this method only unregister in bulk during code unloading. + ShouldNotReachHere(); } #ifndef PRODUCT @@ -172,10 +162,37 @@ void ScavengableNMethods::nmethods_do_and_prune(CodeBlobToOopClosure* cl) { debug_only(verify_unlisted_nmethods(nullptr)); } -void ScavengableNMethods::prune_nmethods() { +void ScavengableNMethods::prune_nmethods_not_into_young() { nmethods_do_and_prune(nullptr /* No closure */); } +void ScavengableNMethods::prune_unlinked_nmethods() { + assert_locked_or_safepoint(CodeCache_lock); + + debug_only(mark_on_list_nmethods()); + + nmethod* prev = nullptr; + nmethod* cur = _head; + while (cur != nullptr) { + ScavengableNMethodsData data = gc_data(cur); + debug_only(data.clear_marked()); + assert(data.on_list(), "else shouldn't be on this list"); + + nmethod* const next = data.next(); + + if (cur->is_unlinked()) { + unlist_nmethod(cur, prev); + } else { + prev = cur; + } + + cur = next; + } + + // Check for stray marks. + debug_only(verify_unlisted_nmethods(nullptr)); +} + // Walk the list of methods which might contain oops to the java heap. 
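The prune_unlinked_nmethods() loop above is the classic single-pass splice pattern; a hedged Java restatement (hypothetical node type) for readers who want the invariant spelled out: prev always trails cur by one surviving node, so unlinking never loses the remainder of the list.

    class ScavengableListSketch {
        // Hypothetical stand-in for an nmethod on the scavengable list.
        static final class Node {
            Node next;
            boolean unlinked; // set earlier, during class unloading
        }

        Node head;

        void pruneUnlinked() {
            Node prev = null;
            Node cur = head;
            while (cur != null) {
                Node next = cur.next;          // capture before any splice
                if (cur.unlinked) {
                    if (prev == null) {
                        head = next;           // removing the current head
                    } else {
                        prev.next = next;      // splice cur out of the list
                    }
                } else {
                    prev = cur;                // cur survives and becomes the trailer
                }
                cur = next;
            }
        }
    }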
void ScavengableNMethods::nmethods_do(CodeBlobToOopClosure* cl) { nmethods_do_and_prune(cl); @@ -218,8 +235,9 @@ void ScavengableNMethods::mark_on_list_nmethods() { nmethod* nm = iter.method(); ScavengableNMethodsData data = gc_data(nm); assert(data.not_marked(), "clean state"); - if (data.on_list()) + if (data.on_list()) { data.set_marked(); + } } } @@ -230,7 +248,10 @@ void ScavengableNMethods::verify_unlisted_nmethods(CodeBlobClosure* cl) { while(iter.next()) { nmethod* nm = iter.method(); - verify_nmethod(nm); + // Cannot verify already-unlinked nmethods, as they are partially invalid. + if (!nm->is_unlinked()) { + verify_nmethod(nm); + } if (cl != nullptr && !gc_data(nm).on_list()) { cl->do_code_blob(nm); diff --git a/src/hotspot/share/gc/shared/scavengableNMethods.hpp index 4852e6d32fb..94d594cd529 100644 --- a/src/hotspot/share/gc/shared/scavengableNMethods.hpp +++ b/src/hotspot/share/gc/shared/scavengableNMethods.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -46,8 +46,10 @@ class ScavengableNMethods : public AllStatic { static void unregister_nmethod(nmethod* nm); static void verify_nmethod(nmethod* nm); - // Remove nmethods that no longer have scavengable oops. - static void prune_nmethods(); + // Remove nmethods that no longer have oops into young gen. + static void prune_nmethods_not_into_young(); + // Remove unlinked (dead) nmethods. + static void prune_unlinked_nmethods(); // Apply closure to every scavengable nmethod. // Remove nmethods that no longer have scavengable oops. diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index 2e6ac1c356a..91fc66b1204 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -1769,6 +1769,7 @@ void ShenandoahHeap::stop() { void ShenandoahHeap::stw_unload_classes(bool full_gc) { if (!unload_classes()) return; ClassUnloadingContext ctx(_workers->active_workers(), + true /* unregister_nmethods_during_purge */, false /* lock_codeblob_free_separately */); // Unload classes and purge SystemDictionary.
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahUnload.cpp b/src/hotspot/share/gc/shenandoah/shenandoahUnload.cpp index 81eee97f25f..bb13e9b8e22 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahUnload.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahUnload.cpp @@ -140,6 +140,7 @@ void ShenandoahUnload::unload() { assert(heap->is_concurrent_weak_root_in_progress(), "Filtered by caller"); ClassUnloadingContext ctx(heap->workers()->active_workers(), + true /* unregister_nmethods_during_purge */, true /* lock_codeblob_free_separately */); // Unlink stale metadata and nmethods diff --git a/src/hotspot/share/gc/x/xHeap.cpp b/src/hotspot/share/gc/x/xHeap.cpp index 65da4e1c983..14661330e13 100644 --- a/src/hotspot/share/gc/x/xHeap.cpp +++ b/src/hotspot/share/gc/x/xHeap.cpp @@ -322,6 +322,7 @@ void XHeap::process_non_strong_references() { _weak_roots_processor.process_weak_roots(); ClassUnloadingContext ctx(_workers.active_workers(), + true /* unregister_nmethods_during_purge */, true /* lock_codeblob_free_separately */); // Unlink stale metadata and nmethods diff --git a/src/hotspot/share/gc/z/zGeneration.cpp b/src/hotspot/share/gc/z/zGeneration.cpp index bb0653f72ea..0b131c65248 100644 --- a/src/hotspot/share/gc/z/zGeneration.cpp +++ b/src/hotspot/share/gc/z/zGeneration.cpp @@ -1322,6 +1322,7 @@ void ZGenerationOld::process_non_strong_references() { _weak_roots_processor.process_weak_roots(); ClassUnloadingContext ctx(_workers.active_workers(), + true /* unregister_nmethods_during_purge */, true /* lock_codeblob_free_separately */); // Unlink stale metadata and nmethods From ec6b32601095017a2641f4b82143e83bef5f37b6 Mon Sep 17 00:00:00 2001 From: Aleksey Shipilev Date: Tue, 7 May 2024 09:47:45 +0000 Subject: [PATCH 17/19] 8310913: Move ReferencedKeyMap to jdk.internal so it may be shared Reviewed-by: rrich Backport-of: 6af0af593446bc33dc94bbf7334c325c4ac0ac0f --- .../classes/java/lang/invoke/MethodType.java | 133 +----------- .../classes/java/lang/runtime/Carriers.java | 3 +- .../internal/access/JavaLangRefAccess.java | 2 +- .../internal/util}/ReferenceKey.java | 12 +- .../internal/util}/ReferencedKeyMap.java | 156 +++++++++++-- .../jdk/internal/util/ReferencedKeySet.java | 205 ++++++++++++++++++ .../internal/util}/SoftReferenceKey.java | 7 +- .../internal/util}/StrongReferenceKey.java | 5 +- .../internal/util}/WeakReferenceKey.java | 7 +- .../java/lang/runtime/ReferencedKeyTest.java | 109 ---------- .../jdk/internal/util/ReferencedKeyTest.java | 175 +++++++++++++++ 11 files changed, 537 insertions(+), 277 deletions(-) rename src/java.base/share/classes/{java/lang/runtime => jdk/internal/util}/ReferenceKey.java (81%) rename src/java.base/share/classes/{java/lang/runtime => jdk/internal/util}/ReferencedKeyMap.java (63%) create mode 100644 src/java.base/share/classes/jdk/internal/util/ReferencedKeySet.java rename src/java.base/share/classes/{java/lang/runtime => jdk/internal/util}/SoftReferenceKey.java (94%) rename src/java.base/share/classes/{java/lang/runtime => jdk/internal/util}/StrongReferenceKey.java (93%) rename src/java.base/share/classes/{java/lang/runtime => jdk/internal/util}/WeakReferenceKey.java (94%) delete mode 100644 test/jdk/java/lang/runtime/ReferencedKeyTest.java create mode 100644 test/jdk/jdk/internal/util/ReferencedKeyTest.java diff --git a/src/java.base/share/classes/java/lang/invoke/MethodType.java b/src/java.base/share/classes/java/lang/invoke/MethodType.java index 951d209b69f..447a26e7600 100644 --- 
a/src/java.base/share/classes/java/lang/invoke/MethodType.java +++ b/src/java.base/share/classes/java/lang/invoke/MethodType.java @@ -33,7 +33,9 @@ import java.lang.ref.WeakReference; import java.util.Arrays; import java.util.Collections; +import java.util.function.Supplier; import java.util.List; +import java.util.Map; import java.util.NoSuchElementException; import java.util.Objects; import java.util.Optional; @@ -42,7 +44,8 @@ import java.util.concurrent.ConcurrentMap; import java.util.stream.Stream; -import jdk.internal.access.SharedSecrets; +import jdk.internal.util.ReferencedKeySet; +import jdk.internal.util.ReferenceKey; import jdk.internal.vm.annotation.Stable; import sun.invoke.util.BytecodeDescriptor; import sun.invoke.util.VerifyType; @@ -227,7 +230,13 @@ private static IndexOutOfBoundsException newIndexOutOfBoundsException(Object num return new IndexOutOfBoundsException(num.toString()); } - static final ConcurrentWeakInternSet internTable = new ConcurrentWeakInternSet<>(); + static final ReferencedKeySet internTable = + ReferencedKeySet.create(false, true, new Supplier<>() { + @Override + public Map, ReferenceKey> get() { + return new ConcurrentHashMap<>(512); + } + }); static final Class[] NO_PTYPES = {}; @@ -405,7 +414,7 @@ private static MethodType makeImpl(Class rtype, Class[] ptypes, boolean tr mt = new MethodType(rtype, ptypes); } mt.form = MethodTypeForm.findForm(mt); - return internTable.add(mt); + return internTable.intern(mt); } private static final @Stable MethodType[] objectOnlyTypes = new MethodType[20]; @@ -883,10 +892,6 @@ public Class[] parameterArray() { * @param x object to compare * @see Object#equals(Object) */ - // This implementation may also return true if x is a WeakEntry containing - // a method type that is equal to this. This is an internal implementation - // detail to allow for faster method type lookups. - // See ConcurrentWeakInternSet.WeakEntry#equals(Object) @Override public boolean equals(Object x) { if (this == x) { @@ -895,12 +900,6 @@ public boolean equals(Object x) { if (x instanceof MethodType) { return equals((MethodType)x); } - if (x instanceof ConcurrentWeakInternSet.WeakEntry) { - Object o = ((ConcurrentWeakInternSet.WeakEntry)x).get(); - if (o instanceof MethodType) { - return equals((MethodType)o); - } - } return false; } @@ -1392,112 +1391,4 @@ private Object readResolve() { wrapAlt = null; return mt; } - - /** - * Simple implementation of weak concurrent intern set. - * - * @param interned type - */ - private static class ConcurrentWeakInternSet { - - private final ConcurrentMap, WeakEntry> map; - private final ReferenceQueue stale; - - public ConcurrentWeakInternSet() { - this.map = new ConcurrentHashMap<>(512); - this.stale = SharedSecrets.getJavaLangRefAccess().newNativeReferenceQueue(); - } - - /** - * Get the existing interned element. - * This method returns null if no element is interned. - * - * @param elem element to look up - * @return the interned element - */ - public T get(T elem) { - if (elem == null) throw new NullPointerException(); - expungeStaleElements(); - - WeakEntry value = map.get(elem); - if (value != null) { - T res = value.get(); - if (res != null) { - return res; - } - } - return null; - } - - /** - * Interns the element. - * Always returns non-null element, matching the one in the intern set. - * Under the race against another add(), it can return different - * element, if another thread beats us to interning it. 
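The deleted helper above and the ReferencedKeyMap that replaces it share one interning idiom, shown here as a hedged standalone Java sketch (hypothetical class, strong keys for brevity): putIfAbsent decides which instance becomes canonical, and the retry loop exists only because a weak-keyed winner can have its referent cleared before the loser reads it.

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    final class InternSketch<T> {
        private final ConcurrentMap<T, T> table = new ConcurrentHashMap<>();

        T intern(T elem) {
            T interned;
            do {
                T existing = table.putIfAbsent(elem, elem);
                // existing == null: we won the race, elem is now canonical.
                // Otherwise another thread won and we adopt its instance; with weak
                // keys that instance can be collected concurrently, hence the loop.
                interned = (existing == null) ? elem : existing;
            } while (interned == null);
            return interned;
        }
    }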
- * - * @param elem element to add - * @return element that was actually added - */ - public T add(T elem) { - if (elem == null) throw new NullPointerException(); - - // Playing double race here, and so spinloop is required. - // First race is with two concurrent updaters. - // Second race is with GC purging weak ref under our feet. - // Hopefully, we almost always end up with a single pass. - T interned; - WeakEntry e = new WeakEntry<>(elem, stale); - do { - expungeStaleElements(); - WeakEntry exist = map.putIfAbsent(e, e); - interned = (exist == null) ? elem : exist.get(); - } while (interned == null); - return interned; - } - - private void expungeStaleElements() { - Reference reference; - while ((reference = stale.poll()) != null) { - map.remove(reference); - } - } - - private static class WeakEntry extends WeakReference { - - public final int hashcode; - - public WeakEntry(T key, ReferenceQueue queue) { - super(key, queue); - hashcode = key.hashCode(); - } - - /** - * This implementation returns {@code true} if {@code obj} is another - * {@code WeakEntry} whose referent is equal to this referent, or - * if {@code obj} is equal to the referent of this. This allows - * lookups to be made without wrapping in a {@code WeakEntry}. - * - * @param obj the object to compare - * @return true if {@code obj} is equal to this or the referent of this - * @see MethodType#equals(Object) - * @see Object#equals(Object) - */ - @Override - public boolean equals(Object obj) { - Object mine = get(); - if (obj instanceof WeakEntry) { - Object that = ((WeakEntry) obj).get(); - return (that == null || mine == null) ? (this == obj) : mine.equals(that); - } - return (mine == null) ? (obj == null) : mine.equals(obj); - } - - @Override - public int hashCode() { - return hashcode; - } - - } - } - } diff --git a/src/java.base/share/classes/java/lang/runtime/Carriers.java b/src/java.base/share/classes/java/lang/runtime/Carriers.java index e0ebc998ee5..a74144fcbeb 100644 --- a/src/java.base/share/classes/java/lang/runtime/Carriers.java +++ b/src/java.base/share/classes/java/lang/runtime/Carriers.java @@ -35,6 +35,7 @@ import java.util.concurrent.ConcurrentHashMap; import jdk.internal.misc.Unsafe; +import jdk.internal.util.ReferencedKeyMap; import static java.lang.invoke.MethodType.methodType; @@ -366,7 +367,7 @@ MethodHandle[] createComponents(CarrierShape carrierShape) { * Cache mapping {@link MethodType} to previously defined {@link CarrierElements}. */ private static final Map - methodTypeCache = ReferencedKeyMap.create(ConcurrentHashMap::new); + methodTypeCache = ReferencedKeyMap.create(false, ConcurrentHashMap::new); /** * Permute a raw constructor and component accessor {@link MethodHandle MethodHandles} to diff --git a/src/java.base/share/classes/jdk/internal/access/JavaLangRefAccess.java b/src/java.base/share/classes/jdk/internal/access/JavaLangRefAccess.java index b9b180fc2da..ed9967ec3eb 100644 --- a/src/java.base/share/classes/jdk/internal/access/JavaLangRefAccess.java +++ b/src/java.base/share/classes/jdk/internal/access/JavaLangRefAccess.java @@ -54,7 +54,7 @@ public interface JavaLangRefAccess { /** * Constructs a new NativeReferenceQueue. 
* - * Invoked by MethodType.ConcurrentWeakInternSet + * Invoked by jdk.internal.util.ReferencedKeyMap */ ReferenceQueue newNativeReferenceQueue(); } diff --git a/src/java.base/share/classes/java/lang/runtime/ReferenceKey.java b/src/java.base/share/classes/jdk/internal/util/ReferenceKey.java similarity index 81% rename from src/java.base/share/classes/java/lang/runtime/ReferenceKey.java rename to src/java.base/share/classes/jdk/internal/util/ReferenceKey.java index 983d81d3a0f..a193794fe70 100644 --- a/src/java.base/share/classes/java/lang/runtime/ReferenceKey.java +++ b/src/java.base/share/classes/jdk/internal/util/ReferenceKey.java @@ -23,12 +23,9 @@ * questions. */ -package java.lang.runtime; +package jdk.internal.util; -import java.lang.ref.ReferenceQueue; -import java.lang.ref.SoftReference; -import java.lang.ref.WeakReference; -import java.util.Objects; +import java.lang.ref.Reference; /** * View/wrapper of keys used by the backing {@link ReferencedKeyMap}. @@ -39,11 +36,8 @@ * @param key type * * @since 21 - * - * Warning: This class is part of PreviewFeature.Feature.STRING_TEMPLATES. - * Do not rely on its availability. */ -sealed interface ReferenceKey permits StrongReferenceKey, WeakReferenceKey, SoftReferenceKey { +public sealed interface ReferenceKey permits StrongReferenceKey, WeakReferenceKey, SoftReferenceKey { /** * {@return the value of the unwrapped key} */ diff --git a/src/java.base/share/classes/java/lang/runtime/ReferencedKeyMap.java b/src/java.base/share/classes/jdk/internal/util/ReferencedKeyMap.java similarity index 63% rename from src/java.base/share/classes/java/lang/runtime/ReferencedKeyMap.java rename to src/java.base/share/classes/jdk/internal/util/ReferencedKeyMap.java index 1ded08c4cba..f102e3c94e1 100644 --- a/src/java.base/share/classes/java/lang/runtime/ReferencedKeyMap.java +++ b/src/java.base/share/classes/jdk/internal/util/ReferencedKeyMap.java @@ -23,7 +23,7 @@ * questions. */ -package java.lang.runtime; +package jdk.internal.util; import java.lang.ref.Reference; import java.lang.ref.ReferenceQueue; @@ -37,9 +37,12 @@ import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.function.Supplier; +import java.util.function.UnaryOperator; import java.util.stream.Collectors; import java.util.stream.Stream; +import jdk.internal.access.SharedSecrets; + /** * This class provides management of {@link Map maps} where it is desirable to * remove entries automatically when the key is garbage collected. This is @@ -78,11 +81,8 @@ * @param the type of mapped values * * @since 21 - * - * Warning: This class is part of PreviewFeature.Feature.STRING_TEMPLATES. - * Do not rely on its availability. */ -final class ReferencedKeyMap implements Map { +public final class ReferencedKeyMap implements Map { /** * true if {@link SoftReference} keys are to be used, * {@link WeakReference} otherwise. @@ -95,54 +95,61 @@ final class ReferencedKeyMap implements Map { private final Map, V> map; /** - * {@link ReferenceQueue} for cleaning up {@link WeakReferenceKey EntryKeys}. + * {@link ReferenceQueue} for cleaning up entries. */ private final ReferenceQueue stale; /** * Private constructor. * - * @param isSoft true if {@link SoftReference} keys are to - * be used, {@link WeakReference} otherwise. - * @param map backing map + * @param isSoft true if {@link SoftReference} keys are to + * be used, {@link WeakReference} otherwise. 
+ * @param map backing map + * @param stale {@link ReferenceQueue} for cleaning up entries */ - private ReferencedKeyMap(boolean isSoft, Map, V> map) { + private ReferencedKeyMap(boolean isSoft, Map, V> map, ReferenceQueue stale) { this.isSoft = isSoft; this.map = map; - this.stale = new ReferenceQueue<>(); + this.stale = stale; } /** * Create a new {@link ReferencedKeyMap} map. * - * @param isSoft true if {@link SoftReference} keys are to - * be used, {@link WeakReference} otherwise. - * @param supplier {@link Supplier} of the backing map + * @param isSoft true if {@link SoftReference} keys are to + * be used, {@link WeakReference} otherwise. + * @param supplier {@link Supplier} of the backing map * * @return a new map with {@link Reference} keys * * @param the type of keys maintained by the new map * @param the type of mapped values */ - static ReferencedKeyMap + public static ReferencedKeyMap create(boolean isSoft, Supplier, V>> supplier) { - return new ReferencedKeyMap(isSoft, supplier.get()); + return create(isSoft, false, supplier); } /** - * Create a new {@link ReferencedKeyMap} map using - * {@link WeakReference} keys. + * Create a new {@link ReferencedKeyMap} map. * - * @param supplier {@link Supplier} of the backing map + * @param isSoft true if {@link SoftReference} keys are to + * be used, {@link WeakReference} otherwise. + * @param useNativeQueue true if uses NativeReferenceQueue + * otherwise use {@link ReferenceQueue}. + * @param supplier {@link Supplier} of the backing map * * @return a new map with {@link Reference} keys * * @param the type of keys maintained by the new map * @param the type of mapped values */ - static ReferencedKeyMap - create(Supplier, V>> supplier) { - return new ReferencedKeyMap(false, supplier.get()); + public static ReferencedKeyMap + create(boolean isSoft, boolean useNativeQueue, Supplier, V>> supplier) { + return new ReferencedKeyMap(isSoft, supplier.get(), + useNativeQueue ? SharedSecrets.getJavaLangRefAccess().newNativeReferenceQueue() + : new ReferenceQueue<>() + ); } /** @@ -320,10 +327,9 @@ public String toString() { /** * Removes enqueued weak references from map. */ - @SuppressWarnings("unchecked") public void removeStaleReferences() { while (true) { - WeakReferenceKey key = (WeakReferenceKey)stale.poll(); + Object key = stale.poll(); if (key == null) { break; } @@ -331,4 +337,106 @@ public void removeStaleReferences() { } } + /** + * Puts an entry where the key and the value are the same. Used for + * interning values in a set. + * + * @implNote Requires a {@link ReferencedKeyMap} whose {@code V} type + * is a {@code ReferenceKey}. Otherwise, a {@link ClassCastException} will + * be thrown. + * + * @param setMap {@link ReferencedKeyMap} where interning takes place + * @param key key to add + * + * @param type of key + * + * @return the old key instance if found otherwise the new key instance + * + * @throws ClassCastException if {@code V} is not {@code EntryKey} + */ + static T intern(ReferencedKeyMap> setMap, T key) { + T value = existingKey(setMap, key); + if (value != null) { + return value; + } + return internKey(setMap, key); + } + + /** + * Puts an entry where the key and the value are the same. Used for + * interning values in a set. + * + * @implNote Requires a {@link ReferencedKeyMap} whose {@code V} type + * is a {@code ReferenceKey}. Otherwise, a {@link ClassCastException} will + * be thrown. 
+ * + * @param setMap {@link ReferencedKeyMap} where interning takes place + * @param key key to add + * @param interner operation to apply to key before adding to map + * + * @param type of key + * + * @return the old key instance if found otherwise the new key instance + * + * @throws ClassCastException if {@code V} is not {@code EntryKey} + * + * @implNote This version of intern should not be called during phase1 + * using a lambda. Use an UnaryOperator instance instead. + */ + static T intern(ReferencedKeyMap> setMap, T key, UnaryOperator interner) { + T value = existingKey(setMap, key); + if (value != null) { + return value; + } + key = interner.apply(key); + return internKey(setMap, key); + } + + /** + * Check if the key already exists in the map. + * + * @param setMap {@link ReferencedKeyMap} where interning takes place + * @param key key to test + * + * @param type of key + * + * @return key if found otherwise null + */ + private static T existingKey(ReferencedKeyMap> setMap, T key) { + setMap.removeStaleReferences(); + ReferenceKey entryKey = setMap.get(setMap.lookupKey(key)); + return entryKey != null ? entryKey.get() : null; + } + + /** + * Attempt to add key to map. + * + * @param setMap {@link ReferencedKeyMap} where interning takes place + * @param key key to add + * + * @param type of key + * + * @return the old key instance if found otherwise the new key instance + */ + private static T internKey(ReferencedKeyMap> setMap, T key) { + ReferenceKey entryKey = setMap.entryKey(key); + T interned; + do { + setMap.removeStaleReferences(); + ReferenceKey existing = setMap.map.putIfAbsent(entryKey, entryKey); + if (existing == null) { + return key; + } else { + // If {@code putIfAbsent} returns non-null then was actually a + // {@code replace} and older key was used. In that case the new + // key was not used and the reference marked stale. + interned = existing.get(); + if (interned != null) { + entryKey.unused(); + } + } + } while (interned == null); + return interned; + } + } diff --git a/src/java.base/share/classes/jdk/internal/util/ReferencedKeySet.java b/src/java.base/share/classes/jdk/internal/util/ReferencedKeySet.java new file mode 100644 index 00000000000..807eea87dfe --- /dev/null +++ b/src/java.base/share/classes/jdk/internal/util/ReferencedKeySet.java @@ -0,0 +1,205 @@ +/* + * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
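Usage of the relocated map, as a hedged sketch built only on the create(boolean, Supplier) factory shown in this patch (jdk.internal.util is not exported, so real callers live inside java.base or patch the module, as the replacement test at the end of this patch does):

    import java.util.concurrent.ConcurrentHashMap;
    import jdk.internal.util.ReferencedKeyMap;

    class ReferencedKeyMapUsage {
        public static void main(String[] args) {
            // false => weak keys; the ConcurrentHashMap supplier provides the backing map.
            var map = ReferencedKeyMap.<String, Integer>create(false, ConcurrentHashMap::new);
            map.put("alpha", 1);                  // key is held through a WeakReference
            System.out.println(map.get("alpha")); // 1, while "alpha" stays strongly reachable here
            map.removeStaleReferences();          // drains entries whose keys were collected
        }
    }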
+ */ + +package jdk.internal.util; + +import java.lang.ref.Reference; +import java.lang.ref.ReferenceQueue; +import java.lang.ref.SoftReference; +import java.lang.ref.WeakReference; +import java.util.*; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Supplier; +import java.util.function.UnaryOperator; + +/** + * This class provides management of {@link Set set} where it is desirable to + * remove elements automatically when the element is garbage collected. This is + * accomplished by using a backing map where the keys and values are either a + * {@link WeakReference} or a {@link SoftReference}. + *

+ * To create a {@link ReferencedKeySet} the user must provide a {@link Supplier} + * of the backing map and whether {@link WeakReference} or + * {@link SoftReference} is to be used. + * {@snippet : + * Set set; + * + * set = ReferencedKeySet.create(false, HashMap::new); + * set.add(30_000_000L); + * set.add(30_000_001L); + * set.add(30_000_002L); + * set.add(30_000_003L); + * set.add(30_000_004L); + * + * set = ReferencedKeySet.create(true, ConcurrentHashMap::new); + * set.add(40_000_000L); + * set.add(40_000_001L); + * set.add(40_000_002L); + * set.add(40_000_003L); + * set.add(40_000_004L); + * } + * + * @implNote Care must be given that the backing map does replacement by + * replacing the value in the map entry instead of deleting the old entry and + * adding a new entry, otherwise replaced entries may end up with a strongly + * referenced key. {@link HashMap} and {@link ConcurrentHashMap} are known + * to be safe. + * + * @param the type of elements maintained by this set + */ +public final class ReferencedKeySet extends AbstractSet { + /** + * Backing {@link ReferencedKeySet} map. + */ + final ReferencedKeyMap> map; + + /** + * Private constructor. + * + * @param map backing map + */ + private ReferencedKeySet(ReferencedKeyMap> map) { + this.map = map; + } + + /** + * Create a new {@link ReferencedKeySet} elements. + * + * @param isSoft true if {@link SoftReference} elements are to + * be used, {@link WeakReference} otherwise. + * @param supplier {@link Supplier} of the backing map + * + * @return a new set with {@link Reference} elements + * + * @param the type of elements maintained by this set + */ + public static ReferencedKeySet + create(boolean isSoft, Supplier, ReferenceKey>> supplier) { + return create(isSoft, false, supplier); + } + + /** + * Create a new {@link ReferencedKeySet} elements. + * + * @param isSoft true if {@link SoftReference} elements are to + * be used, {@link WeakReference} otherwise. + * @param useNativeQueue true if uses NativeReferenceQueue + * otherwise use {@link ReferenceQueue}. + * @param supplier {@link Supplier} of the backing map + * + * @return a new set with {@link Reference} elements + * + * @param the type of elements maintained by this set + */ + public static ReferencedKeySet + create(boolean isSoft, boolean useNativeQueue, Supplier, ReferenceKey>> supplier) { + return new ReferencedKeySet<>(ReferencedKeyMap.create(isSoft, useNativeQueue, supplier)); + } + + /** + * Removes enqueued weak references from set. + */ + public void removeStaleReferences() { + map.removeStaleReferences(); + } + + @Override + public Iterator iterator() { + return map.keySet().iterator(); + } + + @Override + public int size() { + return map.size(); + } + + @Override + public boolean isEmpty() { + return map.isEmpty(); + } + + @Override + @SuppressWarnings("unchecked") + public boolean contains(Object o) { + return map.containsKey((T)o); + } + + @Override + public boolean add(T e) { + return intern(e) == null; + } + + @Override + public boolean remove(Object o) { + return map.remove(o) != null; + } + + @Override + public void clear() { + map.clear(); + } + + /** + * Gets an existing element from the set, returning null if not present or + * the old element if previously added. + * + * @param e element to get + * + * @return the old element if present, otherwise, null + */ + public T get(T e) { + ReferenceKey key = map.get(e); + + return key == null ? 
null : key.get(); + } + + /** + * Intern an element to the set, returning the element if newly added or the + * old element if previously added. + * + * @param e element to add + * + * @return the old element if present, otherwise, the new element + */ + public T intern(T e) { + return ReferencedKeyMap.intern(map, e); + } + + /** + * Intern an element to the set, returning the element if newly added or the + * old element if previously added. + * + * @param e element to add + * @param interner operation to apply to key before adding to set + * + * @return the old element if present, otherwise, the new element + * + * @implNote This version of intern should not be called during phase1 + * using a lambda. Use an UnaryOperator instance instead. + */ + public T intern(T e, UnaryOperator interner) { + return ReferencedKeyMap.intern(map, e, interner); + } +} diff --git a/src/java.base/share/classes/java/lang/runtime/SoftReferenceKey.java b/src/java.base/share/classes/jdk/internal/util/SoftReferenceKey.java similarity index 94% rename from src/java.base/share/classes/java/lang/runtime/SoftReferenceKey.java rename to src/java.base/share/classes/jdk/internal/util/SoftReferenceKey.java index 3bb524e13dd..f7e94e79f0b 100644 --- a/src/java.base/share/classes/java/lang/runtime/SoftReferenceKey.java +++ b/src/java.base/share/classes/jdk/internal/util/SoftReferenceKey.java @@ -23,7 +23,7 @@ * questions. */ -package java.lang.runtime; +package jdk.internal.util; import java.lang.ref.ReferenceQueue; import java.lang.ref.SoftReference; @@ -35,9 +35,6 @@ * @param key type * * @since 21 - * - * Warning: This class is part of PreviewFeature.Feature.STRING_TEMPLATES. - * Do not rely on its availability. */ final class SoftReferenceKey extends SoftReference implements ReferenceKey { /** @@ -76,6 +73,8 @@ public boolean equals(Object obj) { if (obj instanceof ReferenceKey key) { obj = key.get(); } + // Note: refersTo is insufficient since keys require equivalence. + // refersTo would also require a cast to type T. return Objects.equals(get(), obj); } diff --git a/src/java.base/share/classes/java/lang/runtime/StrongReferenceKey.java b/src/java.base/share/classes/jdk/internal/util/StrongReferenceKey.java similarity index 93% rename from src/java.base/share/classes/java/lang/runtime/StrongReferenceKey.java rename to src/java.base/share/classes/jdk/internal/util/StrongReferenceKey.java index 3665cad96cc..e3264cd0dca 100644 --- a/src/java.base/share/classes/java/lang/runtime/StrongReferenceKey.java +++ b/src/java.base/share/classes/jdk/internal/util/StrongReferenceKey.java @@ -23,7 +23,7 @@ * questions. */ -package java.lang.runtime; +package jdk.internal.util; import java.util.Objects; @@ -34,9 +34,6 @@ * @param key type * * @since 21 - * - * Warning: This class is part of PreviewFeature.Feature.STRING_TEMPLATES. - * Do not rely on its availability. */ final class StrongReferenceKey implements ReferenceKey { T key; diff --git a/src/java.base/share/classes/java/lang/runtime/WeakReferenceKey.java b/src/java.base/share/classes/jdk/internal/util/WeakReferenceKey.java similarity index 94% rename from src/java.base/share/classes/java/lang/runtime/WeakReferenceKey.java rename to src/java.base/share/classes/jdk/internal/util/WeakReferenceKey.java index 5d18c2e45a0..3fe6d6026d7 100644 --- a/src/java.base/share/classes/java/lang/runtime/WeakReferenceKey.java +++ b/src/java.base/share/classes/jdk/internal/util/WeakReferenceKey.java @@ -23,7 +23,7 @@ * questions. 
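Both reference-key implementations rely on the same equality contract, sketched here as a hedged standalone class (hypothetical, collapsed from SoftReferenceKey/WeakReferenceKey): equality is referent equivalence via Objects.equals, never wrapper identity, and the hash code is cached at construction because the referent may be cleared later.

    import java.lang.ref.WeakReference;
    import java.util.Objects;

    final class WeakKeySketch<T> extends WeakReference<T> {
        private final int hash;

        WeakKeySketch(T key) {
            super(key);
            hash = key.hashCode(); // cached: get() may return null once collected
        }

        @Override
        public boolean equals(Object obj) {
            if (obj instanceof WeakKeySketch<?> other) {
                obj = other.get(); // unwrap, so a bare key can be used for lookups
            }
            return Objects.equals(get(), obj); // equivalence on referents, not identity
        }

        @Override
        public int hashCode() {
            return hash;
        }
    }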
*/ -package java.lang.runtime; +package jdk.internal.util; import java.lang.ref.ReferenceQueue; import java.lang.ref.WeakReference; @@ -35,9 +35,6 @@ * @param key type * * @since 21 - * - * Warning: This class is part of PreviewFeature.Feature.STRING_TEMPLATES. - * Do not rely on its availability. */ final class WeakReferenceKey extends WeakReference implements ReferenceKey { /** @@ -76,6 +73,8 @@ public boolean equals(Object obj) { if (obj instanceof ReferenceKey key) { obj = key.get(); } + // Note: refersTo is insufficient since keys require equivalence. + // refersTo would also require a cast to type T. return Objects.equals(get(), obj); } diff --git a/test/jdk/java/lang/runtime/ReferencedKeyTest.java b/test/jdk/java/lang/runtime/ReferencedKeyTest.java deleted file mode 100644 index 9234cffb98a..00000000000 --- a/test/jdk/java/lang/runtime/ReferencedKeyTest.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -/* - * @test - * @summary Test features provided by the ReferencedKeyMap class. 
- * @modules java.base/java.lang.runtime - * @enablePreview - * @compile --patch-module java.base=${test.src} ReferencedKeyTest.java - * @run main/othervm --patch-module java.base=${test.class.path} java.lang.runtime.ReferencedKeyTest - */ - -package java.lang.runtime; - -import java.util.HashMap; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.function.Supplier; - -public class ReferencedKeyTest { - static long BASE_KEY = 10_000_000L; - - public static void main(String[] args) throws Throwable { - mapTest(false, HashMap::new); - mapTest(true, HashMap::new); - mapTest(false, ConcurrentHashMap::new); - mapTest(true, ConcurrentHashMap::new); - } - - static void assertTrue(boolean test, String message) { - if (!test) { - throw new RuntimeException(message); - } - } - - static void mapTest(boolean isSoft, Supplier, String>> supplier) { - Map map = ReferencedKeyMap.create(isSoft, supplier); - populate(map); - collect(); - // assertTrue(map.isEmpty() || isSoft, "Weak not collecting"); - populate(map); - methods(map); - } - - static void methods(Map map) { - assertTrue(map.size() == 26, "missing key"); - assertTrue(map.containsKey(BASE_KEY + 'a' -'a'), "missing key"); - assertTrue(map.get(BASE_KEY + 'b' -'a').equals("b"), "wrong key"); - assertTrue(map.containsValue("c"), "missing value"); - map.remove(BASE_KEY + 'd' -'a'); - assertTrue(map.get(BASE_KEY + 'd' -'a') == null, "not removed"); - map.putAll(Map.of(1L, "A", 2L, "B")); - assertTrue(map.get(2L).equals("B"), "collection not added"); - assertTrue(map.keySet().contains(1L), "key missing"); - assertTrue(map.values().contains("A"), "key missing"); - assertTrue(map.entrySet().contains(Map.entry(1L, "A")), "key missing"); - map.putIfAbsent(3L, "C"); - assertTrue(map.get(3L).equals("C"), "key missing"); - map.putIfAbsent(2L, "D"); - assertTrue(map.get(2L).equals("B"), "key replaced"); - map.remove(3L); - assertTrue(map.get(3L) == null, "key not removed"); - map.replace(2L, "D"); - assertTrue(map.get(2L).equals("D"), "key not replaced"); - map.replace(2L, "B", "E"); - assertTrue(map.get(2L).equals("D"), "key replaced"); - } - - static void collect() { - System.gc(); - sleep(); - } - - static void sleep() { - try { - Thread.sleep(100L); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - } - - static void populate(Map map) { - for (int i = 0; i < 26; i++) { - Long key = BASE_KEY + i; - String value = String.valueOf((char) ('a' + i)); - map.put(key, value); - } - } -} diff --git a/test/jdk/jdk/internal/util/ReferencedKeyTest.java b/test/jdk/jdk/internal/util/ReferencedKeyTest.java new file mode 100644 index 00000000000..c5edaedd2e2 --- /dev/null +++ b/test/jdk/jdk/internal/util/ReferencedKeyTest.java @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8285932 8310913
+ * @summary Test features provided by the ReferencedKeyMap/ReferencedKeySet classes.
+ * @modules java.base/jdk.internal.util
+ * @compile --patch-module java.base=${test.src} ReferencedKeyTest.java
+ * @run main/othervm --patch-module java.base=${test.class.path} jdk.internal.util.ReferencedKeyTest
+ */
+
+package jdk.internal.util;
+
+import java.lang.ref.PhantomReference;
+import java.lang.ref.Reference;
+import java.lang.ref.ReferenceQueue;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.BooleanSupplier;
+import java.util.function.Supplier;
+
+public class ReferencedKeyTest {
+    static long BASE_KEY = 10_000_000L;
+
+    public static void main(String[] args) {
+        mapTest(false, HashMap::new);
+        mapTest(true, HashMap::new);
+        mapTest(false, ConcurrentHashMap::new);
+        mapTest(true, ConcurrentHashMap::new);
+
+        setTest(false, HashMap::new);
+        setTest(true, HashMap::new);
+        setTest(false, ConcurrentHashMap::new);
+        setTest(true, ConcurrentHashMap::new);
+    }
+
+    static void assertTrue(boolean test, String message) {
+        if (!test) {
+            throw new RuntimeException(message);
+        }
+    }
+
+    static void mapTest(boolean isSoft, Supplier<Map<ReferenceKey<Long>, String>> supplier) {
+        Map<Long, String> map = ReferencedKeyMap.create(isSoft, supplier);
+        populate(map);
+        if (!isSoft) {
+            if (!collect(() -> map.isEmpty())) {
+                throw new RuntimeException("WeakReference map not collecting!");
+            }
+        }
+        populate(map);
+        methods(map);
+    }
+
+    static void setTest(boolean isSoft, Supplier<Map<ReferenceKey<Long>, ReferenceKey<Long>>> supplier) {
+        ReferencedKeySet<Long> set = ReferencedKeySet.create(isSoft, supplier);
+        populate(set);
+        if (!isSoft) {
+            if (!collect(() -> set.isEmpty())) {
+                throw new RuntimeException("WeakReference set not collecting!");
+            }
+        }
+        populate(set);
+        methods(set);
+    }
+
+    static void methods(Map<Long, String> map) {
+        assertTrue(map.size() == 26, "missing key");
+        assertTrue(map.containsKey(BASE_KEY + 'a' -'a'), "missing key");
+        assertTrue(map.get(BASE_KEY + 'b' -'a').equals("b"), "wrong key");
+        assertTrue(map.containsValue("c"), "missing value");
+        map.remove(BASE_KEY + 'd' -'a');
+        assertTrue(map.get(BASE_KEY + 'd' -'a') == null, "not removed");
+        map.putAll(Map.of(1L, "A", 2L, "B"));
+        assertTrue(map.get(2L).equals("B"), "collection not added");
+        assertTrue(map.containsKey(1L), "key missing");
+        assertTrue(map.containsValue("A"), "key missing");
+        assertTrue(map.entrySet().contains(Map.entry(1L, "A")), "key missing");
+        map.putIfAbsent(3L, "C");
+        assertTrue(map.get(3L).equals("C"), "key missing");
+        map.putIfAbsent(2L, "D");
+        assertTrue(map.get(2L).equals("B"), "key replaced");
+        map.remove(3L);
+        assertTrue(map.get(3L) == null, "key not removed");
+        map.replace(2L, "D");
+        assertTrue(map.get(2L).equals("D"), "key not replaced");
+        map.replace(2L, "B", "E");
+        assertTrue(map.get(2L).equals("D"), "key replaced");
+    }
+
+    static void methods(ReferencedKeySet<Long> set) {
+        assertTrue(set.size() == 26, "missing key");
+        assertTrue(set.contains(BASE_KEY + 3), "missing key");
+        set.remove(BASE_KEY + 3);
+        assertTrue(!set.contains(BASE_KEY + 3), "not removed");
"not removed"); + Long element1 = set.get(BASE_KEY + 2); + Long element2 = set.get(BASE_KEY + 3); + Long element3 = set.get(BASE_KEY + 4); + Long intern1 = set.intern(BASE_KEY + 2); + Long intern2 = set.intern(BASE_KEY + 3); + Long intern3 = set.intern(BASE_KEY + 4, e -> e); + assertTrue(element1 != null, "missing key"); + assertTrue(element2 == null, "not removed"); + assertTrue(element1 == intern1, "intern failed"); // must be same object + assertTrue(intern2 != null, "intern failed"); + assertTrue(element3 == intern3, "intern failed"); + } + + // Borrowed from jdk.test.lib.util.ForceGC but couldn't use from java.base/jdk.internal.util + static boolean collect(BooleanSupplier booleanSupplier) { + ReferenceQueue queue = new ReferenceQueue<>(); + Object obj = new Object(); + PhantomReference ref = new PhantomReference<>(obj, queue); + obj = null; + Reference.reachabilityFence(obj); + Reference.reachabilityFence(ref); + long timeout = 1000L; + long quanta = 200L; + long retries = timeout / quanta; + + for (; retries >= 0; retries--) { + if (booleanSupplier.getAsBoolean()) { + return true; + } + + System.gc(); + + try { + queue.remove(quanta); + } catch (InterruptedException ie) { + // ignore, the loop will try again + } + } + + return booleanSupplier.getAsBoolean(); + } + + static void populate(Map map) { + for (int i = 0; i < 26; i++) { + Long key = BASE_KEY + i; + String value = String.valueOf((char) ('a' + i)); + map.put(key, value); + } + } + + static void populate(Set set) { + for (int i = 0; i < 26; i++) { + Long value = BASE_KEY + i; + set.add(value); + } + } +} From 41fda4a528e1b1a0620445392fb8a1579b3c32f2 Mon Sep 17 00:00:00 2001 From: Liang Mao Date: Tue, 7 May 2024 13:17:45 +0000 Subject: [PATCH 18/19] 8319376: ParallelGC: Forwarded objects found during heap inspection Backport-of: 59e9981ec21258b8aa5f93cb1fb9b0ccf9f846af --- src/hotspot/share/gc/parallel/mutableSpace.cpp | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/src/hotspot/share/gc/parallel/mutableSpace.cpp b/src/hotspot/share/gc/parallel/mutableSpace.cpp index 69ebf86c7c2..cdbd0ac4813 100644 --- a/src/hotspot/share/gc/parallel/mutableSpace.cpp +++ b/src/hotspot/share/gc/parallel/mutableSpace.cpp @@ -236,7 +236,18 @@ void MutableSpace::oop_iterate(OopIterateClosure* cl) { void MutableSpace::object_iterate(ObjectClosure* cl) { HeapWord* p = bottom(); while (p < top()) { - cl->do_object(cast_to_oop(p)); + oop obj = cast_to_oop(p); + // When promotion-failure occurs during Young GC, eden/from space is not cleared, + // so we can encounter objects with "forwarded" markword. 
+ // They are essentially dead, so skipping them + if (!obj->is_forwarded()) { + cl->do_object(obj); + } +#ifdef ASSERT + else { + assert(obj->forwardee() != obj, "must not be self-forwarded"); + } +#endif p += cast_to_oop(p)->size(); } } From 93d091ad35cccc9c7010ae6481a61feee7830e92 Mon Sep 17 00:00:00 2001 From: Liang Mao Date: Tue, 7 May 2024 13:20:39 +0000 Subject: [PATCH 19/19] 8314573: G1: Heap resizing at Remark does not take existing eden regions into account Backport-of: 762b652912939b37fbd68955617705c62b9fc3a5 --- src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp b/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp index 1a7c11a685b..8645b847045 100644 --- a/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp +++ b/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp @@ -217,7 +217,14 @@ size_t G1HeapSizingPolicy::full_collection_resize_amount(bool& expand) { // Capacity, free and used after the GC counted as full regions to // include the waste in the following calculations. const size_t capacity_after_gc = _g1h->capacity(); - const size_t used_after_gc = capacity_after_gc - _g1h->unused_committed_regions_in_bytes(); + const size_t used_after_gc = capacity_after_gc - + _g1h->unused_committed_regions_in_bytes() - + // Discount space used by current Eden to establish a + // situation during Remark similar to at the end of full + // GC where eden is empty. During Remark there can be an + // arbitrary number of eden regions which would skew the + // results. + _g1h->eden_regions_count() * HeapRegion::GrainBytes; size_t minimum_desired_capacity = target_heap_capacity(used_after_gc, MinHeapFreeRatio); size_t maximum_desired_capacity = target_heap_capacity(used_after_gc, MaxHeapFreeRatio);
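
A note on the refersTo comment added in WeakReferenceKey.equals() further above: a reference key must compare by value equivalence of the referents, so that a lookup with a fresh, equal-but-distinct key still hits, whereas Reference.refersTo() is an identity test (and takes a T, not an Object). The sketch below illustrates the pattern with a simplified stand-in; WeakKeySketch is a hypothetical class for illustration only, not the JDK's WeakReferenceKey.

import java.lang.ref.WeakReference;
import java.util.Objects;

// Simplified stand-in for a weak map key (hypothetical, not JDK code).
final class WeakKeySketch<T> extends WeakReference<T> {
    WeakKeySketch(T key) { super(key); }

    @Override
    public boolean equals(Object obj) {
        if (obj instanceof WeakKeySketch<?> key) {
            obj = key.get();                 // unwrap the other key's referent
        }
        // refersTo(obj) would test identity and requires a T, not an Object;
        // lookups must also match equal-but-distinct referents, hence equals().
        return Objects.equals(get(), obj);
    }

    @Override
    public int hashCode() {
        T referent = get();
        return referent == null ? 0 : referent.hashCode();
    }

    public static void main(String[] args) {
        Long a = Long.valueOf(10_000_000L);  // two distinct boxes holding
        Long b = Long.valueOf(10_000_000L);  // equal values
        // prints true: equal referents, though a != b as objects
        System.out.println(new WeakKeySketch<>(a).equals(new WeakKeySketch<>(b)));
    }
}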
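
The MutableSpace::object_iterate() change in PATCH 18 amounts to: keep stepping through the space object by object, but hand only non-forwarded objects to the closure, since a forwarded header marks a stale original left behind when promotion failed partway through a young GC. The toy model below mirrors just that control flow; the int[] "heap" and the header word packing a size and a forwarded bit are purely illustrative and nothing like HotSpot's real object layout.

import java.util.function.IntConsumer;

// Toy bump-pointer space walk; a header packs (sizeInWords << 1 | forwardedBit).
public class SpaceWalkSketch {
    static void objectIterate(int[] heap, int top, IntConsumer visit) {
        int p = 0;
        while (p < top) {
            int header = heap[p];
            if ((header & 1) == 0) {   // not forwarded: a live object
                visit.accept(p);
            }                          // forwarded: stale copy, do not visit
            p += header >>> 1;         // advance by its size either way
        }
    }

    public static void main(String[] args) {
        // Objects of 2, 4 and 3 words; the middle one is marked forwarded.
        int[] heap = {2 << 1, 0, (4 << 1) | 1, 0, 0, 0, 3 << 1, 0, 0};
        objectIterate(heap, heap.length, p -> System.out.println("object at " + p));
        // prints "object at 0" and "object at 6" only
    }
}

The point the model preserves is that the dead object's size must still be read so the cursor can step past it; only the visit is suppressed.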
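
The G1 change in PATCH 19 is plain bookkeeping, so a worked example may help. Subtracting eden from "used" makes the Remark-time resize see the heap the way the policy's full-GC baseline assumes, with eden empty. The names and numbers below are illustrative only, not G1's API.

public class RemarkResizeSketch {
    // used = committed capacity - unused committed space - current eden
    static long usedAfterGc(long capacity, long unusedCommitted,
                            long edenRegions, long regionBytes) {
        return capacity - unusedCommitted - edenRegions * regionBytes;
    }

    public static void main(String[] args) {
        long mb = 1024 * 1024;
        long capacity = 1024 * mb;        // committed heap
        long unusedCommitted = 256 * mb;  // committed but unused regions
        long edenRegions = 40;            // arbitrary count at Remark time
        long regionBytes = 4 * mb;        // e.g. a 4M region size

        long naive = capacity - unusedCommitted;               // 768 MB
        long adjusted = usedAfterGc(capacity, unusedCommitted,
                                    edenRegions, regionBytes); // 608 MB
        System.out.println("used without eden discount: " + naive / mb + " MB");
        System.out.println("used with eden discount:    " + adjusted / mb + " MB");
        // The MinHeapFreeRatio/MaxHeapFreeRatio targets are then derived from
        // the smaller figure, so the chosen capacity no longer depends on how
        // many eden regions happened to exist when Remark ran.
    }
}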