programmatically wait a certain amount of time will apply this factor.
If we run in forced compilation mode (-Xcomp), the build
system will automatically adjust this factor to compensate for less
-performance. Defaults to 4.
+performance. Defaults to 1.
FAILURE_HANDLER_TIMEOUT
Sets the argument -timeoutHandlerTimeout for JTReg. The
default value is 0. This is only valid if the failure handler is
diff --git a/doc/testing.md b/doc/testing.md
index 0144610a5bf92..324f9645c270c 100644
--- a/doc/testing.md
+++ b/doc/testing.md
@@ -387,7 +387,7 @@ The `TIMEOUT_FACTOR` is forwarded to JTReg framework itself
(`-timeoutFactor`). Also, some test cases that programmatically wait a
certain amount of time will apply this factor. If we run in forced
compilation mode (`-Xcomp`), the build system will automatically
-adjust this factor to compensate for less performance. Defaults to 4.
+adjust this factor to compensate for less performance. Defaults to 1.
#### FAILURE_HANDLER_TIMEOUT
diff --git a/make/RunTests.gmk b/make/RunTests.gmk
index 947389f64f99c..7b05a0ba12fdb 100644
--- a/make/RunTests.gmk
+++ b/make/RunTests.gmk
@@ -946,8 +946,8 @@ define SetupRunJtregTestBody
JTREG_ALL_OPTIONS := $$(JTREG_JAVA_OPTIONS) $$(JTREG_VM_OPTIONS)
JTREG_AUTO_PROBLEM_LISTS :=
- # Please reach consensus before changing this.
- JTREG_AUTO_TIMEOUT_FACTOR := 4
+ # Please reach consensus before changing this; reducing it from 4 to 1 was a deliberate decision.
+ JTREG_AUTO_TIMEOUT_FACTOR := 1
ifneq ($$(findstring -Xcomp, $$(JTREG_ALL_OPTIONS)), )
JTREG_AUTO_PROBLEM_LISTS += ProblemList-Xcomp.txt
diff --git a/make/conf/github-actions.conf b/make/conf/github-actions.conf
index bd73e909062dd..74a830cbcc289 100644
--- a/make/conf/github-actions.conf
+++ b/make/conf/github-actions.conf
@@ -29,21 +29,21 @@ GTEST_VERSION=1.14.0
JTREG_VERSION=8.1+1
LINUX_X64_BOOT_JDK_EXT=tar.gz
-LINUX_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk25/bd75d5f9689641da8e1daabeccb5528b/36/GPL/openjdk-25_linux-x64_bin.tar.gz
-LINUX_X64_BOOT_JDK_SHA256=59cdcaf255add4721de38eb411d4ecfe779356b61fb671aee63c7dec78054c2b
+LINUX_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk24/1f9ff9062db4449d8ca828c504ffae90/36/GPL/openjdk-24_linux-x64_bin.tar.gz
+LINUX_X64_BOOT_JDK_SHA256=88b090fa80c6c1d084ec9a755233967458788e2c0777ae2e172230c5c692d7ef
ALPINE_LINUX_X64_BOOT_JDK_EXT=tar.gz
-ALPINE_LINUX_X64_BOOT_JDK_URL=https://github.com/adoptium/temurin25-binaries/releases/download/jdk-25%2B36/OpenJDK25U-jdk_x64_alpine-linux_hotspot_25_36.tar.gz
-ALPINE_LINUX_X64_BOOT_JDK_SHA256=637e47474d411ed86134f413af7d5fef4180ddb0bf556347b7e74a88cf8904c8
+ALPINE_LINUX_X64_BOOT_JDK_URL=https://github.com/adoptium/temurin24-binaries/releases/download/jdk-24%2B36/OpenJDK24U-jdk_x64_alpine-linux_hotspot_24_36.tar.gz
+ALPINE_LINUX_X64_BOOT_JDK_SHA256=a642608f0da78344ee6812fb1490b8bc1d7ad5a18064c70994d6f330568c51cb
MACOS_AARCH64_BOOT_JDK_EXT=tar.gz
-MACOS_AARCH64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk25/bd75d5f9689641da8e1daabeccb5528b/36/GPL/openjdk-25_macos-aarch64_bin.tar.gz
-MACOS_AARCH64_BOOT_JDK_SHA256=2006337bf326fdfdf6117081751ba38c1c8706d63419ecac7ff102ff7c776876
+MACOS_AARCH64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk24/1f9ff9062db4449d8ca828c504ffae90/36/GPL/openjdk-24_macos-aarch64_bin.tar.gz
+MACOS_AARCH64_BOOT_JDK_SHA256=f7133238a12714a62c5ad2bd4da6741130be1a82512065da9ca23dee26b2d3d3
MACOS_X64_BOOT_JDK_EXT=tar.gz
-MACOS_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk25/bd75d5f9689641da8e1daabeccb5528b/36/GPL/openjdk-25_macos-x64_bin.tar.gz
-MACOS_X64_BOOT_JDK_SHA256=47482ad9888991ecac9b2bcc131e2b53ff78aff275104cef85f66252308e8a09
+MACOS_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk24/1f9ff9062db4449d8ca828c504ffae90/36/GPL/openjdk-24_macos-x64_bin.tar.gz
+MACOS_X64_BOOT_JDK_SHA256=6bbfb1d01741cbe55ab90299cb91464b695de9a3ace85c15131aa2f50292f321
WINDOWS_X64_BOOT_JDK_EXT=zip
-WINDOWS_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk25/bd75d5f9689641da8e1daabeccb5528b/36/GPL/openjdk-25_windows-x64_bin.zip
-WINDOWS_X64_BOOT_JDK_SHA256=85bcc178461e2cb3c549ab9ca9dfa73afd54c09a175d6510d0884071867137d3
+WINDOWS_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk24/1f9ff9062db4449d8ca828c504ffae90/36/GPL/openjdk-24_windows-x64_bin.zip
+WINDOWS_X64_BOOT_JDK_SHA256=11d1d9f6ac272d5361c8a0bef01894364081c7fb1a6914c2ad2fc312ae83d63b
diff --git a/make/conf/jib-profiles.js b/make/conf/jib-profiles.js
index 795335d7c3cf9..9706321d7b60c 100644
--- a/make/conf/jib-profiles.js
+++ b/make/conf/jib-profiles.js
@@ -387,8 +387,8 @@ var getJibProfilesCommon = function (input, data) {
};
};
- common.boot_jdk_version = "25";
- common.boot_jdk_build_number = "37";
+ common.boot_jdk_version = "24";
+ common.boot_jdk_build_number = "36";
common.boot_jdk_home = input.get("boot_jdk", "install_path") + "/jdk-"
+ common.boot_jdk_version
+ (input.build_os == "macosx" ? ".jdk/Contents/Home" : "");
diff --git a/make/conf/version-numbers.conf b/make/conf/version-numbers.conf
index 977809535ba6a..38d6e42dff91b 100644
--- a/make/conf/version-numbers.conf
+++ b/make/conf/version-numbers.conf
@@ -37,6 +37,6 @@ DEFAULT_VERSION_DATE=2026-03-17
DEFAULT_VERSION_CLASSFILE_MAJOR=70 # "`$EXPR $DEFAULT_VERSION_FEATURE + 44`"
DEFAULT_VERSION_CLASSFILE_MINOR=0
DEFAULT_VERSION_DOCS_API_SINCE=11
-DEFAULT_ACCEPTABLE_BOOT_VERSIONS="25 26"
+DEFAULT_ACCEPTABLE_BOOT_VERSIONS="24 25 26"
DEFAULT_JDK_SOURCE_TARGET_VERSION=26
DEFAULT_PROMOTED_VERSION_PRE=ea
diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad
index 5734519301e28..51cdf8c71dfe2 100644
--- a/src/hotspot/cpu/aarch64/aarch64.ad
+++ b/src/hotspot/cpu/aarch64/aarch64.ad
@@ -1266,39 +1266,39 @@ source %{
// adlc register classes to make AArch64 rheapbase (r27) and rfp (r29)
// registers conditionally reserved.
- _ANY_REG32_mask.assignFrom(_ALL_REG32_mask);
- _ANY_REG32_mask.remove(OptoReg::as_OptoReg(r31_sp->as_VMReg()));
+ _ANY_REG32_mask = _ALL_REG32_mask;
+ _ANY_REG32_mask.Remove(OptoReg::as_OptoReg(r31_sp->as_VMReg()));
- _ANY_REG_mask.assignFrom(_ALL_REG_mask);
+ _ANY_REG_mask = _ALL_REG_mask;
- _PTR_REG_mask.assignFrom(_ALL_REG_mask);
+ _PTR_REG_mask = _ALL_REG_mask;
- _NO_SPECIAL_REG32_mask.assignFrom(_ALL_REG32_mask);
- _NO_SPECIAL_REG32_mask.subtract(_NON_ALLOCATABLE_REG32_mask);
+ _NO_SPECIAL_REG32_mask = _ALL_REG32_mask;
+ _NO_SPECIAL_REG32_mask.SUBTRACT(_NON_ALLOCATABLE_REG32_mask);
- _NO_SPECIAL_REG_mask.assignFrom(_ALL_REG_mask);
- _NO_SPECIAL_REG_mask.subtract(_NON_ALLOCATABLE_REG_mask);
+ _NO_SPECIAL_REG_mask = _ALL_REG_mask;
+ _NO_SPECIAL_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);
- _NO_SPECIAL_PTR_REG_mask.assignFrom(_ALL_REG_mask);
- _NO_SPECIAL_PTR_REG_mask.subtract(_NON_ALLOCATABLE_REG_mask);
+ _NO_SPECIAL_PTR_REG_mask = _ALL_REG_mask;
+ _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);
// r27 is not allocatable when compressed oops is on and heapbase is not
// zero, compressed klass pointers doesn't use r27 after JDK-8234794
if (UseCompressedOops && (CompressedOops::base() != nullptr)) {
- _NO_SPECIAL_REG32_mask.remove(OptoReg::as_OptoReg(r27->as_VMReg()));
- _NO_SPECIAL_REG_mask.remove(OptoReg::as_OptoReg(r27->as_VMReg()));
- _NO_SPECIAL_PTR_REG_mask.remove(OptoReg::as_OptoReg(r27->as_VMReg()));
+ _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
+ _NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
+ _NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
}
// r29 is not allocatable when PreserveFramePointer is on
if (PreserveFramePointer) {
- _NO_SPECIAL_REG32_mask.remove(OptoReg::as_OptoReg(r29->as_VMReg()));
- _NO_SPECIAL_REG_mask.remove(OptoReg::as_OptoReg(r29->as_VMReg()));
- _NO_SPECIAL_PTR_REG_mask.remove(OptoReg::as_OptoReg(r29->as_VMReg()));
+ _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
+ _NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
+ _NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
}
- _NO_SPECIAL_NO_RFP_PTR_REG_mask.assignFrom(_NO_SPECIAL_PTR_REG_mask);
- _NO_SPECIAL_NO_RFP_PTR_REG_mask.remove(OptoReg::as_OptoReg(r29->as_VMReg()));
+ _NO_SPECIAL_NO_RFP_PTR_REG_mask = _NO_SPECIAL_PTR_REG_mask;
+ _NO_SPECIAL_NO_RFP_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
}
// Optimizaton of volatile gets and puts
@@ -1734,7 +1734,7 @@ uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
}
//=============================================================================
-const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::EMPTY;
+const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
int ConstantTable::calculate_table_base_offset() const {
return 0; // absolute addressing, no offset
@@ -2520,10 +2520,10 @@ uint Matcher::int_pressure_limit()
// as a spilled LRG. Spilling heuristics(Spill-USE) explicitly skip
// derived pointers and lastly fail to spill after reaching maximum
// number of iterations. Lowering the default pressure threshold to
- // (_NO_SPECIAL_REG32_mask.size() minus 1) forces CallNode to become
+ // (_NO_SPECIAL_REG32_mask.Size() minus 1) forces CallNode to become
// a high register pressure area of the code so that split_DEF can
// generate DefinitionSpillCopy for the derived pointer.
- uint default_int_pressure_threshold = _NO_SPECIAL_REG32_mask.size() - 1;
+ uint default_int_pressure_threshold = _NO_SPECIAL_REG32_mask.Size() - 1;
if (!PreserveFramePointer) {
// When PreserveFramePointer is off, frame pointer is allocatable,
// but different from other SOC registers, it is excluded from
@@ -2538,34 +2538,34 @@ uint Matcher::int_pressure_limit()
uint Matcher::float_pressure_limit()
{
// _FLOAT_REG_mask is generated by adlc from the float_reg register class.
- return (FLOATPRESSURE == -1) ? _FLOAT_REG_mask.size() : FLOATPRESSURE;
+ return (FLOATPRESSURE == -1) ? _FLOAT_REG_mask.Size() : FLOATPRESSURE;
}
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
return false;
}
-const RegMask& Matcher::divI_proj_mask() {
+RegMask Matcher::divI_proj_mask() {
ShouldNotReachHere();
- return RegMask::EMPTY;
+ return RegMask();
}
// Register for MODI projection of divmodI.
-const RegMask& Matcher::modI_proj_mask() {
+RegMask Matcher::modI_proj_mask() {
ShouldNotReachHere();
- return RegMask::EMPTY;
+ return RegMask();
}
// Register for DIVL projection of divmodL.
-const RegMask& Matcher::divL_proj_mask() {
+RegMask Matcher::divL_proj_mask() {
ShouldNotReachHere();
- return RegMask::EMPTY;
+ return RegMask();
}
// Register for MODL projection of divmodL.
-const RegMask& Matcher::modL_proj_mask() {
+RegMask Matcher::modL_proj_mask() {
ShouldNotReachHere();
- return RegMask::EMPTY;
+ return RegMask();
}
bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
diff --git a/src/hotspot/cpu/aarch64/aarch64_vector.ad b/src/hotspot/cpu/aarch64/aarch64_vector.ad
index 3379041b2ccac..ef35b66003d1b 100644
--- a/src/hotspot/cpu/aarch64/aarch64_vector.ad
+++ b/src/hotspot/cpu/aarch64/aarch64_vector.ad
@@ -7081,31 +7081,29 @@ instruct vcompress(vReg dst, vReg src, pRegGov pg) %{
%}
instruct vcompressB(vReg dst, vReg src, pReg pg, vReg tmp1, vReg tmp2,
- vReg tmp3, pReg ptmp, pRegGov pgtmp) %{
+ vReg tmp3, vReg tmp4, pReg ptmp, pRegGov pgtmp) %{
predicate(UseSVE > 0 && Matcher::vector_element_basic_type(n) == T_BYTE);
- effect(TEMP_DEF dst, TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP ptmp, TEMP pgtmp);
+ effect(TEMP_DEF dst, TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP ptmp, TEMP pgtmp);
match(Set dst (CompressV src pg));
- format %{ "vcompressB $dst, $src, $pg\t# KILL $tmp1, $tmp2, $tmp3, $ptmp, $pgtmp" %}
+ format %{ "vcompressB $dst, $src, $pg\t# KILL $tmp1, $tmp2, $tmp3, $tmp4, $ptmp, $pgtmp" %}
ins_encode %{
- uint length_in_bytes = Matcher::vector_length_in_bytes(this);
__ sve_compress_byte($dst$$FloatRegister, $src$$FloatRegister, $pg$$PRegister,
- $tmp1$$FloatRegister, $tmp2$$FloatRegister, $tmp3$$FloatRegister,
- $ptmp$$PRegister, $pgtmp$$PRegister, length_in_bytes);
+ $tmp1$$FloatRegister, $tmp2$$FloatRegister,
+ $tmp3$$FloatRegister, $tmp4$$FloatRegister,
+ $ptmp$$PRegister, $pgtmp$$PRegister);
%}
ins_pipe(pipe_slow);
%}
-instruct vcompressS(vReg dst, vReg src, pReg pg, vReg tmp1, vReg tmp2, pRegGov pgtmp) %{
+instruct vcompressS(vReg dst, vReg src, pReg pg,
+ vReg tmp1, vReg tmp2, pRegGov pgtmp) %{
predicate(UseSVE > 0 && Matcher::vector_element_basic_type(n) == T_SHORT);
effect(TEMP_DEF dst, TEMP tmp1, TEMP tmp2, TEMP pgtmp);
match(Set dst (CompressV src pg));
format %{ "vcompressS $dst, $src, $pg\t# KILL $tmp1, $tmp2, $pgtmp" %}
ins_encode %{
- uint length_in_bytes = Matcher::vector_length_in_bytes(this);
- __ sve_dup($tmp1$$FloatRegister, __ H, 0);
__ sve_compress_short($dst$$FloatRegister, $src$$FloatRegister, $pg$$PRegister,
- $tmp1$$FloatRegister, $tmp2$$FloatRegister, $pgtmp$$PRegister,
- length_in_bytes);
+ $tmp1$$FloatRegister, $tmp2$$FloatRegister, $pgtmp$$PRegister);
%}
ins_pipe(pipe_slow);
%}
diff --git a/src/hotspot/cpu/aarch64/aarch64_vector_ad.m4 b/src/hotspot/cpu/aarch64/aarch64_vector_ad.m4
index 6d296cbdb3ac3..012de7e46d809 100644
--- a/src/hotspot/cpu/aarch64/aarch64_vector_ad.m4
+++ b/src/hotspot/cpu/aarch64/aarch64_vector_ad.m4
@@ -5069,31 +5069,29 @@ instruct vcompress(vReg dst, vReg src, pRegGov pg) %{
%}
instruct vcompressB(vReg dst, vReg src, pReg pg, vReg tmp1, vReg tmp2,
- vReg tmp3, pReg ptmp, pRegGov pgtmp) %{
+ vReg tmp3, vReg tmp4, pReg ptmp, pRegGov pgtmp) %{
predicate(UseSVE > 0 && Matcher::vector_element_basic_type(n) == T_BYTE);
- effect(TEMP_DEF dst, TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP ptmp, TEMP pgtmp);
+ effect(TEMP_DEF dst, TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP ptmp, TEMP pgtmp);
match(Set dst (CompressV src pg));
- format %{ "vcompressB $dst, $src, $pg\t# KILL $tmp1, $tmp2, $tmp3, $ptmp, $pgtmp" %}
+ format %{ "vcompressB $dst, $src, $pg\t# KILL $tmp1, $tmp2, $tmp3, $tmp4, $ptmp, $pgtmp" %}
ins_encode %{
- uint length_in_bytes = Matcher::vector_length_in_bytes(this);
__ sve_compress_byte($dst$$FloatRegister, $src$$FloatRegister, $pg$$PRegister,
- $tmp1$$FloatRegister, $tmp2$$FloatRegister, $tmp3$$FloatRegister,
- $ptmp$$PRegister, $pgtmp$$PRegister, length_in_bytes);
+ $tmp1$$FloatRegister, $tmp2$$FloatRegister,
+ $tmp3$$FloatRegister, $tmp4$$FloatRegister,
+ $ptmp$$PRegister, $pgtmp$$PRegister);
%}
ins_pipe(pipe_slow);
%}
-instruct vcompressS(vReg dst, vReg src, pReg pg, vReg tmp1, vReg tmp2, pRegGov pgtmp) %{
+instruct vcompressS(vReg dst, vReg src, pReg pg,
+ vReg tmp1, vReg tmp2, pRegGov pgtmp) %{
predicate(UseSVE > 0 && Matcher::vector_element_basic_type(n) == T_SHORT);
effect(TEMP_DEF dst, TEMP tmp1, TEMP tmp2, TEMP pgtmp);
match(Set dst (CompressV src pg));
format %{ "vcompressS $dst, $src, $pg\t# KILL $tmp1, $tmp2, $pgtmp" %}
ins_encode %{
- uint length_in_bytes = Matcher::vector_length_in_bytes(this);
- __ sve_dup($tmp1$$FloatRegister, __ H, 0);
__ sve_compress_short($dst$$FloatRegister, $src$$FloatRegister, $pg$$PRegister,
- $tmp1$$FloatRegister, $tmp2$$FloatRegister, $pgtmp$$PRegister,
- length_in_bytes);
+ $tmp1$$FloatRegister, $tmp2$$FloatRegister, $pgtmp$$PRegister);
%}
ins_pipe(pipe_slow);
%}
diff --git a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp
index a8f378e524fc3..4c4251fbe9f25 100644
--- a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp
@@ -3486,7 +3486,6 @@ template
INSN(sve_smaxv, 0b00000100, 0b001000001); // signed maximum reduction to scalar
INSN(sve_smin, 0b00000100, 0b001010000); // signed minimum vectors
INSN(sve_sminv, 0b00000100, 0b001010001); // signed minimum reduction to scalar
- INSN(sve_splice,0b00000101, 0b101100100); // splice two vectors under predicate control, destructive
INSN(sve_sub, 0b00000100, 0b000001000); // vector sub
INSN(sve_uaddv, 0b00000100, 0b000001001); // unsigned add reduction to scalar
INSN(sve_umax, 0b00000100, 0b001001000); // unsigned maximum vectors
diff --git a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp
index 328ef0c53e6bd..b61a0e4e3789d 100644
--- a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp
@@ -2203,117 +2203,114 @@ void C2_MacroAssembler::sve_gen_mask_imm(PRegister dst, BasicType bt, uint32_t l
// Pack active elements of src, under the control of mask, into the lowest-numbered elements of dst.
// Any remaining elements of dst will be filled with zero.
// Clobbers: rscratch1
-// Preserves: mask, vzr
+// Preserves: src, mask
void C2_MacroAssembler::sve_compress_short(FloatRegister dst, FloatRegister src, PRegister mask,
- FloatRegister vzr, FloatRegister vtmp,
- PRegister pgtmp, unsigned vector_length_in_bytes) {
+ FloatRegister vtmp1, FloatRegister vtmp2,
+ PRegister pgtmp) {
assert(pgtmp->is_governing(), "This register has to be a governing predicate register");
- // When called by sve_compress_byte, src and vtmp may be the same register.
- assert_different_registers(dst, src, vzr);
- assert_different_registers(dst, vtmp, vzr);
+ assert_different_registers(dst, src, vtmp1, vtmp2);
assert_different_registers(mask, pgtmp);
- // high <-- low
- // Example input: src = hh gg ff ee dd cc bb aa, one character is 8 bits.
- // mask = 01 00 00 01 01 00 01 01, one character is 1 bit.
- // Expected result: dst = 00 00 00 hh ee dd bb aa
+
+ // Example input: src = 8888 7777 6666 5555 4444 3333 2222 1111
+ // mask = 0001 0000 0000 0001 0001 0000 0001 0001
+ // Expected result: dst = 0000 0000 0000 8888 5555 4444 2222 1111
+ sve_dup(vtmp2, H, 0);
// Extend lowest half to type INT.
- // dst = 00dd 00cc 00bb 00aa
+ // dst = 00004444 00003333 00002222 00001111
sve_uunpklo(dst, S, src);
- // pgtmp = 0001 0000 0001 0001
+ // pgtmp = 00000001 00000000 00000001 00000001
sve_punpklo(pgtmp, mask);
// Pack the active elements in size of type INT to the right,
// and fill the remainings with zero.
- // dst = 0000 00dd 00bb 00aa
+ // dst = 00000000 00004444 00002222 00001111
sve_compact(dst, S, dst, pgtmp);
// Narrow the result back to type SHORT.
- // dst = 00 00 00 00 00 dd bb aa
- sve_uzp1(dst, H, dst, vzr);
-
- // Return if the vector length is no more than MaxVectorSize/2, since the
- // highest half is invalid.
- if (vector_length_in_bytes <= (MaxVectorSize >> 1)) {
- return;
- }
-
+ // dst = 0000 0000 0000 0000 0000 4444 2222 1111
+ sve_uzp1(dst, H, dst, vtmp2);
// Count the active elements of lowest half.
// rscratch1 = 3
sve_cntp(rscratch1, S, ptrue, pgtmp);
// Repeat to the highest half.
- // pgtmp = 0001 0000 0000 0001
+ // pgtmp = 00000001 00000000 00000000 00000001
sve_punpkhi(pgtmp, mask);
- // vtmp = 00hh 00gg 00ff 00ee
- sve_uunpkhi(vtmp, S, src);
- // vtmp = 0000 0000 00hh 00ee
- sve_compact(vtmp, S, vtmp, pgtmp);
- // vtmp = 00 00 00 00 00 00 hh ee
- sve_uzp1(vtmp, H, vtmp, vzr);
-
- // pgtmp = 00 00 00 00 00 01 01 01
- sve_whilelt(pgtmp, H, zr, rscratch1);
- // Compressed low: dst = 00 00 00 00 00 dd bb aa
- // Compressed high: vtmp = 00 00 00 00 00 00 hh ee
- // Combine the compressed low with the compressed high:
- // dst = 00 00 00 hh ee dd bb aa
- sve_splice(dst, H, pgtmp, vtmp);
+ // vtmp1 = 00008888 00007777 00006666 00005555
+ sve_uunpkhi(vtmp1, S, src);
+ // vtmp1 = 00000000 00000000 00008888 00005555
+ sve_compact(vtmp1, S, vtmp1, pgtmp);
+ // vtmp1 = 0000 0000 0000 0000 0000 0000 8888 5555
+ sve_uzp1(vtmp1, H, vtmp1, vtmp2);
+
+ // Compressed low: dst = 0000 0000 0000 0000 0000 4444 2222 1111
+ // Compressed high: vtmp1 = 0000 0000 0000 0000 0000 0000 8888 5555
+ // Left shift(cross lane) compressed high with TRUE_CNT lanes,
+ // TRUE_CNT is the number of active elements in the compressed low.
+ neg(rscratch1, rscratch1);
+ // vtmp2 = {4 3 2 1 0 -1 -2 -3}
+ sve_index(vtmp2, H, rscratch1, 1);
+ // vtmp1 = 0000 0000 0000 8888 5555 0000 0000 0000
+ sve_tbl(vtmp1, H, vtmp1, vtmp2);
+
+ // Combine the compressed high(after shifted) with the compressed low.
+ // dst = 0000 0000 0000 8888 5555 4444 2222 1111
+ sve_orr(dst, dst, vtmp1);
}
// Clobbers: rscratch1, rscratch2
// Preserves: src, mask
void C2_MacroAssembler::sve_compress_byte(FloatRegister dst, FloatRegister src, PRegister mask,
- FloatRegister vtmp1, FloatRegister vtmp2, FloatRegister vtmp3,
- PRegister ptmp, PRegister pgtmp, unsigned vector_length_in_bytes) {
+ FloatRegister vtmp1, FloatRegister vtmp2,
+ FloatRegister vtmp3, FloatRegister vtmp4,
+ PRegister ptmp, PRegister pgtmp) {
assert(pgtmp->is_governing(), "This register has to be a governing predicate register");
- assert_different_registers(dst, src, vtmp1, vtmp2, vtmp3);
+ assert_different_registers(dst, src, vtmp1, vtmp2, vtmp3, vtmp4);
assert_different_registers(mask, ptmp, pgtmp);
- // high <-- low
- // Example input: src = q p n m l k j i h g f e d c b a, one character is 8 bits.
- // mask = 0 1 0 0 0 0 0 1 0 1 0 0 0 1 0 1, one character is 1 bit.
- // Expected result: dst = 0 0 0 0 0 0 0 0 0 0 0 p i g c a
- FloatRegister vzr = vtmp3;
- sve_dup(vzr, B, 0);
+ // Example input: src = 88 77 66 55 44 33 22 11
+ // mask = 01 00 00 01 01 00 01 01
+ // Expected result: dst = 00 00 00 88 55 44 22 11
+ sve_dup(vtmp4, B, 0);
// Extend lowest half to type SHORT.
- // vtmp1 = 0h 0g 0f 0e 0d 0c 0b 0a
+ // vtmp1 = 0044 0033 0022 0011
sve_uunpklo(vtmp1, H, src);
- // ptmp = 00 01 00 00 00 01 00 01
+ // ptmp = 0001 0000 0001 0001
sve_punpklo(ptmp, mask);
- // Pack the active elements in size of type SHORT to the right,
- // and fill the remainings with zero.
- // dst = 00 00 00 00 00 0g 0c 0a
- unsigned extended_size = vector_length_in_bytes << 1;
- sve_compress_short(dst, vtmp1, ptmp, vzr, vtmp2, pgtmp, extended_size > MaxVectorSize ? MaxVectorSize : extended_size);
- // Narrow the result back to type BYTE.
- // dst = 0 0 0 0 0 0 0 0 0 0 0 0 0 g c a
- sve_uzp1(dst, B, dst, vzr);
-
- // Return if the vector length is no more than MaxVectorSize/2, since the
- // highest half is invalid.
- if (vector_length_in_bytes <= (MaxVectorSize >> 1)) {
- return;
- }
// Count the active elements of lowest half.
// rscratch2 = 3
sve_cntp(rscratch2, H, ptrue, ptmp);
+ // Pack the active elements in size of type SHORT to the right,
+ // and fill the remainings with zero.
+ // dst = 0000 0044 0022 0011
+ sve_compress_short(dst, vtmp1, ptmp, vtmp2, vtmp3, pgtmp);
+ // Narrow the result back to type BYTE.
+ // dst = 00 00 00 00 00 44 22 11
+ sve_uzp1(dst, B, dst, vtmp4);
// Repeat to the highest half.
- // ptmp = 00 01 00 00 00 00 00 01
+ // ptmp = 0001 0000 0000 0001
sve_punpkhi(ptmp, mask);
- // vtmp2 = 0q 0p 0n 0m 0l 0k 0j 0i
+ // vtmp2 = 0088 0077 0066 0055
sve_uunpkhi(vtmp2, H, src);
- // vtmp1 = 00 00 00 00 00 00 0p 0i
- sve_compress_short(vtmp1, vtmp2, ptmp, vzr, vtmp2, pgtmp, extended_size - MaxVectorSize);
- // vtmp1 = 0 0 0 0 0 0 0 0 0 0 0 0 0 0 p i
- sve_uzp1(vtmp1, B, vtmp1, vzr);
-
- // ptmp = 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1
- sve_whilelt(ptmp, B, zr, rscratch2);
- // Compressed low: dst = 0 0 0 0 0 0 0 0 0 0 0 0 0 g c a
- // Compressed high: vtmp1 = 0 0 0 0 0 0 0 0 0 0 0 0 0 0 p i
- // Combine the compressed low with the compressed high:
- // dst = 0 0 0 0 0 0 0 0 0 0 0 p i g c a
- sve_splice(dst, B, ptmp, vtmp1);
+ // vtmp1 = 0000 0000 0088 0055
+ sve_compress_short(vtmp1, vtmp2, ptmp, vtmp3, vtmp4, pgtmp);
+
+ sve_dup(vtmp4, B, 0);
+ // vtmp1 = 00 00 00 00 00 00 88 55
+ sve_uzp1(vtmp1, B, vtmp1, vtmp4);
+
+ // Compressed low: dst = 00 00 00 00 00 44 22 11
+ // Compressed high: vtmp1 = 00 00 00 00 00 00 88 55
+ // Left shift(cross lane) compressed high with TRUE_CNT lanes,
+ // TRUE_CNT is the number of active elements in the compressed low.
+ neg(rscratch2, rscratch2);
+ // vtmp2 = {4 3 2 1 0 -1 -2 -3}
+ sve_index(vtmp2, B, rscratch2, 1);
+ // vtmp1 = 00 00 00 88 55 00 00 00
+ sve_tbl(vtmp1, B, vtmp1, vtmp2);
+ // Combine the compressed high(after shifted) with the compressed low.
+ // dst = 00 00 00 88 55 44 22 11
+ sve_orr(dst, dst, vtmp1);
}
void C2_MacroAssembler::neon_reverse_bits(FloatRegister dst, FloatRegister src, BasicType bt, bool isQ) {
diff --git a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp
index 09850a60c64d3..cb8ded142f48a 100644
--- a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp
@@ -173,12 +173,13 @@
// lowest-numbered elements of dst. Any remaining elements of dst will
// be filled with zero.
void sve_compress_byte(FloatRegister dst, FloatRegister src, PRegister mask,
- FloatRegister vtmp1, FloatRegister vtmp2, FloatRegister vtmp3,
- PRegister ptmp, PRegister pgtmp, unsigned vector_length_in_bytes);
+ FloatRegister vtmp1, FloatRegister vtmp2,
+ FloatRegister vtmp3, FloatRegister vtmp4,
+ PRegister ptmp, PRegister pgtmp);
void sve_compress_short(FloatRegister dst, FloatRegister src, PRegister mask,
- FloatRegister vzr, FloatRegister vtmp,
- PRegister pgtmp, unsigned vector_length_in_bytes);
+ FloatRegister vtmp1, FloatRegister vtmp2,
+ PRegister pgtmp);
void neon_reverse_bits(FloatRegister dst, FloatRegister src, BasicType bt, bool isQ);
diff --git a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
index 6f8795494a2bb..607912e6e494f 100644
--- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
@@ -1704,14 +1704,3 @@ void InterpreterMacroAssembler::load_method_entry(Register cache, Register index
add(cache, cache, Array::base_offset_in_bytes());
lea(cache, Address(cache, index));
}
-
-#ifdef ASSERT
-void InterpreterMacroAssembler::verify_field_offset(Register reg) {
- // Verify the field offset is not in the header, implicitly checks for 0
- Label L;
- subs(zr, reg, oopDesc::base_offset_in_bytes());
- br(Assembler::GE, L);
- stop("bad field offset");
- bind(L);
-}
-#endif
diff --git a/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp b/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp
index e07e6e49f535d..e896a2a943041 100644
--- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp
@@ -319,8 +319,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
void load_resolved_indy_entry(Register cache, Register index);
void load_field_entry(Register cache, Register index, int bcp_offset = 1);
void load_method_entry(Register cache, Register index, int bcp_offset = 1);
-
- void verify_field_offset(Register reg) NOT_DEBUG_RETURN;
};
#endif // CPU_AARCH64_INTERP_MASM_AARCH64_HPP
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
index 1400978931986..a37edab8578db 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
@@ -148,34 +148,56 @@ extern "C" void disnm(intptr_t p);
// strictly should be 64 bit movz #imm16<<0
// 110___10100 (i.e. requires insn[31:21] == 11010010100)
//
+class RelocActions {
+protected:
+ typedef int (*reloc_insn)(address insn_addr, address &target);
-static uint32_t insn_at(address insn_addr, int n) {
- return ((uint32_t*)insn_addr)[n];
-}
+ virtual reloc_insn adrpMem() = 0;
+ virtual reloc_insn adrpAdd() = 0;
+ virtual reloc_insn adrpMovk() = 0;
+
+ const address _insn_addr;
+ const uint32_t _insn;
-template
-class RelocActions : public AllStatic {
+ static uint32_t insn_at(address insn_addr, int n) {
+ return ((uint32_t*)insn_addr)[n];
+ }
+ uint32_t insn_at(int n) const {
+ return insn_at(_insn_addr, n);
+ }
public:
- static int ALWAYSINLINE run(address insn_addr, address &target) {
+ RelocActions(address insn_addr) : _insn_addr(insn_addr), _insn(insn_at(insn_addr, 0)) {}
+ RelocActions(address insn_addr, uint32_t insn)
+ : _insn_addr(insn_addr), _insn(insn) {}
+
+ virtual int unconditionalBranch(address insn_addr, address &target) = 0;
+ virtual int conditionalBranch(address insn_addr, address &target) = 0;
+ virtual int testAndBranch(address insn_addr, address &target) = 0;
+ virtual int loadStore(address insn_addr, address &target) = 0;
+ virtual int adr(address insn_addr, address &target) = 0;
+ virtual int adrp(address insn_addr, address &target, reloc_insn inner) = 0;
+ virtual int immediate(address insn_addr, address &target) = 0;
+ virtual void verify(address insn_addr, address &target) = 0;
+
+ int ALWAYSINLINE run(address insn_addr, address &target) {
int instructions = 1;
- uint32_t insn = insn_at(insn_addr, 0);
- uint32_t dispatch = Instruction_aarch64::extract(insn, 30, 25);
+ uint32_t dispatch = Instruction_aarch64::extract(_insn, 30, 25);
switch(dispatch) {
case 0b001010:
case 0b001011: {
- instructions = T::unconditionalBranch(insn_addr, target);
+ instructions = unconditionalBranch(insn_addr, target);
break;
}
case 0b101010: // Conditional branch (immediate)
case 0b011010: { // Compare & branch (immediate)
- instructions = T::conditionalBranch(insn_addr, target);
- break;
+ instructions = conditionalBranch(insn_addr, target);
+ break;
}
case 0b011011: {
- instructions = T::testAndBranch(insn_addr, target);
+ instructions = testAndBranch(insn_addr, target);
break;
}
case 0b001100:
@@ -187,9 +209,9 @@ class RelocActions : public AllStatic {
case 0b111100:
case 0b111110: {
// load/store
- if ((Instruction_aarch64::extract(insn, 29, 24) & 0b111011) == 0b011000) {
+ if ((Instruction_aarch64::extract(_insn, 29, 24) & 0b111011) == 0b011000) {
// Load register (literal)
- instructions = T::loadStore(insn_addr, target);
+ instructions = loadStore(insn_addr, target);
break;
} else {
// nothing to do
@@ -202,27 +224,27 @@ class RelocActions : public AllStatic {
case 0b101000:
case 0b111000: {
// adr/adrp
- assert(Instruction_aarch64::extract(insn, 28, 24) == 0b10000, "must be");
- int shift = Instruction_aarch64::extract(insn, 31, 31);
+ assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
+ int shift = Instruction_aarch64::extract(_insn, 31, 31);
if (shift) {
- uint32_t insn2 = insn_at(insn_addr, 1);
+ uint32_t insn2 = insn_at(1);
if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
- Instruction_aarch64::extract(insn, 4, 0) ==
+ Instruction_aarch64::extract(_insn, 4, 0) ==
Instruction_aarch64::extract(insn2, 9, 5)) {
- instructions = T::adrp(insn_addr, target, T::adrpMem);
+ instructions = adrp(insn_addr, target, adrpMem());
} else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
- Instruction_aarch64::extract(insn, 4, 0) ==
+ Instruction_aarch64::extract(_insn, 4, 0) ==
Instruction_aarch64::extract(insn2, 4, 0)) {
- instructions = T::adrp(insn_addr, target, T::adrpAdd);
+ instructions = adrp(insn_addr, target, adrpAdd());
} else if (Instruction_aarch64::extract(insn2, 31, 21) == 0b11110010110 &&
- Instruction_aarch64::extract(insn, 4, 0) ==
+ Instruction_aarch64::extract(_insn, 4, 0) ==
Instruction_aarch64::extract(insn2, 4, 0)) {
- instructions = T::adrp(insn_addr, target, T::adrpMovk);
+ instructions = adrp(insn_addr, target, adrpMovk());
} else {
ShouldNotReachHere();
}
} else {
- instructions = T::adr(insn_addr, target);
+ instructions = adr(insn_addr, target);
}
break;
}
@@ -230,7 +252,7 @@ class RelocActions : public AllStatic {
case 0b011001:
case 0b101001:
case 0b111001: {
- instructions = T::immediate(insn_addr, target);
+ instructions = immediate(insn_addr, target);
break;
}
default: {
@@ -238,36 +260,42 @@ class RelocActions : public AllStatic {
}
}
- T::verify(insn_addr, target);
+ verify(insn_addr, target);
return instructions * NativeInstruction::instruction_size;
}
};
-class Patcher : public AllStatic {
+class Patcher : public RelocActions {
+ virtual reloc_insn adrpMem() { return &Patcher::adrpMem_impl; }
+ virtual reloc_insn adrpAdd() { return &Patcher::adrpAdd_impl; }
+ virtual reloc_insn adrpMovk() { return &Patcher::adrpMovk_impl; }
+
public:
- static int unconditionalBranch(address insn_addr, address &target) {
+ Patcher(address insn_addr) : RelocActions(insn_addr) {}
+
+ virtual int unconditionalBranch(address insn_addr, address &target) {
intptr_t offset = (target - insn_addr) >> 2;
Instruction_aarch64::spatch(insn_addr, 25, 0, offset);
return 1;
}
- static int conditionalBranch(address insn_addr, address &target) {
+ virtual int conditionalBranch(address insn_addr, address &target) {
intptr_t offset = (target - insn_addr) >> 2;
Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
return 1;
}
- static int testAndBranch(address insn_addr, address &target) {
+ virtual int testAndBranch(address insn_addr, address &target) {
intptr_t offset = (target - insn_addr) >> 2;
Instruction_aarch64::spatch(insn_addr, 18, 5, offset);
return 1;
}
- static int loadStore(address insn_addr, address &target) {
+ virtual int loadStore(address insn_addr, address &target) {
intptr_t offset = (target - insn_addr) >> 2;
Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
return 1;
}
- static int adr(address insn_addr, address &target) {
+ virtual int adr(address insn_addr, address &target) {
#ifdef ASSERT
- assert(Instruction_aarch64::extract(insn_at(insn_addr, 0), 28, 24) == 0b10000, "must be");
+ assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
#endif
// PC-rel. addressing
ptrdiff_t offset = target - insn_addr;
@@ -277,18 +305,17 @@ class Patcher : public AllStatic {
Instruction_aarch64::patch(insn_addr, 30, 29, offset_lo);
return 1;
}
- template
- static int adrp(address insn_addr, address &target, U inner) {
+ virtual int adrp(address insn_addr, address &target, reloc_insn inner) {
int instructions = 1;
#ifdef ASSERT
- assert(Instruction_aarch64::extract(insn_at(insn_addr, 0), 28, 24) == 0b10000, "must be");
+ assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
#endif
ptrdiff_t offset = target - insn_addr;
instructions = 2;
precond(inner != nullptr);
// Give the inner reloc a chance to modify the target.
address adjusted_target = target;
- instructions = inner(insn_addr, adjusted_target);
+ instructions = (*inner)(insn_addr, adjusted_target);
uintptr_t pc_page = (uintptr_t)insn_addr >> 12;
uintptr_t adr_page = (uintptr_t)adjusted_target >> 12;
offset = adr_page - pc_page;
@@ -298,7 +325,7 @@ class Patcher : public AllStatic {
Instruction_aarch64::patch(insn_addr, 30, 29, offset_lo);
return instructions;
}
- static int adrpMem(address insn_addr, address &target) {
+ static int adrpMem_impl(address insn_addr, address &target) {
uintptr_t dest = (uintptr_t)target;
int offset_lo = dest & 0xfff;
uint32_t insn2 = insn_at(insn_addr, 1);
@@ -307,21 +334,21 @@ class Patcher : public AllStatic {
guarantee(((dest >> size) << size) == dest, "misaligned target");
return 2;
}
- static int adrpAdd(address insn_addr, address &target) {
+ static int adrpAdd_impl(address insn_addr, address &target) {
uintptr_t dest = (uintptr_t)target;
int offset_lo = dest & 0xfff;
Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo);
return 2;
}
- static int adrpMovk(address insn_addr, address &target) {
+ static int adrpMovk_impl(address insn_addr, address &target) {
uintptr_t dest = uintptr_t(target);
Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 20, 5, (uintptr_t)target >> 32);
dest = (dest & 0xffffffffULL) | (uintptr_t(insn_addr) & 0xffff00000000ULL);
target = address(dest);
return 2;
}
- static int immediate(address insn_addr, address &target) {
- assert(Instruction_aarch64::extract(insn_at(insn_addr, 0), 31, 21) == 0b11010010100, "must be");
+ virtual int immediate(address insn_addr, address &target) {
+ assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
uint64_t dest = (uint64_t)target;
// Move wide constant
assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
@@ -331,7 +358,7 @@ class Patcher : public AllStatic {
Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
return 3;
}
- static void verify(address insn_addr, address &target) {
+ virtual void verify(address insn_addr, address &target) {
#ifdef ASSERT
address address_is = MacroAssembler::target_addr_for_insn(insn_addr);
if (!(address_is == target)) {
@@ -365,54 +392,56 @@ static bool offset_for(uint32_t insn1, uint32_t insn2, ptrdiff_t &byte_offset) {
return false;
}
-class AArch64Decoder : public AllStatic {
+class AArch64Decoder : public RelocActions {
+ virtual reloc_insn adrpMem() { return &AArch64Decoder::adrpMem_impl; }
+ virtual reloc_insn adrpAdd() { return &AArch64Decoder::adrpAdd_impl; }
+ virtual reloc_insn adrpMovk() { return &AArch64Decoder::adrpMovk_impl; }
+
public:
+ AArch64Decoder(address insn_addr, uint32_t insn) : RelocActions(insn_addr, insn) {}
- static int loadStore(address insn_addr, address &target) {
- intptr_t offset = Instruction_aarch64::sextract(insn_at(insn_addr, 0), 23, 5);
+ virtual int loadStore(address insn_addr, address &target) {
+ intptr_t offset = Instruction_aarch64::sextract(_insn, 23, 5);
target = insn_addr + (offset << 2);
return 1;
}
- static int unconditionalBranch(address insn_addr, address &target) {
- intptr_t offset = Instruction_aarch64::sextract(insn_at(insn_addr, 0), 25, 0);
+ virtual int unconditionalBranch(address insn_addr, address &target) {
+ intptr_t offset = Instruction_aarch64::sextract(_insn, 25, 0);
target = insn_addr + (offset << 2);
return 1;
}
- static int conditionalBranch(address insn_addr, address &target) {
- intptr_t offset = Instruction_aarch64::sextract(insn_at(insn_addr, 0), 23, 5);
+ virtual int conditionalBranch(address insn_addr, address &target) {
+ intptr_t offset = Instruction_aarch64::sextract(_insn, 23, 5);
target = address(((uint64_t)insn_addr + (offset << 2)));
return 1;
}
- static int testAndBranch(address insn_addr, address &target) {
- intptr_t offset = Instruction_aarch64::sextract(insn_at(insn_addr, 0), 18, 5);
+ virtual int testAndBranch(address insn_addr, address &target) {
+ intptr_t offset = Instruction_aarch64::sextract(_insn, 18, 5);
target = address(((uint64_t)insn_addr + (offset << 2)));
return 1;
}
- static int adr(address insn_addr, address &target) {
+ virtual int adr(address insn_addr, address &target) {
// PC-rel. addressing
- uint32_t insn = insn_at(insn_addr, 0);
- intptr_t offset = Instruction_aarch64::extract(insn, 30, 29);
- offset |= Instruction_aarch64::sextract(insn, 23, 5) << 2;
+ intptr_t offset = Instruction_aarch64::extract(_insn, 30, 29);
+ offset |= Instruction_aarch64::sextract(_insn, 23, 5) << 2;
target = address((uint64_t)insn_addr + offset);
return 1;
}
- template
- static int adrp(address insn_addr, address &target, U inner) {
- uint32_t insn = insn_at(insn_addr, 0);
- assert(Instruction_aarch64::extract(insn, 28, 24) == 0b10000, "must be");
- intptr_t offset = Instruction_aarch64::extract(insn, 30, 29);
- offset |= Instruction_aarch64::sextract(insn, 23, 5) << 2;
+ virtual int adrp(address insn_addr, address &target, reloc_insn inner) {
+ assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
+ intptr_t offset = Instruction_aarch64::extract(_insn, 30, 29);
+ offset |= Instruction_aarch64::sextract(_insn, 23, 5) << 2;
int shift = 12;
offset <<= shift;
uint64_t target_page = ((uint64_t)insn_addr) + offset;
target_page &= ((uint64_t)-1) << shift;
- uint32_t insn2 = insn_at(insn_addr, 1);
+ uint32_t insn2 = insn_at(1);
target = address(target_page);
precond(inner != nullptr);
- inner(insn_addr, target);
+ (*inner)(insn_addr, target);
return 2;
}
- static int adrpMem(address insn_addr, address &target) {
+ static int adrpMem_impl(address insn_addr, address &target) {
uint32_t insn2 = insn_at(insn_addr, 1);
// Load/store register (unsigned immediate)
ptrdiff_t byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
@@ -421,14 +450,14 @@ class AArch64Decoder : public AllStatic {
target += byte_offset;
return 2;
}
- static int adrpAdd(address insn_addr, address &target) {
+ static int adrpAdd_impl(address insn_addr, address &target) {
uint32_t insn2 = insn_at(insn_addr, 1);
// add (immediate)
ptrdiff_t byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
target += byte_offset;
return 2;
}
- static int adrpMovk(address insn_addr, address &target) {
+ static int adrpMovk_impl(address insn_addr, address &target) {
uint32_t insn2 = insn_at(insn_addr, 1);
uint64_t dest = uint64_t(target);
dest = (dest & 0xffff0000ffffffff) |
@@ -447,33 +476,35 @@ class AArch64Decoder : public AllStatic {
return 2;
}
}
- static int immediate(address insn_addr, address &target) {
+ virtual int immediate(address insn_addr, address &target) {
uint32_t *insns = (uint32_t *)insn_addr;
- assert(Instruction_aarch64::extract(insns[0], 31, 21) == 0b11010010100, "must be");
+ assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
// Move wide constant: movz, movk, movk. See movptr().
assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
- target = address(uint64_t(Instruction_aarch64::extract(insns[0], 20, 5))
- + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
- + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
+ target = address(uint64_t(Instruction_aarch64::extract(_insn, 20, 5))
+ + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
+ + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
return 3;
}
- static void verify(address insn_addr, address &target) {
+ virtual void verify(address insn_addr, address &target) {
}
};
-address MacroAssembler::target_addr_for_insn(address insn_addr) {
+address MacroAssembler::target_addr_for_insn(address insn_addr, uint32_t insn) {
+ AArch64Decoder decoder(insn_addr, insn);
address target;
- RelocActions::run(insn_addr, target);
+ decoder.run(insn_addr, target);
return target;
}
// Patch any kind of instruction; there may be several instructions.
// Return the total length (in bytes) of the instructions.
int MacroAssembler::pd_patch_instruction_size(address insn_addr, address target) {
- return RelocActions::run(insn_addr, target);
+ Patcher patcher(insn_addr);
+ return patcher.run(insn_addr, target);
}
int MacroAssembler::patch_oop(address insn_addr, address o) {
@@ -515,11 +546,11 @@ int MacroAssembler::patch_narrow_klass(address insn_addr, narrowKlass n) {
return 2 * NativeInstruction::instruction_size;
}
-address MacroAssembler::target_addr_for_insn_or_null(address insn_addr) {
- if (NativeInstruction::is_ldrw_to_zr(insn_addr)) {
+address MacroAssembler::target_addr_for_insn_or_null(address insn_addr, unsigned insn) {
+ if (NativeInstruction::is_ldrw_to_zr(address(&insn))) {
return nullptr;
}
- return MacroAssembler::target_addr_for_insn(insn_addr);
+ return MacroAssembler::target_addr_for_insn(insn_addr, insn);
}
void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool in_nmethod, Register tmp) {
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
index d5a16e424e428..705bd19093c1d 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
@@ -676,8 +676,16 @@ class MacroAssembler: public Assembler {
static bool needs_explicit_null_check(intptr_t offset);
static bool uses_implicit_null_check(void* address);
- static address target_addr_for_insn(address insn_addr);
- static address target_addr_for_insn_or_null(address insn_addr);
+ static address target_addr_for_insn(address insn_addr, unsigned insn);
+ static address target_addr_for_insn_or_null(address insn_addr, unsigned insn);
+ static address target_addr_for_insn(address insn_addr) {
+ unsigned insn = *(unsigned*)insn_addr;
+ return target_addr_for_insn(insn_addr, insn);
+ }
+ static address target_addr_for_insn_or_null(address insn_addr) {
+ unsigned insn = *(unsigned*)insn_addr;
+ return target_addr_for_insn_or_null(insn_addr, insn);
+ }
// Required platform-specific helpers for Label::patch_instructions.
// They _shadow_ the declarations in AbstractAssembler, which are undefined.
diff --git a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
index f4774f31bbd42..5195432f54ea2 100644
--- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
@@ -168,7 +168,6 @@ void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
int byte_no)
{
- assert_different_registers(bc_reg, temp_reg);
if (!RewriteBytecodes) return;
Label L_patch_done;
@@ -232,12 +231,9 @@ void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
__ stop("patching the wrong bytecode");
__ bind(L_okay);
#endif
- // Patch bytecode with release store to coordinate with ResolvedFieldEntry loads
- // in fast bytecode codelets. load_field_entry has a memory barrier that gains
- // the needed ordering, together with control dependency on entering the fast codelet
- // itself.
- __ lea(temp_reg, at_bcp(0));
- __ stlrb(bc_reg, temp_reg);
+
+ // patch bytecode
+ __ strb(bc_reg, at_bcp(0));
__ bind(L_patch_done);
}
@@ -3098,7 +3094,6 @@ void TemplateTable::fast_storefield(TosState state)
// R1: field offset, R2: field holder, R5: flags
load_resolved_field_entry(r2, r2, noreg, r1, r5);
- __ verify_field_offset(r1);
{
Label notVolatile;
@@ -3188,8 +3183,6 @@ void TemplateTable::fast_accessfield(TosState state)
__ load_field_entry(r2, r1);
__ load_sized_value(r1, Address(r2, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/);
- __ verify_field_offset(r1);
-
__ load_unsigned_byte(r3, Address(r2, in_bytes(ResolvedFieldEntry::flags_offset())));
// r0: object
@@ -3256,9 +3249,7 @@ void TemplateTable::fast_xaccess(TosState state)
__ ldr(r0, aaddress(0));
// access constant pool cache
__ load_field_entry(r2, r3, 2);
-
__ load_sized_value(r1, Address(r2, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/);
- __ verify_field_offset(r1);
// 8179954: We need to make sure that the code generated for
// volatile accesses forms a sequentially-consistent set of
diff --git a/src/hotspot/cpu/arm/arm.ad b/src/hotspot/cpu/arm/arm.ad
index 31a442be62456..68fece5263d34 100644
--- a/src/hotspot/cpu/arm/arm.ad
+++ b/src/hotspot/cpu/arm/arm.ad
@@ -1131,27 +1131,27 @@ bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
}
// Register for DIVI projection of divmodI
-const RegMask& Matcher::divI_proj_mask() {
+RegMask Matcher::divI_proj_mask() {
ShouldNotReachHere();
- return RegMask::EMPTY;
+ return RegMask();
}
// Register for MODI projection of divmodI
-const RegMask& Matcher::modI_proj_mask() {
+RegMask Matcher::modI_proj_mask() {
ShouldNotReachHere();
- return RegMask::EMPTY;
+ return RegMask();
}
// Register for DIVL projection of divmodL
-const RegMask& Matcher::divL_proj_mask() {
+RegMask Matcher::divL_proj_mask() {
ShouldNotReachHere();
- return RegMask::EMPTY;
+ return RegMask();
}
// Register for MODL projection of divmodL
-const RegMask& Matcher::modL_proj_mask() {
+RegMask Matcher::modL_proj_mask() {
ShouldNotReachHere();
- return RegMask::EMPTY;
+ return RegMask();
}
bool maybe_far_call(const CallNode *n) {
diff --git a/src/hotspot/cpu/ppc/interp_masm_ppc.hpp b/src/hotspot/cpu/ppc/interp_masm_ppc.hpp
index 9140dd7ca4edf..d3969427db300 100644
--- a/src/hotspot/cpu/ppc/interp_masm_ppc.hpp
+++ b/src/hotspot/cpu/ppc/interp_masm_ppc.hpp
@@ -133,13 +133,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
void get_cache_index_at_bcp(Register Rdst, int bcp_offset, size_t index_size);
void load_resolved_indy_entry(Register cache, Register index);
- void load_field_or_method_entry(bool is_method, Register cache, Register index, int bcp_offset, bool for_fast_bytecode);
- void load_field_entry(Register cache, Register index, int bcp_offset = 1, bool for_fast_bytecode = false) {
- load_field_or_method_entry(false, cache, index, bcp_offset, for_fast_bytecode);
- }
- void load_method_entry(Register cache, Register index, int bcp_offset = 1, bool for_fast_bytecode = false) {
- load_field_or_method_entry(true, cache, index, bcp_offset, for_fast_bytecode);
- }
+ void load_field_entry(Register cache, Register index, int bcp_offset = 1);
+ void load_method_entry(Register cache, Register index, int bcp_offset = 1);
void get_u4(Register Rdst, Register Rsrc, int offset, signedOrNot is_signed);
diff --git a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp
index 503cc25943253..8df2cc5d273ff 100644
--- a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp
+++ b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp
@@ -468,33 +468,33 @@ void InterpreterMacroAssembler::load_resolved_indy_entry(Register cache, Registe
add(cache, cache, index);
}
-void InterpreterMacroAssembler::load_field_or_method_entry(bool is_method, Register cache, Register index, int bcp_offset, bool for_fast_bytecode) {
- const int entry_size = is_method ? sizeof(ResolvedMethodEntry) : sizeof(ResolvedFieldEntry),
- base_offset = is_method ? Array::base_offset_in_bytes() : Array::base_offset_in_bytes(),
- entries_offset = is_method ? in_bytes(ConstantPoolCache::method_entries_offset()) : in_bytes(ConstantPoolCache::field_entries_offset());
-
+void InterpreterMacroAssembler::load_field_entry(Register cache, Register index, int bcp_offset) {
// Get index out of bytecode pointer
get_cache_index_at_bcp(index, bcp_offset, sizeof(u2));
// Take shortcut if the size is a power of 2
- if (is_power_of_2(entry_size)) {
+ if (is_power_of_2(sizeof(ResolvedFieldEntry))) {
// Scale index by power of 2
- sldi(index, index, log2i_exact(entry_size));
+ sldi(index, index, log2i_exact(sizeof(ResolvedFieldEntry)));
} else {
// Scale the index to be the entry index * sizeof(ResolvedFieldEntry)
- mulli(index, index, entry_size);
+ mulli(index, index, sizeof(ResolvedFieldEntry));
}
// Get address of field entries array
- ld_ptr(cache, entries_offset, R27_constPoolCache);
- addi(cache, cache, base_offset);
+ ld_ptr(cache, in_bytes(ConstantPoolCache::field_entries_offset()), R27_constPoolCache);
+ addi(cache, cache, Array::base_offset_in_bytes());
add(cache, cache, index);
+}
- if (for_fast_bytecode) {
- // Prevent speculative loading from ResolvedFieldEntry/ResolvedMethodEntry as it can miss the info written by another thread.
- // TemplateTable::patch_bytecode uses release-store.
- // We reached here via control dependency (Bytecode dispatch has used the rewritten Bytecode).
- // So, we can use control-isync based ordering.
- isync();
- }
+void InterpreterMacroAssembler::load_method_entry(Register cache, Register index, int bcp_offset) {
+ // Get index out of bytecode pointer
+ get_cache_index_at_bcp(index, bcp_offset, sizeof(u2));
+ // Scale the index to be the entry index * sizeof(ResolvedMethodEntry)
+ mulli(index, index, sizeof(ResolvedMethodEntry));
+
+ // Get address of field entries array
+ ld_ptr(cache, ConstantPoolCache::method_entries_offset(), R27_constPoolCache);
+ addi(cache, cache, Array::base_offset_in_bytes());
+ add(cache, cache, index); // method_entries + base_offset + scaled index
}
// Load object from cpool->resolved_references(index).
diff --git a/src/hotspot/cpu/ppc/ppc.ad b/src/hotspot/cpu/ppc/ppc.ad
index 03dbd0e780ba2..2c83b2d576589 100644
--- a/src/hotspot/cpu/ppc/ppc.ad
+++ b/src/hotspot/cpu/ppc/ppc.ad
@@ -2450,27 +2450,27 @@ bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
}
// Register for DIVI projection of divmodI.
-const RegMask& Matcher::divI_proj_mask() {
+RegMask Matcher::divI_proj_mask() {
ShouldNotReachHere();
- return RegMask::EMPTY;
+ return RegMask();
}
// Register for MODI projection of divmodI.
-const RegMask& Matcher::modI_proj_mask() {
+RegMask Matcher::modI_proj_mask() {
ShouldNotReachHere();
- return RegMask::EMPTY;
+ return RegMask();
}
// Register for DIVL projection of divmodL.
-const RegMask& Matcher::divL_proj_mask() {
+RegMask Matcher::divL_proj_mask() {
ShouldNotReachHere();
- return RegMask::EMPTY;
+ return RegMask();
}
// Register for MODL projection of divmodL.
-const RegMask& Matcher::modL_proj_mask() {
+RegMask Matcher::modL_proj_mask() {
ShouldNotReachHere();
- return RegMask::EMPTY;
+ return RegMask();
}
%}
diff --git a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp
index 09acd1c067da9..41fbe66647ecc 100644
--- a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp
+++ b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp
@@ -148,9 +148,7 @@ void TemplateTable::patch_bytecode(Bytecodes::Code new_bc, Register Rnew_bc, Reg
__ bind(L_fast_patch);
}
- // Patch bytecode with release store to coordinate with ResolvedFieldEntry
- // and ResolvedMethodEntry loads in fast bytecode codelets.
- __ release();
+ // Patch bytecode.
__ stb(Rnew_bc, 0, R14_bcp);
__ bind(L_patch_done);
@@ -314,7 +312,6 @@ void TemplateTable::fast_aldc(LdcType type) {
// We are resolved if the resolved reference cache entry contains a
// non-null object (CallSite, etc.)
__ get_cache_index_at_bcp(R31, 1, index_size); // Load index.
- // Only rewritten during link time. So, no need for memory barriers for accessing resolved info.
__ load_resolved_reference_at_index(R17_tos, R31, R11_scratch1, R12_scratch2, &is_null);
// Convert null sentinel to null
@@ -3117,7 +3114,7 @@ void TemplateTable::fast_storefield(TosState state) {
const ConditionRegister CR_is_vol = CR2; // Non-volatile condition register (survives runtime call in do_oop_store).
// Constant pool already resolved => Load flags and offset of field.
- __ load_field_entry(Rcache, Rscratch, 1, /* for_fast_bytecode */ true);
+ __ load_field_entry(Rcache, Rscratch);
jvmti_post_field_mod(Rcache, Rscratch, false /* not static */);
load_resolved_field_entry(noreg, Rcache, noreg, Roffset, Rflags, false); // Uses R11, R12
@@ -3198,7 +3195,7 @@ void TemplateTable::fast_accessfield(TosState state) {
// R12_scratch2 used by load_field_cp_cache_entry
// Constant pool already resolved. Get the field offset.
- __ load_field_entry(Rcache, Rscratch, 1, /* for_fast_bytecode */ true);
+ __ load_field_entry(Rcache, Rscratch);
load_resolved_field_entry(noreg, Rcache, noreg, Roffset, Rflags, false); // Uses R11, R12
// JVMTI support
@@ -3337,7 +3334,7 @@ void TemplateTable::fast_xaccess(TosState state) {
__ ld(Rclass_or_obj, 0, R18_locals);
// Constant pool already resolved. Get the field offset.
- __ load_field_entry(Rcache, Rscratch, 2, /* for_fast_bytecode */ true);
+ __ load_field_entry(Rcache, Rscratch, 2);
load_resolved_field_entry(noreg, Rcache, noreg, Roffset, Rflags, false); // Uses R11, R12
// JVMTI support not needed, since we switch back to single bytecode as soon as debugger attaches.
@@ -3498,7 +3495,7 @@ void TemplateTable::fast_invokevfinal(int byte_no) {
assert(byte_no == f2_byte, "use this argument");
Register Rcache = R31;
- __ load_method_entry(Rcache, R11_scratch1, 1, /* for_fast_bytecode */ true);
+ __ load_method_entry(Rcache, R11_scratch1);
invokevfinal_helper(Rcache, R11_scratch1, R12_scratch2, R22_tmp2, R23_tmp3);
}
diff --git a/src/hotspot/cpu/ppc/vm_version_ppc.cpp b/src/hotspot/cpu/ppc/vm_version_ppc.cpp
index 8b1de7546506f..e2dfd4ecec97c 100644
--- a/src/hotspot/cpu/ppc/vm_version_ppc.cpp
+++ b/src/hotspot/cpu/ppc/vm_version_ppc.cpp
@@ -99,10 +99,6 @@ void VM_Version::initialize() {
FLAG_SET_ERGO(TrapBasedRangeChecks, false);
}
- if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
- FLAG_SET_ERGO(UsePopCountInstruction, true);
- }
-
if (PowerArchitecturePPC64 >= 9) {
// Performance is good since Power9.
if (FLAG_IS_DEFAULT(SuperwordUseVSX)) {
diff --git a/src/hotspot/cpu/riscv/interp_masm_riscv.cpp b/src/hotspot/cpu/riscv/interp_masm_riscv.cpp
index 549c9cda7b611..7c4b844440738 100644
--- a/src/hotspot/cpu/riscv/interp_masm_riscv.cpp
+++ b/src/hotspot/cpu/riscv/interp_masm_riscv.cpp
@@ -1841,15 +1841,6 @@ void InterpreterMacroAssembler::load_method_entry(Register cache, Register index
}
#ifdef ASSERT
-void InterpreterMacroAssembler::verify_field_offset(Register reg) {
- // Verify the field offset is not in the header, implicitly checks for 0
- Label L;
- mv(t0, oopDesc::base_offset_in_bytes());
- bge(reg, t0, L);
- stop("bad field offset");
- bind(L);
-}
-
void InterpreterMacroAssembler::verify_access_flags(Register access_flags, uint32_t flag,
const char* msg, bool stop_by_hit) {
Label L;
diff --git a/src/hotspot/cpu/riscv/interp_masm_riscv.hpp b/src/hotspot/cpu/riscv/interp_masm_riscv.hpp
index 295f1b221916c..0732191ea83a1 100644
--- a/src/hotspot/cpu/riscv/interp_masm_riscv.hpp
+++ b/src/hotspot/cpu/riscv/interp_masm_riscv.hpp
@@ -300,10 +300,11 @@ class InterpreterMacroAssembler: public MacroAssembler {
void load_field_entry(Register cache, Register index, int bcp_offset = 1);
void load_method_entry(Register cache, Register index, int bcp_offset = 1);
- void verify_field_offset(Register reg) NOT_DEBUG_RETURN;
+#ifdef ASSERT
void verify_access_flags(Register access_flags, uint32_t flag,
- const char* msg, bool stop_by_hit = true) NOT_DEBUG_RETURN;
- void verify_frame_setup() NOT_DEBUG_RETURN;
+ const char* msg, bool stop_by_hit = true);
+ void verify_frame_setup();
+#endif
};
#endif // CPU_RISCV_INTERP_MASM_RISCV_HPP
diff --git a/src/hotspot/cpu/riscv/riscv.ad b/src/hotspot/cpu/riscv/riscv.ad
index 83c59af9113e9..009acd628a0d2 100644
--- a/src/hotspot/cpu/riscv/riscv.ad
+++ b/src/hotspot/cpu/riscv/riscv.ad
@@ -1092,40 +1092,40 @@ RegMask _NO_SPECIAL_NO_FP_PTR_REG_mask;
void reg_mask_init() {
- _ANY_REG32_mask.assignFrom(_ALL_REG32_mask);
- _ANY_REG32_mask.remove(OptoReg::as_OptoReg(x0->as_VMReg()));
+ _ANY_REG32_mask = _ALL_REG32_mask;
+ _ANY_REG32_mask.Remove(OptoReg::as_OptoReg(x0->as_VMReg()));
- _ANY_REG_mask.assignFrom(_ALL_REG_mask);
- _ANY_REG_mask.subtract(_ZR_REG_mask);
+ _ANY_REG_mask = _ALL_REG_mask;
+ _ANY_REG_mask.SUBTRACT(_ZR_REG_mask);
- _PTR_REG_mask.assignFrom(_ALL_REG_mask);
- _PTR_REG_mask.subtract(_ZR_REG_mask);
+ _PTR_REG_mask = _ALL_REG_mask;
+ _PTR_REG_mask.SUBTRACT(_ZR_REG_mask);
- _NO_SPECIAL_REG32_mask.assignFrom(_ALL_REG32_mask);
- _NO_SPECIAL_REG32_mask.subtract(_NON_ALLOCATABLE_REG32_mask);
+ _NO_SPECIAL_REG32_mask = _ALL_REG32_mask;
+ _NO_SPECIAL_REG32_mask.SUBTRACT(_NON_ALLOCATABLE_REG32_mask);
- _NO_SPECIAL_REG_mask.assignFrom(_ALL_REG_mask);
- _NO_SPECIAL_REG_mask.subtract(_NON_ALLOCATABLE_REG_mask);
+ _NO_SPECIAL_REG_mask = _ALL_REG_mask;
+ _NO_SPECIAL_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);
- _NO_SPECIAL_PTR_REG_mask.assignFrom(_ALL_REG_mask);
- _NO_SPECIAL_PTR_REG_mask.subtract(_NON_ALLOCATABLE_REG_mask);
+ _NO_SPECIAL_PTR_REG_mask = _ALL_REG_mask;
+ _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);
// x27 is not allocatable when compressed oops is on
if (UseCompressedOops) {
- _NO_SPECIAL_REG32_mask.remove(OptoReg::as_OptoReg(x27->as_VMReg()));
- _NO_SPECIAL_REG_mask.remove(OptoReg::as_OptoReg(x27->as_VMReg()));
- _NO_SPECIAL_PTR_REG_mask.remove(OptoReg::as_OptoReg(x27->as_VMReg()));
+ _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(x27->as_VMReg()));
+ _NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(x27->as_VMReg()));
+ _NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(x27->as_VMReg()));
}
// x8 is not allocatable when PreserveFramePointer is on
if (PreserveFramePointer) {
- _NO_SPECIAL_REG32_mask.remove(OptoReg::as_OptoReg(x8->as_VMReg()));
- _NO_SPECIAL_REG_mask.remove(OptoReg::as_OptoReg(x8->as_VMReg()));
- _NO_SPECIAL_PTR_REG_mask.remove(OptoReg::as_OptoReg(x8->as_VMReg()));
+ _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(x8->as_VMReg()));
+ _NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(x8->as_VMReg()));
+ _NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(x8->as_VMReg()));
}
- _NO_SPECIAL_NO_FP_PTR_REG_mask.assignFrom(_NO_SPECIAL_PTR_REG_mask);
- _NO_SPECIAL_NO_FP_PTR_REG_mask.remove(OptoReg::as_OptoReg(x8->as_VMReg()));
+ _NO_SPECIAL_NO_FP_PTR_REG_mask = _NO_SPECIAL_PTR_REG_mask;
+ _NO_SPECIAL_NO_FP_PTR_REG_mask.Remove(OptoReg::as_OptoReg(x8->as_VMReg()));
}
void PhaseOutput::pd_perform_mach_node_analysis() {
@@ -1326,7 +1326,7 @@ uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
}
//=============================================================================
-const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::EMPTY;
+const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
int ConstantTable::calculate_table_base_offset() const {
return 0; // absolute addressing, no offset
@@ -2104,10 +2104,10 @@ uint Matcher::int_pressure_limit()
// as a spilled LRG. Spilling heuristics(Spill-USE) explicitly skip
// derived pointers and lastly fail to spill after reaching maximum
// number of iterations. Lowering the default pressure threshold to
- // (_NO_SPECIAL_REG32_mask.size() minus 1) forces CallNode to become
+ // (_NO_SPECIAL_REG32_mask.Size() minus 1) forces CallNode to become
// a high register pressure area of the code so that split_DEF can
// generate DefinitionSpillCopy for the derived pointer.
- uint default_int_pressure_threshold = _NO_SPECIAL_REG32_mask.size() - 1;
+ uint default_int_pressure_threshold = _NO_SPECIAL_REG32_mask.Size() - 1;
if (!PreserveFramePointer) {
// When PreserveFramePointer is off, frame pointer is allocatable,
// but different from other SOC registers, it is excluded from
@@ -2122,34 +2122,34 @@ uint Matcher::int_pressure_limit()
uint Matcher::float_pressure_limit()
{
// _FLOAT_REG_mask is generated by adlc from the float_reg register class.
- return (FLOATPRESSURE == -1) ? _FLOAT_REG_mask.size() : FLOATPRESSURE;
+ return (FLOATPRESSURE == -1) ? _FLOAT_REG_mask.Size() : FLOATPRESSURE;
}
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
return false;
}
-const RegMask& Matcher::divI_proj_mask() {
+RegMask Matcher::divI_proj_mask() {
ShouldNotReachHere();
- return RegMask::EMPTY;
+ return RegMask();
}
// Register for MODI projection of divmodI.
-const RegMask& Matcher::modI_proj_mask() {
+RegMask Matcher::modI_proj_mask() {
ShouldNotReachHere();
- return RegMask::EMPTY;
+ return RegMask();
}
// Register for DIVL projection of divmodL.
-const RegMask& Matcher::divL_proj_mask() {
+RegMask Matcher::divL_proj_mask() {
ShouldNotReachHere();
- return RegMask::EMPTY;
+ return RegMask();
}
// Register for MODL projection of divmodL.
-const RegMask& Matcher::modL_proj_mask() {
+RegMask Matcher::modL_proj_mask() {
ShouldNotReachHere();
- return RegMask::EMPTY;
+ return RegMask();
}
bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
diff --git a/src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp b/src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp
index 692335d8c084a..61f4aa3e722a2 100644
--- a/src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp
+++ b/src/hotspot/cpu/riscv/templateInterpreterGenerator_riscv.cpp
@@ -1073,7 +1073,9 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
}
// start execution
+#ifdef ASSERT
__ verify_frame_setup();
+#endif
// jvmti support
__ notify_method_entry();
@@ -1539,7 +1541,9 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
}
// start execution
+#ifdef ASSERT
__ verify_frame_setup();
+#endif
// jvmti support
__ notify_method_entry();
diff --git a/src/hotspot/cpu/riscv/templateTable_riscv.cpp b/src/hotspot/cpu/riscv/templateTable_riscv.cpp
index bd4a89d819967..2697b3e46dc14 100644
--- a/src/hotspot/cpu/riscv/templateTable_riscv.cpp
+++ b/src/hotspot/cpu/riscv/templateTable_riscv.cpp
@@ -133,7 +133,6 @@ Address TemplateTable::at_bcp(int offset) {
void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
Register temp_reg, bool load_bc_into_bc_reg /*=true*/,
int byte_no) {
- assert_different_registers(bc_reg, temp_reg);
if (!RewriteBytecodes) { return; }
Label L_patch_done;
@@ -197,11 +196,7 @@ void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
__ bind(L_okay);
#endif
- // Patch bytecode with release store to coordinate with ResolvedFieldEntry loads
- // in fast bytecode codelets. load_field_entry has a memory barrier that gains
- // the needed ordering, together with control dependency on entering the fast codelet
- // itself.
- __ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
+ // patch bytecode
__ sb(bc_reg, at_bcp(0));
__ bind(L_patch_done);
}
@@ -3033,7 +3028,6 @@ void TemplateTable::fast_storefield(TosState state) {
// X11: field offset, X12: field holder, X13: flags
load_resolved_field_entry(x12, x12, noreg, x11, x13);
- __ verify_field_offset(x11);
{
Label notVolatile;
@@ -3121,8 +3115,6 @@ void TemplateTable::fast_accessfield(TosState state) {
__ load_field_entry(x12, x11);
__ load_sized_value(x11, Address(x12, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/);
- __ verify_field_offset(x11);
-
__ load_unsigned_byte(x13, Address(x12, in_bytes(ResolvedFieldEntry::flags_offset())));
// x10: object
@@ -3178,9 +3170,7 @@ void TemplateTable::fast_xaccess(TosState state) {
__ ld(x10, aaddress(0));
// access constant pool cache
__ load_field_entry(x12, x13, 2);
-
__ load_sized_value(x11, Address(x12, in_bytes(ResolvedFieldEntry::field_offset_offset())), sizeof(int), true /*is_signed*/);
- __ verify_field_offset(x11);
// make sure exception is reported in correct bcp range (getfield is
// next instruction)
diff --git a/src/hotspot/cpu/riscv/vm_version_riscv.cpp b/src/hotspot/cpu/riscv/vm_version_riscv.cpp
index 6f4babc872ff1..9d6146a838907 100644
--- a/src/hotspot/cpu/riscv/vm_version_riscv.cpp
+++ b/src/hotspot/cpu/riscv/vm_version_riscv.cpp
@@ -103,6 +103,17 @@ void VM_Version::common_initialize() {
useRVA23U64Profile();
}
+ // Enable vendor specific features
+
+ if (mvendorid.enabled()) {
+ // Rivos
+ if (mvendorid.value() == RIVOS) {
+ if (FLAG_IS_DEFAULT(UseConservativeFence)) {
+ FLAG_SET_DEFAULT(UseConservativeFence, false);
+ }
+ }
+ }
+
if (UseZic64b) {
if (CacheLineSize != 64) {
assert(!FLAG_IS_DEFAULT(CacheLineSize), "default cache line size should be 64 bytes");
@@ -188,7 +199,7 @@ void VM_Version::common_initialize() {
FLAG_SET_DEFAULT(UsePopCountInstruction, false);
}
- if (UseZicboz && zicboz_block_size.value() > 0) {
+ if (UseZicboz && zicboz_block_size.enabled() && zicboz_block_size.value() > 0) {
assert(is_power_of_2(zicboz_block_size.value()), "Sanity");
if (FLAG_IS_DEFAULT(UseBlockZeroing)) {
FLAG_SET_DEFAULT(UseBlockZeroing, true);
diff --git a/src/hotspot/cpu/riscv/vm_version_riscv.hpp b/src/hotspot/cpu/riscv/vm_version_riscv.hpp
index f74992cbc37a0..346ca35dc1ede 100644
--- a/src/hotspot/cpu/riscv/vm_version_riscv.hpp
+++ b/src/hotspot/cpu/riscv/vm_version_riscv.hpp
@@ -52,19 +52,53 @@ class VM_Version : public Abstract_VM_Version {
const char* const _pretty;
const bool _feature_string;
const uint64_t _linux_feature_bit;
-
+ int64_t _value;
public:
RVFeatureValue(const char* pretty, int linux_bit_num, bool fstring) :
- _pretty(pretty), _feature_string(fstring), _linux_feature_bit(nth_bit(linux_bit_num)) {
+ _pretty(pretty), _feature_string(fstring), _linux_feature_bit(nth_bit(linux_bit_num)),
+ _value(-1) {
+ }
+ virtual void enable_feature(int64_t value = 0) {
+ _value = value;
+ }
+ virtual void disable_feature() {
+ _value = -1;
}
- virtual void enable_feature(int64_t value = 0) = 0;
- virtual void disable_feature() = 0;
const char* pretty() { return _pretty; }
uint64_t feature_bit() { return _linux_feature_bit; }
bool feature_string() { return _feature_string; }
+ int64_t value() { return _value; }
virtual bool enabled() = 0;
virtual void update_flag() = 0;
- virtual void log_enabled() = 0;
+
+ protected:
+ bool deps_all_enabled(RVFeatureValue* dep0, ...) {
+ assert(dep0 != nullptr, "must not");
+
+ va_list va;
+ va_start(va, dep0);
+ RVFeatureValue* next = dep0;
+ bool enabled = true;
+ while (next != nullptr && enabled) {
+ enabled = next->enabled();
+ next = va_arg(va, RVFeatureValue*);
+ }
+ va_end(va);
+ return enabled;
+ }
+
+ void deps_string(stringStream& ss, RVFeatureValue* dep0, ...) {
+ assert(dep0 != nullptr, "must not");
+ ss.print("%s (%s)", dep0->pretty(), dep0->enabled() ? "enabled" : "disabled");
+
+ va_list va;
+ va_start(va, dep0);
+ RVFeatureValue* next = nullptr;
+ while ((next = va_arg(va, RVFeatureValue*)) != nullptr) {
+ ss.print(", %s (%s)", next->pretty(), next->enabled() ? "enabled" : "disabled");
+ }
+ va_end(va);
+ }
};
#define UPDATE_DEFAULT(flag) \
@@ -83,9 +117,8 @@ class VM_Version : public Abstract_VM_Version {
#define UPDATE_DEFAULT_DEP(flag, dep0, ...) \
void update_flag() { \
assert(enabled(), "Must be."); \
- DEBUG_ONLY(verify_deps(dep0, ##__VA_ARGS__)); \
if (FLAG_IS_DEFAULT(flag)) { \
- if (deps_all_enabled(dep0, ##__VA_ARGS__)) { \
+ if (this->deps_all_enabled(dep0, ##__VA_ARGS__)) { \
FLAG_SET_DEFAULT(flag, true); \
} else { \
FLAG_SET_DEFAULT(flag, false); \
@@ -116,96 +149,40 @@ class VM_Version : public Abstract_VM_Version {
class RVExtFeatureValue : public RVFeatureValue {
const uint32_t _cpu_feature_index;
-
public:
RVExtFeatureValue(const char* pretty, int linux_bit_num, uint32_t cpu_feature_index, bool fstring) :
RVFeatureValue(pretty, linux_bit_num, fstring),
_cpu_feature_index(cpu_feature_index) {
}
- int cpu_feature_index() {
- // Can be used to check, for example, v is declared before Zvfh in RV_EXT_FEATURE_FLAGS.
- return _cpu_feature_index;
- }
bool enabled() {
return RVExtFeatures::current()->support_feature(_cpu_feature_index);
}
void enable_feature(int64_t value = 0) {
+ RVFeatureValue::enable_feature(value);
RVExtFeatures::current()->set_feature(_cpu_feature_index);
}
void disable_feature() {
+ RVFeatureValue::disable_feature();
RVExtFeatures::current()->clear_feature(_cpu_feature_index);
}
- void log_enabled();
-
- protected:
- bool deps_all_enabled(RVExtFeatureValue* dep0, ...) {
- assert(dep0 != nullptr, "must not");
-
- va_list va;
- va_start(va, dep0);
- RVExtFeatureValue* next = dep0;
- bool enabled = true;
- while (next != nullptr && enabled) {
- enabled = next->enabled();
- next = va_arg(va, RVExtFeatureValue*);
- }
- va_end(va);
- return enabled;
- }
-
- void deps_string(stringStream& ss, RVExtFeatureValue* dep0, ...) {
- assert(dep0 != nullptr, "must not");
- ss.print("%s (%s)", dep0->pretty(), dep0->enabled() ? "enabled" : "disabled");
-
- va_list va;
- va_start(va, dep0);
- RVExtFeatureValue* next = nullptr;
- while ((next = va_arg(va, RVExtFeatureValue*)) != nullptr) {
- ss.print(", %s (%s)", next->pretty(), next->enabled() ? "enabled" : "disabled");
- }
- va_end(va);
- }
-
-#ifdef ASSERT
- void verify_deps(RVExtFeatureValue* dep0, ...) {
- assert(dep0 != nullptr, "must not");
- assert(cpu_feature_index() >= 0, "must");
-
- va_list va;
- va_start(va, dep0);
- RVExtFeatureValue* next = dep0;
- while (next != nullptr) {
- assert(next->cpu_feature_index() >= 0, "must");
- // We only need to check depenency relationship for extension flags.
- // The dependant ones must be declared before this, for example, v must be declared
- // before Zvfh in RV_EXT_FEATURE_FLAGS. The reason is in setup_cpu_available_features
- // we need to make sure v is `update_flag`ed before Zvfh, so Zvfh is `update_flag`ed
- // based on v.
- assert(cpu_feature_index() > next->cpu_feature_index(), "Invalid");
- next = va_arg(va, RVExtFeatureValue*);
- }
- va_end(va);
- }
-#endif // ASSERT
};
class RVNonExtFeatureValue : public RVFeatureValue {
- static const int64_t DEFAULT_VALUE = -1;
- int64_t _value;
-
+ bool _enabled;
public:
RVNonExtFeatureValue(const char* pretty, int linux_bit_num, bool fstring) :
RVFeatureValue(pretty, linux_bit_num, fstring),
- _value(DEFAULT_VALUE) {
+ _enabled(false) {
}
- bool enabled() { return _value != DEFAULT_VALUE; }
- void enable_feature(int64_t value) {
- assert(value != DEFAULT_VALUE, "Sanity");
- _value = value;
+ bool enabled() { return _enabled; }
+ void enable_feature(int64_t value = 0) {
+ RVFeatureValue::enable_feature(value);
+ _enabled = true;
+ }
+ void disable_feature() {
+ RVFeatureValue::disable_feature();
+ _enabled = false;
}
- void disable_feature() { _value = DEFAULT_VALUE; }
- int64_t value() { return _value; }
- void log_enabled();
};
public:
@@ -305,14 +282,14 @@ class VM_Version : public Abstract_VM_Version {
decl(marchid , RV_NO_FLAG_BIT, false, NO_UPDATE_DEFAULT) \
/* A unique encoding of the version of the processor implementation. */ \
decl(mimpid , RV_NO_FLAG_BIT, false, NO_UPDATE_DEFAULT) \
- /* Manufactory JEDEC id encoded, ISA vol 2 3.1.2.. */ \
- decl(mvendorid , RV_NO_FLAG_BIT, false, NO_UPDATE_DEFAULT) \
/* SATP bits (number of virtual addr bits) mbare, sv39, sv48, sv57, sv64 */ \
decl(satp_mode , RV_NO_FLAG_BIT, false, NO_UPDATE_DEFAULT) \
/* Performance of misaligned scalar accesses (unknown, emulated, slow, fast, unsupported) */ \
decl(unaligned_scalar , RV_NO_FLAG_BIT, false, NO_UPDATE_DEFAULT) \
/* Performance of misaligned vector accesses (unknown, unspported, slow, fast) */ \
decl(unaligned_vector , RV_NO_FLAG_BIT, false, NO_UPDATE_DEFAULT) \
+  /* Manufacturer JEDEC id encoded, ISA vol 2 3.1.2. */ \
+ decl(mvendorid , RV_NO_FLAG_BIT, false, NO_UPDATE_DEFAULT) \
decl(zicboz_block_size , RV_NO_FLAG_BIT, false, NO_UPDATE_DEFAULT) \
#define DECLARE_RV_NON_EXT_FEATURE(PRETTY, LINUX_BIT, FSTRING, FLAGF) \
diff --git a/src/hotspot/cpu/s390/gc/g1/g1_s390.ad b/src/hotspot/cpu/s390/gc/g1/g1_s390.ad
index 000ac3bc5ba5b..7aed374fdaebd 100644
--- a/src/hotspot/cpu/s390/gc/g1/g1_s390.ad
+++ b/src/hotspot/cpu/s390/gc/g1/g1_s390.ad
@@ -356,7 +356,7 @@ instruct g1CompareAndExchangeP(iRegP mem_ptr, rarg5RegP oldval, iRegP_N2P newval
__ z_lgr($res$$Register, $oldval$$Register); // previous content
- __ z_csg($res$$Register, $newval$$Register, 0, $mem_ptr$$reg);
+ __ z_csg($oldval$$Register, $newval$$Register, 0, $mem_ptr$$reg);
write_barrier_post(masm, this,
$mem_ptr$$Register /* store_addr */,
diff --git a/src/hotspot/cpu/s390/javaFrameAnchor_s390.hpp b/src/hotspot/cpu/s390/javaFrameAnchor_s390.hpp
index 307034ca0cd5d..ae8b8766159c6 100644
--- a/src/hotspot/cpu/s390/javaFrameAnchor_s390.hpp
+++ b/src/hotspot/cpu/s390/javaFrameAnchor_s390.hpp
@@ -35,32 +35,38 @@
// 3 - restoring an old state (javaCalls).
inline void clear(void) {
- // No hardware barriers are necessary. All members are volatile and the profiler
- // is run from a signal handler and only observers the thread its running on.
-
// Clearing _last_Java_sp must be first.
-
+ OrderAccess::release();
_last_Java_sp = nullptr;
+    // TODO(review): confirm whether a full fence is actually required here
+ OrderAccess::fence();
_last_Java_pc = nullptr;
}
inline void set(intptr_t* sp, address pc) {
_last_Java_pc = pc;
+
+ OrderAccess::release();
_last_Java_sp = sp;
}
void copy(JavaFrameAnchor* src) {
- // No hardware barriers are necessary. All members are volatile and the profiler
- // is run from a signal handler and only observers the thread its running on.
-
+ // In order to make sure the transition state is valid for "this"
// we must clear _last_Java_sp before copying the rest of the new data.
+ // Hack Alert: Temporary bugfix for 4717480/4721647
+ // To act like previous version (pd_cache_state) don't null _last_Java_sp
+ // unless the value is changing.
+ //
if (_last_Java_sp != src->_last_Java_sp) {
+ OrderAccess::release();
_last_Java_sp = nullptr;
+ OrderAccess::fence();
}
_last_Java_pc = src->_last_Java_pc;
// Must be last so profiler will always see valid frame if has_last_frame() is true.
+ OrderAccess::release();
_last_Java_sp = src->_last_Java_sp;
}
@@ -74,7 +80,7 @@
intptr_t* last_Java_fp(void) { return nullptr; }
intptr_t* last_Java_sp() const { return _last_Java_sp; }
- void set_last_Java_sp(intptr_t* sp) { _last_Java_sp = sp; }
+ void set_last_Java_sp(intptr_t* sp) { OrderAccess::release(); _last_Java_sp = sp; }
address last_Java_pc(void) { return _last_Java_pc; }
diff --git a/src/hotspot/cpu/s390/s390.ad b/src/hotspot/cpu/s390/s390.ad
index ab991896b53d8..cfc8b19534b59 100644
--- a/src/hotspot/cpu/s390/s390.ad
+++ b/src/hotspot/cpu/s390/s390.ad
@@ -1961,22 +1961,22 @@ bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
}
// Register for DIVI projection of divmodI
-const RegMask& Matcher::divI_proj_mask() {
+RegMask Matcher::divI_proj_mask() {
return _Z_RARG4_INT_REG_mask;
}
// Register for MODI projection of divmodI
-const RegMask& Matcher::modI_proj_mask() {
+RegMask Matcher::modI_proj_mask() {
return _Z_RARG3_INT_REG_mask;
}
// Register for DIVL projection of divmodL
-const RegMask& Matcher::divL_proj_mask() {
+RegMask Matcher::divL_proj_mask() {
return _Z_RARG4_LONG_REG_mask;
}
// Register for MODL projection of divmodL
-const RegMask& Matcher::modL_proj_mask() {
+RegMask Matcher::modL_proj_mask() {
return _Z_RARG3_LONG_REG_mask;
}
diff --git a/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp
index 09c5d93dbb3a2..925444792caac 100644
--- a/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp
@@ -471,33 +471,33 @@ void SaveLiveRegisters::initialize(BarrierStubC2* stub) {
// Create mask of caller saved registers that need to
// be saved/restored if live
RegMask caller_saved;
- caller_saved.insert(OptoReg::as_OptoReg(rax->as_VMReg()));
- caller_saved.insert(OptoReg::as_OptoReg(rcx->as_VMReg()));
- caller_saved.insert(OptoReg::as_OptoReg(rdx->as_VMReg()));
- caller_saved.insert(OptoReg::as_OptoReg(rsi->as_VMReg()));
- caller_saved.insert(OptoReg::as_OptoReg(rdi->as_VMReg()));
- caller_saved.insert(OptoReg::as_OptoReg(r8->as_VMReg()));
- caller_saved.insert(OptoReg::as_OptoReg(r9->as_VMReg()));
- caller_saved.insert(OptoReg::as_OptoReg(r10->as_VMReg()));
- caller_saved.insert(OptoReg::as_OptoReg(r11->as_VMReg()));
+ caller_saved.Insert(OptoReg::as_OptoReg(rax->as_VMReg()));
+ caller_saved.Insert(OptoReg::as_OptoReg(rcx->as_VMReg()));
+ caller_saved.Insert(OptoReg::as_OptoReg(rdx->as_VMReg()));
+ caller_saved.Insert(OptoReg::as_OptoReg(rsi->as_VMReg()));
+ caller_saved.Insert(OptoReg::as_OptoReg(rdi->as_VMReg()));
+ caller_saved.Insert(OptoReg::as_OptoReg(r8->as_VMReg()));
+ caller_saved.Insert(OptoReg::as_OptoReg(r9->as_VMReg()));
+ caller_saved.Insert(OptoReg::as_OptoReg(r10->as_VMReg()));
+ caller_saved.Insert(OptoReg::as_OptoReg(r11->as_VMReg()));
if (UseAPX) {
- caller_saved.insert(OptoReg::as_OptoReg(r16->as_VMReg()));
- caller_saved.insert(OptoReg::as_OptoReg(r17->as_VMReg()));
- caller_saved.insert(OptoReg::as_OptoReg(r18->as_VMReg()));
- caller_saved.insert(OptoReg::as_OptoReg(r19->as_VMReg()));
- caller_saved.insert(OptoReg::as_OptoReg(r20->as_VMReg()));
- caller_saved.insert(OptoReg::as_OptoReg(r21->as_VMReg()));
- caller_saved.insert(OptoReg::as_OptoReg(r22->as_VMReg()));
- caller_saved.insert(OptoReg::as_OptoReg(r23->as_VMReg()));
- caller_saved.insert(OptoReg::as_OptoReg(r24->as_VMReg()));
- caller_saved.insert(OptoReg::as_OptoReg(r25->as_VMReg()));
- caller_saved.insert(OptoReg::as_OptoReg(r26->as_VMReg()));
- caller_saved.insert(OptoReg::as_OptoReg(r27->as_VMReg()));
- caller_saved.insert(OptoReg::as_OptoReg(r28->as_VMReg()));
- caller_saved.insert(OptoReg::as_OptoReg(r29->as_VMReg()));
- caller_saved.insert(OptoReg::as_OptoReg(r30->as_VMReg()));
- caller_saved.insert(OptoReg::as_OptoReg(r31->as_VMReg()));
+ caller_saved.Insert(OptoReg::as_OptoReg(r16->as_VMReg()));
+ caller_saved.Insert(OptoReg::as_OptoReg(r17->as_VMReg()));
+ caller_saved.Insert(OptoReg::as_OptoReg(r18->as_VMReg()));
+ caller_saved.Insert(OptoReg::as_OptoReg(r19->as_VMReg()));
+ caller_saved.Insert(OptoReg::as_OptoReg(r20->as_VMReg()));
+ caller_saved.Insert(OptoReg::as_OptoReg(r21->as_VMReg()));
+ caller_saved.Insert(OptoReg::as_OptoReg(r22->as_VMReg()));
+ caller_saved.Insert(OptoReg::as_OptoReg(r23->as_VMReg()));
+ caller_saved.Insert(OptoReg::as_OptoReg(r24->as_VMReg()));
+ caller_saved.Insert(OptoReg::as_OptoReg(r25->as_VMReg()));
+ caller_saved.Insert(OptoReg::as_OptoReg(r26->as_VMReg()));
+ caller_saved.Insert(OptoReg::as_OptoReg(r27->as_VMReg()));
+ caller_saved.Insert(OptoReg::as_OptoReg(r28->as_VMReg()));
+ caller_saved.Insert(OptoReg::as_OptoReg(r29->as_VMReg()));
+ caller_saved.Insert(OptoReg::as_OptoReg(r30->as_VMReg()));
+ caller_saved.Insert(OptoReg::as_OptoReg(r31->as_VMReg()));
}
int gp_spill_size = 0;
@@ -511,7 +511,7 @@ void SaveLiveRegisters::initialize(BarrierStubC2* stub) {
const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
if (vm_reg->is_Register()) {
- if (caller_saved.member(opto_reg)) {
+ if (caller_saved.Member(opto_reg)) {
_gp_registers.append(vm_reg->as_Register());
gp_spill_size += 8;
}
diff --git a/src/hotspot/cpu/x86/x86_64.ad b/src/hotspot/cpu/x86/x86_64.ad
index 62306b562d6d4..b40f9e2924a97 100644
--- a/src/hotspot/cpu/x86/x86_64.ad
+++ b/src/hotspot/cpu/x86/x86_64.ad
@@ -497,96 +497,96 @@ void reg_mask_init() {
// _ALL_REG_mask is generated by adlc from the all_reg register class below.
// We derive a number of subsets from it.
- _ANY_REG_mask.assignFrom(_ALL_REG_mask);
+ _ANY_REG_mask = _ALL_REG_mask;
if (PreserveFramePointer) {
- _ANY_REG_mask.remove(OptoReg::as_OptoReg(rbp->as_VMReg()));
- _ANY_REG_mask.remove(OptoReg::as_OptoReg(rbp->as_VMReg()->next()));
+ _ANY_REG_mask.Remove(OptoReg::as_OptoReg(rbp->as_VMReg()));
+ _ANY_REG_mask.Remove(OptoReg::as_OptoReg(rbp->as_VMReg()->next()));
}
if (need_r12_heapbase()) {
- _ANY_REG_mask.remove(OptoReg::as_OptoReg(r12->as_VMReg()));
- _ANY_REG_mask.remove(OptoReg::as_OptoReg(r12->as_VMReg()->next()));
+ _ANY_REG_mask.Remove(OptoReg::as_OptoReg(r12->as_VMReg()));
+ _ANY_REG_mask.Remove(OptoReg::as_OptoReg(r12->as_VMReg()->next()));
}
- _PTR_REG_mask.assignFrom(_ANY_REG_mask);
- _PTR_REG_mask.remove(OptoReg::as_OptoReg(rsp->as_VMReg()));
- _PTR_REG_mask.remove(OptoReg::as_OptoReg(rsp->as_VMReg()->next()));
- _PTR_REG_mask.remove(OptoReg::as_OptoReg(r15->as_VMReg()));
- _PTR_REG_mask.remove(OptoReg::as_OptoReg(r15->as_VMReg()->next()));
+ _PTR_REG_mask = _ANY_REG_mask;
+ _PTR_REG_mask.Remove(OptoReg::as_OptoReg(rsp->as_VMReg()));
+ _PTR_REG_mask.Remove(OptoReg::as_OptoReg(rsp->as_VMReg()->next()));
+ _PTR_REG_mask.Remove(OptoReg::as_OptoReg(r15->as_VMReg()));
+ _PTR_REG_mask.Remove(OptoReg::as_OptoReg(r15->as_VMReg()->next()));
if (!UseAPX) {
for (uint i = 0; i < sizeof(egprs)/sizeof(Register); i++) {
- _PTR_REG_mask.remove(OptoReg::as_OptoReg(egprs[i]->as_VMReg()));
- _PTR_REG_mask.remove(OptoReg::as_OptoReg(egprs[i]->as_VMReg()->next()));
+ _PTR_REG_mask.Remove(OptoReg::as_OptoReg(egprs[i]->as_VMReg()));
+ _PTR_REG_mask.Remove(OptoReg::as_OptoReg(egprs[i]->as_VMReg()->next()));
}
}
- _STACK_OR_PTR_REG_mask.assignFrom(_PTR_REG_mask);
- _STACK_OR_PTR_REG_mask.or_with(STACK_OR_STACK_SLOTS_mask());
+ _STACK_OR_PTR_REG_mask = _PTR_REG_mask;
+ _STACK_OR_PTR_REG_mask.OR(STACK_OR_STACK_SLOTS_mask());
- _PTR_REG_NO_RBP_mask.assignFrom(_PTR_REG_mask);
- _PTR_REG_NO_RBP_mask.remove(OptoReg::as_OptoReg(rbp->as_VMReg()));
- _PTR_REG_NO_RBP_mask.remove(OptoReg::as_OptoReg(rbp->as_VMReg()->next()));
+ _PTR_REG_NO_RBP_mask = _PTR_REG_mask;
+ _PTR_REG_NO_RBP_mask.Remove(OptoReg::as_OptoReg(rbp->as_VMReg()));
+ _PTR_REG_NO_RBP_mask.Remove(OptoReg::as_OptoReg(rbp->as_VMReg()->next()));
- _PTR_NO_RAX_REG_mask.assignFrom(_PTR_REG_mask);
- _PTR_NO_RAX_REG_mask.remove(OptoReg::as_OptoReg(rax->as_VMReg()));
- _PTR_NO_RAX_REG_mask.remove(OptoReg::as_OptoReg(rax->as_VMReg()->next()));
+ _PTR_NO_RAX_REG_mask = _PTR_REG_mask;
+ _PTR_NO_RAX_REG_mask.Remove(OptoReg::as_OptoReg(rax->as_VMReg()));
+ _PTR_NO_RAX_REG_mask.Remove(OptoReg::as_OptoReg(rax->as_VMReg()->next()));
- _PTR_NO_RAX_RBX_REG_mask.assignFrom(_PTR_NO_RAX_REG_mask);
- _PTR_NO_RAX_RBX_REG_mask.remove(OptoReg::as_OptoReg(rbx->as_VMReg()));
- _PTR_NO_RAX_RBX_REG_mask.remove(OptoReg::as_OptoReg(rbx->as_VMReg()->next()));
+ _PTR_NO_RAX_RBX_REG_mask = _PTR_NO_RAX_REG_mask;
+ _PTR_NO_RAX_RBX_REG_mask.Remove(OptoReg::as_OptoReg(rbx->as_VMReg()));
+ _PTR_NO_RAX_RBX_REG_mask.Remove(OptoReg::as_OptoReg(rbx->as_VMReg()->next()));
- _LONG_REG_mask.assignFrom(_PTR_REG_mask);
- _STACK_OR_LONG_REG_mask.assignFrom(_LONG_REG_mask);
- _STACK_OR_LONG_REG_mask.or_with(STACK_OR_STACK_SLOTS_mask());
+ _LONG_REG_mask = _PTR_REG_mask;
+ _STACK_OR_LONG_REG_mask = _LONG_REG_mask;
+ _STACK_OR_LONG_REG_mask.OR(STACK_OR_STACK_SLOTS_mask());
- _LONG_NO_RAX_RDX_REG_mask.assignFrom(_LONG_REG_mask);
- _LONG_NO_RAX_RDX_REG_mask.remove(OptoReg::as_OptoReg(rax->as_VMReg()));
- _LONG_NO_RAX_RDX_REG_mask.remove(OptoReg::as_OptoReg(rax->as_VMReg()->next()));
- _LONG_NO_RAX_RDX_REG_mask.remove(OptoReg::as_OptoReg(rdx->as_VMReg()));
- _LONG_NO_RAX_RDX_REG_mask.remove(OptoReg::as_OptoReg(rdx->as_VMReg()->next()));
+ _LONG_NO_RAX_RDX_REG_mask = _LONG_REG_mask;
+ _LONG_NO_RAX_RDX_REG_mask.Remove(OptoReg::as_OptoReg(rax->as_VMReg()));
+ _LONG_NO_RAX_RDX_REG_mask.Remove(OptoReg::as_OptoReg(rax->as_VMReg()->next()));
+ _LONG_NO_RAX_RDX_REG_mask.Remove(OptoReg::as_OptoReg(rdx->as_VMReg()));
+ _LONG_NO_RAX_RDX_REG_mask.Remove(OptoReg::as_OptoReg(rdx->as_VMReg()->next()));
- _LONG_NO_RCX_REG_mask.assignFrom(_LONG_REG_mask);
- _LONG_NO_RCX_REG_mask.remove(OptoReg::as_OptoReg(rcx->as_VMReg()));
- _LONG_NO_RCX_REG_mask.remove(OptoReg::as_OptoReg(rcx->as_VMReg()->next()));
+ _LONG_NO_RCX_REG_mask = _LONG_REG_mask;
+ _LONG_NO_RCX_REG_mask.Remove(OptoReg::as_OptoReg(rcx->as_VMReg()));
+ _LONG_NO_RCX_REG_mask.Remove(OptoReg::as_OptoReg(rcx->as_VMReg()->next()));
- _LONG_NO_RBP_R13_REG_mask.assignFrom(_LONG_REG_mask);
- _LONG_NO_RBP_R13_REG_mask.remove(OptoReg::as_OptoReg(rbp->as_VMReg()));
- _LONG_NO_RBP_R13_REG_mask.remove(OptoReg::as_OptoReg(rbp->as_VMReg()->next()));
- _LONG_NO_RBP_R13_REG_mask.remove(OptoReg::as_OptoReg(r13->as_VMReg()));
- _LONG_NO_RBP_R13_REG_mask.remove(OptoReg::as_OptoReg(r13->as_VMReg()->next()));
+ _LONG_NO_RBP_R13_REG_mask = _LONG_REG_mask;
+ _LONG_NO_RBP_R13_REG_mask.Remove(OptoReg::as_OptoReg(rbp->as_VMReg()));
+ _LONG_NO_RBP_R13_REG_mask.Remove(OptoReg::as_OptoReg(rbp->as_VMReg()->next()));
+ _LONG_NO_RBP_R13_REG_mask.Remove(OptoReg::as_OptoReg(r13->as_VMReg()));
+ _LONG_NO_RBP_R13_REG_mask.Remove(OptoReg::as_OptoReg(r13->as_VMReg()->next()));
- _INT_REG_mask.assignFrom(_ALL_INT_REG_mask);
+ _INT_REG_mask = _ALL_INT_REG_mask;
if (!UseAPX) {
for (uint i = 0; i < sizeof(egprs)/sizeof(Register); i++) {
- _INT_REG_mask.remove(OptoReg::as_OptoReg(egprs[i]->as_VMReg()));
+ _INT_REG_mask.Remove(OptoReg::as_OptoReg(egprs[i]->as_VMReg()));
}
}
if (PreserveFramePointer) {
- _INT_REG_mask.remove(OptoReg::as_OptoReg(rbp->as_VMReg()));
+ _INT_REG_mask.Remove(OptoReg::as_OptoReg(rbp->as_VMReg()));
}
if (need_r12_heapbase()) {
- _INT_REG_mask.remove(OptoReg::as_OptoReg(r12->as_VMReg()));
+ _INT_REG_mask.Remove(OptoReg::as_OptoReg(r12->as_VMReg()));
}
- _STACK_OR_INT_REG_mask.assignFrom(_INT_REG_mask);
- _STACK_OR_INT_REG_mask.or_with(STACK_OR_STACK_SLOTS_mask());
+ _STACK_OR_INT_REG_mask = _INT_REG_mask;
+ _STACK_OR_INT_REG_mask.OR(STACK_OR_STACK_SLOTS_mask());
- _INT_NO_RAX_RDX_REG_mask.assignFrom(_INT_REG_mask);
- _INT_NO_RAX_RDX_REG_mask.remove(OptoReg::as_OptoReg(rax->as_VMReg()));
- _INT_NO_RAX_RDX_REG_mask.remove(OptoReg::as_OptoReg(rdx->as_VMReg()));
+ _INT_NO_RAX_RDX_REG_mask = _INT_REG_mask;
+ _INT_NO_RAX_RDX_REG_mask.Remove(OptoReg::as_OptoReg(rax->as_VMReg()));
+ _INT_NO_RAX_RDX_REG_mask.Remove(OptoReg::as_OptoReg(rdx->as_VMReg()));
- _INT_NO_RCX_REG_mask.assignFrom(_INT_REG_mask);
- _INT_NO_RCX_REG_mask.remove(OptoReg::as_OptoReg(rcx->as_VMReg()));
+ _INT_NO_RCX_REG_mask = _INT_REG_mask;
+ _INT_NO_RCX_REG_mask.Remove(OptoReg::as_OptoReg(rcx->as_VMReg()));
- _INT_NO_RBP_R13_REG_mask.assignFrom(_INT_REG_mask);
- _INT_NO_RBP_R13_REG_mask.remove(OptoReg::as_OptoReg(rbp->as_VMReg()));
- _INT_NO_RBP_R13_REG_mask.remove(OptoReg::as_OptoReg(r13->as_VMReg()));
+ _INT_NO_RBP_R13_REG_mask = _INT_REG_mask;
+ _INT_NO_RBP_R13_REG_mask.Remove(OptoReg::as_OptoReg(rbp->as_VMReg()));
+ _INT_NO_RBP_R13_REG_mask.Remove(OptoReg::as_OptoReg(r13->as_VMReg()));
// _FLOAT_REG_LEGACY_mask/_FLOAT_REG_EVEX_mask is generated by adlc
// from the float_reg_legacy/float_reg_evex register class.
- _FLOAT_REG_mask.assignFrom(VM_Version::supports_evex() ? _FLOAT_REG_EVEX_mask : _FLOAT_REG_LEGACY_mask);
+ _FLOAT_REG_mask = VM_Version::supports_evex() ? _FLOAT_REG_EVEX_mask : _FLOAT_REG_LEGACY_mask;
}
static bool generate_vzeroupper(Compile* C) {
@@ -756,7 +756,7 @@ static void emit_fp_min_max(MacroAssembler* masm, XMMRegister dst,
}
//=============================================================================
-const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::EMPTY;
+const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
int ConstantTable::calculate_table_base_offset() const {
return 0; // absolute addressing, no offset
@@ -1658,7 +1658,7 @@ bool Matcher::is_spillable_arg(int reg)
uint Matcher::int_pressure_limit()
{
- return (INTPRESSURE == -1) ? _INT_REG_mask.size() : INTPRESSURE;
+ return (INTPRESSURE == -1) ? _INT_REG_mask.Size() : INTPRESSURE;
}
uint Matcher::float_pressure_limit()
@@ -1666,7 +1666,7 @@ uint Matcher::float_pressure_limit()
// After experiment around with different values, the following default threshold
// works best for LCM's register pressure scheduling on x64.
uint dec_count = VM_Version::supports_evex() ? 4 : 2;
- uint default_float_pressure_threshold = _FLOAT_REG_mask.size() - dec_count;
+ uint default_float_pressure_threshold = _FLOAT_REG_mask.Size() - dec_count;
return (FLOATPRESSURE == -1) ? default_float_pressure_threshold : FLOATPRESSURE;
}
@@ -1678,22 +1678,22 @@ bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
}
// Register for DIVI projection of divmodI
-const RegMask& Matcher::divI_proj_mask() {
+RegMask Matcher::divI_proj_mask() {
return INT_RAX_REG_mask();
}
// Register for MODI projection of divmodI
-const RegMask& Matcher::modI_proj_mask() {
+RegMask Matcher::modI_proj_mask() {
return INT_RDX_REG_mask();
}
// Register for DIVL projection of divmodL
-const RegMask& Matcher::divL_proj_mask() {
+RegMask Matcher::divL_proj_mask() {
return LONG_RAX_REG_mask();
}
// Register for MODL projection of divmodL
-const RegMask& Matcher::modL_proj_mask() {
+RegMask Matcher::modL_proj_mask() {
return LONG_RDX_REG_mask();
}
diff --git a/src/hotspot/os/bsd/gc/z/zNUMA_bsd.cpp b/src/hotspot/os/bsd/gc/z/zNUMA_bsd.cpp
index 3acaa9ab8f94f..d0c06e2ebf1bc 100644
--- a/src/hotspot/os/bsd/gc/z/zNUMA_bsd.cpp
+++ b/src/hotspot/os/bsd/gc/z/zNUMA_bsd.cpp
@@ -46,7 +46,3 @@ uint32_t ZNUMA::memory_id(uintptr_t addr) {
// NUMA support not enabled, assume everything belongs to node zero
return 0;
}
-
-int ZNUMA::numa_id_to_node(uint32_t numa_id) {
- ShouldNotCallThis();
-}
diff --git a/src/hotspot/os/linux/gc/z/zNUMA_linux.cpp b/src/hotspot/os/linux/gc/z/zNUMA_linux.cpp
index ab7498b313c5b..74e696559401f 100644
--- a/src/hotspot/os/linux/gc/z/zNUMA_linux.cpp
+++ b/src/hotspot/os/linux/gc/z/zNUMA_linux.cpp
@@ -32,35 +32,12 @@
#include "runtime/os.hpp"
#include "utilities/debug.hpp"
-static uint* z_numa_id_to_node = nullptr;
-static uint32_t* z_node_to_numa_id = nullptr;
-
void ZNUMA::pd_initialize() {
_enabled = UseNUMA;
- size_t configured_nodes = 0;
-
- if (UseNUMA) {
- const size_t max_nodes = os::Linux::numa_num_configured_nodes();
- z_numa_id_to_node = NEW_C_HEAP_ARRAY(uint, max_nodes, mtGC);
- configured_nodes = os::numa_get_leaf_groups(z_numa_id_to_node, 0);
-
- z_node_to_numa_id = NEW_C_HEAP_ARRAY(uint32_t, max_nodes, mtGC);
-
- // Fill the array with invalid NUMA ids
- for (uint32_t i = 0; i < max_nodes; i++) {
- z_node_to_numa_id[i] = (uint32_t)-1;
- }
-
- // Fill the reverse mappings
- for (uint32_t i = 0; i < configured_nodes; i++) {
- z_node_to_numa_id[z_numa_id_to_node[i]] = i;
- }
- }
-
// UseNUMA and is_faked() are mutually excluded in zArguments.cpp.
_count = UseNUMA
- ? configured_nodes
+ ? os::Linux::numa_max_node() + 1
: !FLAG_IS_DEFAULT(ZFakeNUMA)
? ZFakeNUMA
: 1; // No NUMA nodes
@@ -77,7 +54,7 @@ uint32_t ZNUMA::id() {
return 0;
}
- return z_node_to_numa_id[os::Linux::get_node_by_cpu(ZCPU::id())];
+ return os::Linux::get_node_by_cpu(ZCPU::id());
}
uint32_t ZNUMA::memory_id(uintptr_t addr) {
@@ -86,21 +63,14 @@ uint32_t ZNUMA::memory_id(uintptr_t addr) {
return 0;
}
- int node = -1;
+ uint32_t id = (uint32_t)-1;
- if (ZSyscall::get_mempolicy(&node, nullptr, 0, (void*)addr, MPOL_F_NODE | MPOL_F_ADDR) == -1) {
+ if (ZSyscall::get_mempolicy((int*)&id, nullptr, 0, (void*)addr, MPOL_F_NODE | MPOL_F_ADDR) == -1) {
ZErrno err;
fatal("Failed to get NUMA id for memory at " PTR_FORMAT " (%s)", addr, err.to_string());
}
- DEBUG_ONLY(const int max_nodes = os::Linux::numa_num_configured_nodes();)
- assert(node < max_nodes, "NUMA node is out of bounds node=%d, max=%d", node, max_nodes);
-
- return z_node_to_numa_id[node];
-}
-
-int ZNUMA::numa_id_to_node(uint32_t numa_id) {
- assert(numa_id < _count, "NUMA id out of range 0 <= %ud <= %ud", numa_id, _count);
+ assert(id < _count, "Invalid NUMA id");
- return (int)z_numa_id_to_node[numa_id];
+ return id;
}
diff --git a/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp b/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp
index 25ffd0b8078e4..84dfcbd6614eb 100644
--- a/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp
+++ b/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp
@@ -629,7 +629,7 @@ bool ZPhysicalMemoryBacking::commit_inner(zbacking_offset offset, size_t length)
size_t ZPhysicalMemoryBacking::commit_numa_preferred(zbacking_offset offset, size_t length, uint32_t numa_id) const {
// Setup NUMA policy to allocate memory from a preferred node
- os::Linux::numa_set_preferred(ZNUMA::numa_id_to_node(numa_id));
+ os::Linux::numa_set_preferred((int)numa_id);
const size_t committed = commit_default(offset, length);
diff --git a/src/hotspot/os/posix/perfMemory_posix.cpp b/src/hotspot/os/posix/perfMemory_posix.cpp
index 2cc0263d2913b..ed83487265cb0 100644
--- a/src/hotspot/os/posix/perfMemory_posix.cpp
+++ b/src/hotspot/os/posix/perfMemory_posix.cpp
@@ -26,7 +26,6 @@
#include "classfile/vmSymbols.hpp"
#include "jvm_io.h"
#include "logging/log.hpp"
-#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "nmt/memTracker.hpp"
@@ -72,7 +71,9 @@ static char* create_standard_memory(size_t size) {
// commit memory
if (!os::commit_memory(mapAddress, size, !ExecMem)) {
- log_debug(perf)("could not commit PerfData memory");
+ if (PrintMiscellaneous && Verbose) {
+ warning("Could not commit PerfData memory\n");
+ }
os::release_memory(mapAddress, size);
return nullptr;
}
@@ -296,12 +297,11 @@ static DIR *open_directory_secure(const char* dirname) {
RESTARTABLE(::open(dirname, O_RDONLY|O_NOFOLLOW), result);
if (result == OS_ERR) {
// Directory doesn't exist or is a symlink, so there is nothing to cleanup.
- if (log_is_enabled(Debug, perf)) {
- LogStreamHandle(Debug, perf) log;
+ if (PrintMiscellaneous && Verbose) {
if (errno == ELOOP) {
- log.print_cr("directory %s is a symlink and is not secure", dirname);
+ warning("directory %s is a symlink and is not secure\n", dirname);
} else {
- log.print_cr("could not open directory %s: %s", dirname, os::strerror(errno));
+ warning("could not open directory %s: %s\n", dirname, os::strerror(errno));
}
}
return dirp;
@@ -371,7 +371,9 @@ static DIR *open_directory_secure_cwd(const char* dirname, int *saved_cwd_fd) {
// handle errors, otherwise shared memory files will be created in cwd.
result = fchdir(fd);
if (result == OS_ERR) {
- log_debug(perf)("could not change to directory %s", dirname);
+ if (PrintMiscellaneous && Verbose) {
+ warning("could not change to directory %s", dirname);
+ }
if (*saved_cwd_fd != -1) {
::close(*saved_cwd_fd);
*saved_cwd_fd = -1;
@@ -409,12 +411,16 @@ static bool is_file_secure(int fd, const char *filename) {
// Determine if the file is secure.
RESTARTABLE(::fstat(fd, &statbuf), result);
if (result == OS_ERR) {
- log_debug(perf)("fstat failed on %s: %s", filename, os::strerror(errno));
+ if (PrintMiscellaneous && Verbose) {
+ warning("fstat failed on %s: %s\n", filename, os::strerror(errno));
+ }
return false;
}
if (statbuf.st_nlink > 1) {
// A file with multiple links is not expected.
- log_debug(perf)("file %s has multiple links", filename);
+ if (PrintMiscellaneous && Verbose) {
+ warning("file %s has multiple links\n", filename);
+ }
return false;
}
return true;
@@ -441,10 +447,10 @@ static char* get_user_name(uid_t uid) {
int result = getpwuid_r(uid, &pwent, pwbuf, (size_t)bufsize, &p);
if (result != 0 || p == nullptr || p->pw_name == nullptr || *(p->pw_name) == '\0') {
- if (log_is_enabled(Debug, perf)) {
- LogStreamHandle(Debug, perf) log;
+ if (PrintMiscellaneous && Verbose) {
if (result != 0) {
- log.print_cr("Could not retrieve passwd entry: %s", os::strerror(result));
+ warning("Could not retrieve passwd entry: %s\n",
+ os::strerror(result));
}
else if (p == nullptr) {
// this check is added to protect against an observed problem
@@ -457,11 +463,13 @@ static char* get_user_name(uid_t uid) {
// message may result in an erroneous message.
// Bug Id 89052 was opened with RedHat.
//
- log.print_cr("Could not retrieve passwd entry: %s", os::strerror(errno));
+ warning("Could not retrieve passwd entry: %s\n",
+ os::strerror(errno));
}
else {
- log.print_cr("Could not determine user name: %s",
- p->pw_name == nullptr ? "pw_name = null" : "pw_name zero length");
+ warning("Could not determine user name: %s\n",
+ p->pw_name == nullptr ? "pw_name = null" :
+ "pw_name zero length");
}
}
FREE_C_HEAP_ARRAY(char, pwbuf);
@@ -672,10 +680,10 @@ static void remove_file(const char* path) {
// maliciously planted, the directory's presence won't hurt anything.
//
RESTARTABLE(::unlink(path), result);
- if (log_is_enabled(Debug, perf) && result == OS_ERR) {
+ if (PrintMiscellaneous && Verbose && result == OS_ERR) {
if (errno != ENOENT) {
- log_debug(perf)("could not unlink shared memory backing store file %s : %s",
- path, os::strerror(errno));
+ warning("Could not unlink shared memory backing"
+ " store file %s : %s\n", path, os::strerror(errno));
}
}
}
@@ -811,16 +819,23 @@ static bool make_user_tmp_dir(const char* dirname) {
// The directory already exists and was probably created by another
// JVM instance. However, this could also be the result of a
// deliberate symlink. Verify that the existing directory is safe.
+ //
if (!is_directory_secure(dirname)) {
// directory is not secure
- log_debug(perf)("%s directory is insecure", dirname);
+ if (PrintMiscellaneous && Verbose) {
+ warning("%s directory is insecure\n", dirname);
+ }
return false;
}
}
else {
// we encountered some other failure while attempting
// to create the directory
- log_debug(perf)("could not create directory %s: %s", dirname, os::strerror(errno));
+ //
+ if (PrintMiscellaneous && Verbose) {
+ warning("could not create directory %s: %s\n",
+ dirname, os::strerror(errno));
+ }
return false;
}
}
@@ -857,12 +872,11 @@ static int create_sharedmem_file(const char* dirname, const char* filename, size
int fd;
RESTARTABLE(os::open(filename, O_RDWR|O_CREAT|O_NOFOLLOW, S_IRUSR|S_IWUSR), fd);
if (fd == OS_ERR) {
- if (log_is_enabled(Debug, perf)) {
- LogStreamHandle(Debug, perf) log;
+ if (PrintMiscellaneous && Verbose) {
if (errno == ELOOP) {
- log.print_cr("file %s is a symlink and is not secure", filename);
+ warning("file %s is a symlink and is not secure\n", filename);
} else {
- log.print_cr("could not create file %s: %s", filename, os::strerror(errno));
+ warning("could not create file %s: %s\n", filename, os::strerror(errno));
}
}
// close the directory and reset the current working directory
@@ -910,14 +924,18 @@ static int create_sharedmem_file(const char* dirname, const char* filename, size
// truncate the file to get rid of any existing data
RESTARTABLE(::ftruncate(fd, (off_t)0), result);
if (result == OS_ERR) {
- log_debug(perf)("could not truncate shared memory file: %s", os::strerror(errno));
+ if (PrintMiscellaneous && Verbose) {
+ warning("could not truncate shared memory file: %s\n", os::strerror(errno));
+ }
::close(fd);
return -1;
}
// set the file size
RESTARTABLE(::ftruncate(fd, (off_t)size), result);
if (result == OS_ERR) {
- log_debug(perf)("could not set shared memory file size: %s", os::strerror(errno));
+ if (PrintMiscellaneous && Verbose) {
+ warning("could not set shared memory file size: %s\n", os::strerror(errno));
+ }
::close(fd);
return -1;
}
@@ -1039,7 +1057,9 @@ static char* mmap_create_shared(size_t size) {
assert(result != OS_ERR, "could not close file");
if (mapAddress == MAP_FAILED) {
- log_debug(perf)("mmap failed - %s", os::strerror(errno));
+ if (PrintMiscellaneous && Verbose) {
+ warning("mmap failed - %s\n", os::strerror(errno));
+ }
remove_file(filename);
FREE_C_HEAP_ARRAY(char, filename);
return nullptr;
@@ -1115,7 +1135,9 @@ static size_t sharedmem_filesize(int fd, TRAPS) {
RESTARTABLE(::fstat(fd, &statbuf), result);
if (result == OS_ERR) {
- log_debug(perf)("fstat failed: %s", os::strerror(errno));
+ if (PrintMiscellaneous && Verbose) {
+ warning("fstat failed: %s\n", os::strerror(errno));
+ }
THROW_MSG_0(vmSymbols::java_io_IOException(),
"Could not determine PerfMemory size");
}
@@ -1190,7 +1212,9 @@ static void mmap_attach_shared(int vmid, char** addr, size_t* sizep, TRAPS) {
assert(result != OS_ERR, "could not close file");
if (mapAddress == MAP_FAILED) {
- log_debug(perf)("mmap failed: %s", os::strerror(errno));
+ if (PrintMiscellaneous && Verbose) {
+ warning("mmap failed: %s\n", os::strerror(errno));
+ }
THROW_MSG(vmSymbols::java_lang_OutOfMemoryError(),
"Could not map PerfMemory");
}
@@ -1220,9 +1244,13 @@ void PerfMemory::create_memory_region(size_t size) {
else {
_start = create_shared_memory(size);
if (_start == nullptr) {
+
// creation of the shared memory region failed, attempt
// to create a contiguous, non-shared memory region instead.
- log_debug(perf)("Reverting to non-shared PerfMemory region.");
+ //
+ if (PrintMiscellaneous && Verbose) {
+ warning("Reverting to non-shared PerfMemory region.\n");
+ }
FLAG_SET_ERGO(PerfDisableSharedMem, true);
_start = create_standard_memory(size);
}
diff --git a/src/hotspot/os/posix/signals_posix.cpp b/src/hotspot/os/posix/signals_posix.cpp
index 5833e324070b0..714eac12d2245 100644
--- a/src/hotspot/os/posix/signals_posix.cpp
+++ b/src/hotspot/os/posix/signals_posix.cpp
@@ -42,7 +42,6 @@
#include "signals_posix.hpp"
#include "suspendResume_posix.hpp"
#include "utilities/checkedCast.hpp"
-#include "utilities/deferredStatic.hpp"
#include "utilities/events.hpp"
#include "utilities/ostream.hpp"
#include "utilities/parseInteger.hpp"
@@ -168,9 +167,9 @@ static get_signal_t get_signal_action = nullptr;
// suspend/resume support
#if defined(__APPLE__)
-static DeferredStatic sr_semaphore;
+ static OSXSemaphore sr_semaphore;
#else
-static DeferredStatic sr_semaphore;
+ static PosixSemaphore sr_semaphore;
#endif
// Signal number used to suspend/resume a thread
@@ -178,7 +177,7 @@ static DeferredStatic sr_semaphore;
int PosixSignals::SR_signum = SIGUSR2;
// sun.misc.Signal support
-static DeferredStatic sig_semaphore;
+static Semaphore* sig_semaphore = nullptr;
// a counter for each possible signal value
static volatile jint pending_signals[NSIG+1] = { 0 };
@@ -352,16 +351,17 @@ static void jdk_misc_signal_init() {
::memset((void*)pending_signals, 0, sizeof(pending_signals));
// Initialize signal semaphore
- int sem_count = 0;
- sig_semaphore.initialize(sem_count);
+ sig_semaphore = new Semaphore();
}
void os::signal_notify(int sig) {
- // Signal thread is not created with ReduceSignalUsage and jdk_misc_signal_init
- // initialization isn't called.
- if (!ReduceSignalUsage) {
+ if (sig_semaphore != nullptr) {
AtomicAccess::inc(&pending_signals[sig]);
sig_semaphore->signal();
+ } else {
+ // Signal thread is not created with ReduceSignalUsage and jdk_misc_signal_init
+ // initialization isn't called.
+ assert(ReduceSignalUsage, "signal semaphore should be created");
}
}
@@ -1696,7 +1696,7 @@ static void SR_handler(int sig, siginfo_t* siginfo, void* context) {
pthread_sigmask(SIG_BLOCK, nullptr, &suspend_set);
sigdelset(&suspend_set, PosixSignals::SR_signum);
- sr_semaphore->signal();
+ sr_semaphore.signal();
// wait here until we are resumed
while (1) {
@@ -1705,7 +1705,7 @@ static void SR_handler(int sig, siginfo_t* siginfo, void* context) {
SuspendResume::State result = osthread->sr.running();
if (result == SuspendResume::SR_RUNNING) {
// double check AIX doesn't need this!
- sr_semaphore->signal();
+ sr_semaphore.signal();
break;
} else if (result != SuspendResume::SR_SUSPENDED) {
ShouldNotReachHere();
@@ -1731,9 +1731,6 @@ static void SR_handler(int sig, siginfo_t* siginfo, void* context) {
}
static int SR_initialize() {
- int sem_count = 0;
- sr_semaphore.initialize(sem_count);
-
struct sigaction act;
char *s;
// Get signal number to use for suspend/resume
@@ -1781,7 +1778,7 @@ static int sr_notify(OSThread* osthread) {
// but this seems the normal response to library errors
bool PosixSignals::do_suspend(OSThread* osthread) {
assert(osthread->sr.is_running(), "thread should be running");
- assert(!sr_semaphore->trywait(), "semaphore has invalid state");
+ assert(!sr_semaphore.trywait(), "semaphore has invalid state");
// mark as suspended and send signal
if (osthread->sr.request_suspend() != SuspendResume::SR_SUSPEND_REQUEST) {
@@ -1796,7 +1793,7 @@ bool PosixSignals::do_suspend(OSThread* osthread) {
// managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
while (true) {
- if (sr_semaphore->timedwait(2)) {
+ if (sr_semaphore.timedwait(2)) {
break;
} else {
// timeout
@@ -1805,7 +1802,7 @@ bool PosixSignals::do_suspend(OSThread* osthread) {
return false;
} else if (cancelled == SuspendResume::SR_SUSPENDED) {
// make sure that we consume the signal on the semaphore as well
- sr_semaphore->wait();
+ sr_semaphore.wait();
break;
} else {
ShouldNotReachHere();
@@ -1820,7 +1817,7 @@ bool PosixSignals::do_suspend(OSThread* osthread) {
void PosixSignals::do_resume(OSThread* osthread) {
assert(osthread->sr.is_suspended(), "thread should be suspended");
- assert(!sr_semaphore->trywait(), "invalid semaphore state");
+ assert(!sr_semaphore.trywait(), "invalid semaphore state");
if (osthread->sr.request_wakeup() != SuspendResume::SR_WAKEUP_REQUEST) {
// failed to switch to WAKEUP_REQUEST
@@ -1830,7 +1827,7 @@ void PosixSignals::do_resume(OSThread* osthread) {
while (true) {
if (sr_notify(osthread) == 0) {
- if (sr_semaphore->timedwait(2)) {
+ if (sr_semaphore.timedwait(2)) {
if (osthread->sr.is_running()) {
return;
}
diff --git a/src/hotspot/os/windows/gc/z/zNUMA_windows.cpp b/src/hotspot/os/windows/gc/z/zNUMA_windows.cpp
index e2bd68035844c..dc7521dde563f 100644
--- a/src/hotspot/os/windows/gc/z/zNUMA_windows.cpp
+++ b/src/hotspot/os/windows/gc/z/zNUMA_windows.cpp
@@ -46,7 +46,3 @@ uint32_t ZNUMA::memory_id(uintptr_t addr) {
// NUMA support not enabled, assume everything belongs to node zero
return 0;
}
-
-int ZNUMA::numa_id_to_node(uint32_t numa_id) {
- ShouldNotCallThis();
-}
diff --git a/src/hotspot/os/windows/os_windows.cpp b/src/hotspot/os/windows/os_windows.cpp
index 7934c9d6ffbd4..ba05d390c9fe1 100644
--- a/src/hotspot/os/windows/os_windows.cpp
+++ b/src/hotspot/os/windows/os_windows.cpp
@@ -6259,106 +6259,3 @@ const void* os::get_saved_assert_context(const void** sigInfo) {
*sigInfo = nullptr;
return nullptr;
}
-
-/*
- * Windows/x64 does not use stack frames the way expected by Java:
- * [1] in most cases, there is no frame pointer. All locals are addressed via RSP
- * [2] in rare cases, when alloca() is used, a frame pointer is used, but this may
- * not be RBP.
- * See http://msdn.microsoft.com/en-us/library/ew5tede7.aspx
- *
- * So it's not possible to print the native stack using the
- * while (...) {... fr = os::get_sender_for_C_frame(&fr); }
- * loop in vmError.cpp. We need to roll our own loop.
- * This approach works for Windows AArch64 as well.
- */
-bool os::win32::platform_print_native_stack(outputStream* st, const void* context,
- char* buf, int buf_size, address& lastpc)
-{
- CONTEXT ctx;
- if (context != nullptr) {
- memcpy(&ctx, context, sizeof(ctx));
- } else {
- RtlCaptureContext(&ctx);
- }
-
- st->print_cr("Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, C=native code)");
-
- DWORD machine_type;
- STACKFRAME stk;
- memset(&stk, 0, sizeof(stk));
- stk.AddrStack.Mode = AddrModeFlat;
- stk.AddrFrame.Mode = AddrModeFlat;
- stk.AddrPC.Mode = AddrModeFlat;
-
-#if defined(_M_AMD64)
- stk.AddrStack.Offset = ctx.Rsp;
- stk.AddrFrame.Offset = ctx.Rbp;
- stk.AddrPC.Offset = ctx.Rip;
- machine_type = IMAGE_FILE_MACHINE_AMD64;
-#elif defined(_M_ARM64)
- stk.AddrStack.Offset = ctx.Sp;
- stk.AddrFrame.Offset = ctx.Fp;
- stk.AddrPC.Offset = ctx.Pc;
- machine_type = IMAGE_FILE_MACHINE_ARM64;
-#else
- #error unknown architecture
-#endif
-
- // Ensure we consider dynamically loaded DLLs
- SymbolEngine::refreshModuleList();
-
- int count = 0;
- address lastpc_internal = 0;
- while (count++ < StackPrintLimit) {
- intptr_t* sp = (intptr_t*)stk.AddrStack.Offset;
- intptr_t* fp = (intptr_t*)stk.AddrFrame.Offset; // NOT necessarily the same as ctx.Rbp!
- address pc = (address)stk.AddrPC.Offset;
-
- if (pc != nullptr) {
- if (count == 2 && lastpc_internal == pc) {
- // Skip it -- StackWalk64() may return the same PC
- // (but different SP) on the first try.
- } else {
- // Don't try to create a frame(sp, fp, pc) -- on WinX64, stk.AddrFrame
- // may not contain what Java expects, and may cause the frame() constructor
- // to crash. Let's just print out the symbolic address.
- frame::print_C_frame(st, buf, buf_size, pc);
- // print source file and line, if available
- char buf[128];
- int line_no;
- if (SymbolEngine::get_source_info(pc, buf, sizeof(buf), &line_no)) {
- st->print(" (%s:%d)", buf, line_no);
- } else {
- st->print(" (no source info available)");
- }
- st->cr();
- }
- lastpc_internal = pc;
- }
-
- PVOID p = WindowsDbgHelp::symFunctionTableAccess64(GetCurrentProcess(), stk.AddrPC.Offset);
- if (p == nullptr) {
- // StackWalk64() can't handle this PC. Calling StackWalk64 again may cause crash.
- lastpc = lastpc_internal;
- break;
- }
-
- BOOL result = WindowsDbgHelp::stackWalk64(
- machine_type, // __in DWORD MachineType,
- GetCurrentProcess(), // __in HANDLE hProcess,
- GetCurrentThread(), // __in HANDLE hThread,
- &stk, // __inout LP STACKFRAME64 StackFrame,
- &ctx); // __inout PVOID ContextRecord,
-
- if (!result) {
- break;
- }
- }
- if (count > StackPrintLimit) {
- st->print_cr("......");
- }
- st->cr();
-
- return true;
-}
diff --git a/src/hotspot/os/windows/perfMemory_windows.cpp b/src/hotspot/os/windows/perfMemory_windows.cpp
index f54a2b52ccadf..a9b2eebb7be0c 100644
--- a/src/hotspot/os/windows/perfMemory_windows.cpp
+++ b/src/hotspot/os/windows/perfMemory_windows.cpp
@@ -24,7 +24,6 @@
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
-#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "nmt/memTracker.hpp"
@@ -42,7 +41,11 @@
#include
#include
#include
-#include
+
+typedef BOOL (WINAPI *SetSecurityDescriptorControlFnPtr)(
+ IN PSECURITY_DESCRIPTOR pSecurityDescriptor,
+ IN SECURITY_DESCRIPTOR_CONTROL ControlBitsOfInterest,
+ IN SECURITY_DESCRIPTOR_CONTROL ControlBitsToSet);
// Standard Memory Implementation Details
@@ -59,7 +62,9 @@ static char* create_standard_memory(size_t size) {
// commit memory
if (!os::commit_memory(mapAddress, size, !ExecMem)) {
- log_debug(perf)("could not commit PerfData memory");
+ if (PrintMiscellaneous && Verbose) {
+ warning("Could not commit PerfData memory\n");
+ }
os::release_memory(mapAddress, size);
return nullptr;
}
@@ -85,21 +90,25 @@ static void delete_standard_memory(char* addr, size_t size) {
static void save_memory_to_file(char* addr, size_t size) {
const char* destfile = PerfMemory::get_perfdata_file_path();
- assert(destfile[0] != '\0', "invalid PerfData file path");
+ assert(destfile[0] != '\0', "invalid Perfdata file path");
int fd = ::_open(destfile, _O_BINARY|_O_CREAT|_O_WRONLY|_O_TRUNC,
_S_IREAD|_S_IWRITE);
if (fd == OS_ERR) {
- log_debug(perf)("could not create PerfData save file: %s: %s",
- destfile, os::strerror(errno));
+ if (PrintMiscellaneous && Verbose) {
+ warning("Could not create Perfdata save file: %s: %s\n",
+ destfile, os::strerror(errno));
+ }
} else {
for (size_t remaining = size; remaining > 0;) {
int nbytes = ::_write(fd, addr, (unsigned int)remaining);
if (nbytes == OS_ERR) {
- log_debug(perf)("could not write PerfData save file: %s: %s",
- destfile, os::strerror(errno));
+ if (PrintMiscellaneous && Verbose) {
+ warning("Could not write Perfdata save file: %s: %s\n",
+ destfile, os::strerror(errno));
+ }
break;
}
@@ -108,8 +117,10 @@ static void save_memory_to_file(char* addr, size_t size) {
}
int result = ::_close(fd);
- if (result == OS_ERR) {
- log_debug(perf)("could not close %s: %s", destfile, os::strerror(errno));
+ if (PrintMiscellaneous && Verbose) {
+ if (result == OS_ERR) {
+ warning("Could not close %s: %s\n", destfile, os::strerror(errno));
+ }
}
}
@@ -209,8 +220,10 @@ static bool is_directory_secure(const char* path) {
}
else {
// unexpected error, declare the path insecure
- log_debug(perf)("could not get attributes for file %s: lasterror = %d",
- path, lasterror);
+ if (PrintMiscellaneous && Verbose) {
+ warning("could not get attributes for file %s: "
+ " lasterror = %d\n", path, lasterror);
+ }
return false;
}
}
@@ -221,7 +234,9 @@ static bool is_directory_secure(const char* path) {
// as some types of reparse points might be acceptable, but it
// is probably more secure to avoid these conditions.
//
- log_debug(perf)("%s is a reparse point", path);
+ if (PrintMiscellaneous && Verbose) {
+ warning("%s is a reparse point\n", path);
+ }
return false;
}
@@ -238,8 +253,10 @@ static bool is_directory_secure(const char* path) {
// this is either a regular file or some other type of file,
// any of which are unexpected and therefore insecure.
//
- log_debug(perf)("%s is not a directory, file attributes : "
- INTPTR_FORMAT, path, fa);
+ if (PrintMiscellaneous && Verbose) {
+ warning("%s is not a directory, file attributes = "
+ INTPTR_FORMAT "\n", path, fa);
+ }
return false;
}
}
@@ -475,9 +492,11 @@ static void remove_file(const char* dirname, const char* filename) {
strcat(path, filename);
if (::unlink(path) == OS_ERR) {
- if (errno != ENOENT) {
- log_debug(perf)("could not unlink shared memory backing store file %s : %s",
- path, os::strerror(errno));
+ if (PrintMiscellaneous && Verbose) {
+ if (errno != ENOENT) {
+ warning("Could not unlink shared memory backing"
+ " store file %s : %s\n", path, os::strerror(errno));
+ }
}
}
@@ -496,16 +515,20 @@ static bool is_alive(int pid) {
HANDLE ph = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE, pid);
if (ph == nullptr) {
// the process does not exist.
- DWORD lastError = GetLastError();
- if (lastError != ERROR_INVALID_PARAMETER) {
- log_debug(perf)("OpenProcess failed: %d", lastError);
+ if (PrintMiscellaneous && Verbose) {
+ DWORD lastError = GetLastError();
+ if (lastError != ERROR_INVALID_PARAMETER) {
+        warning("OpenProcess failed: %d\n", lastError);
+ }
}
return false;
}
DWORD exit_status;
if (!GetExitCodeProcess(ph, &exit_status)) {
- log_debug(perf)("GetExitCodeProcess failed: %d", GetLastError());
+ if (PrintMiscellaneous && Verbose) {
+ warning("GetExitCodeProcess failed: %d\n", GetLastError());
+ }
CloseHandle(ph);
return false;
}
@@ -522,13 +545,17 @@ static bool is_filesystem_secure(const char* path) {
char fs_type[MAX_PATH];
if (PerfBypassFileSystemCheck) {
- log_debug(perf)("bypassing file system criteria checks for %s", path);
+ if (PrintMiscellaneous && Verbose) {
+ warning("bypassing file system criteria checks for %s\n", path);
+ }
return true;
}
char* first_colon = strchr((char *)path, ':');
if (first_colon == nullptr) {
- log_debug(perf)("expected device specifier in path: %s", path);
+ if (PrintMiscellaneous && Verbose) {
+ warning("expected device specifier in path: %s\n", path);
+ }
return false;
}
@@ -549,22 +576,29 @@ static bool is_filesystem_secure(const char* path) {
if (!GetVolumeInformation(root_path, nullptr, 0, nullptr, &maxpath,
&flags, fs_type, MAX_PATH)) {
// we can't get information about the volume, so assume unsafe.
- log_debug(perf)("could not get device information for %s: path = %s: lasterror = %d",
- root_path, path, GetLastError());
+ if (PrintMiscellaneous && Verbose) {
+ warning("could not get device information for %s: "
+ " path = %s: lasterror = %d\n",
+ root_path, path, GetLastError());
+ }
return false;
}
if ((flags & FS_PERSISTENT_ACLS) == 0) {
// file system doesn't support ACLs, declare file system unsafe
- log_debug(perf)("file system type %s on device %s does not support ACLs",
- fs_type, root_path);
+ if (PrintMiscellaneous && Verbose) {
+ warning("file system type %s on device %s does not support"
+ " ACLs\n", fs_type, root_path);
+ }
return false;
}
if ((flags & FS_VOL_IS_COMPRESSED) != 0) {
// file system is compressed, declare file system unsafe
- log_debug(perf)("file system type %s on device %s is compressed",
- fs_type, root_path);
+ if (PrintMiscellaneous && Verbose) {
+ warning("file system type %s on device %s is compressed\n",
+ fs_type, root_path);
+ }
return false;
}
@@ -670,7 +704,9 @@ static HANDLE create_file_mapping(const char* name, HANDLE fh, LPSECURITY_ATTRIB
name); /* LPCTSTR name for object */
if (fmh == nullptr) {
- log_debug(perf)("CreateFileMapping failed, lasterror = %d", GetLastError());
+ if (PrintMiscellaneous && Verbose) {
+ warning("CreateFileMapping failed, lasterror = %d\n", GetLastError());
+ }
return nullptr;
}
@@ -681,7 +717,9 @@ static HANDLE create_file_mapping(const char* name, HANDLE fh, LPSECURITY_ATTRIB
// the other processes either exit or close their mapping objects
// and/or mapped views of this mapping object.
//
- log_debug(perf)("file mapping already exists, lasterror = %d", GetLastError());
+ if (PrintMiscellaneous && Verbose) {
+ warning("file mapping already exists, lasterror = %d\n", GetLastError());
+ }
CloseHandle(fmh);
return nullptr;
@@ -745,7 +783,9 @@ static PSID get_user_sid(HANDLE hProcess) {
// get the process token
if (!OpenProcessToken(hProcess, TOKEN_READ, &hAccessToken)) {
- log_debug(perf)("OpenProcessToken failure: lasterror = %d", GetLastError());
+ if (PrintMiscellaneous && Verbose) {
+ warning("OpenProcessToken failure: lasterror = %d \n", GetLastError());
+ }
return nullptr;
}
@@ -755,8 +795,10 @@ static PSID get_user_sid(HANDLE hProcess) {
if (!GetTokenInformation(hAccessToken, TokenUser, nullptr, rsize, &rsize)) {
DWORD lasterror = GetLastError();
if (lasterror != ERROR_INSUFFICIENT_BUFFER) {
- log_debug(perf)("GetTokenInformation failure: lasterror = %d, rsize = %d",
- lasterror, rsize);
+ if (PrintMiscellaneous && Verbose) {
+ warning("GetTokenInformation failure: lasterror = %d,"
+ " rsize = %d\n", lasterror, rsize);
+ }
CloseHandle(hAccessToken);
return nullptr;
}
@@ -766,8 +808,10 @@ static PSID get_user_sid(HANDLE hProcess) {
// get the user token information
if (!GetTokenInformation(hAccessToken, TokenUser, token_buf, rsize, &rsize)) {
- log_debug(perf)("GetTokenInformation failure: lasterror = %d, rsize = %d",
- GetLastError(), rsize);
+ if (PrintMiscellaneous && Verbose) {
+ warning("GetTokenInformation failure: lasterror = %d,"
+ " rsize = %d\n", GetLastError(), rsize);
+ }
FREE_C_HEAP_ARRAY(char, token_buf);
CloseHandle(hAccessToken);
return nullptr;
@@ -777,8 +821,10 @@ static PSID get_user_sid(HANDLE hProcess) {
PSID pSID = NEW_C_HEAP_ARRAY(char, nbytes, mtInternal);
if (!CopySid(nbytes, pSID, token_buf->User.Sid)) {
- log_debug(perf)("GetTokenInformation failure: lasterror = %d, rsize = %d",
- GetLastError(), rsize);
+ if (PrintMiscellaneous && Verbose) {
+ warning("GetTokenInformation failure: lasterror = %d,"
+ " rsize = %d\n", GetLastError(), rsize);
+ }
FREE_C_HEAP_ARRAY(char, token_buf);
FREE_C_HEAP_ARRAY(char, pSID);
CloseHandle(hAccessToken);
@@ -820,8 +866,10 @@ static bool add_allow_aces(PSECURITY_DESCRIPTOR pSD,
// retrieve any existing access control list.
if (!GetSecurityDescriptorDacl(pSD, &exists, &oldACL, &isdefault)) {
- log_debug(perf)("GetSecurityDescriptor failure: lasterror = %d",
- GetLastError());
+ if (PrintMiscellaneous && Verbose) {
+ warning("GetSecurityDescriptor failure: lasterror = %d \n",
+ GetLastError());
+ }
return false;
}
@@ -838,8 +886,10 @@ static bool add_allow_aces(PSECURITY_DESCRIPTOR pSD,
if (!GetAclInformation(oldACL, &aclinfo,
sizeof(ACL_SIZE_INFORMATION),
AclSizeInformation)) {
- log_debug(perf)("GetAclInformation failure: lasterror = %d", GetLastError());
- return false;
+      if (PrintMiscellaneous && Verbose) {
+        warning("GetAclInformation failure: lasterror = %d \n", GetLastError());
+      }
+      return false;
}
} else {
aclinfo.AceCount = 0; // assume null DACL
@@ -864,7 +914,9 @@ static bool add_allow_aces(PSECURITY_DESCRIPTOR pSD,
newACL = (PACL) NEW_C_HEAP_ARRAY(char, newACLsize, mtInternal);
if (!InitializeAcl(newACL, newACLsize, ACL_REVISION)) {
- log_debug(perf)("InitializeAcl failure: lasterror = %d", GetLastError());
+ if (PrintMiscellaneous && Verbose) {
+ warning("InitializeAcl failure: lasterror = %d \n", GetLastError());
+ }
FREE_C_HEAP_ARRAY(char, newACL);
return false;
}
@@ -875,7 +927,9 @@ static bool add_allow_aces(PSECURITY_DESCRIPTOR pSD,
while (ace_index < aclinfo.AceCount) {
LPVOID ace;
if (!GetAce(oldACL, ace_index, &ace)) {
- log_debug(perf)("InitializeAcl failure: lasterror = %d", GetLastError());
+ if (PrintMiscellaneous && Verbose) {
+ warning("InitializeAcl failure: lasterror = %d \n", GetLastError());
+ }
FREE_C_HEAP_ARRAY(char, newACL);
return false;
}
@@ -900,7 +954,9 @@ static bool add_allow_aces(PSECURITY_DESCRIPTOR pSD,
if (matches == 0) {
if (!AddAce(newACL, ACL_REVISION, MAXDWORD, ace,
((PACE_HEADER)ace)->AceSize)) {
- log_debug(perf)("AddAce failure: lasterror = %d", GetLastError());
+ if (PrintMiscellaneous && Verbose) {
+ warning("AddAce failure: lasterror = %d \n", GetLastError());
+ }
FREE_C_HEAP_ARRAY(char, newACL);
return false;
}
@@ -913,8 +969,10 @@ static bool add_allow_aces(PSECURITY_DESCRIPTOR pSD,
for (int i = 0; i < ace_count; i++) {
if (!AddAccessAllowedAce(newACL, ACL_REVISION,
aces[i].mask, aces[i].pSid)) {
- log_debug(perf)("AddAccessAllowedAce failure: lasterror = %d",
- GetLastError());
+ if (PrintMiscellaneous && Verbose) {
+ warning("AddAccessAllowedAce failure: lasterror = %d \n",
+ GetLastError());
+ }
FREE_C_HEAP_ARRAY(char, newACL);
return false;
}
@@ -927,13 +985,17 @@ static bool add_allow_aces(PSECURITY_DESCRIPTOR pSD,
while (ace_index < aclinfo.AceCount) {
LPVOID ace;
if (!GetAce(oldACL, ace_index, &ace)) {
- log_debug(perf)("InitializeAcl failure: lasterror = %d", GetLastError());
+ if (PrintMiscellaneous && Verbose) {
+ warning("InitializeAcl failure: lasterror = %d \n", GetLastError());
+ }
FREE_C_HEAP_ARRAY(char, newACL);
return false;
}
if (!AddAce(newACL, ACL_REVISION, MAXDWORD, ace,
((PACE_HEADER)ace)->AceSize)) {
- log_debug(perf)("AddAce failure: lasterror = %d", GetLastError());
+ if (PrintMiscellaneous && Verbose) {
+ warning("AddAce failure: lasterror = %d \n", GetLastError());
+ }
FREE_C_HEAP_ARRAY(char, newACL);
return false;
}
@@ -943,23 +1005,39 @@ static bool add_allow_aces(PSECURITY_DESCRIPTOR pSD,
// add the new ACL to the security descriptor.
if (!SetSecurityDescriptorDacl(pSD, TRUE, newACL, FALSE)) {
- log_debug(perf)("SetSecurityDescriptorDacl failure: lasterror = %d", GetLastError());
+ if (PrintMiscellaneous && Verbose) {
+ warning("SetSecurityDescriptorDacl failure:"
+ " lasterror = %d \n", GetLastError());
+ }
FREE_C_HEAP_ARRAY(char, newACL);
return false;
}
- // We do not want to further propagate inherited DACLs, so making them
- // protected prevents that.
- if (!SetSecurityDescriptorControl(pSD, SE_DACL_PROTECTED, SE_DACL_PROTECTED)) {
- log_debug(perf)("SetSecurityDescriptorControl failure: lasterror = %d", GetLastError());
- FREE_C_HEAP_ARRAY(char, newACL);
- return false;
+ // if running on windows 2000 or later, set the automatic inheritance
+ // control flags.
+ SetSecurityDescriptorControlFnPtr _SetSecurityDescriptorControl;
+ _SetSecurityDescriptorControl = (SetSecurityDescriptorControlFnPtr)
+ GetProcAddress(GetModuleHandle(TEXT("advapi32.dll")),
+ "SetSecurityDescriptorControl");
+
+ if (_SetSecurityDescriptorControl != nullptr) {
+ // We do not want to further propagate inherited DACLs, so making them
+ // protected prevents that.
+ if (!_SetSecurityDescriptorControl(pSD, SE_DACL_PROTECTED,
+ SE_DACL_PROTECTED)) {
+ if (PrintMiscellaneous && Verbose) {
+ warning("SetSecurityDescriptorControl failure:"
+ " lasterror = %d \n", GetLastError());
+ }
+ FREE_C_HEAP_ARRAY(char, newACL);
+ return false;
+ }
}
-
- // Note, the security descriptor maintains a reference to the newACL, not
- // a copy of it. Therefore, the newACL is not freed here. It is freed when
- // the security descriptor containing its reference is freed.
- return true;
+ // Note, the security descriptor maintains a reference to the newACL, not
+ // a copy of it. Therefore, the newACL is not freed here. It is freed when
+ // the security descriptor containing its reference is freed.
+ //
+ return true;
}
// method to create a security attributes structure, which contains a
@@ -979,7 +1057,10 @@ static LPSECURITY_ATTRIBUTES make_security_attr(ace_data_t aces[], int count) {
// initialize the security descriptor
if (!InitializeSecurityDescriptor(pSD, SECURITY_DESCRIPTOR_REVISION)) {
- log_debug(perf)("InitializeSecurityDescriptor failure: lasterror = %d", GetLastError());
+ if (PrintMiscellaneous && Verbose) {
+ warning("InitializeSecurityDescriptor failure: "
+ "lasterror = %d \n", GetLastError());
+ }
free_security_desc(pSD);
return nullptr;
}
@@ -1032,7 +1113,11 @@ static LPSECURITY_ATTRIBUTES make_user_everybody_admin_security_attr(
SECURITY_BUILTIN_DOMAIN_RID,
DOMAIN_ALIAS_RID_ADMINS,
0, 0, 0, 0, 0, 0, &administratorsSid)) {
- log_debug(perf)("AllocateAndInitializeSid failure: lasterror = %d", GetLastError());
+
+ if (PrintMiscellaneous && Verbose) {
+ warning("AllocateAndInitializeSid failure: "
+ "lasterror = %d \n", GetLastError());
+ }
return nullptr;
}
@@ -1046,7 +1131,11 @@ static LPSECURITY_ATTRIBUTES make_user_everybody_admin_security_attr(
if (!AllocateAndInitializeSid( &SIDAuthEverybody, 1, SECURITY_WORLD_RID,
0, 0, 0, 0, 0, 0, 0, &everybodySid)) {
- log_debug(perf)("AllocateAndInitializeSid failure: lasterror = %d", GetLastError());
+
+ if (PrintMiscellaneous && Verbose) {
+ warning("AllocateAndInitializeSid failure: "
+ "lasterror = %d \n", GetLastError());
+ }
return nullptr;
}
@@ -1147,7 +1236,9 @@ static bool make_user_tmp_dir(const char* dirname) {
//
if (!is_directory_secure(dirname)) {
// directory is not secure
- log_debug(perf)("%s directory is insecure", dirname);
+ if (PrintMiscellaneous && Verbose) {
+ warning("%s directory is insecure\n", dirname);
+ }
free_security_attr(pDirSA);
return false;
}
@@ -1158,11 +1249,16 @@ static bool make_user_tmp_dir(const char* dirname) {
// DACLs might fix the corrupted the DACLs.
SECURITY_INFORMATION secInfo = DACL_SECURITY_INFORMATION;
if (!SetFileSecurity(dirname, secInfo, pDirSA->lpSecurityDescriptor)) {
- lasterror = GetLastError();
- log_debug(perf)("SetFileSecurity failed for %s directory. lasterror = %d", dirname, lasterror);
+ if (PrintMiscellaneous && Verbose) {
+ lasterror = GetLastError();
+ warning("SetFileSecurity failed for %s directory. lasterror %d \n",
+ dirname, lasterror);
+ }
}
} else {
- log_debug(perf)("CreateDirectory failed: %d", GetLastError());
+ if (PrintMiscellaneous && Verbose) {
+ warning("CreateDirectory failed: %d\n", GetLastError());
+ }
free_security_attr(pDirSA);
return false;
}
@@ -1229,7 +1325,9 @@ static HANDLE create_sharedmem_resources(const char* dirname, const char* filena
if (fh == INVALID_HANDLE_VALUE) {
DWORD lasterror = GetLastError();
- log_debug(perf)("could not create file %s: %d", filename, lasterror);
+ if (PrintMiscellaneous && Verbose) {
+ warning("could not create file %s: %d\n", filename, lasterror);
+ }
free_security_attr(lpSmoSA);
return nullptr;
}
@@ -1255,8 +1353,10 @@ static HANDLE create_sharedmem_resources(const char* dirname, const char* filena
struct stat statbuf;
int ret_code = ::stat(filename, &statbuf);
if (ret_code == OS_ERR) {
- log_debug(perf)("could not get status information from file %s: %s",
- filename, os::strerror(errno));
+ if (PrintMiscellaneous && Verbose) {
+ warning("Could not get status information from file %s: %s\n",
+ filename, os::strerror(errno));
+ }
CloseHandle(fmh);
CloseHandle(fh);
fh = nullptr;
@@ -1269,7 +1369,9 @@ static HANDLE create_sharedmem_resources(const char* dirname, const char* filena
// call it when we observe the size as zero (0).
if (statbuf.st_size == 0 && FlushFileBuffers(fh) != TRUE) {
DWORD lasterror = GetLastError();
- log_debug(perf)("could not flush file %s: %d", filename, lasterror);
+ if (PrintMiscellaneous && Verbose) {
+ warning("could not flush file %s: %d\n", filename, lasterror);
+ }
CloseHandle(fmh);
CloseHandle(fh);
fh = nullptr;
@@ -1300,8 +1402,10 @@ static HANDLE open_sharedmem_object(const char* objectname, DWORD ofm_access, TR
if (fmh == nullptr) {
DWORD lasterror = GetLastError();
- log_debug(perf)("OpenFileMapping failed for shared memory object %s:"
- " lasterror = %d", objectname, lasterror);
+ if (PrintMiscellaneous && Verbose) {
+ warning("OpenFileMapping failed for shared memory object %s:"
+ " lasterror = %d\n", objectname, lasterror);
+ }
THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
err_msg("Could not open PerfMemory, error %d", lasterror),
INVALID_HANDLE_VALUE);
@@ -1381,7 +1485,9 @@ static char* mapping_create_shared(size_t size) {
(DWORD)size); /* DWORD Number of bytes to map */
if (mapAddress == nullptr) {
- log_debug(perf)("MapViewOfFile failed, lasterror = %d", GetLastError());
+ if (PrintMiscellaneous && Verbose) {
+ warning("MapViewOfFile failed, lasterror = %d\n", GetLastError());
+ }
CloseHandle(sharedmem_fileMapHandle);
sharedmem_fileMapHandle = nullptr;
return nullptr;
@@ -1445,14 +1551,20 @@ static size_t sharedmem_filesize(const char* filename, TRAPS) {
// inconsistencies
//
if (::stat(filename, &statbuf) == OS_ERR) {
- log_debug(perf)("stat %s failed: %s", filename, os::strerror(errno));
+ if (PrintMiscellaneous && Verbose) {
+ warning("stat %s failed: %s\n", filename, os::strerror(errno));
+ }
THROW_MSG_0(vmSymbols::java_io_IOException(),
"Could not determine PerfMemory size");
}
if ((statbuf.st_size == 0) || (statbuf.st_size % os::vm_page_size() != 0)) {
- log_debug(perf)("unexpected file size: size = %zu", statbuf.st_size);
- THROW_MSG_0(vmSymbols::java_io_IOException(), "Invalid PerfMemory size");
+ if (PrintMiscellaneous && Verbose) {
+ warning("unexpected file size: size = %zu\n",
+ statbuf.st_size);
+ }
+ THROW_MSG_0(vmSymbols::java_io_IOException(),
+ "Invalid PerfMemory size");
}
return statbuf.st_size;
@@ -1525,7 +1637,9 @@ static void open_file_mapping(int vmid, char** addrp, size_t* sizep, TRAPS) {
size); /* DWORD Number of bytes to map */
if (mapAddress == nullptr) {
- log_debug(perf)("MapViewOfFile failed, lasterror = %d", GetLastError());
+ if (PrintMiscellaneous && Verbose) {
+ warning("MapViewOfFile failed, lasterror = %d\n", GetLastError());
+ }
CloseHandle(fmh);
THROW_MSG(vmSymbols::java_lang_OutOfMemoryError(),
"Could not map PerfMemory");
@@ -1594,7 +1708,9 @@ void PerfMemory::create_memory_region(size_t size) {
// creation of the shared memory region failed, attempt
// to create a contiguous, non-shared memory region instead.
//
- log_debug(perf)("Reverting to non-shared PerfMemory region.");
+ if (PrintMiscellaneous && Verbose) {
+ warning("Reverting to non-shared PerfMemory region.\n");
+ }
FLAG_SET_ERGO(PerfDisableSharedMem, true);
_start = create_standard_memory(size);
}
diff --git a/src/hotspot/os_cpu/linux_riscv/riscv_hwprobe.cpp b/src/hotspot/os_cpu/linux_riscv/riscv_hwprobe.cpp
index ec756c44fe6f4..017d8a43666ab 100644
--- a/src/hotspot/os_cpu/linux_riscv/riscv_hwprobe.cpp
+++ b/src/hotspot/os_cpu/linux_riscv/riscv_hwprobe.cpp
@@ -167,19 +167,26 @@ static bool is_set(int64_t key, uint64_t value_mask) {
void RiscvHwprobe::add_features_from_query_result() {
assert(rw_hwprobe_completed, "hwprobe not init yet.");
- // ====== extensions ======
- //
+ if (is_valid(RISCV_HWPROBE_KEY_MVENDORID)) {
+ VM_Version::mvendorid.enable_feature(query[RISCV_HWPROBE_KEY_MVENDORID].value);
+ }
+ if (is_valid(RISCV_HWPROBE_KEY_MARCHID)) {
+ VM_Version::marchid.enable_feature(query[RISCV_HWPROBE_KEY_MARCHID].value);
+ }
+ if (is_valid(RISCV_HWPROBE_KEY_MIMPID)) {
+ VM_Version::mimpid.enable_feature(query[RISCV_HWPROBE_KEY_MIMPID].value);
+ }
if (is_set(RISCV_HWPROBE_KEY_BASE_BEHAVIOR, RISCV_HWPROBE_BASE_BEHAVIOR_IMA)) {
- VM_Version::ext_a.enable_feature();
VM_Version::ext_i.enable_feature();
VM_Version::ext_m.enable_feature();
- }
- if (is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_IMA_C)) {
- VM_Version::ext_c.enable_feature();
+ VM_Version::ext_a.enable_feature();
}
if (is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_IMA_FD)) {
- VM_Version::ext_d.enable_feature();
VM_Version::ext_f.enable_feature();
+ VM_Version::ext_d.enable_feature();
+ }
+ if (is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_IMA_C)) {
+ VM_Version::ext_c.enable_feature();
}
if (is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_IMA_V)) {
// Linux signal return bug when using vector with vlen > 128b in pre 6.8.5.
@@ -195,29 +202,21 @@ void RiscvHwprobe::add_features_from_query_result() {
VM_Version::ext_v.enable_feature();
}
}
-
-#ifndef PRODUCT
- if (is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_EXT_ZACAS)) {
- VM_Version::ext_Zacas.enable_feature();
- }
-#endif
if (is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_EXT_ZBA)) {
VM_Version::ext_Zba.enable_feature();
}
if (is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_EXT_ZBB)) {
VM_Version::ext_Zbb.enable_feature();
}
-#ifndef PRODUCT
- if (is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_EXT_ZBKB)) {
- VM_Version::ext_Zbkb.enable_feature();
- }
-#endif
if (is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_EXT_ZBS)) {
VM_Version::ext_Zbs.enable_feature();
}
#ifndef PRODUCT
- if (is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_EXT_ZFA)) {
- VM_Version::ext_Zfa.enable_feature();
+ if (is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_EXT_ZICBOZ)) {
+ VM_Version::ext_Zicboz.enable_feature();
+ }
+ if (is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_EXT_ZBKB)) {
+ VM_Version::ext_Zbkb.enable_feature();
}
#endif
if (is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_EXT_ZFH)) {
@@ -227,28 +226,15 @@ void RiscvHwprobe::add_features_from_query_result() {
VM_Version::ext_Zfhmin.enable_feature();
}
#ifndef PRODUCT
- if (is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_EXT_ZICBOZ)) {
- VM_Version::ext_Zicboz.enable_feature();
- }
- // Currently tests shows that cmove using Zicond instructions will bring
- // performance regression, but to get a test coverage all the time, will
- // still prefer to enabling it in debug version.
- if (is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_EXT_ZICOND)) {
- VM_Version::ext_Zicond.enable_feature();
- }
- if (is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_EXT_ZTSO)) {
- VM_Version::ext_Ztso.enable_feature();
- }
if (is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_EXT_ZVBB)) {
VM_Version::ext_Zvbb.enable_feature();
}
+#endif
+#ifndef PRODUCT
if (is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_EXT_ZVBC)) {
VM_Version::ext_Zvbc.enable_feature();
}
#endif
- if (is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_EXT_ZVFH)) {
- VM_Version::ext_Zvfh.enable_feature();
- }
#ifndef PRODUCT
if (is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_EXT_ZVKNED) &&
is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_EXT_ZVKNHB) &&
@@ -257,18 +243,30 @@ void RiscvHwprobe::add_features_from_query_result() {
VM_Version::ext_Zvkn.enable_feature();
}
#endif
-
- // ====== non-extensions ======
- //
- if (is_valid(RISCV_HWPROBE_KEY_MARCHID)) {
- VM_Version::marchid.enable_feature(query[RISCV_HWPROBE_KEY_MARCHID].value);
+ if (is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_EXT_ZVFH)) {
+ VM_Version::ext_Zvfh.enable_feature();
}
- if (is_valid(RISCV_HWPROBE_KEY_MIMPID)) {
- VM_Version::mimpid.enable_feature(query[RISCV_HWPROBE_KEY_MIMPID].value);
+#ifndef PRODUCT
+ if (is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_EXT_ZFA)) {
+ VM_Version::ext_Zfa.enable_feature();
}
- if (is_valid(RISCV_HWPROBE_KEY_MVENDORID)) {
- VM_Version::mvendorid.enable_feature(query[RISCV_HWPROBE_KEY_MVENDORID].value);
+#endif
+#ifndef PRODUCT
+ if (is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_EXT_ZTSO)) {
+ VM_Version::ext_Ztso.enable_feature();
+ }
+#endif
+#ifndef PRODUCT
+ if (is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_EXT_ZACAS)) {
+ VM_Version::ext_Zacas.enable_feature();
+ }
+ // Currently, tests show that cmove using Zicond instructions brings a
+ // performance regression, but to keep test coverage at all times, we
+ // still prefer to enable it in debug builds.
+ if (is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_EXT_ZICOND)) {
+ VM_Version::ext_Zicond.enable_feature();
}
+#endif
// RISCV_HWPROBE_KEY_CPUPERF_0 is deprecated and returns similar values
// to RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF. Keep it there for backward
// compatibility with old kernels.
@@ -279,6 +277,7 @@ void RiscvHwprobe::add_features_from_query_result() {
VM_Version::unaligned_scalar.enable_feature(
query[RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF].value);
}
+
if (is_valid(RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF)) {
VM_Version::unaligned_vector.enable_feature(
query[RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF].value);
diff --git a/src/hotspot/os_cpu/linux_riscv/vm_version_linux_riscv.cpp b/src/hotspot/os_cpu/linux_riscv/vm_version_linux_riscv.cpp
index 0799de014a976..e414a3889c231 100644
--- a/src/hotspot/os_cpu/linux_riscv/vm_version_linux_riscv.cpp
+++ b/src/hotspot/os_cpu/linux_riscv/vm_version_linux_riscv.cpp
@@ -103,14 +103,6 @@ uint32_t VM_Version::cpu_vector_length() {
return (uint32_t)read_csr(CSR_VLENB);
}
-void VM_Version::RVExtFeatureValue::log_enabled() {
- log_debug(os, cpu)("Enabled RV64 feature \"%s\"", pretty());
-}
-
-void VM_Version::RVNonExtFeatureValue::log_enabled() {
- log_debug(os, cpu)("Enabled RV64 feature \"%s\" (%ld)", pretty(), value());
-}
-
void VM_Version::setup_cpu_available_features() {
assert(ext_i.feature_bit() == HWCAP_ISA_I, "Bit for I must follow Linux HWCAP");
@@ -152,8 +144,9 @@ void VM_Version::setup_cpu_available_features() {
continue;
}
- _feature_list[i]->log_enabled();
-
+ log_debug(os, cpu)("Enabled RV64 feature \"%s\" (%ld)",
+ _feature_list[i]->pretty(),
+ _feature_list[i]->value());
// The feature string
if (_feature_list[i]->feature_string()) {
const char* tmp = _feature_list[i]->pretty();
diff --git a/src/hotspot/os_cpu/windows_aarch64/os_windows_aarch64.inline.hpp b/src/hotspot/os_cpu/windows_aarch64/os_windows_aarch64.inline.hpp
index 568b6e3938ee7..794aa12155b17 100644
--- a/src/hotspot/os_cpu/windows_aarch64/os_windows_aarch64.inline.hpp
+++ b/src/hotspot/os_cpu/windows_aarch64/os_windows_aarch64.inline.hpp
@@ -26,17 +26,10 @@
#define OS_CPU_WINDOWS_AARCH64_OS_WINDOWS_AARCH64_INLINE_HPP
#include "runtime/os.hpp"
-#include "os_windows.hpp"
inline bool os::register_code_area(char *low, char *high) {
// Using Vectored Exception Handling
return true;
}
-#define HAVE_PLATFORM_PRINT_NATIVE_STACK 1
-inline bool os::platform_print_native_stack(outputStream* st, const void* context,
- char *buf, int buf_size, address& lastpc) {
- return os::win32::platform_print_native_stack(st, context, buf, buf_size, lastpc);
-}
-
#endif // OS_CPU_WINDOWS_AARCH64_OS_WINDOWS_AARCH64_INLINE_HPP
diff --git a/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp b/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp
index 53f9647983248..c188919595c9d 100644
--- a/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp
+++ b/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp
@@ -197,6 +197,98 @@ bool handle_FLT_exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
}
#endif
+#ifdef HAVE_PLATFORM_PRINT_NATIVE_STACK
+/*
+ * Windows/x64 does not use stack frames the way expected by Java:
+ * [1] in most cases, there is no frame pointer. All locals are addressed via RSP
+ * [2] in rare cases, when alloca() is used, a frame pointer is used, but this may
+ * not be RBP.
+ * See http://msdn.microsoft.com/en-us/library/ew5tede7.aspx
+ *
+ * So it's not possible to print the native stack using the
+ * while (...) {... fr = os::get_sender_for_C_frame(&fr); }
+ * loop in vmError.cpp. We need to roll our own loop.
+ */
+bool os::win32::platform_print_native_stack(outputStream* st, const void* context,
+ char *buf, int buf_size, address& lastpc)
+{
+ CONTEXT ctx;
+ if (context != nullptr) {
+ memcpy(&ctx, context, sizeof(ctx));
+ } else {
+ RtlCaptureContext(&ctx);
+ }
+
+ st->print_cr("Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, C=native code)");
+
+ STACKFRAME stk;
+ memset(&stk, 0, sizeof(stk));
+ stk.AddrStack.Offset = ctx.Rsp;
+ stk.AddrStack.Mode = AddrModeFlat;
+ stk.AddrFrame.Offset = ctx.Rbp;
+ stk.AddrFrame.Mode = AddrModeFlat;
+ stk.AddrPC.Offset = ctx.Rip;
+ stk.AddrPC.Mode = AddrModeFlat;
+
+ // Ensure we consider dynamically loaded dll's
+ SymbolEngine::refreshModuleList();
+
+ int count = 0;
+ address lastpc_internal = 0;
+ while (count++ < StackPrintLimit) {
+ intptr_t* sp = (intptr_t*)stk.AddrStack.Offset;
+ intptr_t* fp = (intptr_t*)stk.AddrFrame.Offset; // NOT necessarily the same as ctx.Rbp!
+ address pc = (address)stk.AddrPC.Offset;
+
+ if (pc != nullptr) {
+ if (count == 2 && lastpc_internal == pc) {
+ // Skip it -- StackWalk64() may return the same PC
+ // (but different SP) on the first try.
+ } else {
+ // Don't try to create a frame(sp, fp, pc) -- on WinX64, stk.AddrFrame
+ // may not contain what Java expects, and may cause the frame() constructor
+ // to crash. Let's just print out the symbolic address.
+ frame::print_C_frame(st, buf, buf_size, pc);
+ // print source file and line, if available
+ char buf[128];
+ int line_no;
+ if (SymbolEngine::get_source_info(pc, buf, sizeof(buf), &line_no)) {
+ st->print(" (%s:%d)", buf, line_no);
+ } else {
+ st->print(" (no source info available)");
+ }
+ st->cr();
+ }
+ lastpc_internal = pc;
+ }
+
+ PVOID p = WindowsDbgHelp::symFunctionTableAccess64(GetCurrentProcess(), stk.AddrPC.Offset);
+ if (!p) {
+ // StackWalk64() can't handle this PC. Calling StackWalk64 again may cause crash.
+ lastpc = lastpc_internal;
+ break;
+ }
+
+ BOOL result = WindowsDbgHelp::stackWalk64(
+ IMAGE_FILE_MACHINE_AMD64, // __in DWORD MachineType,
+ GetCurrentProcess(), // __in HANDLE hProcess,
+ GetCurrentThread(), // __in HANDLE hThread,
+ &stk, // __inout LP STACKFRAME64 StackFrame,
+ &ctx); // __inout PVOID ContextRecord,
+
+ if (!result) {
+ break;
+ }
+ }
+ if (count > StackPrintLimit) {
+ st->print_cr("......");
+ }
+ st->cr();
+
+ return true;
+}
+#endif // HAVE_PLATFORM_PRINT_NATIVE_STACK
+
address os::fetch_frame_from_context(const void* ucVoid,
intptr_t** ret_sp, intptr_t** ret_fp) {
diff --git a/src/hotspot/share/adlc/archDesc.cpp b/src/hotspot/share/adlc/archDesc.cpp
index 2461903ea268f..263752c521d6f 100644
--- a/src/hotspot/share/adlc/archDesc.cpp
+++ b/src/hotspot/share/adlc/archDesc.cpp
@@ -899,12 +899,10 @@ int ArchDesc::emit_msg(int quiet, int flag, int line, const char *fmt,
// Construct the name of the register mask.
static const char *getRegMask(const char *reg_class_name) {
- if (reg_class_name == nullptr) {
- return "RegMask::EMPTY";
- }
+ if( reg_class_name == nullptr ) return "RegMask::Empty";
if (strcmp(reg_class_name,"Universe")==0) {
- return "RegMask::EMPTY";
+ return "RegMask::Empty";
} else if (strcmp(reg_class_name,"stack_slots")==0) {
return "(Compile::current()->FIRST_STACK_mask())";
} else if (strcmp(reg_class_name, "dynamic")==0) {
@@ -922,7 +920,7 @@ static const char *getRegMask(const char *reg_class_name) {
// Convert a register class name to its register mask.
const char *ArchDesc::reg_class_to_reg_mask(const char *rc_name) {
- const char* reg_mask = "RegMask::EMPTY";
+ const char *reg_mask = "RegMask::Empty";
if( _register ) {
RegClass *reg_class = _register->getRegClass(rc_name);
@@ -941,7 +939,7 @@ const char *ArchDesc::reg_class_to_reg_mask(const char *rc_name) {
// Obtain the name of the RegMask for an OperandForm
const char *ArchDesc::reg_mask(OperandForm &opForm) {
- const char* regMask = "RegMask::EMPTY";
+ const char *regMask = "RegMask::Empty";
// Check constraints on result's register class
const char *result_class = opForm.constrained_reg_class();
@@ -970,9 +968,9 @@ const char *ArchDesc::reg_mask(InstructForm &inForm) {
abort();
}
- // Instructions producing 'Universe' use RegMask::EMPTY
+ // Instructions producing 'Universe' use RegMask::Empty
if (strcmp(result,"Universe") == 0) {
- return "RegMask::EMPTY";
+ return "RegMask::Empty";
}
// Lookup this result operand and get its register class
diff --git a/src/hotspot/share/adlc/formssel.cpp b/src/hotspot/share/adlc/formssel.cpp
index 182587d2f2fde..b938d5b75608d 100644
--- a/src/hotspot/share/adlc/formssel.cpp
+++ b/src/hotspot/share/adlc/formssel.cpp
@@ -2422,7 +2422,7 @@ const char *OperandForm::constrained_reg_class() const {
// Return the register class associated with 'leaf'.
const char *OperandForm::in_reg_class(uint leaf, FormDict &globals) {
- const char* reg_class = nullptr; // "RegMask::EMPTY";
+ const char *reg_class = nullptr; // "RegMask::Empty";
if((_matrule == nullptr) || (_matrule->is_chain_rule(globals))) {
reg_class = constrained_reg_class();
diff --git a/src/hotspot/share/adlc/output_c.cpp b/src/hotspot/share/adlc/output_c.cpp
index 110db7f0e9810..caf2c9952a681 100644
--- a/src/hotspot/share/adlc/output_c.cpp
+++ b/src/hotspot/share/adlc/output_c.cpp
@@ -2837,7 +2837,7 @@ static void defineIn_RegMask(FILE *fp, FormDict &globals, OperandForm &oper) {
if (strcmp(first_reg_class, "stack_slots") == 0) {
fprintf(fp," return &(Compile::current()->FIRST_STACK_mask());\n");
} else if (strcmp(first_reg_class, "dynamic") == 0) {
- fprintf(fp, " return &RegMask::EMPTY;\n");
+ fprintf(fp," return &RegMask::Empty;\n");
} else {
const char* first_reg_class_to_upper = toUpper(first_reg_class);
fprintf(fp," return &%s_mask();\n", first_reg_class_to_upper);
diff --git a/src/hotspot/share/cds/aotConstantPoolResolver.cpp b/src/hotspot/share/cds/aotConstantPoolResolver.cpp
index 8b4e60dece2d8..6cc3a81c2ae1a 100644
--- a/src/hotspot/share/cds/aotConstantPoolResolver.cpp
+++ b/src/hotspot/share/cds/aotConstantPoolResolver.cpp
@@ -225,38 +225,7 @@ void AOTConstantPoolResolver::preresolve_field_and_method_cp_entries(JavaThread*
Bytecodes::Code raw_bc = bcs.raw_code();
switch (raw_bc) {
case Bytecodes::_getfield:
- // no-fast bytecode
- case Bytecodes::_nofast_getfield:
- // fast bytecodes
- case Bytecodes::_fast_agetfield:
- case Bytecodes::_fast_bgetfield:
- case Bytecodes::_fast_cgetfield:
- case Bytecodes::_fast_dgetfield:
- case Bytecodes::_fast_fgetfield:
- case Bytecodes::_fast_igetfield:
- case Bytecodes::_fast_lgetfield:
- case Bytecodes::_fast_sgetfield:
- raw_bc = Bytecodes::_getfield;
- maybe_resolve_fmi_ref(ik, m, raw_bc, bcs.get_index_u2(), preresolve_list, THREAD);
- if (HAS_PENDING_EXCEPTION) {
- CLEAR_PENDING_EXCEPTION; // just ignore
- }
- break;
-
case Bytecodes::_putfield:
- // no-fast bytecode
- case Bytecodes::_nofast_putfield:
- // fast bytecodes
- case Bytecodes::_fast_aputfield:
- case Bytecodes::_fast_bputfield:
- case Bytecodes::_fast_zputfield:
- case Bytecodes::_fast_cputfield:
- case Bytecodes::_fast_dputfield:
- case Bytecodes::_fast_fputfield:
- case Bytecodes::_fast_iputfield:
- case Bytecodes::_fast_lputfield:
- case Bytecodes::_fast_sputfield:
- raw_bc = Bytecodes::_putfield;
maybe_resolve_fmi_ref(ik, m, raw_bc, bcs.get_index_u2(), preresolve_list, THREAD);
if (HAS_PENDING_EXCEPTION) {
CLEAR_PENDING_EXCEPTION; // just ignore
diff --git a/src/hotspot/share/cds/aotLinkedClassBulkLoader.cpp b/src/hotspot/share/cds/aotLinkedClassBulkLoader.cpp
index 3653f9d518c3f..e7145b25457c0 100644
--- a/src/hotspot/share/cds/aotLinkedClassBulkLoader.cpp
+++ b/src/hotspot/share/cds/aotLinkedClassBulkLoader.cpp
@@ -42,8 +42,6 @@
#include "oops/trainingData.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
-#include "runtime/serviceThread.hpp"
-#include "utilities/growableArray.hpp"
void AOTLinkedClassBulkLoader::serialize(SerializeClosure* soc) {
AOTLinkedClassTable::get()->serialize(soc);
@@ -55,8 +53,6 @@ void AOTLinkedClassBulkLoader::serialize(SerializeClosure* soc) {
// step in restoring the JVM's state from the snapshot recorded in the AOT cache: other AOT optimizations
// such as AOT compiled methods can make direct references to the preloaded classes, knowing that
// these classes are guaranteed to be in at least the "loaded" state.
-//
-// Note: we can't link the classes yet because SharedRuntime is not yet ready to generate adapters.
void AOTLinkedClassBulkLoader::preload_classes(JavaThread* current) {
preload_classes_impl(current);
if (current->has_pending_exception()) {
@@ -116,44 +112,6 @@ void AOTLinkedClassBulkLoader::preload_classes_in_table(Array* c
}
}
-// Some cached heap objects may hold references to methods in aot-linked
-// classes (via MemberName). We need to make sure all classes are
-// linked before executing any bytecode.
-void AOTLinkedClassBulkLoader::link_classes(JavaThread* current) {
- link_classes_impl(current);
- if (current->has_pending_exception()) {
- exit_on_exception(current);
- }
-}
-
-void AOTLinkedClassBulkLoader::link_classes_impl(TRAPS) {
- precond(CDSConfig::is_using_aot_linked_classes());
-
- AOTLinkedClassTable* table = AOTLinkedClassTable::get();
-
- link_classes_in_table(table->boot1(), CHECK);
- link_classes_in_table(table->boot2(), CHECK);
- link_classes_in_table(table->platform(), CHECK);
- link_classes_in_table(table->app(), CHECK);
-}
-
-void AOTLinkedClassBulkLoader::link_classes_in_table(Array* classes, TRAPS) {
- if (classes != nullptr) {
- for (int i = 0; i < classes->length(); i++) {
- // NOTE: CDSConfig::is_preserving_verification_constraints() is required
- // when storing ik in the AOT cache. This means we don't have to verify
- // ik at all.
- //
- // Without is_preserving_verification_constraints(), ik->link_class() may cause
- // class loading, which may result in invocation of ClassLoader::loadClass() calls,
- // which CANNOT happen because we are not ready to execute any Java byecodes yet
- // at this point.
- InstanceKlass* ik = classes->at(i);
- ik->link_class(CHECK);
- }
- }
-}
-
#ifdef ASSERT
void AOTLinkedClassBulkLoader::validate_module_of_preloaded_classes() {
oop javabase_module_oop = ModuleEntryTable::javabase_moduleEntry()->module_oop();
@@ -215,21 +173,25 @@ void AOTLinkedClassBulkLoader::validate_module(Klass* k, const char* category_na
}
#endif
-void AOTLinkedClassBulkLoader::init_javabase_classes(JavaThread* current) {
- init_classes_for_loader(Handle(), AOTLinkedClassTable::get()->boot1(), current);
+// Link all java.base classes in the AOTLinkedClassTable. Of those classes,
+// move the ones that have been AOT-initialized to the "initialized" state.
+void AOTLinkedClassBulkLoader::link_or_init_javabase_classes(JavaThread* current) {
+ link_or_init_classes_for_loader(Handle(), AOTLinkedClassTable::get()->boot1(), current);
if (current->has_pending_exception()) {
exit_on_exception(current);
}
}
-void AOTLinkedClassBulkLoader::init_non_javabase_classes(JavaThread* current) {
- init_non_javabase_classes_impl(current);
+// Do the same thing as link_or_init_javabase_classes(), but for the classes that are not
+// in the java.base module.
+void AOTLinkedClassBulkLoader::link_or_init_non_javabase_classes(JavaThread* current) {
+ link_or_init_non_javabase_classes_impl(current);
if (current->has_pending_exception()) {
exit_on_exception(current);
}
}
-void AOTLinkedClassBulkLoader::init_non_javabase_classes_impl(TRAPS) {
+void AOTLinkedClassBulkLoader::link_or_init_non_javabase_classes_impl(TRAPS) {
assert(CDSConfig::is_using_aot_linked_classes(), "sanity");
DEBUG_ONLY(validate_module_of_preloaded_classes());
@@ -246,9 +208,9 @@ void AOTLinkedClassBulkLoader::init_non_javabase_classes_impl(TRAPS) {
assert(h_system_loader() != nullptr, "must be");
AOTLinkedClassTable* table = AOTLinkedClassTable::get();
- init_classes_for_loader(Handle(), table->boot2(), CHECK);
- init_classes_for_loader(h_platform_loader, table->platform(), CHECK);
- init_classes_for_loader(h_system_loader, table->app(), CHECK);
+ link_or_init_classes_for_loader(Handle(), table->boot2(), CHECK);
+ link_or_init_classes_for_loader(h_platform_loader, table->platform(), CHECK);
+ link_or_init_classes_for_loader(h_system_loader, table->app(), CHECK);
if (Universe::is_fully_initialized() && VerifyDuringStartup) {
// Make sure we're still in a clean state.
@@ -280,9 +242,8 @@ void AOTLinkedClassBulkLoader::exit_on_exception(JavaThread* current) {
log_error(aot)("Out of memory. Please run with a larger Java heap, current MaxHeapSize = "
"%zuM", MaxHeapSize/M);
} else {
- oop message = java_lang_Throwable::message(current->pending_exception());
log_error(aot)("%s: %s", current->pending_exception()->klass()->external_name(),
- message == nullptr ? "(no message)" : java_lang_String::as_utf8_string(message));
+ java_lang_String::as_utf8_string(java_lang_Throwable::message(current->pending_exception())));
}
vm_exit_during_initialization("Unexpected exception when loading aot-linked classes.");
}
@@ -328,13 +289,23 @@ void AOTLinkedClassBulkLoader::initiate_loading(JavaThread* current, const char*
// - classes that were AOT-initialized by AOTClassInitializer
// - the classes of all objects that are reachable from the archived mirrors of
// the AOT-linked classes for .
-void AOTLinkedClassBulkLoader::init_classes_for_loader(Handle class_loader, Array* classes, TRAPS) {
+void AOTLinkedClassBulkLoader::link_or_init_classes_for_loader(Handle class_loader, Array* classes, TRAPS) {
if (classes != nullptr) {
for (int i = 0; i < classes->length(); i++) {
InstanceKlass* ik = classes->at(i);
- assert(ik->class_loader_data() != nullptr, "must be");
+ if (ik->class_loader_data() == nullptr) {
+ // This class is not yet loaded. We will initialize it in a later phase.
+ // For example, we have loaded only AOTLinkedClassCategory::BOOT1 classes
+ // but ik is part of AOTLinkedClassCategory::BOOT2.
+ continue;
+ }
if (ik->has_aot_initialized_mirror()) {
ik->initialize_with_aot_initialized_mirror(CHECK);
+ } else {
+ // Some cached heap objects may hold references to methods in aot-linked
+ // classes (via MemberName). We need to make sure all classes are
+ // linked to allow such MemberNames to be invoked.
+ ik->link_class(CHECK);
}
}
}
diff --git a/src/hotspot/share/cds/aotLinkedClassBulkLoader.hpp b/src/hotspot/share/cds/aotLinkedClassBulkLoader.hpp
index 31fdac386fe51..77400a861040a 100644
--- a/src/hotspot/share/cds/aotLinkedClassBulkLoader.hpp
+++ b/src/hotspot/share/cds/aotLinkedClassBulkLoader.hpp
@@ -52,11 +52,10 @@ class AOTLinkedClassBulkLoader : AllStatic {
static void preload_classes_impl(TRAPS);
static void preload_classes_in_table(Array* classes,
const char* category_name, Handle loader, TRAPS);
- static void initiate_loading(JavaThread* current, const char* category, Handle initiating_loader, Array* classes);
- static void link_classes_impl(TRAPS);
- static void link_classes_in_table(Array* classes, TRAPS);
- static void init_non_javabase_classes_impl(TRAPS);
- static void init_classes_for_loader(Handle class_loader, Array* classes, TRAPS);
+ static void initiate_loading(JavaThread* current, const char* category, Handle initiating_loader,
+ Array* classes);
+ static void link_or_init_non_javabase_classes_impl(TRAPS);
+ static void link_or_init_classes_for_loader(Handle class_loader, Array* classes, TRAPS);
static void replay_training_at_init(Array* classes, TRAPS) NOT_CDS_RETURN;
#ifdef ASSERT
@@ -68,10 +67,9 @@ class AOTLinkedClassBulkLoader : AllStatic {
public:
static void serialize(SerializeClosure* soc) NOT_CDS_RETURN;
- static void preload_classes(JavaThread* current) NOT_CDS_RETURN;
- static void link_classes(JavaThread* current) NOT_CDS_RETURN;
- static void init_javabase_classes(JavaThread* current) NOT_CDS_RETURN;
- static void init_non_javabase_classes(JavaThread* current) NOT_CDS_RETURN;
+ static void preload_classes(JavaThread* current);
+ static void link_or_init_javabase_classes(JavaThread* current) NOT_CDS_RETURN;
+ static void link_or_init_non_javabase_classes(JavaThread* current) NOT_CDS_RETURN;
static void exit_on_exception(JavaThread* current);
static void replay_training_at_init_for_preloaded_classes(TRAPS) NOT_CDS_RETURN;
diff --git a/src/hotspot/share/cds/aotMapLogger.cpp b/src/hotspot/share/cds/aotMapLogger.cpp
index 151c15048c28e..b0e410b5cf1bb 100644
--- a/src/hotspot/share/cds/aotMapLogger.cpp
+++ b/src/hotspot/share/cds/aotMapLogger.cpp
@@ -135,14 +135,12 @@ class AOTMapLogger::RuntimeGatherArchivedMetaspaceObjs : public UniqueMetaspaceC
virtual bool do_unique_ref(Ref* ref, bool read_only) {
ArchivedObjInfo info;
- if (AOTMetaspace::in_aot_cache(ref->obj())) {
- info._src_addr = ref->obj();
- info._buffered_addr = ref->obj();
- info._requested_addr = ref->obj();
- info._bytes = ref->size() * BytesPerWord;
- info._type = ref->msotype();
- _objs.append(info);
- }
+ info._src_addr = ref->obj();
+ info._buffered_addr = ref->obj();
+ info._requested_addr = ref->obj();
+ info._bytes = ref->size() * BytesPerWord;
+ info._type = ref->msotype();
+ _objs.append(info);
return true; // keep iterating
}
diff --git a/src/hotspot/share/cds/cdsConfig.cpp b/src/hotspot/share/cds/cdsConfig.cpp
index a5d1f78b76fa0..7c6b925470a09 100644
--- a/src/hotspot/share/cds/cdsConfig.cpp
+++ b/src/hotspot/share/cds/cdsConfig.cpp
@@ -943,9 +943,8 @@ bool CDSConfig::is_preserving_verification_constraints() {
return AOTClassLinking;
} else if (is_dumping_final_static_archive()) { // writing AOT cache
return is_dumping_aot_linked_classes();
- } else if (is_dumping_classic_static_archive()) {
- return is_dumping_aot_linked_classes();
} else {
+ // For simplicity, we don't support this optimization with the old CDS workflow.
return false;
}
}
diff --git a/src/hotspot/share/cds/finalImageRecipes.cpp b/src/hotspot/share/cds/finalImageRecipes.cpp
index a9bbc398736d3..dfe74acd6c1e5 100644
--- a/src/hotspot/share/cds/finalImageRecipes.cpp
+++ b/src/hotspot/share/cds/finalImageRecipes.cpp
@@ -127,14 +127,6 @@ void FinalImageRecipes::record_recipes_for_constantpool() {
}
if (cp_indices.length() > 0) {
- LogStreamHandle(Trace, aot, resolve) log;
- if (log.is_enabled()) {
- log.print("ConstantPool entries for %s to be pre-resolved:", k->external_name());
- for (int i = 0; i < cp_indices.length(); i++) {
- log.print(" %d", cp_indices.at(i));
- }
- log.print("\n");
- }
tmp_cp_recipes.append(ArchiveUtils::archive_array(&cp_indices));
} else {
tmp_cp_recipes.append(nullptr);
diff --git a/src/hotspot/share/cds/runTimeClassInfo.cpp b/src/hotspot/share/cds/runTimeClassInfo.cpp
index fe940ca6c183d..832b0ce893223 100644
--- a/src/hotspot/share/cds/runTimeClassInfo.cpp
+++ b/src/hotspot/share/cds/runTimeClassInfo.cpp
@@ -41,7 +41,7 @@ void RunTimeClassInfo::init(DumpTimeClassInfo& info) {
_num_loader_constraints = info.num_loader_constraints();
int i;
- if (CDSConfig::is_preserving_verification_constraints()) {
+ if (CDSConfig::is_preserving_verification_constraints() && CDSConfig::is_dumping_final_static_archive()) {
// The production run doesn't need the verifier constraints, as we can guarantee that all classes checked by
// the verifier during AOT training/assembly phases cannot be replaced in the production run.
_num_verifier_constraints = 0;
diff --git a/src/hotspot/share/classfile/stackMapTable.cpp b/src/hotspot/share/classfile/stackMapTable.cpp
index 85fb4de868658..9e02956aceb6c 100644
--- a/src/hotspot/share/classfile/stackMapTable.cpp
+++ b/src/hotspot/share/classfile/stackMapTable.cpp
@@ -132,16 +132,8 @@ bool StackMapTable::match_stackmap(
}
void StackMapTable::check_jump_target(
- StackMapFrame* frame, int bci, int offset, TRAPS) const {
+ StackMapFrame* frame, int32_t target, TRAPS) const {
ErrorContext ctx;
- // Jump targets must be within the method and the method size is limited. See JVMS 4.11
- int min_offset = -1 * max_method_code_size;
- if (offset < min_offset || offset > max_method_code_size) {
- frame->verifier()->verify_error(ErrorContext::bad_stackmap(bci, frame),
- "Illegal target of jump or branch (bci %d + offset %d)", bci, offset);
- return;
- }
- int target = bci + offset;
bool match = match_stackmap(
frame, target, true, false, &ctx, CHECK_VERIFY(frame->verifier()));
if (!match || (target < 0 || target >= _code_length)) {
diff --git a/src/hotspot/share/classfile/stackMapTable.hpp b/src/hotspot/share/classfile/stackMapTable.hpp
index 9b46fa89345d9..6d4c0ce36c086 100644
--- a/src/hotspot/share/classfile/stackMapTable.hpp
+++ b/src/hotspot/share/classfile/stackMapTable.hpp
@@ -67,7 +67,7 @@ class StackMapTable : public StackObj {
// Check jump instructions. Make sure there are no uninitialized
// instances on backward branch.
- void check_jump_target(StackMapFrame* frame, int bci, int offset, TRAPS) const;
+ void check_jump_target(StackMapFrame* frame, int32_t target, TRAPS) const;
// The following methods are only used inside this class.
diff --git a/src/hotspot/share/classfile/systemDictionaryShared.cpp b/src/hotspot/share/classfile/systemDictionaryShared.cpp
index cb2ae96348ead..b092e71f4e761 100644
--- a/src/hotspot/share/classfile/systemDictionaryShared.cpp
+++ b/src/hotspot/share/classfile/systemDictionaryShared.cpp
@@ -855,28 +855,6 @@ class UnregisteredClassesDuplicationChecker : StackObj {
}
};
-void SystemDictionaryShared::link_all_exclusion_check_candidates(InstanceKlass* ik) {
- bool need_to_link = false;
- {
- MutexLocker ml(DumpTimeTable_lock, Mutex::_no_safepoint_check_flag);
- ExclusionCheckCandidates candidates(ik);
-
- candidates.iterate_all([&] (InstanceKlass* k, DumpTimeClassInfo* info) {
- if (!k->is_linked()) {
- need_to_link = true;
- }
- });
- }
- if (need_to_link) {
- JavaThread* THREAD = JavaThread::current();
- if (log_is_enabled(Info, aot, link)) {
- ResourceMark rm(THREAD);
- log_info(aot, link)("Link all loaded classes for %s", ik->external_name());
- }
- AOTMetaspace::link_all_loaded_classes(THREAD);
- }
-}
-
// Returns true if the class should be excluded. This can be called by
// AOTConstantPoolResolver before or after we enter the CDS safepoint.
// When called before the safepoint, we need to link the class so that
@@ -900,19 +878,27 @@ bool SystemDictionaryShared::should_be_excluded(Klass* k) {
InstanceKlass* ik = InstanceKlass::cast(k);
if (!SafepointSynchronize::is_at_safepoint()) {
- {
- // fast path
- MutexLocker ml(DumpTimeTable_lock, Mutex::_no_safepoint_check_flag);
- DumpTimeClassInfo* p = get_info_locked(ik);
- if (p->has_checked_exclusion()) {
- return p->is_excluded();
+ if (!ik->is_linked()) {
+ // should_be_excluded_impl() below doesn't link unlinked classes. We come
+ // here only when we are trying to aot-link constant pool entries, so
+ // we'd better link the class.
+ JavaThread* THREAD = JavaThread::current();
+ ik->link_class(THREAD);
+ if (HAS_PENDING_EXCEPTION) {
+ CLEAR_PENDING_EXCEPTION;
+ return true; // linking failed -- let's exclude it
}
- }
- link_all_exclusion_check_candidates(ik);
+ // Also link any classes that were loaded for the verification of ik or its supertypes.
+ // Otherwise we might miss the verification constraints of those classes.
+ AOTMetaspace::link_all_loaded_classes(THREAD);
+ }
MutexLocker ml(DumpTimeTable_lock, Mutex::_no_safepoint_check_flag);
DumpTimeClassInfo* p = get_info_locked(ik);
+ if (p->is_excluded()) {
+ return true;
+ }
return should_be_excluded_impl(ik, p);
} else {
// When called within the CDS safepoint, the correctness of this function
@@ -926,7 +912,7 @@ bool SystemDictionaryShared::should_be_excluded(Klass* k) {
// No need to check for is_linked() as all eligible classes should have
// already been linked in AOTMetaspace::link_class_for_cds().
- // Don't take DumpTimeTable_lock as we are in safepoint.
+ // Can't take the lock as we are in safepoint.
DumpTimeClassInfo* p = _dumptime_table->get(ik);
if (p->is_excluded()) {
return true;
@@ -1434,10 +1420,6 @@ void SystemDictionaryShared::get_all_archived_classes(bool is_static_archive, Gr
get_archive(is_static_archive)->_builtin_dictionary.iterate([&] (const RunTimeClassInfo* record) {
classes->append(record->klass());
});
-
- get_archive(is_static_archive)->_unregistered_dictionary.iterate([&] (const RunTimeClassInfo* record) {
- classes->append(record->klass());
- });
}
class SharedDictionaryPrinter : StackObj {
diff --git a/src/hotspot/share/classfile/systemDictionaryShared.hpp b/src/hotspot/share/classfile/systemDictionaryShared.hpp
index 2619a642fd1da..5ff57653dd092 100644
--- a/src/hotspot/share/classfile/systemDictionaryShared.hpp
+++ b/src/hotspot/share/classfile/systemDictionaryShared.hpp
@@ -175,7 +175,6 @@ class SystemDictionaryShared: public SystemDictionary {
static void write_dictionary(RunTimeSharedDictionary* dictionary,
bool is_builtin);
static bool is_jfr_event_class(InstanceKlass *k);
- static void link_all_exclusion_check_candidates(InstanceKlass* ik);
static bool should_be_excluded_impl(InstanceKlass* k, DumpTimeClassInfo* info);
// exclusion checks
diff --git a/src/hotspot/share/classfile/verifier.cpp b/src/hotspot/share/classfile/verifier.cpp
index 38dba1d3d5fbb..9b93e283362fd 100644
--- a/src/hotspot/share/classfile/verifier.cpp
+++ b/src/hotspot/share/classfile/verifier.cpp
@@ -781,6 +781,7 @@ void ClassVerifier::verify_method(const methodHandle& m, TRAPS) {
// Merge with the next instruction
{
+ int target;
VerificationType type, type2;
VerificationType atype;
@@ -1605,8 +1606,9 @@ void ClassVerifier::verify_method(const methodHandle& m, TRAPS) {
case Bytecodes::_ifle:
current_frame.pop_stack(
VerificationType::integer_type(), CHECK_VERIFY(this));
+ target = bcs.dest();
stackmap_table.check_jump_target(
- ¤t_frame, bcs.bci(), bcs.get_offset_s2(), CHECK_VERIFY(this));
+ ¤t_frame, target, CHECK_VERIFY(this));
no_control_flow = false; break;
case Bytecodes::_if_acmpeq :
case Bytecodes::_if_acmpne :
@@ -1617,16 +1619,19 @@ void ClassVerifier::verify_method(const methodHandle& m, TRAPS) {
case Bytecodes::_ifnonnull :
current_frame.pop_stack(
VerificationType::reference_check(), CHECK_VERIFY(this));
+ target = bcs.dest();
stackmap_table.check_jump_target
- (¤t_frame, bcs.bci(), bcs.get_offset_s2(), CHECK_VERIFY(this));
+ (¤t_frame, target, CHECK_VERIFY(this));
no_control_flow = false; break;
case Bytecodes::_goto :
+ target = bcs.dest();
stackmap_table.check_jump_target(
- ¤t_frame, bcs.bci(), bcs.get_offset_s2(), CHECK_VERIFY(this));
+ ¤t_frame, target, CHECK_VERIFY(this));
no_control_flow = true; break;
case Bytecodes::_goto_w :
+ target = bcs.dest_w();
stackmap_table.check_jump_target(
- ¤t_frame, bcs.bci(), bcs.get_offset_s4(), CHECK_VERIFY(this));
+ ¤t_frame, target, CHECK_VERIFY(this));
no_control_flow = true; break;
case Bytecodes::_tableswitch :
case Bytecodes::_lookupswitch :
@@ -2275,14 +2280,15 @@ void ClassVerifier::verify_switch(
}
}
}
- stackmap_table->check_jump_target(current_frame, bci, default_offset, CHECK_VERIFY(this));
+ int target = bci + default_offset;
+ stackmap_table->check_jump_target(current_frame, target, CHECK_VERIFY(this));
for (int i = 0; i < keys; i++) {
// Because check_jump_target() may safepoint, the bytecode could have
// moved, which means 'aligned_bcp' is no good and needs to be recalculated.
aligned_bcp = align_up(bcs->bcp() + 1, jintSize);
- int offset = (jint)Bytes::get_Java_u4(aligned_bcp+(3+i*delta)*jintSize);
+ target = bci + (jint)Bytes::get_Java_u4(aligned_bcp+(3+i*delta)*jintSize);
stackmap_table->check_jump_target(
- current_frame, bci, offset, CHECK_VERIFY(this));
+ current_frame, target, CHECK_VERIFY(this));
}
NOT_PRODUCT(aligned_bcp = nullptr); // no longer valid at this point
}
diff --git a/src/hotspot/share/classfile/vmIntrinsics.hpp b/src/hotspot/share/classfile/vmIntrinsics.hpp
index 0895418ef848b..c9c5c925f86e4 100644
--- a/src/hotspot/share/classfile/vmIntrinsics.hpp
+++ b/src/hotspot/share/classfile/vmIntrinsics.hpp
@@ -467,8 +467,8 @@ class methodHandle;
do_intrinsic(_Reference_clear0, java_lang_ref_Reference, clear0_name, void_method_signature, F_RN) \
do_intrinsic(_PhantomReference_clear0, java_lang_ref_PhantomReference, clear0_name, void_method_signature, F_RN) \
\
- /* support for com.sun.crypto.provider.AES_Crypt and some of its callers */ \
- do_class(com_sun_crypto_provider_aescrypt, "com/sun/crypto/provider/AES_Crypt") \
+ /* support for com.sun.crypto.provider.AESCrypt and some of its callers */ \
+ do_class(com_sun_crypto_provider_aescrypt, "com/sun/crypto/provider/AESCrypt") \
do_intrinsic(_aescrypt_encryptBlock, com_sun_crypto_provider_aescrypt, encryptBlock_name, byteArray_int_byteArray_int_signature, F_R) \
do_intrinsic(_aescrypt_decryptBlock, com_sun_crypto_provider_aescrypt, decryptBlock_name, byteArray_int_byteArray_int_signature, F_R) \
do_name( encryptBlock_name, "implEncryptBlock") \
diff --git a/src/hotspot/share/code/codeBlob.cpp b/src/hotspot/share/code/codeBlob.cpp
index e901d5606161c..18e7752013957 100644
--- a/src/hotspot/share/code/codeBlob.cpp
+++ b/src/hotspot/share/code/codeBlob.cpp
@@ -910,7 +910,6 @@ void CodeBlob::dump_for_addr(address addr, outputStream* st, bool verbose) const
nm->print_nmethod(true);
} else {
nm->print_on(st);
- nm->print_code_snippet(st, addr);
}
return;
}
diff --git a/src/hotspot/share/code/nmethod.cpp b/src/hotspot/share/code/nmethod.cpp
index 8e6a179748039..7274b627f3e6b 100644
--- a/src/hotspot/share/code/nmethod.cpp
+++ b/src/hotspot/share/code/nmethod.cpp
@@ -23,7 +23,6 @@
*/
#include "asm/assembler.inline.hpp"
-#include "cds/cdsConfig.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
@@ -1148,7 +1147,7 @@ nmethod* nmethod::new_nmethod(const methodHandle& method,
+ align_up(speculations_len , oopSize)
#endif
+ align_up(debug_info->data_size() , oopSize)
- + ImmutableDataReferencesCounterSize;
+ + align_up(ImmutableDataReferencesCounterSize, oopSize);
// First, allocate space for immutable data in C heap.
address immutable_data = nullptr;
@@ -1323,7 +1322,6 @@ nmethod::nmethod(
#if INCLUDE_JVMCI
_speculations_offset = 0;
#endif
- _immutable_data_reference_counter_offset = 0;
code_buffer->copy_code_and_locs_to(this);
code_buffer->copy_values_to(this);
@@ -1422,6 +1420,15 @@ nmethod::nmethod(const nmethod &nm) : CodeBlob(nm._name, nm._kind, nm._size, nm.
_method = nm._method;
_osr_link = nullptr;
+ // Increment number of references to immutable data to share it between nmethods
+ _immutable_data_size = nm._immutable_data_size;
+ if (_immutable_data_size > 0) {
+ _immutable_data = nm._immutable_data;
+ set_immutable_data_references_counter(get_immutable_data_references_counter() + 1);
+ } else {
+ _immutable_data = blob_end();
+ }
+
_exception_cache = nullptr;
_gc_data = nullptr;
_oops_do_mark_nmethods = nullptr;
@@ -1437,7 +1444,6 @@ nmethod::nmethod(const nmethod &nm) : CodeBlob(nm._name, nm._kind, nm._size, nm.
_entry_offset = nm._entry_offset;
_verified_entry_offset = nm._verified_entry_offset;
_entry_bci = nm._entry_bci;
- _immutable_data_size = nm._immutable_data_size;
_skipped_instructions_size = nm._skipped_instructions_size;
_stub_offset = nm._stub_offset;
@@ -1456,15 +1462,6 @@ nmethod::nmethod(const nmethod &nm) : CodeBlob(nm._name, nm._kind, nm._size, nm.
#if INCLUDE_JVMCI
_speculations_offset = nm._speculations_offset;
#endif
- _immutable_data_reference_counter_offset = nm._immutable_data_reference_counter_offset;
-
- // Increment number of references to immutable data to share it between nmethods
- if (_immutable_data_size > 0) {
- _immutable_data = nm._immutable_data;
- set_immutable_data_references_counter(get_immutable_data_references_counter() + 1);
- } else {
- _immutable_data = blob_end();
- }
_orig_pc_offset = nm._orig_pc_offset;
_compile_id = nm._compile_id;
@@ -1754,11 +1751,9 @@ nmethod::nmethod(
#if INCLUDE_JVMCI
_speculations_offset = _scopes_data_offset + align_up(debug_info->data_size(), oopSize);
- _immutable_data_reference_counter_offset = _speculations_offset + align_up(speculations_len, oopSize);
- DEBUG_ONLY( int immutable_data_end_offset = _immutable_data_reference_counter_offset + ImmutableDataReferencesCounterSize; )
+ DEBUG_ONLY( int immutable_data_end_offset = _speculations_offset + align_up(speculations_len, oopSize) + align_up(ImmutableDataReferencesCounterSize, oopSize); )
#else
- _immutable_data_reference_counter_offset = _scopes_data_offset + align_up(debug_info->data_size(), oopSize);
- DEBUG_ONLY( int immutable_data_end_offset = _immutable_data_reference_counter_offset + ImmutableDataReferencesCounterSize; )
+ DEBUG_ONLY( int immutable_data_end_offset = _scopes_data_offset + align_up(debug_info->data_size(), oopSize) + align_up(ImmutableDataReferencesCounterSize, oopSize); )
#endif
assert(immutable_data_end_offset <= immutable_data_size, "wrong read-only data size: %d > %d",
immutable_data_end_offset, immutable_data_size);
@@ -2505,48 +2500,11 @@ void nmethod::post_compiled_method(CompileTask* task) {
maybe_print_nmethod(directive);
}
-#if INCLUDE_CDS
-static GrowableArrayCHeap* _delayed_compiled_method_load_events = nullptr;
-
-void nmethod::add_delayed_compiled_method_load_event(nmethod* nm) {
- precond(CDSConfig::is_using_aot_linked_classes());
- precond(!ServiceThread::has_started());
-
- // We are still in single threaded stage of VM bootstrap. No need to lock.
- if (_delayed_compiled_method_load_events == nullptr) {
- _delayed_compiled_method_load_events = new GrowableArrayCHeap();
- }
- _delayed_compiled_method_load_events->append(nm);
-}
-
-void nmethod::post_delayed_compiled_method_load_events() {
- precond(ServiceThread::has_started());
- if (_delayed_compiled_method_load_events != nullptr) {
- for (int i = 0; i < _delayed_compiled_method_load_events->length(); i++) {
- nmethod* nm = _delayed_compiled_method_load_events->at(i);
- nm->post_compiled_method_load_event();
- }
- delete _delayed_compiled_method_load_events;
- _delayed_compiled_method_load_events = nullptr;
- }
-}
-#endif
-
// ------------------------------------------------------------------
// post_compiled_method_load_event
// new method for install_code() path
// Transfer information from compilation to jvmti
void nmethod::post_compiled_method_load_event(JvmtiThreadState* state) {
-#if INCLUDE_CDS
- if (!ServiceThread::has_started()) {
- // With AOT-linked classes, we could compile wrappers for native methods before the
- // ServiceThread has been started, so we must delay the events to be posted later.
- assert(state == nullptr, "must be");
- add_delayed_compiled_method_load_event(this);
- return;
- }
-#endif
-
// This is a bad time for a safepoint. We don't want
// this nmethod to get unloaded while we're queueing the event.
NoSafepointVerifier nsv;
@@ -4308,46 +4266,6 @@ void nmethod::print_value_on_impl(outputStream* st) const {
#endif
}
-void nmethod::print_code_snippet(outputStream* st, address addr) const {
- if (entry_point() <= addr && addr < code_end()) {
- // Pointing into the nmethod's code. Try to disassemble some instructions around addr.
- // Determine conservative start and end points.
- address start;
- if (frame_complete_offset() != CodeOffsets::frame_never_safe &&
- addr >= code_begin() + frame_complete_offset()) {
- start = code_begin() + frame_complete_offset();
- } else {
- start = (addr < verified_entry_point()) ? entry_point() : verified_entry_point();
- }
- address start_for_hex_dump = start; // We can choose a different starting point for hex dump, below.
- address end = code_end();
-
- // Try using relocations to find closer instruction start and end points.
- // (Some platforms have variable length instructions and can only
- // disassemble correctly at instruction start addresses.)
- RelocIterator iter((nmethod*)this, start);
- while (iter.next() && iter.addr() < addr) { // find relocation before addr
- // Note: There's a relocation which doesn't point to an instruction start:
- // ZBarrierRelocationFormatStoreGoodAfterMov with ZGC on x86_64
- // We could detect and skip it, but hex dump is still usable when
- // disassembler produces garbage in such a very rare case.
- start = iter.addr();
- // We want at least 64 Bytes ahead in hex dump.
- if (iter.addr() <= (addr - 64)) start_for_hex_dump = iter.addr();
- }
- if (iter.has_current()) {
- if (iter.addr() == addr) iter.next(); // find relocation after addr
- if (iter.has_current()) end = iter.addr();
- }
-
- // Always print hex. Disassembler may still have problems when hitting an incorrect instruction start.
- os::print_hex_dump(st, start_for_hex_dump, end, 1, /* print_ascii=*/false);
- if (!Disassembler::is_abstract()) {
- Disassembler::decode(start, end, st);
- }
- }
-}
-
#ifndef PRODUCT
void nmethod::print_calls(outputStream* st) {
diff --git a/src/hotspot/share/code/nmethod.hpp b/src/hotspot/share/code/nmethod.hpp
index bce0181a3ec6e..2332766a47ce2 100644
--- a/src/hotspot/share/code/nmethod.hpp
+++ b/src/hotspot/share/code/nmethod.hpp
@@ -250,7 +250,6 @@ class nmethod : public CodeBlob {
#if INCLUDE_JVMCI
int _speculations_offset;
#endif
- int _immutable_data_reference_counter_offset;
// location in frame (offset for sp) that deopt can store the original
// pc during a deopt.
@@ -647,11 +646,12 @@ class nmethod : public CodeBlob {
#if INCLUDE_JVMCI
address scopes_data_end () const { return _immutable_data + _speculations_offset ; }
address speculations_begin () const { return _immutable_data + _speculations_offset ; }
- address speculations_end () const { return _immutable_data + _immutable_data_reference_counter_offset ; }
+ address speculations_end () const { return immutable_data_end() - ImmutableDataReferencesCounterSize ; }
#else
- address scopes_data_end () const { return _immutable_data + _immutable_data_reference_counter_offset ; }
+ address scopes_data_end () const { return immutable_data_end() - ImmutableDataReferencesCounterSize ; }
#endif
- address immutable_data_references_counter_begin () const { return _immutable_data + _immutable_data_reference_counter_offset ; }
+
+ address immutable_data_references_counter_begin () const { return immutable_data_end() - ImmutableDataReferencesCounterSize ; }
// Sizes
int immutable_data_size() const { return _immutable_data_size; }
@@ -965,8 +965,6 @@ class nmethod : public CodeBlob {
inline int get_immutable_data_references_counter() { return *((int*)immutable_data_references_counter_begin()); }
inline void set_immutable_data_references_counter(int count) { *((int*)immutable_data_references_counter_begin()) = count; }
- static void add_delayed_compiled_method_load_event(nmethod* nm) NOT_CDS_RETURN;
-
public:
// ScopeDesc retrieval operation
PcDesc* pc_desc_at(address pc) { return find_pc_desc(pc, false); }
@@ -1001,14 +999,10 @@ class nmethod : public CodeBlob {
// Avoid hiding of parent's 'decode(outputStream*)' method.
void decode(outputStream* st) const { decode2(st); } // just delegate here.
- // AOT cache support
- static void post_delayed_compiled_method_load_events() NOT_CDS_RETURN;
-
// printing support
void print_on_impl(outputStream* st) const;
void print_code();
void print_value_on_impl(outputStream* st) const;
- void print_code_snippet(outputStream* st, address addr) const;
#if defined(SUPPORT_DATA_STRUCTS)
// print output in opt build for disassembler library
diff --git a/src/hotspot/share/compiler/compilerDefinitions.cpp b/src/hotspot/share/compiler/compilerDefinitions.cpp
index aed1edc0db5f6..35201973dfe77 100644
--- a/src/hotspot/share/compiler/compilerDefinitions.cpp
+++ b/src/hotspot/share/compiler/compilerDefinitions.cpp
@@ -215,6 +215,11 @@ void CompilerConfig::set_client_emulation_mode_flags() {
if (FLAG_IS_DEFAULT(CodeCacheExpansionSize)) {
FLAG_SET_ERGO(CodeCacheExpansionSize, 32*K);
}
+ if (FLAG_IS_DEFAULT(MaxRAM)) {
+ // Do not use FLAG_SET_ERGO to update MaxRAM, as this will impact
+ // heap setting done based on available phys_mem (see Arguments::set_heap_size).
+ FLAG_SET_DEFAULT(MaxRAM, 1ULL*G);
+ }
if (FLAG_IS_DEFAULT(CICompilerCount)) {
FLAG_SET_ERGO(CICompilerCount, 1);
}
@@ -548,36 +553,21 @@ bool CompilerConfig::check_args_consistency(bool status) {
return status;
}
-bool CompilerConfig::should_set_client_emulation_mode_flags() {
+void CompilerConfig::ergo_initialize() {
#if !COMPILER1_OR_COMPILER2
- return false;
+ return;
#endif
if (has_c1()) {
if (!is_compilation_mode_selected()) {
if (NeverActAsServerClassMachine) {
- return true;
+ set_client_emulation_mode_flags();
}
} else if (!has_c2() && !is_jvmci_compiler()) {
- return true;
+ set_client_emulation_mode_flags();
}
}
- return false;
-}
-
-void CompilerConfig::ergo_initialize() {
-#if !COMPILER1_OR_COMPILER2
- return;
-#endif
-
- // This property is also checked when selecting the heap size. Since client
- // emulation mode influences Java heap memory usage, part of the logic must
- // occur before choosing the heap size.
- if (should_set_client_emulation_mode_flags()) {
- set_client_emulation_mode_flags();
- }
-
set_legacy_emulation_flags();
set_compilation_policy_flags();
diff --git a/src/hotspot/share/compiler/compilerDefinitions.hpp b/src/hotspot/share/compiler/compilerDefinitions.hpp
index a9b052ff78231..1c8c65b2a533d 100644
--- a/src/hotspot/share/compiler/compilerDefinitions.hpp
+++ b/src/hotspot/share/compiler/compilerDefinitions.hpp
@@ -151,8 +151,6 @@ class CompilerConfig : public AllStatic {
inline static CompilerType compiler_type();
- static bool should_set_client_emulation_mode_flags();
-
private:
static bool is_compilation_mode_selected();
static void set_compilation_policy_flags();
diff --git a/src/hotspot/share/compiler/compilerOracle.cpp b/src/hotspot/share/compiler/compilerOracle.cpp
index 23bb754f43244..868ae8bfa41f4 100644
--- a/src/hotspot/share/compiler/compilerOracle.cpp
+++ b/src/hotspot/share/compiler/compilerOracle.cpp
@@ -617,44 +617,18 @@ static void usage() {
tty->cr();
print_commands();
tty->cr();
- tty->print_cr("The has the format '.'.");
- tty->cr();
- tty->print_cr("For example, the ");
- tty->cr();
- tty->print_cr(" package/Class.method(Lpackage/Parameter;)Lpackage/Return;");
- tty->cr();
- tty->print_cr("matches the 'method' in 'package/Class' with ");
- tty->print_cr("'(Lpackage/Parameter;)Lpackage/Return;'");
+ tty->print_cr("Method patterns has the format:");
+ tty->print_cr(" package/Class.method()");
tty->cr();
tty->print_cr("For backward compatibility this form is also allowed:");
+ tty->print_cr(" package.Class::method()");
tty->cr();
- tty->print_cr(" package.Class::method(Lpackage.Parameter;)Lpackage.Return;");
- tty->cr();
- tty->print_cr("A whitespace or comma can optionally separate the from the");
- tty->print_cr(":");
- tty->cr();
- tty->print_cr(" package/Class.method (Lpackage/Parameter;)Lpackage/Return;");
- tty->print_cr(" package/Class.method,(Lpackage/Parameter;)Lpackage/Return;");
- tty->cr();
- tty->print_cr("The and accept leading and trailing '*' wildcards");
- tty->print_cr("matching:");
- tty->cr();
- tty->print_cr(" *ackage/Clas*.*etho*(Lpackage/Parameter;)Lpackage/Return;");
- tty->cr();
- tty->print_cr("The does not support explicit wildcards and");
- tty->print_cr("always has an implicit trailing wildcard. Therefore,");
- tty->cr();
- tty->print_cr(" package/Class.method(Lpackage/Parameter;)Lpackage/Return;");
- tty->cr();
- tty->print_cr("matches a subset of");
- tty->cr();
- tty->print_cr(" package/Class.method(Lpackage/Parameter;)");
- tty->cr();
- tty->print_cr("which matches a subset of");
- tty->cr();
- tty->print_cr(" package/Class.method");
+ tty->print_cr("The signature can be separated by an optional whitespace or comma:");
+ tty->print_cr(" package/Class.method ()");
tty->cr();
- tty->print_cr("which matches all possible descriptors.");
+ tty->print_cr("The class and method identifier can be used together with leading or");
+ tty->print_cr("trailing *'s for wildcard matching:");
+ tty->print_cr(" *ackage/Clas*.*etho*()");
tty->cr();
tty->print_cr("It is possible to use more than one CompileCommand on the command line:");
tty->print_cr(" -XX:CompileCommand=exclude,java/*.* -XX:CompileCommand=log,java*.*");
diff --git a/src/hotspot/share/gc/epsilon/epsilonHeap.cpp b/src/hotspot/share/gc/epsilon/epsilonHeap.cpp
index f3d411e34ba51..16cae714cb976 100644
--- a/src/hotspot/share/gc/epsilon/epsilonHeap.cpp
+++ b/src/hotspot/share/gc/epsilon/epsilonHeap.cpp
@@ -52,7 +52,7 @@ jint EpsilonHeap::initialize() {
initialize_reserved_region(heap_rs);
_space = new ContiguousSpace();
- _space->initialize(committed_region, /* clear_space = */ true);
+ _space->initialize(committed_region, /* clear_space = */ true, /* mangle_space = */ true);
// Precompute hot fields
_max_tlab_size = MIN2(CollectedHeap::max_tlab_size(), align_object_size(EpsilonMaxTLABSize / HeapWordSize));
diff --git a/src/hotspot/share/gc/g1/g1BarrierSet.cpp b/src/hotspot/share/gc/g1/g1BarrierSet.cpp
index 622651ce0d8d1..ab7d6febf4cf7 100644
--- a/src/hotspot/share/gc/g1/g1BarrierSet.cpp
+++ b/src/hotspot/share/gc/g1/g1BarrierSet.cpp
@@ -111,7 +111,7 @@ void G1BarrierSet::write_ref_array_pre(narrowOop* dst, size_t count, bool dest_u
}
}
-void G1BarrierSet::write_region(MemRegion mr) {
+void G1BarrierSet::write_region(JavaThread* thread, MemRegion mr) {
if (mr.is_empty()) {
return;
}
diff --git a/src/hotspot/share/gc/g1/g1BarrierSet.hpp b/src/hotspot/share/gc/g1/g1BarrierSet.hpp
index 58a70ed6a6076..20642cfc7e6c1 100644
--- a/src/hotspot/share/gc/g1/g1BarrierSet.hpp
+++ b/src/hotspot/share/gc/g1/g1BarrierSet.hpp
@@ -84,6 +84,10 @@ class G1BarrierSet: public CardTableBarrierSet {
// Update the given thread's card table (byte map) base to the current card table's.
void update_card_table_base(Thread* thread);
+ virtual bool card_mark_must_follow_store() const {
+ return true;
+ }
+
// Add "pre_val" to a set of objects that may have been disconnected from the
// pre-marking object graph. Prefer the version that takes location, as it
// can avoid touching the heap unnecessarily.
@@ -99,7 +103,8 @@ class G1BarrierSet: public CardTableBarrierSet {
template
void write_ref_field_pre(T* field);
- virtual void write_region(MemRegion mr);
+ inline void write_region(MemRegion mr);
+ void write_region(JavaThread* thread, MemRegion mr);
template
void write_ref_field_post(T* field);
diff --git a/src/hotspot/share/gc/g1/g1BarrierSet.inline.hpp b/src/hotspot/share/gc/g1/g1BarrierSet.inline.hpp
index ffba561f11f3d..0888fc589375a 100644
--- a/src/hotspot/share/gc/g1/g1BarrierSet.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1BarrierSet.inline.hpp
@@ -68,6 +68,10 @@ inline void G1BarrierSet::write_ref_field_pre(T* field) {
enqueue(field);
}
+inline void G1BarrierSet::write_region(MemRegion mr) {
+ write_region(JavaThread::current(), mr);
+}
+
template
inline void G1BarrierSet::write_ref_field_post(T* field) {
volatile CardValue* byte = _card_table->byte_for(field);
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
index d3e02df3e09b6..c1b18a71cfbfd 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
@@ -403,25 +403,21 @@ HeapWord* G1CollectedHeap::allocate_new_tlab(size_t min_size,
assert_heap_not_locked_and_not_at_safepoint();
assert(!is_humongous(requested_size), "we do not allow humongous TLABs");
- // Do not allow a GC because we are allocating a new TLAB to avoid an issue
- // with UseGCOverheadLimit: although this GC would return null if the overhead
- // limit would be exceeded, but it would likely free at least some space.
- // So the subsequent outside-TLAB allocation could be successful anyway and
- // the indication that the overhead limit had been exceeded swallowed.
- return attempt_allocation(min_size, requested_size, actual_size, false /* allow_gc */);
+ return attempt_allocation(min_size, requested_size, actual_size);
}
-HeapWord* G1CollectedHeap::mem_allocate(size_t word_size) {
+HeapWord*
+G1CollectedHeap::mem_allocate(size_t word_size) {
assert_heap_not_locked_and_not_at_safepoint();
if (is_humongous(word_size)) {
return attempt_allocation_humongous(word_size);
}
size_t dummy = 0;
- return attempt_allocation(word_size, word_size, &dummy, true /* allow_gc */);
+ return attempt_allocation(word_size, word_size, &dummy);
}
-HeapWord* G1CollectedHeap::attempt_allocation_slow(uint node_index, size_t word_size, bool allow_gc) {
+HeapWord* G1CollectedHeap::attempt_allocation_slow(uint node_index, size_t word_size) {
ResourceMark rm; // For retrieving the thread names in log messages.
// Make sure you read the note in attempt_allocation_humongous().
@@ -448,8 +444,6 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(uint node_index, size_t word_
result = _allocator->attempt_allocation_locked(node_index, word_size);
if (result != nullptr) {
return result;
- } else if (!allow_gc) {
- return nullptr;
}
// Read the GC count while still holding the Heap_lock.
@@ -618,8 +612,7 @@ void G1CollectedHeap::dealloc_archive_regions(MemRegion range) {
inline HeapWord* G1CollectedHeap::attempt_allocation(size_t min_word_size,
size_t desired_word_size,
- size_t* actual_word_size,
- bool allow_gc) {
+ size_t* actual_word_size) {
assert_heap_not_locked_and_not_at_safepoint();
assert(!is_humongous(desired_word_size), "attempt_allocation() should not "
"be called for humongous allocation requests");
@@ -631,7 +624,7 @@ inline HeapWord* G1CollectedHeap::attempt_allocation(size_t min_word_size,
if (result == nullptr) {
*actual_word_size = desired_word_size;
- result = attempt_allocation_slow(node_index, desired_word_size, allow_gc);
+ result = attempt_allocation_slow(node_index, desired_word_size);
}
assert_heap_not_locked();
@@ -1398,6 +1391,7 @@ jint G1CollectedHeap::initialize() {
G1CardTable* refinement_table = new G1CardTable(_reserved);
G1BarrierSet* bs = new G1BarrierSet(card_table, refinement_table);
+ bs->initialize();
assert(bs->is_a(BarrierSet::G1BarrierSet), "sanity");
// Create space mappers.
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
index 0d354525d89da..7e3f8a3028568 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
@@ -439,14 +439,18 @@ class G1CollectedHeap : public CollectedHeap {
//
// * If either call cannot satisfy the allocation request using the
// current allocating region, they will try to get a new one. If
- // this fails, (only) mem_allocate() will attempt to do an evacuation
- // pause and retry the allocation. Allocate_new_tlab() will return null,
- // deferring to the following mem_allocate().
+ // this fails, they will attempt to do an evacuation pause and
+ // retry the allocation.
+ //
+ // * If all allocation attempts fail, even after trying to schedule
+ // an evacuation pause, allocate_new_tlab() will return null,
+ // whereas mem_allocate() will attempt a heap expansion and/or
+ // schedule a Full GC.
//
// * We do not allow humongous-sized TLABs. So, allocate_new_tlab
// should never be called with word_size being humongous. All
// humongous allocation requests should go to mem_allocate() which
- // will satisfy them in a special path.
+ // will satisfy them with a special path.
HeapWord* allocate_new_tlab(size_t min_size,
size_t requested_size,
@@ -459,13 +463,12 @@ class G1CollectedHeap : public CollectedHeap {
// should only be used for non-humongous allocations.
inline HeapWord* attempt_allocation(size_t min_word_size,
size_t desired_word_size,
- size_t* actual_word_size,
- bool allow_gc);
+ size_t* actual_word_size);
+
// Second-level mutator allocation attempt: take the Heap_lock and
// retry the allocation attempt, potentially scheduling a GC
- // pause if allow_gc is set. This should only be used for non-humongous
- // allocations.
- HeapWord* attempt_allocation_slow(uint node_index, size_t word_size, bool allow_gc);
+ // pause. This should only be used for non-humongous allocations.
+ HeapWord* attempt_allocation_slow(uint node_index, size_t word_size);
// Takes the Heap_lock and attempts a humongous allocation. It can
// potentially schedule a GC pause.
diff --git a/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp b/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp
index 36412ce5efe95..df4312ebd7554 100644
--- a/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp
+++ b/src/hotspot/share/gc/parallel/mutableNUMASpace.cpp
@@ -37,11 +37,21 @@
#include "runtime/threadSMR.hpp"
#include "utilities/align.hpp"
-MutableNUMASpace::MutableNUMASpace(size_t page_size) : MutableSpace(page_size) {
+MutableNUMASpace::MutableNUMASpace(size_t alignment) : MutableSpace(alignment), _must_use_large_pages(false) {
_lgrp_spaces = new (mtGC) GrowableArray(0, mtGC);
+ _page_size = os::vm_page_size();
_adaptation_cycles = 0;
_samples_count = 0;
+#ifdef LINUX
+ // Changing the page size can lead to freeing of memory. When using large pages
+ // and the memory has been both reserved and committed, Linux does not support
+ // freeing parts of it.
+ if (UseLargePages && !os::can_commit_large_page_memory()) {
+ _must_use_large_pages = true;
+ }
+#endif // LINUX
+
size_t lgrp_limit = os::numa_get_groups_num();
uint *lgrp_ids = NEW_C_HEAP_ARRAY(uint, lgrp_limit, mtGC);
size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
@@ -50,7 +60,7 @@ MutableNUMASpace::MutableNUMASpace(size_t page_size) : MutableSpace(page_size) {
lgrp_spaces()->reserve(checked_cast(lgrp_num));
// Add new spaces for the new nodes
for (size_t i = 0; i < lgrp_num; i++) {
- lgrp_spaces()->append(new LGRPSpace(lgrp_ids[i], page_size));
+ lgrp_spaces()->append(new LGRPSpace(lgrp_ids[i], alignment));
}
FREE_C_HEAP_ARRAY(uint, lgrp_ids);
@@ -118,10 +128,7 @@ MutableNUMASpace::LGRPSpace *MutableNUMASpace::lgrp_space_for_thread(Thread* thr
return space->lgrp_id() == (uint)lgrp_id;
});
- if (lgrp_spaces_index == -1) {
- // Running on a CPU with no memory; pick another CPU based on %.
- lgrp_spaces_index = lgrp_id % lgrp_spaces()->length();
- }
+ assert(lgrp_spaces_index != -1, "must have created spaces for all lgrp_ids");
return lgrp_spaces()->at(lgrp_spaces_index);
}
@@ -139,19 +146,22 @@ size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *thr) const {
// Bias region towards the first-touching lgrp. Set the right page sizes.
void MutableNUMASpace::bias_region(MemRegion mr, uint lgrp_id) {
- assert(is_aligned(mr.start(), page_size()), "precondition");
- assert(is_aligned(mr.end(), page_size()), "precondition");
-
- if (mr.is_empty()) {
- return;
+ HeapWord *start = align_up(mr.start(), page_size());
+ HeapWord *end = align_down(mr.end(), page_size());
+ if (end > start) {
+ MemRegion aligned_region(start, end);
+ assert((intptr_t)aligned_region.start() % page_size() == 0 &&
+ (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
+ assert(region().contains(aligned_region), "Sanity");
+ // First we tell the OS which page size we want in the given range. The underlying
+ // large page can be broken down if we require small pages.
+ const size_t os_align = UseLargePages ? page_size() : os::vm_page_size();
+ os::realign_memory((char*)aligned_region.start(), aligned_region.byte_size(), os_align);
+ // Then we uncommit the pages in the range.
+ os::disclaim_memory((char*)aligned_region.start(), aligned_region.byte_size());
+ // And make them local/first-touch biased.
+ os::numa_make_local((char*)aligned_region.start(), aligned_region.byte_size(), checked_cast(lgrp_id));
}
- // First we tell the OS which page size we want in the given range. The underlying
- // large page can be broken down if we require small pages.
- os::realign_memory((char*) mr.start(), mr.byte_size(), page_size());
- // Then we uncommit the pages in the range.
- os::disclaim_memory((char*) mr.start(), mr.byte_size());
- // And make them local/first-touch biased.
- os::numa_make_local((char*)mr.start(), mr.byte_size(), checked_cast(lgrp_id));
}
// Update space layout. Perform adaptation.
@@ -200,15 +210,14 @@ size_t MutableNUMASpace::current_chunk_size(int i) {
// Return the default chunk size by equally diving the space.
// page_size() aligned.
size_t MutableNUMASpace::default_chunk_size() {
- // The number of pages may not be evenly divided.
- return align_down(capacity_in_bytes() / lgrp_spaces()->length(), page_size());
+ return base_space_size() / lgrp_spaces()->length() * page_size();
}
// Produce a new chunk size. page_size() aligned.
// This function is expected to be called on sequence of i's from 0 to
// lgrp_spaces()->length().
size_t MutableNUMASpace::adaptive_chunk_size(int i, size_t limit) {
- size_t pages_available = capacity_in_bytes() / page_size();
+ size_t pages_available = base_space_size();
for (int j = 0; j < i; j++) {
pages_available -= align_down(current_chunk_size(j), page_size()) / page_size();
}
@@ -254,13 +263,20 @@ size_t MutableNUMASpace::adaptive_chunk_size(int i, size_t limit) {
// |----bottom_region--|---intersection---|------top_region------|
void MutableNUMASpace::select_tails(MemRegion new_region, MemRegion intersection,
MemRegion* bottom_region, MemRegion *top_region) {
- assert(is_aligned(new_region.start(), page_size()), "precondition");
- assert(is_aligned(new_region.end(), page_size()), "precondition");
- assert(is_aligned(intersection.start(), page_size()), "precondition");
- assert(is_aligned(intersection.end(), page_size()), "precondition");
-
// Is there bottom?
if (new_region.start() < intersection.start()) { // Yes
+ // Try to coalesce small pages into a large one.
+ if (UseLargePages && page_size() >= alignment()) {
+ HeapWord* p = align_up(intersection.start(), alignment());
+ if (new_region.contains(p)
+ && pointer_delta(p, new_region.start(), sizeof(char)) >= alignment()) {
+ if (intersection.contains(p)) {
+ intersection = MemRegion(p, intersection.end());
+ } else {
+ intersection = MemRegion(p, p);
+ }
+ }
+ }
*bottom_region = MemRegion(new_region.start(), intersection.start());
} else {
*bottom_region = MemRegion();
@@ -268,6 +284,18 @@ void MutableNUMASpace::select_tails(MemRegion new_region, MemRegion intersection
// Is there top?
if (intersection.end() < new_region.end()) { // Yes
+ // Try to coalesce small pages into a large one.
+ if (UseLargePages && page_size() >= alignment()) {
+ HeapWord* p = align_down(intersection.end(), alignment());
+ if (new_region.contains(p)
+ && pointer_delta(new_region.end(), p, sizeof(char)) >= alignment()) {
+ if (intersection.contains(p)) {
+ intersection = MemRegion(intersection.start(), p);
+ } else {
+ intersection = MemRegion(p, p);
+ }
+ }
+ }
*top_region = MemRegion(intersection.end(), new_region.end());
} else {
*top_region = MemRegion();
@@ -281,8 +309,6 @@ void MutableNUMASpace::initialize(MemRegion mr,
WorkerThreads* pretouch_workers) {
assert(clear_space, "Reallocation will destroy data!");
assert(lgrp_spaces()->length() > 0, "There should be at least one space");
- assert(is_aligned(mr.start(), page_size()), "precondition");
- assert(is_aligned(mr.end(), page_size()), "precondition");
MemRegion old_region = region(), new_region;
set_bottom(mr.start());
@@ -290,22 +316,37 @@ void MutableNUMASpace::initialize(MemRegion mr,
// Must always clear the space
clear(SpaceDecorator::DontMangle);
- size_t num_pages = mr.byte_size() / page_size();
-
- if (num_pages < (size_t)lgrp_spaces()->length()) {
- log_warning(gc)("Degraded NUMA config: #os-pages (%zu) < #CPU (%d); space-size: %zu, page-size: %zu",
- num_pages, lgrp_spaces()->length(), mr.byte_size(), page_size());
-
- // Keep only the first few CPUs.
- lgrp_spaces()->trunc_to((int)num_pages);
+ // Compute chunk sizes
+ size_t prev_page_size = page_size();
+ set_page_size(alignment());
+ HeapWord* rounded_bottom = align_up(bottom(), page_size());
+ HeapWord* rounded_end = align_down(end(), page_size());
+ size_t base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();
+
+ // Try small pages if the chunk size is too small
+ if (base_space_size_pages / lgrp_spaces()->length() == 0
+ && page_size() > os::vm_page_size()) {
+ // Changing the page size below can lead to freeing of memory. So we fail initialization.
+ if (_must_use_large_pages) {
+ vm_exit_during_initialization("Failed initializing NUMA with large pages. Too small heap size");
+ }
+ set_page_size(os::vm_page_size());
+ rounded_bottom = align_up(bottom(), page_size());
+ rounded_end = align_down(end(), page_size());
+ base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();
}
+ guarantee(base_space_size_pages / lgrp_spaces()->length() > 0, "Space too small");
+ set_base_space_size(base_space_size_pages);
// Handle space resize
MemRegion top_region, bottom_region;
if (!old_region.equals(region())) {
- new_region = mr;
+ new_region = MemRegion(rounded_bottom, rounded_end);
MemRegion intersection = new_region.intersection(old_region);
- if (intersection.is_empty()) {
+ if (intersection.start() == nullptr ||
+ intersection.end() == nullptr ||
+ prev_page_size > page_size()) { // If the page size got smaller we have to change
+ // the page size preference for the whole space.
intersection = MemRegion(new_region.start(), new_region.start());
}
select_tails(new_region, intersection, &bottom_region, &top_region);
@@ -352,18 +393,19 @@ void MutableNUMASpace::initialize(MemRegion mr,
if (i == 0) { // Bottom chunk
if (i != lgrp_spaces()->length() - 1) {
- new_region = MemRegion(bottom(), chunk_byte_size >> LogHeapWordSize);
+ new_region = MemRegion(bottom(), rounded_bottom + (chunk_byte_size >> LogHeapWordSize));
} else {
new_region = MemRegion(bottom(), end());
}
- } else if (i < lgrp_spaces()->length() - 1) { // Middle chunks
- MutableSpace* ps = lgrp_spaces()->at(i - 1)->space();
- new_region = MemRegion(ps->end(),
- chunk_byte_size >> LogHeapWordSize);
- } else { // Top chunk
- MutableSpace* ps = lgrp_spaces()->at(i - 1)->space();
- new_region = MemRegion(ps->end(), end());
- }
+ } else
+ if (i < lgrp_spaces()->length() - 1) { // Middle chunks
+ MutableSpace *ps = lgrp_spaces()->at(i - 1)->space();
+ new_region = MemRegion(ps->end(),
+ ps->end() + (chunk_byte_size >> LogHeapWordSize));
+ } else { // Top chunk
+ MutableSpace *ps = lgrp_spaces()->at(i - 1)->space();
+ new_region = MemRegion(ps->end(), end());
+ }
guarantee(region().contains(new_region), "Region invariant");
@@ -390,8 +432,9 @@ void MutableNUMASpace::initialize(MemRegion mr,
// Clear space (set top = bottom) but never mangle.
s->initialize(new_region, SpaceDecorator::Clear, SpaceDecorator::DontMangle, MutableSpace::DontSetupPages);
+
+ set_adaptation_cycles(samples_count());
}
- set_adaptation_cycles(samples_count());
}
// Set the top of the whole space.
diff --git a/src/hotspot/share/gc/parallel/mutableNUMASpace.hpp b/src/hotspot/share/gc/parallel/mutableNUMASpace.hpp
index 0285037659268..dc37b10292ad6 100644
--- a/src/hotspot/share/gc/parallel/mutableNUMASpace.hpp
+++ b/src/hotspot/share/gc/parallel/mutableNUMASpace.hpp
@@ -80,8 +80,8 @@ class MutableNUMASpace : public MutableSpace {
SpaceStats _space_stats;
public:
- LGRPSpace(uint l, size_t page_size) : _lgrp_id(l), _allocation_failed(false) {
- _space = new MutableSpace(page_size);
+ LGRPSpace(uint l, size_t alignment) : _lgrp_id(l), _allocation_failed(false) {
+ _space = new MutableSpace(alignment);
_alloc_rate = new AdaptiveWeightedAverage(NUMAChunkResizeWeight);
}
~LGRPSpace() {
@@ -117,14 +117,24 @@ class MutableNUMASpace : public MutableSpace {
};
GrowableArray* _lgrp_spaces;
+ size_t _page_size;
unsigned _adaptation_cycles, _samples_count;
+ bool _must_use_large_pages;
+
+ void set_page_size(size_t psz) { _page_size = psz; }
+ size_t page_size() const { return _page_size; }
+
unsigned adaptation_cycles() { return _adaptation_cycles; }
void set_adaptation_cycles(int v) { _adaptation_cycles = v; }
unsigned samples_count() { return _samples_count; }
void increment_samples_count() { ++_samples_count; }
+ size_t _base_space_size;
+ void set_base_space_size(size_t v) { _base_space_size = v; }
+ size_t base_space_size() const { return _base_space_size; }
+
// Bias region towards the lgrp.
void bias_region(MemRegion mr, uint lgrp_id);
@@ -144,7 +154,7 @@ class MutableNUMASpace : public MutableSpace {
public:
GrowableArray* lgrp_spaces() const { return _lgrp_spaces; }
- MutableNUMASpace(size_t page_size);
+ MutableNUMASpace(size_t alignment);
virtual ~MutableNUMASpace();
// Space initialization.
virtual void initialize(MemRegion mr,
diff --git a/src/hotspot/share/gc/parallel/mutableSpace.cpp b/src/hotspot/share/gc/parallel/mutableSpace.cpp
index a8f47a387e35b..71fddf2c4dad9 100644
--- a/src/hotspot/share/gc/parallel/mutableSpace.cpp
+++ b/src/hotspot/share/gc/parallel/mutableSpace.cpp
@@ -34,26 +34,30 @@
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
-MutableSpace::MutableSpace(size_t page_size) :
+MutableSpace::MutableSpace(size_t alignment) :
_last_setup_region(),
- _page_size(page_size),
+ _alignment(alignment),
_bottom(nullptr),
_top(nullptr),
- _end(nullptr) {}
-
-void MutableSpace::numa_setup_pages(MemRegion mr, bool clear_space) {
- assert(is_aligned(mr.start(), page_size()), "precondition");
- assert(is_aligned(mr.end(), page_size()), "precondition");
-
- if (mr.is_empty()) {
- return;
- }
+ _end(nullptr)
+{
+ assert(MutableSpace::alignment() % os::vm_page_size() == 0,
+ "Space should be aligned");
+}
- if (clear_space) {
- // Prefer page reallocation to migration.
- os::disclaim_memory((char*) mr.start(), mr.byte_size());
+void MutableSpace::numa_setup_pages(MemRegion mr, size_t page_size, bool clear_space) {
+ if (!mr.is_empty()) {
+ HeapWord *start = align_up(mr.start(), page_size);
+ HeapWord *end = align_down(mr.end(), page_size);
+ if (end > start) {
+ size_t size = pointer_delta(end, start, sizeof(char));
+ if (clear_space) {
+ // Prefer page reallocation to migration.
+ os::disclaim_memory((char*)start, size);
+ }
+ os::numa_make_global((char*)start, size);
+ }
}
- os::numa_make_global((char*) mr.start(), mr.byte_size());
}
void MutableSpace::initialize(MemRegion mr,
@@ -101,17 +105,20 @@ void MutableSpace::initialize(MemRegion mr,
}
assert(mr.contains(head) && mr.contains(tail), "Sanity");
+ size_t page_size = alignment();
+
if (UseNUMA) {
- numa_setup_pages(head, clear_space);
- numa_setup_pages(tail, clear_space);
+ numa_setup_pages(head, page_size, clear_space);
+ numa_setup_pages(tail, page_size, clear_space);
}
if (AlwaysPreTouch) {
+ size_t pretouch_page_size = UseLargePages ? page_size : os::vm_page_size();
PretouchTask::pretouch("ParallelGC PreTouch head", (char*)head.start(), (char*)head.end(),
- page_size(), pretouch_workers);
+ pretouch_page_size, pretouch_workers);
PretouchTask::pretouch("ParallelGC PreTouch tail", (char*)tail.start(), (char*)tail.end(),
- page_size(), pretouch_workers);
+ pretouch_page_size, pretouch_workers);
}
// Remember where we stopped so that we can continue later.
diff --git a/src/hotspot/share/gc/parallel/mutableSpace.hpp b/src/hotspot/share/gc/parallel/mutableSpace.hpp
index 785bfe272287a..d09a2b2df89b7 100644
--- a/src/hotspot/share/gc/parallel/mutableSpace.hpp
+++ b/src/hotspot/share/gc/parallel/mutableSpace.hpp
@@ -51,20 +51,17 @@ class MutableSpace: public CHeapObj {
// The last region which page had been setup to be interleaved.
MemRegion _last_setup_region;
- size_t _page_size;
+ size_t _alignment;
HeapWord* _bottom;
HeapWord* volatile _top;
HeapWord* _end;
- void numa_setup_pages(MemRegion mr, bool clear_space);
+ void numa_setup_pages(MemRegion mr, size_t page_size, bool clear_space);
void set_last_setup_region(MemRegion mr) { _last_setup_region = mr; }
MemRegion last_setup_region() const { return _last_setup_region; }
-protected:
- size_t page_size() const { return _page_size; }
-
-public:
+ public:
virtual ~MutableSpace() = default;
MutableSpace(size_t page_size);
@@ -80,6 +77,8 @@ class MutableSpace: public CHeapObj {
HeapWord* volatile* top_addr() { return &_top; }
HeapWord** end_addr() { return &_end; }
+ size_t alignment() { return _alignment; }
+
MemRegion region() const { return MemRegion(bottom(), end()); }
size_t capacity_in_bytes() const { return capacity_in_words() * HeapWordSize; }
diff --git a/src/hotspot/share/gc/parallel/objectStartArray.cpp b/src/hotspot/share/gc/parallel/objectStartArray.cpp
index 255ee0c56ec42..d120c71d2fa2c 100644
--- a/src/hotspot/share/gc/parallel/objectStartArray.cpp
+++ b/src/hotspot/share/gc/parallel/objectStartArray.cpp
@@ -47,10 +47,7 @@ ObjectStartArray::ObjectStartArray(MemRegion covered_region)
// Do not use large-pages for the backing store. The one large page region
// will be used for the heap proper.
- ReservedSpace backing_store = MemoryReserver::reserve(bytes_to_reserve,
- os::vm_allocation_granularity(),
- os::vm_page_size(),
- mtGC);
+ ReservedSpace backing_store = MemoryReserver::reserve(bytes_to_reserve, mtGC);
if (!backing_store.is_reserved()) {
vm_exit_during_initialization("Could not reserve space for ObjectStartArray");
}
diff --git a/src/hotspot/share/gc/parallel/parallelArguments.cpp b/src/hotspot/share/gc/parallel/parallelArguments.cpp
index 2d267951f797a..780185952b4cb 100644
--- a/src/hotspot/share/gc/parallel/parallelArguments.cpp
+++ b/src/hotspot/share/gc/parallel/parallelArguments.cpp
@@ -103,10 +103,15 @@ void ParallelArguments::initialize() {
FullGCForwarding::initialize_flags(heap_reserved_size_bytes());
}
+// The alignment used for spaces in young gen and old gen
+static size_t default_space_alignment() {
+ return 64 * K * HeapWordSize;
+}
+
void ParallelArguments::initialize_alignments() {
// Initialize card size before initializing alignments
CardTable::initialize_card_size();
- SpaceAlignment = ParallelScavengeHeap::default_space_alignment();
+ SpaceAlignment = default_space_alignment();
HeapAlignment = compute_heap_alignment();
}
@@ -118,23 +123,12 @@ void ParallelArguments::initialize_heap_flags_and_sizes_one_pass() {
void ParallelArguments::initialize_heap_flags_and_sizes() {
initialize_heap_flags_and_sizes_one_pass();
- if (!UseLargePages) {
- ParallelScavengeHeap::set_desired_page_size(os::vm_page_size());
- return;
- }
-
- // If using large-page, need to update SpaceAlignment so that spaces are page-size aligned.
const size_t min_pages = 4; // 1 for eden + 1 for each survivor + 1 for old
const size_t page_sz = os::page_size_for_region_aligned(MinHeapSize, min_pages);
- ParallelScavengeHeap::set_desired_page_size(page_sz);
-
- if (page_sz == os::vm_page_size()) {
- log_warning(gc, heap)("MinHeapSize (%zu) must be large enough for 4 * page-size; Disabling UseLargePages for heap", MinHeapSize);
- return;
- }
- // Space is largepage-aligned.
- size_t new_alignment = page_sz;
+ // Can a page size be something else than a power of two?
+ assert(is_power_of_2((intptr_t)page_sz), "must be a power of 2");
+ size_t new_alignment = align_up(page_sz, SpaceAlignment);
if (new_alignment != SpaceAlignment) {
SpaceAlignment = new_alignment;
// Redo everything from the start
diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
index eef9dfbc97c24..18cbe2403d80e 100644
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
@@ -61,18 +61,11 @@ PSYoungGen* ParallelScavengeHeap::_young_gen = nullptr;
PSOldGen* ParallelScavengeHeap::_old_gen = nullptr;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = nullptr;
GCPolicyCounters* ParallelScavengeHeap::_gc_policy_counters = nullptr;
-size_t ParallelScavengeHeap::_desired_page_size = 0;
jint ParallelScavengeHeap::initialize() {
const size_t reserved_heap_size = ParallelArguments::heap_reserved_size_bytes();
- assert(_desired_page_size != 0, "Should be initialized");
- ReservedHeapSpace heap_rs = Universe::reserve_heap(reserved_heap_size, HeapAlignment, _desired_page_size);
- // Adjust SpaceAlignment based on actually used large page size.
- if (UseLargePages) {
- SpaceAlignment = MAX2(heap_rs.page_size(), default_space_alignment());
- }
- assert(is_aligned(SpaceAlignment, heap_rs.page_size()), "inv");
+ ReservedHeapSpace heap_rs = Universe::reserve_heap(reserved_heap_size, HeapAlignment);
trace_actual_reserved_page_size(reserved_heap_size, heap_rs);
@@ -86,6 +79,7 @@ jint ParallelScavengeHeap::initialize() {
card_table->initialize(old_rs.base(), young_rs.base());
CardTableBarrierSet* const barrier_set = new CardTableBarrierSet(card_table);
+ barrier_set->initialize();
BarrierSet::set_barrier_set(barrier_set);
// Set up WorkerThreads
diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
index 84732a86880d5..bf777bda29e04 100644
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
@@ -76,9 +76,6 @@ class ParallelScavengeHeap : public CollectedHeap {
static PSAdaptiveSizePolicy* _size_policy;
static GCPolicyCounters* _gc_policy_counters;
- // At startup, calculate the desired OS page-size based on heap size and large-page flags.
- static size_t _desired_page_size;
-
GCMemoryManager* _young_manager;
GCMemoryManager* _old_manager;
@@ -131,18 +128,6 @@ class ParallelScavengeHeap : public CollectedHeap {
_gc_overhead_counter(0),
_is_heap_almost_full(false) {}
- // The alignment used for spaces in young gen and old gen
- constexpr static size_t default_space_alignment() {
- constexpr size_t alignment = 64 * K * HeapWordSize;
- static_assert(is_power_of_2(alignment), "inv");
- return alignment;
- }
-
- static void set_desired_page_size(size_t page_size) {
- assert(is_power_of_2(page_size), "precondition");
- _desired_page_size = page_size;
- }
-
Name kind() const override {
return CollectedHeap::Parallel;
}
diff --git a/src/hotspot/share/gc/parallel/psCompactionManager.hpp b/src/hotspot/share/gc/parallel/psCompactionManager.hpp
index 0a404579da832..b013238a9f8cd 100644
--- a/src/hotspot/share/gc/parallel/psCompactionManager.hpp
+++ b/src/hotspot/share/gc/parallel/psCompactionManager.hpp
@@ -120,6 +120,7 @@ class ParCompactionManager : public CHeapObj {
static RegionTaskQueueSet* region_task_queues() { return _region_task_queues; }
inline PSMarkTaskQueue* marking_stack() { return &_marking_stack; }
+ inline void push(PartialArrayState* stat);
void push_objArray(oop obj);
// To collect per-region live-words in a worker local cache in order to
@@ -188,6 +189,7 @@ class ParCompactionManager : public CHeapObj {
ParMarkBitMap* mark_bitmap() { return _mark_bitmap; }
// Save for later processing. Must not fail.
+ inline void push(oop obj);
inline void push_region(size_t index);
// Check mark and maybe push on marking stack.
diff --git a/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp b/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp
index 663cd83be9c75..2c0b8480726ab 100644
--- a/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp
+++ b/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp
@@ -56,6 +56,14 @@ inline bool ParCompactionManager::steal(int queue_num, size_t& region) {
return region_task_queues()->steal(queue_num, region);
}
+inline void ParCompactionManager::push(oop obj) {
+ marking_stack()->push(ScannerTask(obj));
+}
+
+inline void ParCompactionManager::push(PartialArrayState* stat) {
+ marking_stack()->push(ScannerTask(stat));
+}
+
void ParCompactionManager::push_region(size_t index)
{
#ifdef ASSERT
@@ -70,26 +78,24 @@ void ParCompactionManager::push_region(size_t index)
template
inline void ParCompactionManager::mark_and_push(T* p) {
T heap_oop = RawAccess<>::oop_load(p);
- if (CompressedOops::is_null(heap_oop)) {
- return;
- }
-
- oop obj = CompressedOops::decode_not_null(heap_oop);
- if (!mark_bitmap()->mark_obj(obj)) {
- // Marked by another worker.
- return;
- }
-
- if (StringDedup::is_enabled() &&
- java_lang_String::is_instance(obj) &&
- psStringDedup::is_candidate_from_mark(obj)) {
- _string_dedup_requests.add(obj);
+ if (!CompressedOops::is_null(heap_oop)) {
+ oop obj = CompressedOops::decode_not_null(heap_oop);
+ assert(ParallelScavengeHeap::heap()->is_in(obj), "should be in heap");
+
+ if (mark_bitmap()->mark_obj(obj)) {
+ if (StringDedup::is_enabled() &&
+ java_lang_String::is_instance(obj) &&
+ psStringDedup::is_candidate_from_mark(obj)) {
+ _string_dedup_requests.add(obj);
+ }
+
+ ContinuationGCSupport::transform_stack_chunk(obj);
+
+ assert(_marking_stats_cache != nullptr, "inv");
+ _marking_stats_cache->push(obj, obj->size());
+ push(obj);
+ }
}
-
- ContinuationGCSupport::transform_stack_chunk(obj);
-
- _marking_stats_cache->push(obj, obj->size());
- marking_stack()->push(ScannerTask(obj));
}
inline void ParCompactionManager::FollowStackClosure::do_void() {
diff --git a/src/hotspot/share/gc/parallel/psOldGen.cpp b/src/hotspot/share/gc/parallel/psOldGen.cpp
index 2d4b0698ad0c8..89f22b72b6995 100644
--- a/src/hotspot/share/gc/parallel/psOldGen.cpp
+++ b/src/hotspot/share/gc/parallel/psOldGen.cpp
@@ -96,7 +96,7 @@ void PSOldGen::initialize_work() {
// ObjectSpace stuff
//
- _object_space = new MutableSpace(virtual_space()->page_size());
+ _object_space = new MutableSpace(virtual_space()->alignment());
object_space()->initialize(committed_mr,
SpaceDecorator::Clear,
SpaceDecorator::Mangle,
diff --git a/src/hotspot/share/gc/parallel/psVirtualspace.cpp b/src/hotspot/share/gc/parallel/psVirtualspace.cpp
index f4b24fa51af77..3be90b370d186 100644
--- a/src/hotspot/share/gc/parallel/psVirtualspace.cpp
+++ b/src/hotspot/share/gc/parallel/psVirtualspace.cpp
@@ -29,8 +29,8 @@
#include "utilities/align.hpp"
PSVirtualSpace::PSVirtualSpace(ReservedSpace rs, size_t alignment) :
- _alignment(alignment),
- _page_size(rs.page_size()) {
+ _alignment(alignment)
+{
set_reserved(rs);
set_committed(reserved_low_addr(), reserved_low_addr());
DEBUG_ONLY(verify());
@@ -88,8 +88,7 @@ bool PSVirtualSpace::shrink_by(size_t bytes) {
#ifndef PRODUCT
void PSVirtualSpace::verify() const {
- assert(is_aligned(_page_size, os::vm_page_size()), "bad alignment");
- assert(is_aligned(_alignment, _page_size), "inv");
+ assert(is_aligned(_alignment, os::vm_page_size()), "bad alignment");
assert(is_aligned(reserved_low_addr(), _alignment), "bad reserved_low_addr");
assert(is_aligned(reserved_high_addr(), _alignment), "bad reserved_high_addr");
assert(is_aligned(committed_low_addr(), _alignment), "bad committed_low_addr");
diff --git a/src/hotspot/share/gc/parallel/psVirtualspace.hpp b/src/hotspot/share/gc/parallel/psVirtualspace.hpp
index ca94f4d83b65f..a54a513a11753 100644
--- a/src/hotspot/share/gc/parallel/psVirtualspace.hpp
+++ b/src/hotspot/share/gc/parallel/psVirtualspace.hpp
@@ -41,9 +41,6 @@ class PSVirtualSpace : public CHeapObj {
// ReservedSpace passed to initialize() must be aligned to this value.
const size_t _alignment;
- // OS page size used. If using Transparent Huge Pages, it's the desired large page-size.
- const size_t _page_size;
-
// Reserved area
char* _reserved_low_addr;
char* _reserved_high_addr;
@@ -71,7 +68,6 @@ class PSVirtualSpace : public CHeapObj {
// Accessors (all sizes are bytes).
size_t alignment() const { return _alignment; }
- size_t page_size() const { return _page_size; }
char* reserved_low_addr() const { return _reserved_low_addr; }
char* reserved_high_addr() const { return _reserved_high_addr; }
char* committed_low_addr() const { return _committed_low_addr; }
diff --git a/src/hotspot/share/gc/parallel/psYoungGen.cpp b/src/hotspot/share/gc/parallel/psYoungGen.cpp
index b2cce11398d91..c26fdf4740caa 100644
--- a/src/hotspot/share/gc/parallel/psYoungGen.cpp
+++ b/src/hotspot/share/gc/parallel/psYoungGen.cpp
@@ -83,12 +83,12 @@ void PSYoungGen::initialize_work() {
}
if (UseNUMA) {
- _eden_space = new MutableNUMASpace(virtual_space()->page_size());
+ _eden_space = new MutableNUMASpace(virtual_space()->alignment());
} else {
- _eden_space = new MutableSpace(virtual_space()->page_size());
+ _eden_space = new MutableSpace(virtual_space()->alignment());
}
- _from_space = new MutableSpace(virtual_space()->page_size());
- _to_space = new MutableSpace(virtual_space()->page_size());
+ _from_space = new MutableSpace(virtual_space()->alignment());
+ _to_space = new MutableSpace(virtual_space()->alignment());
// Generation Counters - generation 0, 3 subspaces
_gen_counters = new GenerationCounters("new", 0, 3, min_gen_size(),
diff --git a/src/hotspot/share/gc/parallel/vmStructs_parallelgc.hpp b/src/hotspot/share/gc/parallel/vmStructs_parallelgc.hpp
index f5e7375fca1dd..fa019aa5b4295 100644
--- a/src/hotspot/share/gc/parallel/vmStructs_parallelgc.hpp
+++ b/src/hotspot/share/gc/parallel/vmStructs_parallelgc.hpp
@@ -40,7 +40,6 @@
/* Parallel GC fields */ \
/**********************/ \
nonstatic_field(PSVirtualSpace, _alignment, const size_t) \
- nonstatic_field(PSVirtualSpace, _page_size, const size_t) \
nonstatic_field(PSVirtualSpace, _reserved_low_addr, char*) \
nonstatic_field(PSVirtualSpace, _reserved_high_addr, char*) \
nonstatic_field(PSVirtualSpace, _committed_low_addr, char*) \
diff --git a/src/hotspot/share/gc/serial/defNewGeneration.cpp b/src/hotspot/share/gc/serial/defNewGeneration.cpp
index 413d80bebf4ec..5b7bc7442362e 100644
--- a/src/hotspot/share/gc/serial/defNewGeneration.cpp
+++ b/src/hotspot/share/gc/serial/defNewGeneration.cpp
@@ -225,12 +225,16 @@ DefNewGeneration::DefNewGeneration(ReservedSpace rs,
_promo_failure_drain_in_progress(false),
_string_dedup_requests()
{
+ MemRegion cmr((HeapWord*)_virtual_space.low(),
+ (HeapWord*)_virtual_space.high());
+ SerialHeap* gch = SerialHeap::heap();
+
+ gch->rem_set()->resize_covered_region(cmr);
+
_eden_space = new ContiguousSpace();
_from_space = new ContiguousSpace();
_to_space = new ContiguousSpace();
- init_spaces();
-
// Compute the maximum eden and survivor space sizes. These sizes
// are computed assuming the entire reserved space is committed.
// These values are exported as performance counters.
@@ -252,6 +256,7 @@ DefNewGeneration::DefNewGeneration(ReservedSpace rs,
_to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
_gen_counters);
+ compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
update_counters();
_old_gen = nullptr;
_tenuring_threshold = MaxTenuringThreshold;
@@ -263,51 +268,74 @@ DefNewGeneration::DefNewGeneration(ReservedSpace rs,
_gc_tracer = new DefNewTracer();
}
-void DefNewGeneration::init_spaces() {
- // Using layout: from, to, eden, so only from can be non-empty.
- assert(eden()->is_empty(), "precondition");
- assert(to()->is_empty(), "precondition");
-
- if (!from()->is_empty()) {
- assert((char*) from()->bottom() == _virtual_space.low(), "inv");
- }
+void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
+ bool clear_space,
+ bool mangle_space) {
+ // If the spaces are being cleared (only done at heap initialization
+ // currently), the survivor spaces need not be empty.
+ // Otherwise, no care is taken for used areas in the survivor spaces
+ // so check.
+ assert(clear_space || (to()->is_empty() && from()->is_empty()),
+ "Initialization of the survivor spaces assumes these are empty");
// Compute sizes
- size_t size = _virtual_space.committed_size();
- size_t survivor_size = compute_survivor_size(size, SpaceAlignment);
- assert(survivor_size >= from()->used(), "inv");
- assert(size > 2 * survivor_size, "inv");
- size_t eden_size = size - (2 * survivor_size);
+ uintx size = _virtual_space.committed_size();
+ uintx survivor_size = compute_survivor_size(size, SpaceAlignment);
+ uintx eden_size = size - (2*survivor_size);
+ if (eden_size > max_eden_size()) {
+ // Need to reduce eden_size to satisfy the max constraint. The delta needs
+ // to be 2*SpaceAlignment aligned so that both survivors are properly
+ // aligned.
+ uintx eden_delta = align_up(eden_size - max_eden_size(), 2*SpaceAlignment);
+ eden_size -= eden_delta;
+ survivor_size += eden_delta/2;
+ }
assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
- // layout: from, to, eden
- char* from_start = _virtual_space.low();
- char* to_start = from_start + survivor_size;
- char* eden_start = to_start + survivor_size;
- char* eden_end = eden_start + eden_size;
+ if (eden_size < minimum_eden_size) {
+ // May happen due to 64Kb rounding, if so adjust eden size back up
+ minimum_eden_size = align_up(minimum_eden_size, SpaceAlignment);
+ uintx maximum_survivor_size = (size - minimum_eden_size) / 2;
+ uintx unaligned_survivor_size =
+ align_down(maximum_survivor_size, SpaceAlignment);
+ survivor_size = MAX2(unaligned_survivor_size, SpaceAlignment);
+ eden_size = size - (2*survivor_size);
+ assert(eden_size > 0 && survivor_size <= eden_size, "just checking");
+ assert(eden_size >= minimum_eden_size, "just checking");
+ }
- assert(eden_end == _virtual_space.high(), "just checking");
+ char *eden_start = _virtual_space.low();
+ char *from_start = eden_start + eden_size;
+ char *to_start = from_start + survivor_size;
+ char *to_end = to_start + survivor_size;
+
+ assert(to_end == _virtual_space.high(), "just checking");
+ assert(is_aligned(eden_start, SpaceAlignment), "checking alignment");
assert(is_aligned(from_start, SpaceAlignment), "checking alignment");
assert(is_aligned(to_start, SpaceAlignment), "checking alignment");
- assert(is_aligned(eden_start, SpaceAlignment), "checking alignment");
- assert(is_aligned(eden_end, SpaceAlignment), "checking alignment");
+ MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
- MemRegion toMR ((HeapWord*)to_start, (HeapWord*)eden_start);
- MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)eden_end);
-
- // Reset the spaces for their new regions.
- from()->initialize(fromMR, from()->is_empty());
- to()->initialize(toMR, true);
- eden()->initialize(edenMR, true);
+ MemRegion toMR ((HeapWord*)to_start, (HeapWord*)to_end);
- post_resize();
-}
+ // A minimum eden size implies that there is a part of eden that
+ // is being used and that affects the initialization of any
+ // newly formed eden.
+ bool live_in_eden = minimum_eden_size > 0;
-void DefNewGeneration::post_resize() {
- MemRegion cmr((HeapWord*)_virtual_space.low(),
- (HeapWord*)_virtual_space.high());
- SerialHeap::heap()->rem_set()->resize_covered_region(cmr);
+ // Reset the spaces for their new regions.
+ eden()->initialize(edenMR,
+ clear_space && !live_in_eden,
+ SpaceDecorator::Mangle);
+ // If clear_space and live_in_eden, we will not have cleared any
+ // portion of eden above its top. This can cause newly
+ // expanded space not to be mangled if using ZapUnusedHeapArea.
+ // We explicitly do such mangling here.
+ if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
+ eden()->mangle_unused_area();
+ }
+ from()->initialize(fromMR, clear_space, mangle_space);
+ to()->initialize(toMR, clear_space, mangle_space);
}
void DefNewGeneration::swap_spaces() {
@@ -323,28 +351,20 @@ void DefNewGeneration::swap_spaces() {
}
bool DefNewGeneration::expand(size_t bytes) {
- assert(bytes != 0, "precondition");
- assert(is_aligned(bytes, SpaceAlignment), "precondition");
-
+ HeapWord* prev_high = (HeapWord*) _virtual_space.high();
bool success = _virtual_space.expand_by(bytes);
- if (!success) {
- log_info(gc)("Failed to expand young-gen by %zu bytes", bytes);
+ if (success && ZapUnusedHeapArea) {
+ // Mangle newly committed space immediately because it
+ // can be done here more simply that after the new
+ // spaces have been computed.
+ HeapWord* new_high = (HeapWord*) _virtual_space.high();
+ MemRegion mangle_region(prev_high, new_high);
+ SpaceMangler::mangle_region(mangle_region);
}
return success;
}
-void DefNewGeneration::expand_eden_by(size_t delta_bytes) {
- if (!expand(delta_bytes)) {
- return;
- }
-
- MemRegion eden_mr{eden()->bottom(), (HeapWord*)_virtual_space.high()};
- eden()->initialize(eden_mr, eden()->is_empty());
-
- post_resize();
-}
-
size_t DefNewGeneration::calculate_thread_increase_size(int threads_count) const {
size_t thread_increase_size = 0;
// Check an overflow at 'threads_count * NewSizeThreadIncrease'.
@@ -377,8 +397,18 @@ size_t DefNewGeneration::adjust_for_thread_increase(size_t new_size_candidate,
return desired_new_size;
}
-size_t DefNewGeneration::calculate_desired_young_gen_bytes() const {
- size_t old_size = SerialHeap::heap()->old_gen()->capacity();
+void DefNewGeneration::compute_new_size() {
+ // This is called after a GC that includes the old generation, so from-space
+ // will normally be empty.
+ // Note that we check both spaces, since if scavenge failed they revert roles.
+ // If not we bail out (otherwise we would have to relocate the objects).
+ if (!from()->is_empty() || !to()->is_empty()) {
+ return;
+ }
+
+ SerialHeap* gch = SerialHeap::heap();
+
+ size_t old_size = gch->old_gen()->capacity();
size_t new_size_before = _virtual_space.committed_size();
size_t min_new_size = NewSize;
size_t max_new_size = reserved().byte_size();
@@ -399,82 +429,46 @@ size_t DefNewGeneration::calculate_desired_young_gen_bytes() const {
// Adjust new generation size
desired_new_size = clamp(desired_new_size, min_new_size, max_new_size);
- if (!from()->is_empty()) {
- // Mininum constraint to hold all live objs inside from-space.
- size_t min_survivor_size = align_up(from()->used(), alignment);
-
- // SurvivorRatio := eden_size / survivor_size
- // young-gen-size = eden_size + 2 * survivor_size
- // = SurvivorRatio * survivor_size + 2 * survivor_size
- // = (SurvivorRatio + 2) * survivor_size
- size_t min_young_gen_size = min_survivor_size * (SurvivorRatio + 2);
-
- desired_new_size = MAX2(min_young_gen_size, desired_new_size);
- }
- assert(is_aligned(desired_new_size, alignment), "postcondition");
-
- return desired_new_size;
-}
-
-void DefNewGeneration::resize_inner() {
- assert(eden()->is_empty(), "precondition");
- assert(to()->is_empty(), "precondition");
-
- size_t current_young_gen_size_bytes = _virtual_space.committed_size();
- size_t desired_young_gen_size_bytes = calculate_desired_young_gen_bytes();
- if (current_young_gen_size_bytes == desired_young_gen_size_bytes) {
- return;
- }
-
- // Commit/uncommit
- if (desired_young_gen_size_bytes > current_young_gen_size_bytes) {
- size_t delta_bytes = desired_young_gen_size_bytes - current_young_gen_size_bytes;
- if (!expand(delta_bytes)) {
- return;
+ assert(desired_new_size <= max_new_size, "just checking");
+
+ bool changed = false;
+ if (desired_new_size > new_size_before) {
+ size_t change = desired_new_size - new_size_before;
+ assert(change % alignment == 0, "just checking");
+ if (expand(change)) {
+ changed = true;
}
- } else {
- size_t delta_bytes = current_young_gen_size_bytes - desired_young_gen_size_bytes;
- _virtual_space.shrink_by(delta_bytes);
- }
-
- assert(desired_young_gen_size_bytes == _virtual_space.committed_size(), "inv");
-
- init_spaces();
-
- log_debug(gc, ergo, heap)("New generation size %zuK->%zuK [eden=%zuK,survivor=%zuK]",
- current_young_gen_size_bytes/K, _virtual_space.committed_size()/K,
- eden()->capacity()/K, from()->capacity()/K);
-}
-
-void DefNewGeneration::resize_after_young_gc() {
- // Called only after successful young-gc.
- assert(eden()->is_empty(), "precondition");
- assert(to()->is_empty(), "precondition");
-
- if ((char*)to()->bottom() == _virtual_space.low()) {
- // layout: to, from, eden; can't resize.
- return;
- }
-
- assert((char*)from()->bottom() == _virtual_space.low(), "inv");
- resize_inner();
-}
-
-void DefNewGeneration::resize_after_full_gc() {
- if (eden()->is_empty() && from()->is_empty() && to()->is_empty()) {
- resize_inner();
- return;
- }
-
- // Usually the young-gen is empty after full-gc.
- // This is the extreme case; expand young-gen to its max size.
- if (_virtual_space.uncommitted_size() == 0) {
- // Already at its max size.
- return;
- }
-
- // Keep from/to and expand eden.
- expand_eden_by(_virtual_space.uncommitted_size());
+ // If the heap failed to expand to the desired size,
+ // "changed" will be false. If the expansion failed
+ // (and at this point it was expected to succeed),
+ // ignore the failure (leaving "changed" as false).
+ }
+ if (desired_new_size < new_size_before && eden()->is_empty()) {
+ // bail out of shrinking if objects in eden
+ size_t change = new_size_before - desired_new_size;
+ assert(change % alignment == 0, "just checking");
+ _virtual_space.shrink_by(change);
+ changed = true;
+ }
+ if (changed) {
+ // The spaces have already been mangled at this point but
+ // may not have been cleared (set top = bottom) and should be.
+ // Mangling was done when the heap was being expanded.
+ compute_space_boundaries(eden()->used(),
+ SpaceDecorator::Clear,
+ SpaceDecorator::DontMangle);
+ MemRegion cmr((HeapWord*)_virtual_space.low(),
+ (HeapWord*)_virtual_space.high());
+ gch->rem_set()->resize_covered_region(cmr);
+
+ log_debug(gc, ergo, heap)(
+ "New generation size %zuK->%zuK [eden=%zuK,survivor=%zuK]",
+ new_size_before/K, _virtual_space.committed_size()/K,
+ eden()->capacity()/K, from()->capacity()/K);
+ log_trace(gc, ergo, heap)(
+ " [allowed %zuK extra for %d threads]",
+ thread_increase_size/K, threads_count);
+ }
}
void DefNewGeneration::ref_processor_init() {
@@ -489,11 +483,13 @@ size_t DefNewGeneration::capacity() const {
+ from()->capacity(); // to() is only used during scavenge
}
+
size_t DefNewGeneration::used() const {
return eden()->used()
+ from()->used(); // to() is only used during scavenge
}
+
size_t DefNewGeneration::free() const {
return eden()->free()
+ from()->free(); // to() is only used during scavenge
@@ -501,8 +497,7 @@ size_t DefNewGeneration::free() const {
size_t DefNewGeneration::max_capacity() const {
const size_t reserved_bytes = reserved().byte_size();
- const size_t min_survivor_bytes = SpaceAlignment;
- return reserved_bytes - min_survivor_bytes;
+ return reserved_bytes - compute_survivor_size(reserved_bytes, SpaceAlignment);
}
bool DefNewGeneration::is_in(const void* p) const {
@@ -594,6 +589,7 @@ bool DefNewGeneration::collect(bool clear_all_soft_refs) {
IsAliveClosure is_alive(this);
age_table()->clear();
+ to()->clear(SpaceDecorator::Mangle);
YoungGenScanClosure young_gen_cl(this);
OldGenScanClosure old_gen_cl(this);
@@ -843,18 +839,13 @@ void DefNewGeneration::print_on(outputStream* st) const {
to()->print_on(st, "to ");
}
-HeapWord* DefNewGeneration::expand_and_allocate(size_t word_size) {
- assert(SafepointSynchronize::is_at_safepoint(), "precondition");
- assert(Thread::current()->is_VM_thread(), "precondition");
-
- size_t eden_free_bytes = eden()->free();
- size_t requested_bytes = word_size * HeapWordSize;
- if (eden_free_bytes < requested_bytes) {
- size_t expand_bytes = requested_bytes - eden_free_bytes;
- expand_eden_by(align_up(expand_bytes, SpaceAlignment));
- }
-
- HeapWord* result = eden()->allocate(word_size);
+HeapWord* DefNewGeneration::allocate(size_t word_size) {
+ // This is the slow-path allocation for the DefNewGeneration.
+ // Most allocations are fast-path in compiled code.
+ // We try to allocate from the eden. If that works, we are happy.
+ // Note that since DefNewGeneration supports lock-free allocation, we
+ // have to use it here, as well.
+ HeapWord* result = eden()->par_allocate(word_size);
return result;
}
diff --git a/src/hotspot/share/gc/serial/defNewGeneration.hpp b/src/hotspot/share/gc/serial/defNewGeneration.hpp
index 40d2116cb5847..32b6b32f42fa2 100644
--- a/src/hotspot/share/gc/serial/defNewGeneration.hpp
+++ b/src/hotspot/share/gc/serial/defNewGeneration.hpp
@@ -131,13 +131,6 @@ class DefNewGeneration: public Generation {
return n > alignment ? align_down(n, alignment) : alignment;
}
- size_t calculate_desired_young_gen_bytes() const;
-
- void expand_eden_by(size_t delta_bytes);
-
- void resize_inner();
- void post_resize();
-
public:
DefNewGeneration(ReservedSpace rs,
size_t initial_byte_size,
@@ -190,8 +183,9 @@ class DefNewGeneration: public Generation {
HeapWord* block_start(const void* p) const;
+ // Allocate requested size or return null; single-threaded and lock-free versions.
+ HeapWord* allocate(size_t word_size);
HeapWord* par_allocate(size_t word_size);
- HeapWord* expand_and_allocate(size_t word_size);
void gc_epilogue();
@@ -202,8 +196,8 @@ class DefNewGeneration: public Generation {
// Reset for contribution of "to-space".
void reset_scratch();
- void resize_after_young_gc();
- void resize_after_full_gc();
+ // GC support
+ void compute_new_size();
bool collect(bool clear_all_soft_refs);
@@ -226,9 +220,13 @@ class DefNewGeneration: public Generation {
DefNewTracer* gc_tracer() const { return _gc_tracer; }
- private:
- // Initialize eden/from/to spaces.
- void init_spaces();
+ protected:
+ // If clear_space is true, clear the survivor spaces. Eden is
+ // cleared if the minimum size of eden is 0. If mangle_space
+ // is true, also mangle the space in debug mode.
+ void compute_space_boundaries(uintx minimum_eden_size,
+ bool clear_space,
+ bool mangle_space);
// Return adjusted new size for NewSizeThreadIncrease.
// If any overflow happens, revert to previous new size.
diff --git a/src/hotspot/share/gc/serial/serialHeap.cpp b/src/hotspot/share/gc/serial/serialHeap.cpp
index 8022b317ca64c..3ab88da463387 100644
--- a/src/hotspot/share/gc/serial/serialHeap.cpp
+++ b/src/hotspot/share/gc/serial/serialHeap.cpp
@@ -182,6 +182,7 @@ jint SerialHeap::initialize() {
_rem_set->initialize(young_rs.base(), old_rs.base());
CardTableBarrierSet *bs = new CardTableBarrierSet(_rem_set);
+ bs->initialize();
BarrierSet::set_barrier_set(bs);
_young_gen = new DefNewGeneration(young_rs, NewSize, MinNewSize, MaxNewSize);
@@ -268,9 +269,9 @@ size_t SerialHeap::max_capacity() const {
}
HeapWord* SerialHeap::expand_heap_and_allocate(size_t size, bool is_tlab) {
- HeapWord* result = _young_gen->expand_and_allocate(size);
+ HeapWord* result = _young_gen->allocate(size);
- if (result == nullptr && !is_tlab) {
+ if (result == nullptr) {
result = _old_gen->expand_and_allocate(size);
}
@@ -387,13 +388,14 @@ bool SerialHeap::do_young_collection(bool clear_soft_refs) {
// Only update stats for successful young-gc
if (result) {
_old_gen->update_promote_stats();
- _young_gen->resize_after_young_gc();
}
if (should_verify && VerifyAfterGC) {
Universe::verify("After GC");
}
+ _young_gen->compute_new_size();
+
print_heap_change(pre_gc_values);
// Track memory usage and detect low memory after GC finishes
@@ -579,7 +581,7 @@ void SerialHeap::do_full_collection(bool clear_all_soft_refs) {
// Adjust generation sizes.
_old_gen->compute_new_size();
- _young_gen->resize_after_full_gc();
+ _young_gen->compute_new_size();
_old_gen->update_promote_stats();
diff --git a/src/hotspot/share/gc/serial/serialHeap.hpp b/src/hotspot/share/gc/serial/serialHeap.hpp
index 3915f8c4af98b..27053b4cc81c5 100644
--- a/src/hotspot/share/gc/serial/serialHeap.hpp
+++ b/src/hotspot/share/gc/serial/serialHeap.hpp
@@ -55,10 +55,10 @@ class TenuredGeneration;
// +-- generation boundary (fixed after startup)
// |
// |<- young gen (reserved MaxNewSize) ->|<- old gen (reserved MaxOldSize) ->|
-// +--------+--------+-----------------+--------+---------------+-------------------+
-// | from | to | eden | | old | |
-// | (to) | (from) | | | | |
-// +--------+--------+-----------------+--------+---------------+-------------------+
+// +-----------------+--------+--------+--------+---------------+-------------------+
+// | eden | from | to | | old | |
+// | | (to) | (from) | | | |
+// +-----------------+--------+--------+--------+---------------+-------------------+
// |<- committed ->| |<- committed ->|
//
class SerialHeap : public CollectedHeap {
diff --git a/src/hotspot/share/gc/serial/tenuredGeneration.cpp b/src/hotspot/share/gc/serial/tenuredGeneration.cpp
index f68847ed1a6b9..a28a8c8e1cb92 100644
--- a/src/hotspot/share/gc/serial/tenuredGeneration.cpp
+++ b/src/hotspot/share/gc/serial/tenuredGeneration.cpp
@@ -314,7 +314,7 @@ TenuredGeneration::TenuredGeneration(ReservedSpace rs,
HeapWord* bottom = (HeapWord*) _virtual_space.low();
HeapWord* end = (HeapWord*) _virtual_space.high();
_the_space = new ContiguousSpace();
- _the_space->initialize(MemRegion(bottom, end), SpaceDecorator::Clear);
+ _the_space->initialize(MemRegion(bottom, end), SpaceDecorator::Clear, SpaceDecorator::Mangle);
// If we don't shrink the heap in steps, '_shrink_factor' is always 100%.
_shrink_factor = ShrinkHeapInSteps ? 0 : 100;
_capacity_at_prologue = 0;
diff --git a/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp b/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp
index c4eefee5f6571..e12e7b56e2328 100644
--- a/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp
+++ b/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp
@@ -115,7 +115,7 @@ uint8_t BarrierStubC2::barrier_data() const {
void BarrierStubC2::preserve(Register r) {
const VMReg vm_reg = r->as_VMReg();
assert(vm_reg->is_Register(), "r must be a general-purpose register");
- _preserve.insert(OptoReg::as_OptoReg(vm_reg));
+ _preserve.Insert(OptoReg::as_OptoReg(vm_reg));
}
void BarrierStubC2::dont_preserve(Register r) {
@@ -124,7 +124,7 @@ void BarrierStubC2::dont_preserve(Register r) {
// Subtract the given register and all its sub-registers (e.g. {R11, R11_H}
// for r11 in aarch64).
do {
- _preserve.remove(OptoReg::as_OptoReg(vm_reg));
+ _preserve.Remove(OptoReg::as_OptoReg(vm_reg));
vm_reg = vm_reg->next();
} while (vm_reg->is_Register() && !vm_reg->is_concrete());
}
@@ -1171,7 +1171,7 @@ void BarrierSetC2::compute_liveness_at_stubs() const {
// Initialize to union of successors
for (uint i = 0; i < block->_num_succs; i++) {
const uint succ_id = block->_succs[i]->_pre_order;
- new_live.or_with(live[succ_id]);
+ new_live.OR(live[succ_id]);
}
// Walk block backwards, computing liveness
@@ -1182,7 +1182,7 @@ void BarrierSetC2::compute_liveness_at_stubs() const {
if (!bs_state->needs_livein_data()) {
RegMask* const regs = bs_state->live(node);
if (regs != nullptr) {
- regs->or_with(new_live);
+ regs->OR(new_live);
}
}
@@ -1190,10 +1190,10 @@ void BarrierSetC2::compute_liveness_at_stubs() const {
const OptoReg::Name first = bs->refine_register(node, regalloc->get_reg_first(node));
const OptoReg::Name second = bs->refine_register(node, regalloc->get_reg_second(node));
if (first != OptoReg::Bad) {
- new_live.remove(first);
+ new_live.Remove(first);
}
if (second != OptoReg::Bad) {
- new_live.remove(second);
+ new_live.Remove(second);
}
// Add use bits
@@ -1202,10 +1202,10 @@ void BarrierSetC2::compute_liveness_at_stubs() const {
const OptoReg::Name first = bs->refine_register(use, regalloc->get_reg_first(use));
const OptoReg::Name second = bs->refine_register(use, regalloc->get_reg_second(use));
if (first != OptoReg::Bad) {
- new_live.insert(first);
+ new_live.Insert(first);
}
if (second != OptoReg::Bad) {
- new_live.insert(second);
+ new_live.Insert(second);
}
}
@@ -1213,16 +1213,16 @@ void BarrierSetC2::compute_liveness_at_stubs() const {
if (bs_state->needs_livein_data()) {
RegMask* const regs = bs_state->live(node);
if (regs != nullptr) {
- regs->or_with(new_live);
+ regs->OR(new_live);
}
}
}
// Now at block top, see if we have any changes
- new_live.subtract(old_live);
- if (!new_live.is_empty()) {
+ new_live.SUBTRACT(old_live);
+ if (!new_live.is_Empty()) {
// Liveness has refined, update and propagate to prior blocks
- old_live.or_with(new_live);
+ old_live.OR(new_live);
for (uint i = 1; i < block->num_preds(); ++i) {
Block* const pred = cfg->get_block_for_node(block->pred(i));
worklist.push(pred);
diff --git a/src/hotspot/share/gc/shared/cardTableBarrierSet.cpp b/src/hotspot/share/gc/shared/cardTableBarrierSet.cpp
index de514f64be2b0..dfa00636dec5c 100644
--- a/src/hotspot/share/gc/shared/cardTableBarrierSet.cpp
+++ b/src/hotspot/share/gc/shared/cardTableBarrierSet.cpp
@@ -57,6 +57,7 @@ CardTableBarrierSet::CardTableBarrierSet(BarrierSetAssembler* barrier_set_assemb
barrier_set_c1,
barrier_set_c2,
fake_rtti.add_tag(BarrierSet::CardTableBarrierSet)),
+ _defer_initial_card_mark(false),
_card_table(card_table)
{}
@@ -65,9 +66,14 @@ CardTableBarrierSet::CardTableBarrierSet(CardTable* card_table) :
make_barrier_set_c1(),
make_barrier_set_c2(),
BarrierSet::FakeRtti(BarrierSet::CardTableBarrierSet)),
+ _defer_initial_card_mark(false),
_card_table(card_table)
{}
+void CardTableBarrierSet::initialize() {
+ initialize_deferred_card_mark_barriers();
+}
+
CardTableBarrierSet::~CardTableBarrierSet() {
delete _card_table;
}
@@ -102,7 +108,9 @@ void CardTableBarrierSet::print_on(outputStream* st) const {
// to the post-barrier, we note that G1 needs a RS update barrier
// which simply enqueues a (sequence of) dirty cards which may
// optionally be refined by the concurrent update threads. Note
-// that this barrier need only be applied to a non-young write.
+// that this barrier need only be applied to a non-young write,
+// but, because of the presence of concurrent refinement,
+// must strictly follow the oop-store.
//
// For any future collector, this code should be reexamined with
// that specific collector in mind, and the documentation above suitably
@@ -112,13 +120,72 @@ void CardTableBarrierSet::on_slowpath_allocation_exit(JavaThread* thread, oop ne
if (!ReduceInitialCardMarks) {
return;
}
+ // If a previous card-mark was deferred, flush it now.
+ flush_deferred_card_mark_barrier(thread);
if (new_obj->is_typeArray() || _card_table->is_in_young(new_obj)) {
// Arrays of non-references don't need a post-barrier.
+ // The deferred_card_mark region should be empty
+ // following the flush above.
+ assert(thread->deferred_card_mark().is_empty(), "Error");
} else {
MemRegion mr(cast_from_oop(new_obj), new_obj->size());
assert(!mr.is_empty(), "Error");
- // Do the card mark
- write_region(mr);
+ if (_defer_initial_card_mark) {
+ // Defer the card mark
+ thread->set_deferred_card_mark(mr);
+ } else {
+ // Do the card mark
+ write_region(mr);
+ }
}
#endif // COMPILER2_OR_JVMCI
}
+
+void CardTableBarrierSet::initialize_deferred_card_mark_barriers() {
+ // Used for ReduceInitialCardMarks (when COMPILER2 or JVMCI is used);
+ // otherwise remains unused.
+#if COMPILER2_OR_JVMCI
+ _defer_initial_card_mark = CompilerConfig::is_c2_or_jvmci_compiler_enabled() && ReduceInitialCardMarks
+ && (DeferInitialCardMark || card_mark_must_follow_store());
+#else
+ assert(_defer_initial_card_mark == false, "Who would set it?");
+#endif
+}
+
+void CardTableBarrierSet::flush_deferred_card_mark_barrier(JavaThread* thread) {
+#if COMPILER2_OR_JVMCI
+ MemRegion deferred = thread->deferred_card_mark();
+ if (!deferred.is_empty()) {
+ assert(_defer_initial_card_mark, "Otherwise should be empty");
+ {
+ // Verify that the storage points to a parsable object in heap
+ DEBUG_ONLY(oop old_obj = cast_to_oop(deferred.start());)
+ assert(!_card_table->is_in_young(old_obj),
+ "Else should have been filtered in on_slowpath_allocation_exit()");
+ assert(oopDesc::is_oop(old_obj), "Not an oop");
+ assert(deferred.word_size() == old_obj->size(),
+ "Mismatch: multiple objects?");
+ }
+ write_region(thread, deferred);
+ // "Clear" the deferred_card_mark field
+ thread->set_deferred_card_mark(MemRegion());
+ }
+ assert(thread->deferred_card_mark().is_empty(), "invariant");
+#else
+ assert(!_defer_initial_card_mark, "Should be false");
+ assert(thread->deferred_card_mark().is_empty(), "Should be empty");
+#endif
+}
+
+void CardTableBarrierSet::on_thread_detach(Thread* thread) {
+ // The deferred store barriers must all have been flushed to the
+ // card-table (or other remembered set structure) before GC starts
+ // processing the card-table (or other remembered set).
+ if (thread->is_Java_thread()) { // Only relevant for Java threads.
+ flush_deferred_card_mark_barrier(JavaThread::cast(thread));
+ }
+}
+
+bool CardTableBarrierSet::card_mark_must_follow_store() const {
+ return false;
+}
diff --git a/src/hotspot/share/gc/shared/cardTableBarrierSet.hpp b/src/hotspot/share/gc/shared/cardTableBarrierSet.hpp
index a5646c303f38a..13f3e0783a668 100644
--- a/src/hotspot/share/gc/shared/cardTableBarrierSet.hpp
+++ b/src/hotspot/share/gc/shared/cardTableBarrierSet.hpp
@@ -47,6 +47,9 @@ class CardTableBarrierSet: public ModRefBarrierSet {
protected:
typedef CardTable::CardValue CardValue;
+ // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2
+ // or INCLUDE_JVMCI is being used
+ bool _defer_initial_card_mark;
CardTable* _card_table;
CardTableBarrierSet(BarrierSetAssembler* barrier_set_assembler,
@@ -61,6 +64,13 @@ class CardTableBarrierSet: public ModRefBarrierSet {
CardTable* card_table() const { return _card_table; }
+ void initialize();
+
+ void write_region(JavaThread* thread, MemRegion mr) {
+ write_region(mr);
+ }
+
+ public:
// Record a reference update. Note that these versions are precise!
// The scanning code has to handle the fact that the write barrier may be
// either precise or imprecise. We make non-virtual inline variants of
@@ -70,7 +80,29 @@ class CardTableBarrierSet: public ModRefBarrierSet {
virtual void write_region(MemRegion mr);
+ // ReduceInitialCardMarks
+ void initialize_deferred_card_mark_barriers();
+
+ // If the CollectedHeap was asked to defer a store barrier above,
+ // this informs it to flush such a deferred store barrier to the
+ // remembered set.
+ void flush_deferred_card_mark_barrier(JavaThread* thread);
+
+ // If a compiler is eliding store barriers for TLAB-allocated objects,
+ // we will be informed of a slow-path allocation by a call
+ // to on_slowpath_allocation_exit() below. Such a call precedes the
+ // initialization of the object itself, and no post-store-barriers will
+ // be issued. Some heap types require that the barrier strictly follows
+ // the initializing stores. (This is currently implemented by deferring the
+ // barrier until the next slow-path allocation or gc-related safepoint.)
+ // This interface answers whether a particular barrier type needs the card
+ // mark to be thus strictly sequenced after the stores.
+ virtual bool card_mark_must_follow_store() const;
+
virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj);
+ virtual void on_thread_detach(Thread* thread);
+
+ virtual void make_parsable(JavaThread* thread) { flush_deferred_card_mark_barrier(thread); }
virtual void print_on(outputStream* st) const;
diff --git a/src/hotspot/share/gc/shared/collectedHeap.cpp b/src/hotspot/share/gc/shared/collectedHeap.cpp
index ea20b8ce4b2ac..8ef73c6a0b825 100644
--- a/src/hotspot/share/gc/shared/collectedHeap.cpp
+++ b/src/hotspot/share/gc/shared/collectedHeap.cpp
@@ -623,8 +623,6 @@ void CollectedHeap::stall_for_vm_shutdown() {
// - short enough to avoid excessive stall time if the shutdown itself
// triggers a GC.
JavaThread::current()->sleep(2 * MILLIUNITS);
-
- ResourceMark rm;
log_warning(gc, alloc)("%s: Stall for VM-Shutdown timed out; allocation may fail with OOME", Thread::current()->name());
}
diff --git a/src/hotspot/share/gc/shared/gc_globals.hpp b/src/hotspot/share/gc/shared/gc_globals.hpp
index 956bffde15625..0b245026d68aa 100644
--- a/src/hotspot/share/gc/shared/gc_globals.hpp
+++ b/src/hotspot/share/gc/shared/gc_globals.hpp
@@ -418,6 +418,10 @@
"dictionary, classloader_data_graph, metaspace, jni_handles, " \
"codecache_oops, resolved_method_table, stringdedup") \
\
+ product(bool, DeferInitialCardMark, false, DIAGNOSTIC, \
+ "When +ReduceInitialCardMarks, explicitly defer any that " \
+ "may arise from new_pre_store_barrier") \
+ \
product(bool, UseCondCardMark, false, \
"Check for already marked card before updating card table") \
\
diff --git a/src/hotspot/share/gc/shared/modRefBarrierSet.hpp b/src/hotspot/share/gc/shared/modRefBarrierSet.hpp
index c078d151233d1..15ac797111835 100644
--- a/src/hotspot/share/gc/shared/modRefBarrierSet.hpp
+++ b/src/hotspot/share/gc/shared/modRefBarrierSet.hpp
@@ -53,6 +53,8 @@ class ModRefBarrierSet: public BarrierSet {
// Causes all refs in "mr" to be assumed to be modified (by this JavaThread).
virtual void write_region(MemRegion mr) = 0;
+ // Causes all refs in "mr" to be assumed to be modified by the given JavaThread.
+ virtual void write_region(JavaThread* thread, MemRegion mr) = 0;
// Operations on arrays, or general regions (e.g., for "clone") may be
// optimized by some barriers.
diff --git a/src/hotspot/share/gc/shared/space.cpp b/src/hotspot/share/gc/shared/space.cpp
index 011a0f5cfd83c..08476cb2a3ad6 100644
--- a/src/hotspot/share/gc/shared/space.cpp
+++ b/src/hotspot/share/gc/shared/space.cpp
@@ -44,7 +44,8 @@ ContiguousSpace::ContiguousSpace():
_top(nullptr) {}
void ContiguousSpace::initialize(MemRegion mr,
- bool clear_space) {
+ bool clear_space,
+ bool mangle_space) {
HeapWord* bottom = mr.start();
HeapWord* end = mr.end();
assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
@@ -52,10 +53,7 @@ void ContiguousSpace::initialize(MemRegion mr,
set_bottom(bottom);
set_end(end);
if (clear_space) {
- clear(SpaceDecorator::DontMangle);
- }
- if (ZapUnusedHeapArea) {
- mangle_unused_area();
+ clear(mangle_space);
}
}
diff --git a/src/hotspot/share/gc/shared/space.hpp b/src/hotspot/share/gc/shared/space.hpp
index 7f2887275b3c8..75dd3f998d63e 100644
--- a/src/hotspot/share/gc/shared/space.hpp
+++ b/src/hotspot/share/gc/shared/space.hpp
@@ -101,7 +101,7 @@ class ContiguousSpace: public CHeapObj {
// any purpose. The "mr" arguments gives the bounds of the space, and
// the "clear_space" argument should be true unless the memory in "mr" is
// known to be zeroed.
- void initialize(MemRegion mr, bool clear_space);
+ void initialize(MemRegion mr, bool clear_space, bool mangle_space);
// The "clear" method must be called on a region that may have
// had allocation performed in it, but is now to be considered empty.
diff --git a/src/hotspot/share/gc/shared/vmStructs_gc.hpp b/src/hotspot/share/gc/shared/vmStructs_gc.hpp
index 9d84a56fbd748..bba9c9e099fc9 100644
--- a/src/hotspot/share/gc/shared/vmStructs_gc.hpp
+++ b/src/hotspot/share/gc/shared/vmStructs_gc.hpp
@@ -88,6 +88,7 @@
nonstatic_field(CardTable, _byte_map_size, const size_t) \
nonstatic_field(CardTable, _byte_map, CardTable::CardValue*) \
nonstatic_field(CardTable, _byte_map_base, CardTable::CardValue*) \
+ nonstatic_field(CardTableBarrierSet, _defer_initial_card_mark, bool) \
nonstatic_field(CardTableBarrierSet, _card_table, CardTable*) \
\
static_field(CollectedHeap, _lab_alignment_reserve, size_t) \
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp
index 66bfc3375a311..014a4d9913119 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp
@@ -32,62 +32,25 @@
#include "memory/allocation.hpp"
#include "utilities/numberSeq.hpp"
-/**
- * ShenanoahAllocationRate maintains a truncated history of recently sampled allocation rates for the purpose of providing
- * informed estimates of current and future allocation rates based on weighted averages and standard deviations of the
- * truncated history. More recently sampled allocations are weighted more heavily than older samples when computing
- * averages and standard deviations.
- */
class ShenandoahAllocationRate : public CHeapObj {
public:
explicit ShenandoahAllocationRate();
-
- // Reset the _last_sample_value to zero, _last_sample_time to current time.
void allocation_counter_reset();
- // Force an allocation rate sample to be taken, even if the time since last sample is not greater than
- // 1s/ShenandoahAdaptiveSampleFrequencyHz, except when current_time - _last_sample_time < MinSampleTime (2 ms).
- // The sampled allocation rate is computed from (allocated - _last_sample_value) / (current_time - _last_sample_time).
- // Return the newly computed rate if the sample is taken, zero if it is not an appropriate time to add a sample.
- // In the case that a new sample is not taken, overwrite unaccounted_bytes_allocated with bytes allocated since
- // the previous sample was taken (allocated - _last_sample_value). Otherwise, overwrite unaccounted_bytes_allocated
- // with 0.
double force_sample(size_t allocated, size_t &unaccounted_bytes_allocated);
-
- // Add an allocation rate sample if the time since last sample is greater than 1s/ShenandoahAdaptiveSampleFrequencyHz.
- // The sampled allocation rate is computed from (allocated - _last_sample_value) / (current_time - _last_sample_time).
- // Return the newly computed rate if the sample is taken, zero if it is not an appropriate time to add a sample.
double sample(size_t allocated);
- // Return an estimate of the upper bound on allocation rate, with the upper bound computed as the weighted average
- // of recently sampled instantaneous allocation rates added to sds times the standard deviation computed for the
- // sequence of recently sampled average allocation rates.
double upper_bound(double sds) const;
-
- // Test whether rate significantly diverges from the computed average allocation rate. If so, return true.
- // Otherwise, return false. Significant divergence is recognized if (rate - _rate.avg()) / _rate.sd() > threshold.
bool is_spiking(double rate, double threshold) const;
private:
- // Return the instantaneous rate calculated from (allocated - _last_sample_value) / (time - _last_sample_time).
- // Return Sentinel value 0.0 if (time - _last_sample_time) == 0 or if (allocated <= _last_sample_value).
double instantaneous_rate(double time, size_t allocated) const;
- // Time at which previous allocation rate sample was collected.
double _last_sample_time;
-
- // Bytes allocated as of the time at which previous allocation rate sample was collected.
size_t _last_sample_value;
-
- // The desired interval of time between consecutive samples of the allocation rate.
double _interval_sec;
-
- // Holds a sequence of the most recently sampled instantaneous allocation rates
TruncatedSeq _rate;
-
- // Holds a sequence of the most recently computed weighted average of allocation rates, with each weighted average
- // computed immediately after an instantaneous rate was sampled
TruncatedSeq _rate_avg;
};
@@ -191,8 +154,6 @@ class ShenandoahAdaptiveHeuristics : public ShenandoahHeuristics {
}
public:
- // Sample the allocation rate at GC trigger time if possible. Return the number of allocated bytes that were
- // not accounted for in the sample. This must be called before resetting bytes allocated since gc start.
virtual size_t force_alloc_rate_sample(size_t bytes_allocated) override {
size_t unaccounted_bytes;
_allocation_rate.force_sample(bytes_allocated, unaccounted_bytes);
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp
index a2cccec842373..c7067b2e5abf7 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp
@@ -127,7 +127,7 @@ void ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectio
// Reclaim humongous regions here, and count them as the immediate garbage
#ifdef ASSERT
bool reg_live = region->has_live();
- bool bm_live = _generation->complete_marking_context()->is_marked(cast_to_oop(region->bottom()));
+ bool bm_live = heap->active_generation()->complete_marking_context()->is_marked(cast_to_oop(region->bottom()));
assert(reg_live == bm_live,
"Humongous liveness and marks should agree. Region live: %s; Bitmap live: %s; Region Live Words: %zu",
BOOL_TO_STR(reg_live), BOOL_TO_STR(bm_live), region->get_live_data_words());
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.cpp
index 478c569618876..c8a0c3dc51837 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.cpp
@@ -73,10 +73,9 @@ ShenandoahHeuristics::~ShenandoahHeuristics() {
}
void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) {
- ShenandoahHeap* heap = ShenandoahHeap::heap();
-
assert(collection_set->is_empty(), "Must be empty");
- assert(!heap->mode()->is_generational(), "Wrong heuristic for heap mode");
+
+ ShenandoahHeap* heap = ShenandoahHeap::heap();
// Check all pinned regions have updated status before choosing the collection set.
heap->assert_pinned_region_status();
@@ -121,7 +120,7 @@ void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collec
// Reclaim humongous regions here, and count them as the immediate garbage
#ifdef ASSERT
bool reg_live = region->has_live();
- bool bm_live = heap->global_generation()->complete_marking_context()->is_marked(cast_to_oop(region->bottom()));
+ bool bm_live = heap->gc_generation()->complete_marking_context()->is_marked(cast_to_oop(region->bottom()));
assert(reg_live == bm_live,
"Humongous liveness and marks should agree. Region live: %s; Bitmap live: %s; Region Live Words: %zu",
BOOL_TO_STR(reg_live), BOOL_TO_STR(bm_live), region->get_live_data_words());
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp
index baeaffb9c7ba1..3d9fa10b0fc6e 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp
@@ -425,16 +425,6 @@ void ShenandoahAsserts::assert_marked_strong(void *interior_loc, oop obj, const
}
}
-void ShenandoahAsserts::assert_mark_complete(HeapWord* obj, const char* file, int line) {
- const ShenandoahHeap* heap = ShenandoahHeap::heap();
- const ShenandoahHeapRegion* region = heap->heap_region_containing(obj);
- const ShenandoahGeneration* generation = heap->generation_for(region->affiliation());
- if (!generation->is_mark_complete()) {
- ShenandoahMessageBuffer msg("Marking should be complete for object " PTR_FORMAT " in the %s generation", p2i(obj), generation->name());
- report_vm_error(file, line, msg.buffer());
- }
-}
-
void ShenandoahAsserts::assert_in_cset(void* interior_loc, oop obj, const char* file, int line) {
assert_correct(interior_loc, obj, file, line);
@@ -552,6 +542,23 @@ void ShenandoahAsserts::assert_control_or_vm_thread_at_safepoint(bool at_safepoi
report_vm_error(file, line, msg.buffer());
}
+void ShenandoahAsserts::assert_generations_reconciled(const char* file, int line) {
+ if (!ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
+ // Only shenandoah safepoint operations participate in the active/gc generation scheme
+ return;
+ }
+
+ ShenandoahHeap* heap = ShenandoahHeap::heap();
+ ShenandoahGeneration* ggen = heap->gc_generation();
+ ShenandoahGeneration* agen = heap->active_generation();
+ if (agen == ggen) {
+ return;
+ }
+
+ ShenandoahMessageBuffer msg("Active(%s) & GC(%s) Generations aren't reconciled", agen->name(), ggen->name());
+ report_vm_error(file, line, msg.buffer());
+}
+
bool ShenandoahAsserts::extract_klass_safely(oop obj, narrowKlass& nk, const Klass*& k) {
nk = 0;
k = nullptr;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.hpp b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.hpp
index d0fc3e213c8e3..e31ef7c99aae3 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.hpp
@@ -65,9 +65,6 @@ class ShenandoahAsserts {
static void assert_marked(void* interior_loc, oop obj, const char* file, int line);
static void assert_marked_weak(void* interior_loc, oop obj, const char* file, int line);
static void assert_marked_strong(void* interior_loc, oop obj, const char* file, int line);
-
- // Assert that marking is complete for the generation where this obj resides
- static void assert_mark_complete(HeapWord* obj, const char* file, int line);
static void assert_in_cset(void* interior_loc, oop obj, const char* file, int line);
static void assert_not_in_cset(void* interior_loc, oop obj, const char* file, int line);
static void assert_not_in_cset_loc(void* interior_loc, const char* file, int line);
@@ -79,6 +76,7 @@ class ShenandoahAsserts {
static void assert_heaplocked_or_safepoint(const char* file, int line);
static void assert_control_or_vm_thread_at_safepoint(bool at_safepoint, const char* file, int line);
static void assert_generational(const char* file, int line);
+ static void assert_generations_reconciled(const char* file, int line);
// Given a possibly invalid oop, extract narrowKlass (if UCCP) and Klass*
// from it safely.
@@ -135,9 +133,6 @@ class ShenandoahAsserts {
#define shenandoah_assert_marked_strong(interior_loc, obj) \
ShenandoahAsserts::assert_marked_strong(interior_loc, obj, __FILE__, __LINE__)
-#define shenandoah_assert_mark_complete(obj) \
- ShenandoahAsserts::assert_mark_complete(obj, __FILE__, __LINE__)
-
#define shenandoah_assert_in_cset_if(interior_loc, obj, condition) \
if (condition) ShenandoahAsserts::assert_in_cset(interior_loc, obj, __FILE__, __LINE__)
#define shenandoah_assert_in_cset_except(interior_loc, obj, exception) \
@@ -189,6 +184,10 @@ class ShenandoahAsserts {
#define shenandoah_assert_generational() \
ShenandoahAsserts::assert_generational(__FILE__, __LINE__)
+// Some limited sanity checking of the _gc_generation and _active_generation fields of ShenandoahHeap
+#define shenandoah_assert_generations_reconciled() \
+ ShenandoahAsserts::assert_generations_reconciled(__FILE__, __LINE__)
+
#else
#define shenandoah_assert_in_heap_bounds(interior_loc, obj)
#define shenandoah_assert_in_heap_bounds_or_null(interior_loc, obj)
@@ -218,8 +217,6 @@ class ShenandoahAsserts {
#define shenandoah_assert_marked_strong_except(interior_loc, obj, exception)
#define shenandoah_assert_marked_strong(interior_loc, obj)
-#define shenandoah_assert_mark_complete(obj)
-
#define shenandoah_assert_in_cset_if(interior_loc, obj, condition)
#define shenandoah_assert_in_cset_except(interior_loc, obj, exception)
#define shenandoah_assert_in_cset(interior_loc, obj)
@@ -244,6 +241,7 @@ class ShenandoahAsserts {
#define shenandoah_assert_control_or_vm_thread()
#define shenandoah_assert_control_or_vm_thread_at_safepoint()
#define shenandoah_assert_generational()
+#define shenandoah_assert_generations_reconciled()
#endif
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp
index 2aa37d7c575ab..5d19a6a34e31e 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp
@@ -89,20 +89,10 @@ bool ShenandoahBarrierSet::need_keep_alive_barrier(DecoratorSet decorators, Basi
void ShenandoahBarrierSet::on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {
#if COMPILER2_OR_JVMCI
- if (ReduceInitialCardMarks && ShenandoahCardBarrier && !ShenandoahHeap::heap()->is_in_young(new_obj)) {
- log_debug(gc)("Newly allocated object (" PTR_FORMAT ") is not in the young generation", p2i(new_obj));
- // This can happen when an object is newly allocated, but we come to a safepoint before returning
- // the object. If the safepoint runs a degenerated cycle that is upgraded to a full GC, this object
- // will have survived two GC cycles. If the tenuring age is very low (1), this object may be promoted.
- // In this case, we have an allocated object, but it has received no stores yet. If card marking barriers
- // have been elided, we could end up with an object in old holding pointers to young that won't be in
- // the remembered set. The solution here is conservative, but this problem should be rare, and it will
- // correct itself on subsequent cycles when the remembered set is updated.
- ShenandoahGenerationalHeap::heap()->old_generation()->card_scan()->mark_range_as_dirty(
- cast_from_oop(new_obj), new_obj->size()
- );
- }
+ assert(!ReduceInitialCardMarks || !ShenandoahCardBarrier || ShenandoahGenerationalHeap::heap()->is_in_young(new_obj),
+ "Allocating new object outside of young generation: " INTPTR_FORMAT, p2i(new_obj));
#endif // COMPILER2_OR_JVMCI
+ assert(thread->deferred_card_mark().is_empty(), "We don't use this");
}
void ShenandoahBarrierSet::on_thread_create(Thread* thread) {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
index 9613422496af6..456b9fe6d3c85 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
@@ -91,8 +91,8 @@ class ShenandoahBreakpointMarkScope : public StackObj {
};
ShenandoahConcurrentGC::ShenandoahConcurrentGC(ShenandoahGeneration* generation, bool do_old_gc_bootstrap) :
- ShenandoahGC(generation),
_mark(generation),
+ _generation(generation),
_degen_point(ShenandoahDegenPoint::_degenerated_unset),
_abbreviated(false),
_do_old_gc_bootstrap(do_old_gc_bootstrap) {
@@ -576,7 +576,7 @@ void ShenandoahConcurrentGC::entry_promote_in_place() const {
ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::promote_in_place);
EventMark em("%s", "Promote in place");
- ShenandoahGenerationalHeap::heap()->promote_regions_in_place(_generation, true);
+ ShenandoahGenerationalHeap::heap()->promote_regions_in_place(true);
}
void ShenandoahConcurrentGC::entry_update_thread_roots() {
@@ -706,7 +706,7 @@ void ShenandoahConcurrentGC::op_init_mark() {
if (ShenandoahVerify) {
ShenandoahTimingsTracker v(ShenandoahPhaseTimings::init_mark_verify);
- heap->verifier()->verify_before_concmark(_generation);
+ heap->verifier()->verify_before_concmark();
}
if (VerifyBeforeGC) {
@@ -763,7 +763,7 @@ void ShenandoahConcurrentGC::op_final_mark() {
assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
if (ShenandoahVerify) {
- heap->verifier()->verify_roots_no_forwarded(_generation);
+ heap->verifier()->verify_roots_no_forwarded();
}
if (!heap->cancelled_gc()) {
@@ -791,7 +791,7 @@ void ShenandoahConcurrentGC::op_final_mark() {
if (ShenandoahVerify) {
ShenandoahTimingsTracker v(ShenandoahPhaseTimings::final_mark_verify);
- heap->verifier()->verify_before_evacuation(_generation);
+ heap->verifier()->verify_before_evacuation();
}
heap->set_evacuation_in_progress(true);
@@ -806,9 +806,9 @@ void ShenandoahConcurrentGC::op_final_mark() {
if (ShenandoahVerify) {
ShenandoahTimingsTracker v(ShenandoahPhaseTimings::final_mark_verify);
if (has_in_place_promotions(heap)) {
- heap->verifier()->verify_after_concmark_with_promotions(_generation);
+ heap->verifier()->verify_after_concmark_with_promotions();
} else {
- heap->verifier()->verify_after_concmark(_generation);
+ heap->verifier()->verify_after_concmark();
}
}
}
@@ -877,20 +877,18 @@ void ShenandoahConcurrentGC::op_weak_refs() {
class ShenandoahEvacUpdateCleanupOopStorageRootsClosure : public BasicOopIterateClosure {
private:
ShenandoahHeap* const _heap;
- ShenandoahGeneration* const _generation;
ShenandoahMarkingContext* const _mark_context;
bool _evac_in_progress;
Thread* const _thread;
public:
- explicit ShenandoahEvacUpdateCleanupOopStorageRootsClosure(ShenandoahGeneration* generation);
+ ShenandoahEvacUpdateCleanupOopStorageRootsClosure();
void do_oop(oop* p);
void do_oop(narrowOop* p);
};
-ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure(ShenandoahGeneration* generation) :
+ShenandoahEvacUpdateCleanupOopStorageRootsClosure::ShenandoahEvacUpdateCleanupOopStorageRootsClosure() :
_heap(ShenandoahHeap::heap()),
- _generation(generation),
_mark_context(ShenandoahHeap::heap()->marking_context()),
_evac_in_progress(ShenandoahHeap::heap()->is_evacuation_in_progress()),
_thread(Thread::current()) {
@@ -900,7 +898,8 @@ void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
const oop obj = RawAccess<>::oop_load(p);
if (!CompressedOops::is_null(obj)) {
if (!_mark_context->is_marked(obj)) {
- if (_generation->contains(obj)) {
+ shenandoah_assert_generations_reconciled();
+ if (_heap->is_in_active_generation(obj)) {
// Note: The obj is dead here. Do not touch it, just clear.
ShenandoahHeap::atomic_clear_oop(p, obj);
}
@@ -943,16 +942,14 @@ class ShenandoahConcurrentWeakRootsEvacUpdateTask : public WorkerTask {
ShenandoahClassLoaderDataRoots
_cld_roots;
ShenandoahConcurrentNMethodIterator _nmethod_itr;
- ShenandoahGeneration* _generation;
ShenandoahPhaseTimings::Phase _phase;
public:
- ShenandoahConcurrentWeakRootsEvacUpdateTask(ShenandoahGeneration* generation, ShenandoahPhaseTimings::Phase phase) :
+ ShenandoahConcurrentWeakRootsEvacUpdateTask(ShenandoahPhaseTimings::Phase phase) :
WorkerTask("Shenandoah Evacuate/Update Concurrent Weak Roots"),
_vm_roots(phase),
_cld_roots(phase, ShenandoahHeap::heap()->workers()->active_workers(), false /*heap iteration*/),
_nmethod_itr(ShenandoahCodeRoots::table()),
- _generation(generation),
_phase(phase) {}
~ShenandoahConcurrentWeakRootsEvacUpdateTask() {
@@ -960,14 +957,14 @@ class ShenandoahConcurrentWeakRootsEvacUpdateTask : public WorkerTask {
_vm_roots.report_num_dead();
}
- void work(uint worker_id) override {
+ void work(uint worker_id) {
ShenandoahConcurrentWorkerSession worker_session(worker_id);
ShenandoahSuspendibleThreadSetJoiner sts_join;
{
ShenandoahEvacOOMScope oom;
// jni_roots and weak_roots are OopStorage backed roots, concurrent iteration
// may race against OopStorage::release() calls.
- ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl(_generation);
+ ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl;
_vm_roots.oops_do(&cl, worker_id);
}
@@ -1002,7 +999,7 @@ void ShenandoahConcurrentGC::op_weak_roots() {
// Concurrent weak root processing
ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work);
ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
- ShenandoahConcurrentWeakRootsEvacUpdateTask task(_generation, ShenandoahPhaseTimings::conc_weak_roots_work);
+ ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work);
heap->workers()->run_task(&task);
}
@@ -1108,19 +1105,19 @@ void ShenandoahConcurrentGC::op_cleanup_early() {
}
void ShenandoahConcurrentGC::op_evacuate() {
- ShenandoahHeap::heap()->evacuate_collection_set(_generation, true /*concurrent*/);
+ ShenandoahHeap::heap()->evacuate_collection_set(true /*concurrent*/);
}
void ShenandoahConcurrentGC::op_init_update_refs() {
+ ShenandoahHeap* const heap = ShenandoahHeap::heap();
if (ShenandoahVerify) {
- ShenandoahHeap* const heap = ShenandoahHeap::heap();
ShenandoahTimingsTracker v(ShenandoahPhaseTimings::init_update_refs_verify);
- heap->verifier()->verify_before_update_refs(_generation);
+ heap->verifier()->verify_before_update_refs();
}
}
void ShenandoahConcurrentGC::op_update_refs() {
- ShenandoahHeap::heap()->update_heap_references(_generation, true /*concurrent*/);
+ ShenandoahHeap::heap()->update_heap_references(true /*concurrent*/);
}
class ShenandoahUpdateThreadHandshakeClosure : public HandshakeClosure {
@@ -1166,7 +1163,7 @@ void ShenandoahConcurrentGC::op_final_update_refs() {
// Has to be done before cset is clear
if (ShenandoahVerify) {
- heap->verifier()->verify_roots_in_to_space(_generation);
+ heap->verifier()->verify_roots_in_to_space();
}
// If we are running in generational mode and this is an aging cycle, this will also age active
@@ -1201,7 +1198,7 @@ void ShenandoahConcurrentGC::op_final_update_refs() {
if (ShenandoahVerify) {
ShenandoahTimingsTracker v(ShenandoahPhaseTimings::final_update_refs_verify);
- heap->verifier()->verify_after_update_refs(_generation);
+ heap->verifier()->verify_after_update_refs();
}
if (VerifyAfterGC) {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.hpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.hpp
index 54d43416fdb3a..d81c49363a230 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.hpp
@@ -47,6 +47,7 @@ class ShenandoahConcurrentGC : public ShenandoahGC {
protected:
ShenandoahConcurrentMark _mark;
+ ShenandoahGeneration* const _generation;
private:
ShenandoahDegenPoint _degen_point;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp
index 7a195f64cbd51..005d6c42f8c55 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp
@@ -56,11 +56,18 @@ class ShenandoahConcurrentMarkingTask : public WorkerTask {
}
void work(uint worker_id) {
+ ShenandoahHeap* heap = ShenandoahHeap::heap();
ShenandoahConcurrentWorkerSession worker_session(worker_id);
ShenandoahWorkerTimingsTracker timer(ShenandoahPhaseTimings::conc_mark, ShenandoahPhaseTimings::ParallelMark, worker_id, true);
ShenandoahSuspendibleThreadSetJoiner stsj;
+ // Do not use active_generation() : we must use the gc_generation() set by
+ // ShenandoahGCScope on the ControllerThread's stack; no safepoint may
+ // intervene to update active_generation, so we can't
+ // shenandoah_assert_generations_reconciled() here.
+ ShenandoahReferenceProcessor* rp = heap->gc_generation()->ref_processor();
+ assert(rp != nullptr, "need reference processor");
StringDedup::Requests requests;
- _cm->mark_loop(worker_id, _terminator, GENERATION, true /*cancellable*/,
+ _cm->mark_loop(worker_id, _terminator, rp, GENERATION, true /*cancellable*/,
ShenandoahStringDedup::is_enabled() ? ENQUEUE_DEDUP : NO_DEDUP,
&requests);
}
@@ -99,6 +106,9 @@ class ShenandoahFinalMarkingTask : public WorkerTask {
ShenandoahParallelWorkerSession worker_session(worker_id);
StringDedup::Requests requests;
+ ShenandoahReferenceProcessor* rp = heap->gc_generation()->ref_processor();
+ shenandoah_assert_generations_reconciled();
+
// First drain remaining SATB buffers.
{
ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
@@ -112,7 +122,7 @@ class ShenandoahFinalMarkingTask : public WorkerTask {
ShenandoahSATBAndRemarkThreadsClosure tc(satb_mq_set);
Threads::possibly_parallel_threads_do(true /* is_par */, &tc);
}
- _cm->mark_loop(worker_id, _terminator, GENERATION, false /*not cancellable*/,
+ _cm->mark_loop(worker_id, _terminator, rp, GENERATION, false /*not cancellable*/,
_dedup_string ? ENQUEUE_DEDUP : NO_DEDUP,
&requests);
assert(_cm->task_queues()->is_empty(), "Should be empty");
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp
index 2b791619d2e62..b918bf67b34b7 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp
@@ -46,8 +46,9 @@
#include "utilities/events.hpp"
ShenandoahDegenGC::ShenandoahDegenGC(ShenandoahDegenPoint degen_point, ShenandoahGeneration* generation) :
- ShenandoahGC(generation),
+ ShenandoahGC(),
_degen_point(degen_point),
+ _generation(generation),
_abbreviated(false) {
}
@@ -259,7 +260,7 @@ void ShenandoahDegenGC::op_degenerated() {
} else if (has_in_place_promotions(heap)) {
// We have nothing to evacuate, but there are still regions to promote in place.
ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_promote_regions);
- ShenandoahGenerationalHeap::heap()->promote_regions_in_place(_generation, false /* concurrent*/);
+ ShenandoahGenerationalHeap::heap()->promote_regions_in_place(false /* concurrent*/);
}
// Update collector state regardless of whether there are forwarded objects
@@ -299,7 +300,7 @@ void ShenandoahDegenGC::op_degenerated() {
}
if (ShenandoahVerify) {
- heap->verifier()->verify_after_degenerated(_generation);
+ heap->verifier()->verify_after_degenerated();
}
if (VerifyAfterGC) {
@@ -336,11 +337,11 @@ void ShenandoahDegenGC::op_finish_mark() {
void ShenandoahDegenGC::op_prepare_evacuation() {
ShenandoahHeap* const heap = ShenandoahHeap::heap();
if (ShenandoahVerify) {
- heap->verifier()->verify_roots_no_forwarded(_generation);
+ heap->verifier()->verify_roots_no_forwarded();
}
// STW cleanup weak roots and unload classes
- heap->parallel_cleaning(_generation, false /*full gc*/);
+ heap->parallel_cleaning(false /*full gc*/);
// Prepare regions and collection set
_generation->prepare_regions_and_collection_set(false /*concurrent*/);
@@ -357,7 +358,7 @@ void ShenandoahDegenGC::op_prepare_evacuation() {
if (!heap->collection_set()->is_empty()) {
if (ShenandoahVerify) {
- heap->verifier()->verify_before_evacuation(_generation);
+ heap->verifier()->verify_before_evacuation();
}
heap->set_evacuation_in_progress(true);
@@ -365,9 +366,9 @@ void ShenandoahDegenGC::op_prepare_evacuation() {
} else {
if (ShenandoahVerify) {
if (has_in_place_promotions(heap)) {
- heap->verifier()->verify_after_concmark_with_promotions(_generation);
+ heap->verifier()->verify_after_concmark_with_promotions();
} else {
- heap->verifier()->verify_after_concmark(_generation);
+ heap->verifier()->verify_after_concmark();
}
}
@@ -387,7 +388,7 @@ void ShenandoahDegenGC::op_cleanup_early() {
void ShenandoahDegenGC::op_evacuate() {
ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_stw_evac);
- ShenandoahHeap::heap()->evacuate_collection_set(_generation, false /* concurrent*/);
+ ShenandoahHeap::heap()->evacuate_collection_set(false /* concurrent*/);
}
void ShenandoahDegenGC::op_init_update_refs() {
@@ -401,7 +402,7 @@ void ShenandoahDegenGC::op_update_refs() {
ShenandoahHeap* const heap = ShenandoahHeap::heap();
ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_update_refs);
// Handed over from concurrent update references phase
- heap->update_heap_references(_generation, false /*concurrent*/);
+ heap->update_heap_references(false /*concurrent*/);
heap->set_update_refs_in_progress(false);
heap->set_has_forwarded_objects(false);
@@ -415,7 +416,7 @@ void ShenandoahDegenGC::op_update_roots() {
heap->update_heap_region_states(false /*concurrent*/);
if (ShenandoahVerify) {
- heap->verifier()->verify_after_update_refs(_generation);
+ heap->verifier()->verify_after_update_refs();
}
if (VerifyAfterGC) {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.hpp b/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.hpp
index 34b9688106c90..971bd67eb0df8 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.hpp
@@ -34,11 +34,12 @@ class ShenandoahDegenGC : public ShenandoahGC {
friend class VM_ShenandoahDegeneratedGC;
private:
const ShenandoahDegenPoint _degen_point;
+ ShenandoahGeneration* _generation;
bool _abbreviated;
public:
ShenandoahDegenGC(ShenandoahDegenPoint degen_point, ShenandoahGeneration* generation);
- bool collect(GCCause::Cause cause) override;
+ bool collect(GCCause::Cause cause);
private:
void vmop_degenerated();
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
index 78218f5e403f8..2e486a23363ea 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
@@ -68,7 +68,6 @@
#include "utilities/growableArray.hpp"
ShenandoahFullGC::ShenandoahFullGC() :
- ShenandoahGC(ShenandoahHeap::heap()->global_generation()),
_gc_timer(ShenandoahHeap::heap()->gc_timer()),
_preserved_marks(new PreservedMarksSet(true)) {}
@@ -125,7 +124,7 @@ void ShenandoahFullGC::op_full(GCCause::Cause cause) {
}
// Regardless if progress was made, we record that we completed a "successful" full GC.
- _generation->heuristics()->record_success_full();
+ heap->global_generation()->heuristics()->record_success_full();
heap->shenandoah_policy()->record_success_full();
{
@@ -142,7 +141,7 @@ void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
}
if (ShenandoahVerify) {
- heap->verifier()->verify_before_fullgc(_generation);
+ heap->verifier()->verify_before_fullgc();
}
if (VerifyBeforeGC) {
@@ -195,7 +194,7 @@ void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
}
// d. Abandon reference discovery and clear all discovered references.
- ShenandoahReferenceProcessor* rp = _generation->ref_processor();
+ ShenandoahReferenceProcessor* rp = heap->global_generation()->ref_processor();
rp->abandon_partial_discovery();
// e. Sync pinned region status from the CP marks
@@ -274,7 +273,7 @@ void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
heap->set_full_gc_in_progress(false);
if (ShenandoahVerify) {
- heap->verifier()->verify_after_fullgc(_generation);
+ heap->verifier()->verify_after_fullgc();
}
if (VerifyAfterGC) {
@@ -293,19 +292,19 @@ void ShenandoahFullGC::phase1_mark_heap() {
ShenandoahHeap* heap = ShenandoahHeap::heap();
- _generation->reset_mark_bitmap();
+ heap->global_generation()->reset_mark_bitmap();
assert(heap->marking_context()->is_bitmap_clear(), "sanity");
- assert(!_generation->is_mark_complete(), "sanity");
+ assert(!heap->global_generation()->is_mark_complete(), "sanity");
- heap->set_unload_classes(_generation->heuristics()->can_unload_classes());
+ heap->set_unload_classes(heap->global_generation()->heuristics()->can_unload_classes());
- ShenandoahReferenceProcessor* rp = _generation->ref_processor();
+ ShenandoahReferenceProcessor* rp = heap->global_generation()->ref_processor();
// enable ("weak") refs discovery
rp->set_soft_reference_policy(true); // forcefully purge all soft references
- ShenandoahSTWMark mark(_generation, true /*full_gc*/);
+ ShenandoahSTWMark mark(heap->global_generation(), true /*full_gc*/);
mark.mark();
- heap->parallel_cleaning(_generation, true /* full_gc */);
+ heap->parallel_cleaning(true /* full_gc */);
if (ShenandoahHeap::heap()->mode()->is_generational()) {
ShenandoahGenerationalFullGC::log_live_in_old(heap);
@@ -351,12 +350,10 @@ class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
return _empty_regions_pos;
}
- void do_object(oop p) override {
- shenandoah_assert_mark_complete(cast_from_oop(p));
+ void do_object(oop p) {
assert(_from_region != nullptr, "must set before work");
- assert(_heap->global_generation()->is_mark_complete(), "marking must be finished");
- assert(_heap->marking_context()->is_marked(p), "must be marked");
- assert(!_heap->marking_context()->allocated_after_mark_start(p), "must be truly marked");
+ assert(_heap->gc_generation()->complete_marking_context()->is_marked(p), "must be marked");
+ assert(!_heap->gc_generation()->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");
size_t obj_size = p->size();
if (_compact_point + obj_size > _to_region->end()) {
@@ -526,8 +523,12 @@ void ShenandoahFullGC::calculate_target_humongous_objects() {
}
class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
+private:
+ ShenandoahHeap* const _heap;
+
public:
- void heap_region_do(ShenandoahHeapRegion* r) override {
+ ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
+ void heap_region_do(ShenandoahHeapRegion* r) {
if (r->is_trash()) {
r->try_recycle_under_lock();
}
@@ -759,6 +760,7 @@ void ShenandoahFullGC::phase2_calculate_target_addresses(ShenandoahHeapRegionSet
class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
private:
+ ShenandoahHeap* const _heap;
ShenandoahMarkingContext* const _ctx;
template
@@ -776,7 +778,8 @@ class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure
public:
ShenandoahAdjustPointersClosure() :
- _ctx(ShenandoahHeap::heap()->global_generation()->complete_marking_context()) {}
+ _heap(ShenandoahHeap::heap()),
+ _ctx(ShenandoahHeap::heap()->gc_generation()->complete_marking_context()) {}
void do_oop(oop* p) { do_oop_work(p); }
void do_oop(narrowOop* p) { do_oop_work(p); }
@@ -786,12 +789,15 @@ class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure
class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
private:
+ ShenandoahHeap* const _heap;
ShenandoahAdjustPointersClosure _cl;
public:
- void do_object(oop p) override {
- assert(ShenandoahHeap::heap()->global_generation()->is_mark_complete(), "marking must be complete");
- assert(ShenandoahHeap::heap()->marking_context()->is_marked(p), "must be marked");
+ ShenandoahAdjustPointersObjectClosure() :
+ _heap(ShenandoahHeap::heap()) {
+ }
+ void do_object(oop p) {
+ assert(_heap->gc_generation()->complete_marking_context()->is_marked(p), "must be marked");
p->oop_iterate(&_cl);
}
};
@@ -807,7 +813,7 @@ class ShenandoahAdjustPointersTask : public WorkerTask {
_heap(ShenandoahHeap::heap()) {
}
- void work(uint worker_id) override {
+ void work(uint worker_id) {
ShenandoahParallelWorkerSession worker_session(worker_id);
ShenandoahAdjustPointersObjectClosure obj_cl;
ShenandoahHeapRegion* r = _regions.next();
@@ -833,7 +839,7 @@ class ShenandoahAdjustRootPointersTask : public WorkerTask {
_rp(rp),
_preserved_marks(preserved_marks) {}
- void work(uint worker_id) override {
+ void work(uint worker_id) {
ShenandoahParallelWorkerSession worker_session(worker_id);
ShenandoahAdjustPointersClosure cl;
_rp->roots_do(worker_id, &cl);
@@ -867,15 +873,15 @@ void ShenandoahFullGC::phase3_update_references() {
class ShenandoahCompactObjectsClosure : public ObjectClosure {
private:
- uint const _worker_id;
+ ShenandoahHeap* const _heap;
+ uint const _worker_id;
public:
- explicit ShenandoahCompactObjectsClosure(uint worker_id) :
- _worker_id(worker_id) {}
+ ShenandoahCompactObjectsClosure(uint worker_id) :
+ _heap(ShenandoahHeap::heap()), _worker_id(worker_id) {}
- void do_object(oop p) override {
- assert(ShenandoahHeap::heap()->global_generation()->is_mark_complete(), "marking must be finished");
- assert(ShenandoahHeap::heap()->marking_context()->is_marked(p), "must be marked");
+ void do_object(oop p) {
+ assert(_heap->gc_generation()->complete_marking_context()->is_marked(p), "must be marked");
size_t size = p->size();
if (FullGCForwarding::is_forwarded(p)) {
HeapWord* compact_from = cast_from_oop(p);
@@ -902,7 +908,7 @@ class ShenandoahCompactObjectsTask : public WorkerTask {
_worker_slices(worker_slices) {
}
- void work(uint worker_id) override {
+ void work(uint worker_id) {
ShenandoahParallelWorkerSession worker_session(worker_id);
ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);
@@ -939,7 +945,7 @@ class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
_heap->free_set()->clear();
}
- void heap_region_do(ShenandoahHeapRegion* r) override {
+ void heap_region_do(ShenandoahHeapRegion* r) {
assert (!r->is_cset(), "cset regions should have been demoted already");
// Need to reset the complete-top-at-mark-start pointer here because
@@ -948,7 +954,7 @@ class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
// NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
// pinned regions.
if (!r->is_pinned()) {
- _heap->marking_context()->reset_top_at_mark_start(r);
+ _heap->gc_generation()->complete_marking_context()->reset_top_at_mark_start(r);
}
size_t live = r->used();
@@ -1073,7 +1079,7 @@ void ShenandoahFullGC::compact_humongous_objects() {
// we need to remain able to walk pinned regions.
// Since pinned region do not move and don't get compacted, we will get holes with
// unreachable objects in them (which may have pointers to unloaded Klasses and thus
-// cannot be iterated over using oop->size()). The only way to safely iterate over those is using
+// cannot be iterated over using oop->size(). The only way to safely iterate over those is using
// a valid marking bitmap and valid TAMS pointer. This class only resets marking
// bitmaps for un-pinned regions, and later we only reset TAMS for unpinned regions.
class ShenandoahMCResetCompleteBitmapTask : public WorkerTask {
@@ -1085,12 +1091,11 @@ class ShenandoahMCResetCompleteBitmapTask : public WorkerTask {
WorkerTask("Shenandoah Reset Bitmap") {
}
- void work(uint worker_id) override {
+ void work(uint worker_id) {
ShenandoahParallelWorkerSession worker_session(worker_id);
ShenandoahHeapRegion* region = _regions.next();
ShenandoahHeap* heap = ShenandoahHeap::heap();
- ShenandoahMarkingContext* const ctx = heap->marking_context();
- assert(heap->global_generation()->is_mark_complete(), "Marking must be complete");
+ ShenandoahMarkingContext* const ctx = heap->gc_generation()->complete_marking_context();
while (region != nullptr) {
if (heap->is_bitmap_slice_committed(region) && !region->is_pinned() && region->has_live()) {
ctx->clear_bitmap(region);
@@ -1158,7 +1163,7 @@ ShenandoahGenerationalHeap::TransferResult ShenandoahFullGC::phase5_epilog() {
heap->free_set()->finish_rebuild(young_cset_regions, old_cset_regions, num_old);
// Set mark incomplete because the marking bitmaps have been reset except pinned regions.
- _generation->set_mark_incomplete();
+ heap->global_generation()->set_mark_incomplete();
heap->clear_cancelled_gc();
}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.hpp
index 8b8244f2ce3ab..b0b8c7bf0c599 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.hpp
@@ -68,7 +68,7 @@ class ShenandoahFullGC : public ShenandoahGC {
public:
ShenandoahFullGC();
~ShenandoahFullGC();
- bool collect(GCCause::Cause cause) override;
+ bool collect(GCCause::Cause cause);
private:
// GC entries
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGC.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGC.hpp
index 7182665f2e3a1..f08bdce0a2025 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGC.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGC.hpp
@@ -44,8 +44,6 @@
* Full GC --------> (finish)
*/
-class ShenandoahGeneration;
-
class ShenandoahGC : public StackObj {
public:
// Fail point from concurrent GC
@@ -59,17 +57,12 @@ class ShenandoahGC : public StackObj {
_DEGENERATED_LIMIT
};
- explicit ShenandoahGC(ShenandoahGeneration* generation) : _generation(generation) {}
-
// Returns false if the collection was cancelled, true otherwise.
virtual bool collect(GCCause::Cause cause) = 0;
static const char* degen_point_to_string(ShenandoahDegenPoint point);
- ShenandoahGeneration* generation() const { return _generation; }
protected:
static void update_roots(bool full_gc);
-
- ShenandoahGeneration* _generation;
};
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHGC_HPP
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp
index d2e25176c1fce..e6597b3c1e49c 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp
@@ -142,11 +142,6 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo {
size_t soft_available() const override;
size_t bytes_allocated_since_gc_start() const override;
-
- // Reset the bytes allocated within this generation since the start of GC. The argument initial_bytes_allocated
- // is normally zero. In the case that some memory was allocated following the last allocation rate sample that
- // precedes the start of GC, the number of bytes allocated is supplied as the initial value of bytes_allocated_since_gc_start.
- // We will behave as if these bytes were allocated after the start of GC.
void reset_bytes_allocated_since_gc_start(size_t initial_bytes_allocated);
void increase_allocated(size_t bytes);
@@ -203,7 +198,7 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo {
bool is_bitmap_clear();
// We need to track the status of marking for different generations.
- bool is_mark_complete() const { return _is_marking_complete.is_set(); }
+ bool is_mark_complete() { return _is_marking_complete.is_set(); }
virtual void set_mark_complete();
virtual void set_mark_incomplete();
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp
index ccabdb7b9daa9..971129beea807 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp
@@ -50,12 +50,10 @@ class ShenandoahConcurrentEvacuator : public ObjectClosure {
};
ShenandoahGenerationalEvacuationTask::ShenandoahGenerationalEvacuationTask(ShenandoahGenerationalHeap* heap,
- ShenandoahGeneration* generation,
ShenandoahRegionIterator* iterator,
bool concurrent, bool only_promote_regions) :
WorkerTask("Shenandoah Evacuation"),
_heap(heap),
- _generation(generation),
_regions(iterator),
_concurrent(concurrent),
_only_promote_regions(only_promote_regions)
@@ -171,12 +169,13 @@ void ShenandoahGenerationalEvacuationTask::maybe_promote_region(ShenandoahHeapRe
// We identify the entirety of the region as DIRTY to force the next remembered set scan to identify the "interesting pointers"
// contained herein.
void ShenandoahGenerationalEvacuationTask::promote_in_place(ShenandoahHeapRegion* region) {
- assert(!_generation->is_old(), "Sanity check");
+ assert(!_heap->gc_generation()->is_old(), "Sanity check");
ShenandoahMarkingContext* const marking_context = _heap->young_generation()->complete_marking_context();
HeapWord* const tams = marking_context->top_at_mark_start(region);
{
const size_t old_garbage_threshold = (ShenandoahHeapRegion::region_size_bytes() * ShenandoahOldGarbageThreshold) / 100;
+ shenandoah_assert_generations_reconciled();
assert(!_heap->is_concurrent_old_mark_in_progress(), "Cannot promote in place during old marking");
assert(region->garbage_before_padded_for_promote() < old_garbage_threshold, "Region %zu has too much garbage for promotion", region->index());
assert(region->is_young(), "Only young regions can be promoted");
@@ -260,7 +259,8 @@ void ShenandoahGenerationalEvacuationTask::promote_in_place(ShenandoahHeapRegion
void ShenandoahGenerationalEvacuationTask::promote_humongous(ShenandoahHeapRegion* region) {
ShenandoahMarkingContext* marking_context = _heap->marking_context();
oop obj = cast_to_oop(region->bottom());
- assert(_generation->is_mark_complete(), "sanity");
+ assert(_heap->gc_generation()->is_mark_complete(), "sanity");
+ shenandoah_assert_generations_reconciled();
assert(region->is_young(), "Only young regions can be promoted");
assert(region->is_humongous_start(), "Should not promote humongous continuation in isolation");
assert(_heap->is_tenurable(region), "Only promote regions that are sufficiently aged");
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp
index de47184ffffc3..0c402d6c90a58 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp
@@ -36,14 +36,12 @@ class ShenandoahRegionIterator;
class ShenandoahGenerationalEvacuationTask : public WorkerTask {
private:
ShenandoahGenerationalHeap* const _heap;
- ShenandoahGeneration* const _generation;
ShenandoahRegionIterator* _regions;
bool _concurrent;
bool _only_promote_regions;
public:
ShenandoahGenerationalEvacuationTask(ShenandoahGenerationalHeap* sh,
- ShenandoahGeneration* generation,
ShenandoahRegionIterator* iterator,
bool concurrent, bool only_promote_regions);
void work(uint worker_id) override;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp
index 8d8091472fcbb..c4a7408e03243 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp
@@ -53,7 +53,8 @@ void assert_usage_not_more_than_regions_used(ShenandoahGeneration* generation) {
void ShenandoahGenerationalFullGC::prepare() {
auto heap = ShenandoahGenerationalHeap::heap();
// Since we may arrive here from degenerated GC failure of either young or old, establish generation as GLOBAL.
- heap->set_active_generation(heap->global_generation());
+ heap->set_gc_generation(heap->global_generation());
+ heap->set_active_generation();
// No need for old_gen->increase_used() as this was done when plabs were allocated.
heap->reset_generation_reserves();
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp
index bc653b030a8ca..34f217ada250b 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp
@@ -178,15 +178,15 @@ bool ShenandoahGenerationalHeap::requires_barriers(stackChunkOop obj) const {
return false;
}
-void ShenandoahGenerationalHeap::evacuate_collection_set(ShenandoahGeneration* generation, bool concurrent) {
+void ShenandoahGenerationalHeap::evacuate_collection_set(bool concurrent) {
ShenandoahRegionIterator regions;
- ShenandoahGenerationalEvacuationTask task(this, generation, ®ions, concurrent, false /* only promote regions */);
+ ShenandoahGenerationalEvacuationTask task(this, ®ions, concurrent, false /* only promote regions */);
workers()->run_task(&task);
}
-void ShenandoahGenerationalHeap::promote_regions_in_place(ShenandoahGeneration* generation, bool concurrent) {
+void ShenandoahGenerationalHeap::promote_regions_in_place(bool concurrent) {
ShenandoahRegionIterator regions;
- ShenandoahGenerationalEvacuationTask task(this, generation, ®ions, concurrent, true /* only promote regions */);
+ ShenandoahGenerationalEvacuationTask task(this, ®ions, concurrent, true /* only promote regions */);
workers()->run_task(&task);
}
@@ -757,27 +757,23 @@ void ShenandoahGenerationalHeap::coalesce_and_fill_old_regions(bool concurrent)
template
class ShenandoahGenerationalUpdateHeapRefsTask : public WorkerTask {
private:
- // For update refs, _generation will be young or global. Mixed collections use the young generation.
- ShenandoahGeneration* _generation;
ShenandoahGenerationalHeap* _heap;
ShenandoahRegionIterator* _regions;
ShenandoahRegionChunkIterator* _work_chunks;
public:
- ShenandoahGenerationalUpdateHeapRefsTask(ShenandoahGeneration* generation,
- ShenandoahRegionIterator* regions,
- ShenandoahRegionChunkIterator* work_chunks) :
+ explicit ShenandoahGenerationalUpdateHeapRefsTask(ShenandoahRegionIterator* regions,
+ ShenandoahRegionChunkIterator* work_chunks) :
WorkerTask("Shenandoah Update References"),
- _generation(generation),
_heap(ShenandoahGenerationalHeap::heap()),
_regions(regions),
_work_chunks(work_chunks)
{
- const bool old_bitmap_stable = _heap->old_generation()->is_mark_complete();
+ bool old_bitmap_stable = _heap->old_generation()->is_mark_complete();
log_debug(gc, remset)("Update refs, scan remembered set using bitmap: %s", BOOL_TO_STR(old_bitmap_stable));
}
- void work(uint worker_id) override {
+ void work(uint worker_id) {
if (CONCURRENT) {
ShenandoahConcurrentWorkerSession worker_session(worker_id);
ShenandoahSuspendibleThreadSetJoiner stsj;
@@ -807,8 +803,10 @@ class ShenandoahGenerationalUpdateHeapRefsTask : public WorkerTask {
// If !CONCURRENT, there's no value in expanding Mutator free set
ShenandoahHeapRegion* r = _regions->next();
- // We update references for global, mixed, and young collections.
- assert(_generation->is_mark_complete(), "Expected complete marking");
+ // We update references for global, old, and young collections.
+ ShenandoahGeneration* const gc_generation = _heap->gc_generation();
+ shenandoah_assert_generations_reconciled();
+ assert(gc_generation->is_mark_complete(), "Expected complete marking");
ShenandoahMarkingContext* const ctx = _heap->marking_context();
bool is_mixed = _heap->collection_set()->has_old_regions();
while (r != nullptr) {
@@ -820,7 +818,7 @@ class ShenandoahGenerationalUpdateHeapRefsTask : public WorkerTask {
if (r->is_young()) {
_heap->marked_object_oop_iterate(r, &cl, update_watermark);
} else if (r->is_old()) {
- if (_generation->is_global()) {
+ if (gc_generation->is_global()) {
_heap->marked_object_oop_iterate(r, &cl, update_watermark);
}
@@ -849,7 +847,7 @@ class ShenandoahGenerationalUpdateHeapRefsTask : public WorkerTask {
r = _regions->next();
}
- if (_generation->is_young()) {
+ if (!gc_generation->is_global()) {
// Since this is generational and not GLOBAL, we have to process the remembered set. There's no remembered
// set processing if not in generational mode or if GLOBAL mode.
@@ -963,15 +961,15 @@ class ShenandoahGenerationalUpdateHeapRefsTask : public WorkerTask {
}
};
-void ShenandoahGenerationalHeap::update_heap_references(ShenandoahGeneration* generation, bool concurrent) {
+void ShenandoahGenerationalHeap::update_heap_references(bool concurrent) {
assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
const uint nworkers = workers()->active_workers();
ShenandoahRegionChunkIterator work_list(nworkers);
if (concurrent) {
- ShenandoahGenerationalUpdateHeapRefsTask task(generation, &_update_refs_iterator, &work_list);
+ ShenandoahGenerationalUpdateHeapRefsTask task(&_update_refs_iterator, &work_list);
workers()->run_task(&task);
} else {
- ShenandoahGenerationalUpdateHeapRefsTask task(generation, &_update_refs_iterator, &work_list);
+ ShenandoahGenerationalUpdateHeapRefsTask task(&_update_refs_iterator, &work_list);
workers()->run_task(&task);
}
@@ -1046,7 +1044,7 @@ class ShenandoahUpdateRegionAges : public ShenandoahHeapRegionClosure {
void ShenandoahGenerationalHeap::final_update_refs_update_region_states() {
ShenandoahSynchronizePinnedRegionStates pins;
- ShenandoahUpdateRegionAges ages(marking_context());
+ ShenandoahUpdateRegionAges ages(active_generation()->complete_marking_context());
auto cl = ShenandoahCompositeRegionClosure::of(pins, ages);
parallel_heap_region_iterate(&cl);
}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp
index d3584a6f9a01a..6960562b31d80 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp
@@ -88,11 +88,8 @@ class ShenandoahGenerationalHeap : public ShenandoahHeap {
oop evacuate_object(oop p, Thread* thread) override;
oop try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region, ShenandoahAffiliation target_gen);
-
- // In the generational mode, we will use these two functions for young, mixed, and global collections.
- // For young and mixed, the generation argument will be the young generation, otherwise it will be the global generation.
- void evacuate_collection_set(ShenandoahGeneration* generation, bool concurrent) override;
- void promote_regions_in_place(ShenandoahGeneration* generation, bool concurrent);
+ void evacuate_collection_set(bool concurrent) override;
+ void promote_regions_in_place(bool concurrent);
size_t plab_min_size() const { return _min_plab_size; }
size_t plab_max_size() const { return _max_plab_size; }
@@ -102,9 +99,7 @@ class ShenandoahGenerationalHeap : public ShenandoahHeap {
// ---------- Update References
//
- // In the generational mode, we will use this function for young, mixed, and global collections.
- // For young and mixed, the generation argument will be the young generation, otherwise it will be the global generation.
- void update_heap_references(ShenandoahGeneration* generation, bool concurrent) override;
+ void update_heap_references(bool concurrent) override;
void final_update_refs_update_region_states() override;
private:
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
index cb22c794d8554..b2fd32d2fd0b9 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
@@ -529,6 +529,7 @@ void ShenandoahHeap::initialize_heuristics() {
ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
CollectedHeap(),
+ _gc_generation(nullptr),
_active_generation(nullptr),
_initial_size(0),
_committed(0),
@@ -1256,8 +1257,7 @@ class ShenandoahPrepareForUpdateRefsHandshakeClosure : public HandshakeClosure {
ShenandoahGCStatePropagatorHandshakeClosure _propagator;
};
-void ShenandoahHeap::evacuate_collection_set(ShenandoahGeneration* generation, bool concurrent) {
- assert(generation->is_global(), "Only global generation expected here");
+void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
ShenandoahEvacuationTask task(this, _collection_set, concurrent);
workers()->run_task(&task);
}
@@ -1576,8 +1576,8 @@ void ShenandoahHeap::collect_as_vm_thread(GCCause::Cause cause) {
// cycle. We _could_ cancel the concurrent cycle and then try to run a cycle directly
// on the VM thread, but this would confuse the control thread mightily and doesn't
// seem worth the trouble. Instead, we will have the caller thread run (and wait for) a
- // concurrent cycle in the prologue of the heap inspect/dump operation (see VM_HeapDumper::doit_prologue).
- // This is how other concurrent collectors in the JVM handle this scenario as well.
+ // concurrent cycle in the prologue of the heap inspect/dump operation. This is how
+ // other concurrent collectors in the JVM handle this scenario as well.
assert(Thread::current()->is_VM_thread(), "Should be the VM thread");
guarantee(cause == GCCause::_heap_dump || cause == GCCause::_heap_inspection, "Invalid cause");
}
@@ -1587,10 +1587,7 @@ void ShenandoahHeap::collect(GCCause::Cause cause) {
}
void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
- // This method is only called by `CollectedHeap::collect_as_vm_thread`, which we have
- // overridden to do nothing. See the comment there for an explanation of how heap inspections
- // work for Shenandoah.
- ShouldNotReachHere();
+ //assert(false, "Shouldn't need to do full collections");
}
HeapWord* ShenandoahHeap::block_start(const void* addr) const {
@@ -1659,11 +1656,17 @@ void ShenandoahHeap::print_tracing_info() const {
}
}
+void ShenandoahHeap::set_gc_generation(ShenandoahGeneration* generation) {
+ shenandoah_assert_control_or_vm_thread_at_safepoint();
+ _gc_generation = generation;
+}
+
// Active generation may only be set by the VM thread at a safepoint.
-void ShenandoahHeap::set_active_generation(ShenandoahGeneration* generation) {
+void ShenandoahHeap::set_active_generation() {
assert(Thread::current()->is_VM_thread(), "Only the VM Thread");
assert(SafepointSynchronize::is_at_safepoint(), "Only at a safepoint!");
- _active_generation = generation;
+ assert(_gc_generation != nullptr, "Will set _active_generation to nullptr");
+ _active_generation = _gc_generation;
}
void ShenandoahHeap::on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* generation) {
@@ -1672,14 +1675,17 @@ void ShenandoahHeap::on_cycle_start(GCCause::Cause cause, ShenandoahGeneration*
const GCCause::Cause current = gc_cause();
assert(current == GCCause::_no_gc, "Over-writing cause: %s, with: %s",
GCCause::to_string(current), GCCause::to_string(cause));
+ assert(_gc_generation == nullptr, "Over-writing _gc_generation");
set_gc_cause(cause);
+ set_gc_generation(generation);
generation->heuristics()->record_cycle_start();
}
void ShenandoahHeap::on_cycle_end(ShenandoahGeneration* generation) {
assert(gc_cause() != GCCause::_no_gc, "cause wasn't set");
+ assert(_gc_generation != nullptr, "_gc_generation wasn't set");
generation->heuristics()->record_cycle_end();
if (mode()->is_generational() && generation->is_global()) {
@@ -1688,13 +1694,14 @@ void ShenandoahHeap::on_cycle_end(ShenandoahGeneration* generation) {
old_generation()->heuristics()->record_cycle_end();
}
+ set_gc_generation(nullptr);
set_gc_cause(GCCause::_no_gc);
}
void ShenandoahHeap::verify(VerifyOption vo) {
if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
if (ShenandoahVerify) {
- verifier()->verify_generic(active_generation(), vo);
+ verifier()->verify_generic(vo);
} else {
// TODO: Consider allocating verification bitmaps on demand,
// and turn this on unconditionally.
@@ -2054,13 +2061,14 @@ void ShenandoahHeap::do_class_unloading() {
}
}
-void ShenandoahHeap::stw_weak_refs(ShenandoahGeneration* generation, bool full_gc) {
+void ShenandoahHeap::stw_weak_refs(bool full_gc) {
// Weak refs processing
ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
: ShenandoahPhaseTimings::degen_gc_weakrefs;
ShenandoahTimingsTracker t(phase);
ShenandoahGCWorkerPhase worker_phase(phase);
- generation->ref_processor()->process_references(phase, workers(), false /* concurrent */);
+ shenandoah_assert_generations_reconciled();
+ gc_generation()->ref_processor()->process_references(phase, workers(), false /* concurrent */);
}
void ShenandoahHeap::prepare_update_heap_references() {
@@ -2301,13 +2309,13 @@ void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
}
}
-void ShenandoahHeap::parallel_cleaning(ShenandoahGeneration* generation, bool full_gc) {
+void ShenandoahHeap::parallel_cleaning(bool full_gc) {
assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
assert(is_stw_gc_in_progress(), "Only for Degenerated and Full GC");
ShenandoahGCPhase phase(full_gc ?
ShenandoahPhaseTimings::full_gc_purge :
ShenandoahPhaseTimings::degen_gc_purge);
- stw_weak_refs(generation, full_gc);
+ stw_weak_refs(full_gc);
stw_process_weak_roots(full_gc);
stw_unload_classes(full_gc);
}
@@ -2415,8 +2423,11 @@ void ShenandoahHeap::sync_pinned_region_status() {
void ShenandoahHeap::assert_pinned_region_status() {
for (size_t i = 0; i < num_regions(); i++) {
ShenandoahHeapRegion* r = get_region(i);
- assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
- "Region %zu pinning status is inconsistent", i);
+ shenandoah_assert_generations_reconciled();
+ if (gc_generation()->contains(r)) {
+ assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
+ "Region %zu pinning status is inconsistent", i);
+ }
}
}
#endif
@@ -2519,8 +2530,7 @@ class ShenandoahUpdateHeapRefsTask : public WorkerTask {
}
};
-void ShenandoahHeap::update_heap_references(ShenandoahGeneration* generation, bool concurrent) {
- assert(generation->is_global(), "Should only get global generation here");
+void ShenandoahHeap::update_heap_references(bool concurrent) {
assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
if (concurrent) {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
index d6bc17b844bfa..8bcb04e5766c8 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
@@ -145,10 +145,17 @@ class ShenandoahHeap : public CollectedHeap {
private:
ShenandoahHeapLock _lock;
+ // Indicates the generation whose collection is in
+ // progress. Mutator threads aren't allowed to read
+ // this field.
+ ShenandoahGeneration* _gc_generation;
+
// This is set and cleared by only the VMThread
- // at each STW pause (safepoint) to the value given to the VM operation.
- // This allows the value to be always consistently
+ // at each STW pause (safepoint) to the value seen in
+ // _gc_generation. This allows the value to be always consistently
// seen by all mutators as well as all GC worker threads.
+ // In that sense, it's a stable snapshot of _gc_generation that is
+ // updated at each STW pause associated with a ShenandoahVMOp.
ShenandoahGeneration* _active_generation;
protected:
@@ -160,13 +167,25 @@ class ShenandoahHeap : public CollectedHeap {
return &_lock;
}
+ ShenandoahGeneration* gc_generation() const {
+ // We don't want this field read by a mutator thread
+ assert(!Thread::current()->is_Java_thread(), "Not allowed");
+ // value of _gc_generation field, see above
+ return _gc_generation;
+ }
+
ShenandoahGeneration* active_generation() const {
// value of _active_generation field, see above
return _active_generation;
}
- // Update the _active_generation field: can only be called at a safepoint by the VMThread.
- void set_active_generation(ShenandoahGeneration* generation);
+ // Set the _gc_generation field
+ void set_gc_generation(ShenandoahGeneration* generation);
+
+ // Copy the value in the _gc_generation field into
+ // the _active_generation field: can only be called at
+ // a safepoint by the VMThread.
+ void set_active_generation();
ShenandoahHeuristics* heuristics();
@@ -463,7 +482,7 @@ class ShenandoahHeap : public CollectedHeap {
// GC support
// Evacuation
- virtual void evacuate_collection_set(ShenandoahGeneration* generation, bool concurrent);
+ virtual void evacuate_collection_set(bool concurrent);
// Concurrent root processing
void prepare_concurrent_roots();
void finish_concurrent_roots();
@@ -478,7 +497,7 @@ class ShenandoahHeap : public CollectedHeap {
// Turn off weak roots flag, purge old satb buffers in generational mode
void concurrent_final_roots(HandshakeClosure* handshake_closure = nullptr);
- virtual void update_heap_references(ShenandoahGeneration* generation, bool concurrent);
+ virtual void update_heap_references(bool concurrent);
// Final update region states
void update_heap_region_states(bool concurrent);
virtual void final_update_refs_update_region_states();
@@ -586,12 +605,12 @@ class ShenandoahHeap : public CollectedHeap {
bool unload_classes() const;
// Perform STW class unloading and weak root cleaning
- void parallel_cleaning(ShenandoahGeneration* generation, bool full_gc);
+ void parallel_cleaning(bool full_gc);
private:
void stw_unload_classes(bool full_gc);
void stw_process_weak_roots(bool full_gc);
- void stw_weak_refs(ShenandoahGeneration* generation, bool full_gc);
+ void stw_weak_refs(bool full_gc);
inline void assert_lock_for_affiliation(ShenandoahAffiliation orig_affiliation,
ShenandoahAffiliation new_affiliation);
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp
index ca0f7460d542e..df45a59433ec7 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp
@@ -315,9 +315,9 @@ void ShenandoahHeapRegion::make_trash_immediate() {
// On this path, we know there are no marked objects in the region,
// tell marking context about it to bypass bitmap resets.
- const ShenandoahHeap* heap = ShenandoahHeap::heap();
- assert(heap->generation_for(affiliation())->is_mark_complete(), "Marking should be complete here.");
- heap->marking_context()->reset_top_bitmap(this);
+ assert(ShenandoahHeap::heap()->gc_generation()->is_mark_complete(), "Marking should be complete here.");
+ shenandoah_assert_generations_reconciled();
+ ShenandoahHeap::heap()->marking_context()->reset_top_bitmap(this);
}
void ShenandoahHeapRegion::make_empty() {
@@ -461,9 +461,9 @@ bool ShenandoahHeapRegion::oop_coalesce_and_fill(bool cancellable) {
ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
ShenandoahMarkingContext* marking_context = heap->marking_context();
- // Expect marking to be completed for the old generation before we fill in unmarked objects
- assert(heap->old_generation()->is_mark_complete(), "sanity");
- assert(is_old(), "Only need to coalesce and fill old regions");
+ // Expect marking to be completed before these threads invoke this service.
+ assert(heap->gc_generation()->is_mark_complete(), "sanity");
+ shenandoah_assert_generations_reconciled();
// All objects above TAMS are considered live even though their mark bits will not be set. Note that young-
// gen evacuations that interrupt a long-running old-gen concurrent mark may promote objects into old-gen
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMark.cpp b/src/hotspot/share/gc/shenandoah/shenandoahMark.cpp
index a3c28e2c6d370..2a4149ee44dc4 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahMark.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMark.cpp
@@ -55,10 +55,10 @@ ShenandoahMark::ShenandoahMark(ShenandoahGeneration* generation) :
}
template
-void ShenandoahMark::mark_loop_prework(uint w, TaskTerminator *t, StringDedup::Requests* const req, bool update_refs) {
+void ShenandoahMark::mark_loop_prework(uint w, TaskTerminator *t, ShenandoahReferenceProcessor *rp, StringDedup::Requests* const req, bool update_refs) {
ShenandoahObjToScanQueue* q = get_queue(w);
ShenandoahObjToScanQueue* old_q = get_old_queue(w);
- ShenandoahReferenceProcessor *rp = _generation->ref_processor();
+
ShenandoahHeap* const heap = ShenandoahHeap::heap();
ShenandoahLiveData* ld = heap->get_liveness_cache(w);
@@ -78,22 +78,22 @@ void ShenandoahMark::mark_loop_prework(uint w, TaskTerminator *t, StringDedup::R
}
template
-void ShenandoahMark::mark_loop(uint worker_id, TaskTerminator* terminator,
- ShenandoahGenerationType generation_type, StringDedup::Requests* const req) {
+void ShenandoahMark::mark_loop(uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp,
+ ShenandoahGenerationType generation, StringDedup::Requests* const req) {
bool update_refs = ShenandoahHeap::heap()->has_forwarded_objects();
- switch (generation_type) {
+ switch (generation) {
case YOUNG:
- mark_loop_prework(worker_id, terminator, req, update_refs);
+ mark_loop_prework(worker_id, terminator, rp, req, update_refs);
break;
case OLD:
// Old generation collection only performs marking, it should not update references.
- mark_loop_prework(worker_id, terminator, req, false);
+ mark_loop_prework(worker_id, terminator, rp, req, false);
break;
case GLOBAL:
- mark_loop_prework(worker_id, terminator, req, update_refs);
+ mark_loop_prework(worker_id, terminator, rp, req, update_refs);
break;
case NON_GEN:
- mark_loop_prework(worker_id, terminator, req, update_refs);
+ mark_loop_prework(worker_id, terminator, rp, req, update_refs);
break;
default:
ShouldNotReachHere();
@@ -101,30 +101,30 @@ void ShenandoahMark::mark_loop(uint worker_id, TaskTerminator* terminator,
}
}
-void ShenandoahMark::mark_loop(uint worker_id, TaskTerminator* terminator, ShenandoahGenerationType generation_type,
- bool cancellable, StringDedupMode dedup_mode, StringDedup::Requests* const req) {
+void ShenandoahMark::mark_loop(uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp,
+ ShenandoahGenerationType generation, bool cancellable, StringDedupMode dedup_mode, StringDedup::Requests* const req) {
if (cancellable) {
switch(dedup_mode) {
case NO_DEDUP:
- mark_loop(worker_id, terminator, generation_type, req);
+ mark_loop(worker_id, terminator, rp, generation, req);
break;
case ENQUEUE_DEDUP:
- mark_loop(worker_id, terminator, generation_type, req);
+ mark_loop(worker_id, terminator, rp, generation, req);
break;
case ALWAYS_DEDUP:
- mark_loop(worker_id, terminator, generation_type, req);
+ mark_loop(worker_id, terminator, rp, generation, req);
break;
}
} else {
switch(dedup_mode) {
case NO_DEDUP:
- mark_loop(worker_id, terminator, generation_type, req);
+ mark_loop(worker_id, terminator, rp, generation, req);
break;
case ENQUEUE_DEDUP:
- mark_loop(worker_id, terminator, generation_type, req);
+ mark_loop(worker_id, terminator, rp, generation, req);
break;
case ALWAYS_DEDUP:
- mark_loop(worker_id, terminator, generation_type, req);
+ mark_loop(worker_id, terminator, rp, generation, req);
break;
}
}
@@ -139,8 +139,12 @@ void ShenandoahMark::mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint w
ShenandoahObjToScanQueue* q;
ShenandoahMarkTask t;
- assert(_generation->type() == GENERATION, "Sanity: %d != %d", _generation->type(), GENERATION);
- _generation->ref_processor()->set_mark_closure(worker_id, cl);
+ // Do not use active_generation() : we must use the gc_generation() set by
+ // ShenandoahGCScope on the ControllerThread's stack; no safepoint may
+ // intervene to update active_generation, so we can't
+ // shenandoah_assert_generations_reconciled() here.
+ assert(heap->gc_generation()->type() == GENERATION, "Sanity: %d != %d", heap->gc_generation()->type(), GENERATION);
+ heap->gc_generation()->ref_processor()->set_mark_closure(worker_id, cl);
/*
* Process outstanding queues, if any.
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMark.hpp b/src/hotspot/share/gc/shenandoah/shenandoahMark.hpp
index 2fbb106f4d7c8..4aef14f2c9aba 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahMark.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMark.hpp
@@ -41,6 +41,7 @@ enum StringDedupMode {
};
class ShenandoahMarkingContext;
+class ShenandoahReferenceProcessor;
// Base class for mark
// Mark class does not maintain states. Instead, mark states are
@@ -71,7 +72,7 @@ class ShenandoahMark: public StackObj {
inline ShenandoahObjToScanQueue* get_queue(uint index) const;
inline ShenandoahObjToScanQueue* get_old_queue(uint index) const;
- ShenandoahGeneration* generation() const { return _generation; };
+ inline ShenandoahGeneration* generation() { return _generation; };
private:
// ---------- Marking loop and tasks
@@ -92,7 +93,7 @@ class ShenandoahMark: public StackObj {
void mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint worker_id, TaskTerminator *t, StringDedup::Requests* const req);
template
- void mark_loop_prework(uint worker_id, TaskTerminator *terminator, StringDedup::Requests* const req, bool update_refs);
+ void mark_loop_prework(uint worker_id, TaskTerminator *terminator, ShenandoahReferenceProcessor *rp, StringDedup::Requests* const req, bool update_refs);
template
static bool in_generation(ShenandoahHeap* const heap, oop obj);
@@ -108,11 +109,11 @@ class ShenandoahMark: public StackObj {
inline void dedup_string(oop obj, StringDedup::Requests* const req);
protected:
template
- void mark_loop(uint worker_id, TaskTerminator* terminator, ShenandoahGenerationType generation_type,
- StringDedup::Requests* const req);
+ void mark_loop(uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp,
+ ShenandoahGenerationType generation, StringDedup::Requests* const req);
- void mark_loop(uint worker_id, TaskTerminator* terminator, ShenandoahGenerationType generation_type,
- bool cancellable, StringDedupMode dedup_mode, StringDedup::Requests* const req);
+ void mark_loop(uint worker_id, TaskTerminator* terminator, ShenandoahReferenceProcessor *rp,
+ ShenandoahGenerationType generation, bool cancellable, StringDedupMode dedup_mode, StringDedup::Requests* const req);
};
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHMARK_HPP
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.cpp b/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.cpp
index 40eee8c342bad..0babeaffd3e0e 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.cpp
@@ -74,8 +74,8 @@ void ShenandoahMarkingContext::initialize_top_at_mark_start(ShenandoahHeapRegion
_top_at_mark_starts_base[idx] = bottom;
_top_bitmaps[idx] = bottom;
- log_debug(gc, mark)("SMC:initialize_top_at_mark_start for Region %zu, TAMS: " PTR_FORMAT ", TopOfBitMap: " PTR_FORMAT,
- r->index(), p2i(bottom), p2i(r->end()));
+ log_debug(gc)("SMC:initialize_top_at_mark_start for Region %zu, TAMS: " PTR_FORMAT ", TopOfBitMap: " PTR_FORMAT,
+ r->index(), p2i(bottom), p2i(r->end()));
}
HeapWord* ShenandoahMarkingContext::top_bitmap(ShenandoahHeapRegion* r) {
@@ -86,8 +86,8 @@ void ShenandoahMarkingContext::clear_bitmap(ShenandoahHeapRegion* r) {
HeapWord* bottom = r->bottom();
HeapWord* top_bitmap = _top_bitmaps[r->index()];
- log_debug(gc, mark)("SMC:clear_bitmap for %s Region %zu, top_bitmap: " PTR_FORMAT,
- r->affiliation_name(), r->index(), p2i(top_bitmap));
+ log_debug(gc)("SMC:clear_bitmap for %s Region %zu, top_bitmap: " PTR_FORMAT,
+ r->affiliation_name(), r->index(), p2i(top_bitmap));
if (top_bitmap > bottom) {
_mark_bit_map.clear_range_large(MemRegion(bottom, top_bitmap));
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.inline.hpp
index bff4afc9ce9d0..e3ba774283c18 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.inline.hpp
@@ -104,8 +104,8 @@ inline void ShenandoahMarkingContext::capture_top_at_mark_start(ShenandoahHeapRe
"Region %zu, bitmap should be clear while adjusting TAMS: " PTR_FORMAT " -> " PTR_FORMAT,
idx, p2i(old_tams), p2i(new_tams));
- log_debug(gc, mark)("Capturing TAMS for %s Region %zu, was: " PTR_FORMAT ", now: " PTR_FORMAT,
- r->affiliation_name(), idx, p2i(old_tams), p2i(new_tams));
+ log_debug(gc)("Capturing TAMS for %s Region %zu, was: " PTR_FORMAT ", now: " PTR_FORMAT,
+ r->affiliation_name(), idx, p2i(old_tams), p2i(new_tams));
_top_at_mark_starts_base[idx] = new_tams;
_top_bitmaps[idx] = new_tams;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp
index 3e9f3a490df9f..1724fc2849f76 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp
@@ -49,7 +49,7 @@ void ShenandoahOldGC::op_final_mark() {
assert(!heap->has_forwarded_objects(), "No forwarded objects on this path");
if (ShenandoahVerify) {
- heap->verifier()->verify_roots_no_forwarded(_old_generation);
+ heap->verifier()->verify_roots_no_forwarded();
}
if (!heap->cancelled_gc()) {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp b/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp
index f37329d1c4499..4ca6f2fdf4900 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp
@@ -329,31 +329,25 @@ bool ShenandoahReferenceProcessor::should_drop(oop reference, ReferenceType type
return true;
}
- shenandoah_assert_mark_complete(raw_referent);
ShenandoahHeap* heap = ShenandoahHeap::heap();
- // Check if the referent is still alive, in which case we should drop the reference.
+ // Check if the referent is still alive, in which case we should
+ // drop the reference.
if (type == REF_PHANTOM) {
- return heap->marking_context()->is_marked(raw_referent);
+ return heap->active_generation()->complete_marking_context()->is_marked(raw_referent);
} else {
- return heap->marking_context()->is_marked_strong(raw_referent);
+ return heap->active_generation()->complete_marking_context()->is_marked_strong(raw_referent);
}
}
template
void ShenandoahReferenceProcessor::make_inactive(oop reference, ReferenceType type) const {
if (type == REF_FINAL) {
-#ifdef ASSERT
- auto referent = reference_referent_raw(reference);
- auto heap = ShenandoahHeap::heap();
- shenandoah_assert_mark_complete(referent);
- assert(reference_next(reference) == nullptr, "Already inactive");
- assert(heap->marking_context()->is_marked(referent), "only make inactive final refs with alive referents");
-#endif
-
// Don't clear referent. It is needed by the Finalizer thread to make the call
// to finalize(). A FinalReference is instead made inactive by self-looping the
// next field. An application can't call FinalReference.enqueue(), so there is
// no race to worry about when setting the next field.
+ assert(reference_next(reference) == nullptr, "Already inactive");
+ assert(ShenandoahHeap::heap()->active_generation()->complete_marking_context()->is_marked(reference_referent_raw(reference)), "only make inactive final refs with alive referents");
reference_set_next(reference, reference);
} else {
// Clear referent
@@ -443,12 +437,8 @@ oop ShenandoahReferenceProcessor::drop(oop reference, ReferenceType type) {
HeapWord* raw_referent = reference_referent_raw(reference);
#ifdef ASSERT
- if (raw_referent != nullptr) {
- ShenandoahHeap* heap = ShenandoahHeap::heap();
- ShenandoahHeapRegion* region = heap->heap_region_containing(raw_referent);
- ShenandoahMarkingContext* ctx = heap->generation_for(region->affiliation())->complete_marking_context();
- assert(ctx->is_marked(raw_referent), "only drop references with alive referents");
- }
+ assert(raw_referent == nullptr || ShenandoahHeap::heap()->active_generation()->complete_marking_context()->is_marked(raw_referent),
+ "only drop references with alive referents");
#endif
// Unlink and return next in list
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahRegulatorThread.cpp b/src/hotspot/share/gc/shenandoah/shenandoahRegulatorThread.cpp
index 964b6f0a10ae2..774c4f7d9413e 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahRegulatorThread.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRegulatorThread.cpp
@@ -58,7 +58,6 @@ void ShenandoahRegulatorThread::run_service() {
void ShenandoahRegulatorThread::regulate_young_and_old_cycles() {
while (!should_terminate()) {
- SuspendibleThreadSetJoiner joiner;
ShenandoahGenerationalControlThread::GCMode mode = _control_thread->gc_mode();
if (mode == ShenandoahGenerationalControlThread::none) {
if (should_start_metaspace_gc()) {
@@ -96,7 +95,6 @@ void ShenandoahRegulatorThread::regulate_young_and_old_cycles() {
void ShenandoahRegulatorThread::regulate_young_and_global_cycles() {
while (!should_terminate()) {
- SuspendibleThreadSetJoiner joiner;
if (_control_thread->gc_mode() == ShenandoahGenerationalControlThread::none) {
if (start_global_cycle()) {
log_debug(gc)("Heuristics request for global collection accepted.");
@@ -124,7 +122,6 @@ void ShenandoahRegulatorThread::regulator_sleep() {
_last_sleep_adjust_time = current;
}
- SuspendibleThreadSetLeaver leaver;
os::naked_short_sleep(_sleep);
if (LogTarget(Debug, gc, thread)::is_enabled()) {
double elapsed = os::elapsedTime() - current;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp
index 9e6b19607089a..a56113868be76 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp
@@ -58,9 +58,9 @@ void ShenandoahJavaThreadsIterator::threads_do(ThreadClosure* cl, uint worker_id
}
ShenandoahThreadRoots::ShenandoahThreadRoots(ShenandoahPhaseTimings::Phase phase, bool is_par) :
- _phase(phase),
- _is_par(is_par),
- _threads_claim_token_scope() {}
+ _phase(phase), _is_par(is_par) {
+ Threads::change_thread_claim_token();
+}
void ShenandoahThreadRoots::oops_do(OopClosure* oops_cl, NMethodClosure* code_cl, uint worker_id) {
ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::ThreadRoots, worker_id);
@@ -74,6 +74,10 @@ void ShenandoahThreadRoots::threads_do(ThreadClosure* tc, uint worker_id) {
Threads::possibly_parallel_threads_do(_is_par, tc);
}
+ShenandoahThreadRoots::~ShenandoahThreadRoots() {
+ Threads::assert_all_threads_claimed();
+}
+
ShenandoahCodeCacheRoots::ShenandoahCodeCacheRoots(ShenandoahPhaseTimings::Phase phase) : _phase(phase) {
}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp
index 29d8c9fac2d15..40d4077256d04 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp
@@ -33,7 +33,6 @@
#include "gc/shenandoah/shenandoahSharedVariables.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "memory/iterator.hpp"
-#include "runtime/threads.hpp"
template
class ShenandoahVMWeakRoots {
@@ -88,10 +87,10 @@ class ShenandoahJavaThreadsIterator {
class ShenandoahThreadRoots {
private:
ShenandoahPhaseTimings::Phase _phase;
- const bool _is_par;
- ThreadsClaimTokenScope _threads_claim_token_scope;
+ const bool _is_par;
public:
ShenandoahThreadRoots(ShenandoahPhaseTimings::Phase phase, bool is_par);
+ ~ShenandoahThreadRoots();
void oops_do(OopClosure* oops_cl, NMethodClosure* code_cl, uint worker_id);
void threads_do(ThreadClosure* tc, uint worker_id);
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.cpp
index 23edc780e4744..11ff92cd9ccf3 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.cpp
@@ -61,7 +61,7 @@ ShenandoahGCStateResetter::~ShenandoahGCStateResetter() {
assert(_heap->gc_state() == _saved_gc_state, "Should be restored");
}
-void ShenandoahRootVerifier::roots_do(OopIterateClosure* oops, ShenandoahGeneration* generation) {
+void ShenandoahRootVerifier::roots_do(OopIterateClosure* oops) {
ShenandoahGCStateResetter resetter;
shenandoah_assert_safepoint();
@@ -75,9 +75,9 @@ void ShenandoahRootVerifier::roots_do(OopIterateClosure* oops, ShenandoahGenerat
OopStorageSet::storage(id)->oops_do(oops);
}
- if (generation->is_young()) {
+ ShenandoahHeap* heap = ShenandoahHeap::heap();
+ if (heap->mode()->is_generational() && heap->active_generation()->is_young()) {
shenandoah_assert_safepoint();
- shenandoah_assert_generational();
ShenandoahGenerationalHeap::heap()->old_generation()->card_scan()->roots_do(oops);
}
@@ -87,7 +87,7 @@ void ShenandoahRootVerifier::roots_do(OopIterateClosure* oops, ShenandoahGenerat
Threads::possibly_parallel_oops_do(true, oops, nullptr);
}
-void ShenandoahRootVerifier::strong_roots_do(OopIterateClosure* oops, ShenandoahGeneration* generation) {
+void ShenandoahRootVerifier::strong_roots_do(OopIterateClosure* oops) {
ShenandoahGCStateResetter resetter;
shenandoah_assert_safepoint();
@@ -98,8 +98,8 @@ void ShenandoahRootVerifier::strong_roots_do(OopIterateClosure* oops, Shenandoah
OopStorageSet::storage(id)->oops_do(oops);
}
- if (generation->is_young()) {
- shenandoah_assert_generational();
+ ShenandoahHeap* heap = ShenandoahHeap::heap();
+ if (heap->mode()->is_generational() && heap->active_generation()->is_young()) {
ShenandoahGenerationalHeap::heap()->old_generation()->card_scan()->roots_do(oops);
}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.hpp b/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.hpp
index 1f3cb40046577..405c69c4160e6 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.hpp
@@ -43,10 +43,8 @@ class ShenandoahGCStateResetter : public StackObj {
class ShenandoahRootVerifier : public AllStatic {
public:
// Used to seed ShenandoahVerifier, do not honor root type filter
- // The generation parameter here may be young or global. If it is young,
- // then the roots will include the remembered set.
- static void roots_do(OopIterateClosure* cl, ShenandoahGeneration* generation);
- static void strong_roots_do(OopIterateClosure* cl, ShenandoahGeneration* generation);
+ static void roots_do(OopIterateClosure* cl);
+ static void strong_roots_do(OopIterateClosure* cl);
};
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHROOTVERIFIER_HPP
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahSTWMark.cpp b/src/hotspot/share/gc/shenandoah/shenandoahSTWMark.cpp
index 117984a6d41c5..53391a3e224f3 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahSTWMark.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahSTWMark.cpp
@@ -77,13 +77,15 @@ void ShenandoahSTWMark::mark() {
ShenandoahCodeRoots::arm_nmethods_for_mark();
// Weak reference processing
+ assert(ShenandoahHeap::heap()->gc_generation() == _generation, "Marking unexpected generation");
ShenandoahReferenceProcessor* rp = _generation->ref_processor();
+ shenandoah_assert_generations_reconciled();
rp->reset_thread_locals();
// Init mark, do not expect forwarded pointers in roots
if (ShenandoahVerify) {
assert(Thread::current()->is_VM_thread(), "Must be");
- heap->verifier()->verify_roots_no_forwarded(_generation);
+ heap->verifier()->verify_roots_no_forwarded();
}
start_mark();
@@ -117,6 +119,7 @@ void ShenandoahSTWMark::mark() {
}
void ShenandoahSTWMark::mark_roots(uint worker_id) {
+ assert(ShenandoahHeap::heap()->gc_generation() == _generation, "Marking unexpected generation");
ShenandoahReferenceProcessor* rp = _generation->ref_processor();
auto queue = task_queues()->queue(worker_id);
switch (_generation->type()) {
@@ -145,10 +148,14 @@ void ShenandoahSTWMark::mark_roots(uint worker_id) {
}
void ShenandoahSTWMark::finish_mark(uint worker_id) {
+ assert(ShenandoahHeap::heap()->gc_generation() == _generation, "Marking unexpected generation");
ShenandoahPhaseTimings::Phase phase = _full_gc ? ShenandoahPhaseTimings::full_gc_mark : ShenandoahPhaseTimings::degen_gc_stw_mark;
ShenandoahWorkerTimingsTracker timer(phase, ShenandoahPhaseTimings::ParallelMark, worker_id);
+ ShenandoahReferenceProcessor* rp = _generation->ref_processor();
+ shenandoah_assert_generations_reconciled();
StringDedup::Requests requests;
- mark_loop(worker_id, &_terminator, _generation->type(), false /* not cancellable */,
+ mark_loop(worker_id, &_terminator, rp,
+ _generation->type(), false /* not cancellable */,
ShenandoahStringDedup::is_enabled() ? ALWAYS_DEDUP : NO_DEDUP, &requests);
}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp
index 4a0215f15f199..23c705348c409 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp
@@ -683,9 +683,9 @@ void ShenandoahScanRememberedTask::do_work(uint worker_id) {
struct ShenandoahRegionChunk assignment;
while (_work_list->next(&assignment)) {
ShenandoahHeapRegion* region = assignment._r;
- log_debug(gc, remset)("ShenandoahScanRememberedTask::do_work(%u), processing slice of region "
- "%zu at offset %zu, size: %zu",
- worker_id, region->index(), assignment._chunk_offset, assignment._chunk_size);
+ log_debug(gc)("ShenandoahScanRememberedTask::do_work(%u), processing slice of region "
+ "%zu at offset %zu, size: %zu",
+ worker_id, region->index(), assignment._chunk_offset, assignment._chunk_size);
if (region->is_old()) {
size_t cluster_size =
CardTable::card_size_in_words() * ShenandoahCardCluster::CardsPerCluster;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.inline.hpp
index 919cc4f6fd796..ce7cda984121a 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.inline.hpp
@@ -343,9 +343,9 @@ ShenandoahScanRemembered::process_region_slice(ShenandoahHeapRegion *region, siz
}
}
- log_debug(gc, remset)("Remembered set scan processing Region %zu, from " PTR_FORMAT " to " PTR_FORMAT ", using %s table",
- region->index(), p2i(start_of_range), p2i(end_of_range),
- use_write_table? "read/write (updating)": "read (marking)");
+ log_debug(gc)("Remembered set scan processing Region %zu, from " PTR_FORMAT " to " PTR_FORMAT ", using %s table",
+ region->index(), p2i(start_of_range), p2i(end_of_range),
+ use_write_table? "read/write (updating)": "read (marking)");
// Note that end_of_range may point to the middle of a cluster because we limit scanning to
// region->top() or region->get_update_watermark(). We avoid processing past end_of_range.
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp
index 6b45842f78174..0137492f06f81 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.cpp
@@ -50,14 +50,16 @@ void VM_ShenandoahOperation::doit_epilogue() {
void VM_ShenandoahOperation::log_active_generation(const char* prefix) {
ShenandoahGeneration* agen = ShenandoahHeap::heap()->active_generation();
- log_debug(gc, heap)("%s: active_generation is %s", prefix,
- agen == nullptr ? "nullptr" : shenandoah_generation_name(agen->type()));
+ ShenandoahGeneration* ggen = ShenandoahHeap::heap()->gc_generation();
+ log_debug(gc, heap)("%s: active_generation is %s, gc_generation is %s", prefix,
+ agen == nullptr ? "nullptr" : shenandoah_generation_name(agen->type()),
+ ggen == nullptr ? "nullptr" : shenandoah_generation_name(ggen->type()));
}
void VM_ShenandoahOperation::set_active_generation() {
if (evaluate_at_safepoint()) {
assert(SafepointSynchronize::is_at_safepoint(), "Error??");
- ShenandoahHeap::heap()->set_active_generation(_generation);
+ ShenandoahHeap::heap()->set_active_generation();
}
}
@@ -75,70 +77,42 @@ void VM_ShenandoahReferenceOperation::doit_epilogue() {
Heap_lock->unlock();
}
-VM_ShenandoahInitMark::VM_ShenandoahInitMark(ShenandoahConcurrentGC* gc)
- : VM_ShenandoahOperation(gc->generation()), _gc(gc) {
-}
-
void VM_ShenandoahInitMark::doit() {
ShenandoahGCPauseMark mark(_gc_id, "Init Mark", SvcGCMarker::CONCURRENT);
set_active_generation();
_gc->entry_init_mark();
}
-VM_ShenandoahFinalMarkStartEvac::VM_ShenandoahFinalMarkStartEvac(ShenandoahConcurrentGC* gc)
- : VM_ShenandoahOperation(gc->generation()), _gc(gc) {
-}
-
void VM_ShenandoahFinalMarkStartEvac::doit() {
ShenandoahGCPauseMark mark(_gc_id, "Final Mark", SvcGCMarker::CONCURRENT);
set_active_generation();
_gc->entry_final_mark();
}
-VM_ShenandoahFullGC::VM_ShenandoahFullGC(GCCause::Cause gc_cause, ShenandoahFullGC* full_gc)
- : VM_ShenandoahReferenceOperation(full_gc->generation()), _gc_cause(gc_cause), _full_gc(full_gc) {
-}
-
void VM_ShenandoahFullGC::doit() {
ShenandoahGCPauseMark mark(_gc_id, "Full GC", SvcGCMarker::FULL);
set_active_generation();
_full_gc->entry_full(_gc_cause);
}
-VM_ShenandoahDegeneratedGC::VM_ShenandoahDegeneratedGC(ShenandoahDegenGC* gc)
- : VM_ShenandoahReferenceOperation(gc->generation()), _gc(gc) {
-}
-
void VM_ShenandoahDegeneratedGC::doit() {
ShenandoahGCPauseMark mark(_gc_id, "Degenerated GC", SvcGCMarker::CONCURRENT);
set_active_generation();
_gc->entry_degenerated();
}
-VM_ShenandoahInitUpdateRefs::VM_ShenandoahInitUpdateRefs(ShenandoahConcurrentGC* gc)
- : VM_ShenandoahOperation(gc->generation()), _gc(gc) {
-}
-
void VM_ShenandoahInitUpdateRefs::doit() {
ShenandoahGCPauseMark mark(_gc_id, "Init Update Refs", SvcGCMarker::CONCURRENT);
set_active_generation();
_gc->entry_init_update_refs();
}
-VM_ShenandoahFinalUpdateRefs::VM_ShenandoahFinalUpdateRefs(ShenandoahConcurrentGC* gc)
- : VM_ShenandoahOperation(gc->generation()), _gc(gc) {
-}
-
void VM_ShenandoahFinalUpdateRefs::doit() {
ShenandoahGCPauseMark mark(_gc_id, "Final Update Refs", SvcGCMarker::CONCURRENT);
set_active_generation();
_gc->entry_final_update_refs();
}
-VM_ShenandoahFinalRoots::VM_ShenandoahFinalRoots(ShenandoahConcurrentGC* gc)
- : VM_ShenandoahOperation(gc->generation()), _gc(gc) {
-}
-
void VM_ShenandoahFinalRoots::doit() {
ShenandoahGCPauseMark mark(_gc_id, "Final Roots", SvcGCMarker::CONCURRENT);
set_active_generation();
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.hpp b/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.hpp
index d565a3df22c96..291fadd18874e 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVMOperations.hpp
@@ -46,15 +46,10 @@ class ShenandoahFullGC;
class VM_ShenandoahOperation : public VM_Operation {
protected:
uint _gc_id;
- ShenandoahGeneration* _generation;
void set_active_generation();
public:
- explicit VM_ShenandoahOperation(ShenandoahGeneration* generation)
- : _gc_id(GCId::current())
- , _generation(generation) {
- }
-
+ VM_ShenandoahOperation() : _gc_id(GCId::current()) {};
bool skip_thread_oop_barriers() const override { return true; }
void log_active_generation(const char* prefix);
@@ -66,74 +61,93 @@ class VM_ShenandoahOperation : public VM_Operation {
class VM_ShenandoahReferenceOperation : public VM_ShenandoahOperation {
public:
- explicit VM_ShenandoahReferenceOperation(ShenandoahGeneration* generation)
- : VM_ShenandoahOperation(generation) {};
+ VM_ShenandoahReferenceOperation() : VM_ShenandoahOperation() {};
bool doit_prologue() override;
void doit_epilogue() override;
};
class VM_ShenandoahInitMark: public VM_ShenandoahOperation {
+private:
ShenandoahConcurrentGC* const _gc;
public:
- explicit VM_ShenandoahInitMark(ShenandoahConcurrentGC* gc);
- VM_Operation::VMOp_Type type() const override { return VMOp_ShenandoahInitMark; }
- const char* name() const override { return "Shenandoah Init Marking"; }
- void doit() override;
+ VM_ShenandoahInitMark(ShenandoahConcurrentGC* gc) :
+ VM_ShenandoahOperation(),
+ _gc(gc) {};
+ VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahInitMark; }
+ const char* name() const { return "Shenandoah Init Marking"; }
+ virtual void doit();
};
class VM_ShenandoahFinalMarkStartEvac: public VM_ShenandoahOperation {
+private:
ShenandoahConcurrentGC* const _gc;
public:
- explicit VM_ShenandoahFinalMarkStartEvac(ShenandoahConcurrentGC* gc);
- VM_Operation::VMOp_Type type() const override { return VMOp_ShenandoahFinalMarkStartEvac; }
- const char* name() const override { return "Shenandoah Final Mark and Start Evacuation"; }
- void doit() override;
+ VM_ShenandoahFinalMarkStartEvac(ShenandoahConcurrentGC* gc) :
+ VM_ShenandoahOperation(),
+ _gc(gc) {};
+ VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahFinalMarkStartEvac; }
+ const char* name() const { return "Shenandoah Final Mark and Start Evacuation"; }
+ virtual void doit();
};
class VM_ShenandoahDegeneratedGC: public VM_ShenandoahReferenceOperation {
+private:
ShenandoahDegenGC* const _gc;
public:
- explicit VM_ShenandoahDegeneratedGC(ShenandoahDegenGC* gc);
- VM_Operation::VMOp_Type type() const override { return VMOp_ShenandoahDegeneratedGC; }
- const char* name() const override { return "Shenandoah Degenerated GC"; }
- void doit() override;
+ VM_ShenandoahDegeneratedGC(ShenandoahDegenGC* gc) :
+ VM_ShenandoahReferenceOperation(),
+ _gc(gc) {};
+
+ VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahDegeneratedGC; }
+ const char* name() const { return "Shenandoah Degenerated GC"; }
+ virtual void doit();
};
class VM_ShenandoahFullGC : public VM_ShenandoahReferenceOperation {
+private:
GCCause::Cause _gc_cause;
ShenandoahFullGC* const _full_gc;
public:
- explicit VM_ShenandoahFullGC(GCCause::Cause gc_cause, ShenandoahFullGC* full_gc);
- VM_Operation::VMOp_Type type() const override { return VMOp_ShenandoahFullGC; }
- const char* name() const override { return "Shenandoah Full GC"; }
- void doit() override;
+ VM_ShenandoahFullGC(GCCause::Cause gc_cause, ShenandoahFullGC* full_gc) :
+ VM_ShenandoahReferenceOperation(),
+ _gc_cause(gc_cause),
+ _full_gc(full_gc) {};
+ VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahFullGC; }
+ const char* name() const { return "Shenandoah Full GC"; }
+ virtual void doit();
};
class VM_ShenandoahInitUpdateRefs: public VM_ShenandoahOperation {
ShenandoahConcurrentGC* const _gc;
public:
- explicit VM_ShenandoahInitUpdateRefs(ShenandoahConcurrentGC* gc);
- VM_Operation::VMOp_Type type() const override { return VMOp_ShenandoahInitUpdateRefs; }
- const char* name() const override { return "Shenandoah Init Update References"; }
- void doit() override;
+ VM_ShenandoahInitUpdateRefs(ShenandoahConcurrentGC* gc) :
+ VM_ShenandoahOperation(),
+ _gc(gc) {};
+ VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahInitUpdateRefs; }
+ const char* name() const { return "Shenandoah Init Update References"; }
+ virtual void doit();
};
class VM_ShenandoahFinalUpdateRefs: public VM_ShenandoahOperation {
ShenandoahConcurrentGC* const _gc;
public:
- explicit VM_ShenandoahFinalUpdateRefs(ShenandoahConcurrentGC* gc);
- VM_Operation::VMOp_Type type() const override { return VMOp_ShenandoahFinalUpdateRefs; }
- const char* name() const override { return "Shenandoah Final Update References"; }
- void doit() override;
+ VM_ShenandoahFinalUpdateRefs(ShenandoahConcurrentGC* gc) :
+ VM_ShenandoahOperation(),
+ _gc(gc) {};
+ VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahFinalUpdateRefs; }
+ const char* name() const { return "Shenandoah Final Update References"; }
+ virtual void doit();
};
class VM_ShenandoahFinalRoots: public VM_ShenandoahOperation {
ShenandoahConcurrentGC* const _gc;
public:
- explicit VM_ShenandoahFinalRoots(ShenandoahConcurrentGC* gc);
- VM_Operation::VMOp_Type type() const override { return VMOp_ShenandoahFinalRoots; }
- const char* name() const override { return "Shenandoah Final Roots"; }
- void doit() override;
+ VM_ShenandoahFinalRoots(ShenandoahConcurrentGC* gc) :
+ VM_ShenandoahOperation(),
+ _gc(gc) {};
+ VM_Operation::VMOp_Type type() const { return VMOp_ShenandoahFinalRoots; }
+ const char* name() const { return "Shenandoah Final Roots"; }
+ virtual void doit();
};
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHVMOPERATIONS_HPP
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
index fb5fbbd00a152..c84a2a656771b 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
@@ -70,8 +70,7 @@ class ShenandoahVerifyOopClosure : public BasicOopIterateClosure {
ShenandoahGeneration* _generation;
public:
- ShenandoahVerifyOopClosure(ShenandoahGeneration* generation, ShenandoahVerifierStack* stack,
- MarkBitMap* map, ShenandoahLivenessData* ld,
+ ShenandoahVerifyOopClosure(ShenandoahVerifierStack* stack, MarkBitMap* map, ShenandoahLivenessData* ld,
const char* phase, ShenandoahVerifier::VerifyOptions options) :
_phase(phase),
_options(options),
@@ -81,7 +80,7 @@ class ShenandoahVerifyOopClosure : public BasicOopIterateClosure {
_ld(ld),
_interior_loc(nullptr),
_loc(nullptr),
- _generation(generation) {
+ _generation(nullptr) {
if (options._verify_marked == ShenandoahVerifier::_verify_marked_complete_except_references ||
options._verify_marked == ShenandoahVerifier::_verify_marked_complete_satb_empty ||
options._verify_marked == ShenandoahVerifier::_verify_marked_disable) {
@@ -93,6 +92,12 @@ class ShenandoahVerifyOopClosure : public BasicOopIterateClosure {
// Otherwise do all fields.
_ref_mode = DO_FIELDS;
}
+
+ if (_heap->mode()->is_generational()) {
+ _generation = _heap->gc_generation();
+ assert(_generation != nullptr, "Expected active generation in this mode");
+ shenandoah_assert_generations_reconciled();
+ }
}
ReferenceIterationMode reference_iteration_mode() override {
@@ -126,7 +131,11 @@ class ShenandoahVerifyOopClosure : public BasicOopIterateClosure {
}
}
- bool in_generation(oop obj) const {
+ bool in_generation(oop obj) {
+ if (_generation == nullptr) {
+ return true;
+ }
+
ShenandoahHeapRegion* region = _heap->heap_region_containing(obj);
return _generation->contains(region);
}
@@ -188,8 +197,9 @@ class ShenandoahVerifyOopClosure : public BasicOopIterateClosure {
// fallthrough for fast failure for un-live regions:
case ShenandoahVerifier::_verify_liveness_conservative:
check(ShenandoahAsserts::_safe_oop, obj, obj_reg->has_live() ||
- (obj_reg->is_old() && _generation->is_young()),
+ (obj_reg->is_old() && _heap->gc_generation()->is_young()),
"Object must belong to region with live data");
+ shenandoah_assert_generations_reconciled();
break;
default:
assert(false, "Unhandled liveness verification");
@@ -266,12 +276,12 @@ class ShenandoahVerifyOopClosure : public BasicOopIterateClosure {
"Must be marked in incomplete bitmap");
break;
case ShenandoahVerifier::_verify_marked_complete:
- check(ShenandoahAsserts::_safe_all, obj, _generation->complete_marking_context()->is_marked(obj),
+ check(ShenandoahAsserts::_safe_all, obj, _heap->gc_generation()->complete_marking_context()->is_marked(obj),
"Must be marked in complete bitmap");
break;
case ShenandoahVerifier::_verify_marked_complete_except_references:
case ShenandoahVerifier::_verify_marked_complete_satb_empty:
- check(ShenandoahAsserts::_safe_all, obj, _generation->complete_marking_context()->is_marked(obj),
+ check(ShenandoahAsserts::_safe_all, obj, _heap->gc_generation()->complete_marking_context()->is_marked(obj),
"Must be marked in complete bitmap, except j.l.r.Reference referents");
break;
default:
@@ -561,11 +571,9 @@ class ShenandoahVerifierReachableTask : public WorkerTask {
ShenandoahLivenessData* _ld;
MarkBitMap* _bitmap;
volatile size_t _processed;
- ShenandoahGeneration* _generation;
public:
- ShenandoahVerifierReachableTask(ShenandoahGeneration* generation,
- MarkBitMap* bitmap,
+ ShenandoahVerifierReachableTask(MarkBitMap* bitmap,
ShenandoahLivenessData* ld,
const char* label,
ShenandoahVerifier::VerifyOptions options) :
@@ -575,8 +583,7 @@ class ShenandoahVerifierReachableTask : public WorkerTask {
_heap(ShenandoahHeap::heap()),
_ld(ld),
_bitmap(bitmap),
- _processed(0),
- _generation(generation) {};
+ _processed(0) {};
size_t processed() const {
return _processed;
@@ -592,20 +599,20 @@ class ShenandoahVerifierReachableTask : public WorkerTask {
// extended parallelism would buy us out.
if (((ShenandoahVerifyLevel == 2) && (worker_id == 0))
|| (ShenandoahVerifyLevel >= 3)) {
- ShenandoahVerifyOopClosure cl(_generation, &stack, _bitmap, _ld,
+ ShenandoahVerifyOopClosure cl(&stack, _bitmap, _ld,
ShenandoahMessageBuffer("%s, Roots", _label),
_options);
if (_heap->unload_classes()) {
- ShenandoahRootVerifier::strong_roots_do(&cl, _generation);
+ ShenandoahRootVerifier::strong_roots_do(&cl);
} else {
- ShenandoahRootVerifier::roots_do(&cl, _generation);
+ ShenandoahRootVerifier::roots_do(&cl);
}
}
size_t processed = 0;
if (ShenandoahVerifyLevel >= 3) {
- ShenandoahVerifyOopClosure cl(_generation, &stack, _bitmap, _ld,
+ ShenandoahVerifyOopClosure cl(&stack, _bitmap, _ld,
ShenandoahMessageBuffer("%s, Reachable", _label),
_options);
while (!stack.is_empty()) {
@@ -641,8 +648,7 @@ class ShenandoahVerifierMarkedRegionTask : public WorkerTask {
ShenandoahGeneration* _generation;
public:
- ShenandoahVerifierMarkedRegionTask(ShenandoahGeneration* generation,
- MarkBitMap* bitmap,
+ ShenandoahVerifierMarkedRegionTask(MarkBitMap* bitmap,
ShenandoahLivenessData* ld,
const char* label,
ShenandoahVerifier::VerifyOptions options) :
@@ -654,7 +660,13 @@ class ShenandoahVerifierMarkedRegionTask : public WorkerTask {
_ld(ld),
_claimed(0),
_processed(0),
- _generation(generation) {}
+ _generation(nullptr) {
+ if (_heap->mode()->is_generational()) {
+ _generation = _heap->gc_generation();
+ assert(_generation != nullptr, "Expected active generation in this mode.");
+ shenandoah_assert_generations_reconciled();
+ }
+ };
size_t processed() {
return AtomicAccess::load(&_processed);
@@ -667,7 +679,7 @@ class ShenandoahVerifierMarkedRegionTask : public WorkerTask {
}
ShenandoahVerifierStack stack;
- ShenandoahVerifyOopClosure cl(_generation, &stack, _bitmap, _ld,
+ ShenandoahVerifyOopClosure cl(&stack, _bitmap, _ld,
ShenandoahMessageBuffer("%s, Marked", _label),
_options);
@@ -690,14 +702,14 @@ class ShenandoahVerifierMarkedRegionTask : public WorkerTask {
}
}
- bool in_generation(ShenandoahHeapRegion* r) const {
- return _generation->contains(r);
+ bool in_generation(ShenandoahHeapRegion* r) {
+ return _generation == nullptr || _generation->contains(r);
}
virtual void work_humongous(ShenandoahHeapRegion *r, ShenandoahVerifierStack& stack, ShenandoahVerifyOopClosure& cl) {
size_t processed = 0;
HeapWord* obj = r->bottom();
- if (_generation->complete_marking_context()->is_marked(cast_to_oop(obj))) {
+ if (_heap->gc_generation()->complete_marking_context()->is_marked(cast_to_oop(obj))) {
verify_and_follow(obj, stack, cl, &processed);
}
AtomicAccess::add(&_processed, processed, memory_order_relaxed);
@@ -705,7 +717,7 @@ class ShenandoahVerifierMarkedRegionTask : public WorkerTask {
virtual void work_regular(ShenandoahHeapRegion *r, ShenandoahVerifierStack &stack, ShenandoahVerifyOopClosure &cl) {
size_t processed = 0;
- ShenandoahMarkingContext* ctx = _generation->complete_marking_context();
+ ShenandoahMarkingContext* ctx = _heap->gc_generation()->complete_marking_context();
HeapWord* tams = ctx->top_at_mark_start(r);
// Bitmaps, before TAMS
@@ -782,8 +794,7 @@ class VerifyThreadGCState : public ThreadClosure {
}
};
-void ShenandoahVerifier::verify_at_safepoint(ShenandoahGeneration* generation,
- const char* label,
+void ShenandoahVerifier::verify_at_safepoint(const char* label,
VerifyRememberedSet remembered,
VerifyForwarded forwarded,
VerifyMarked marked,
@@ -885,7 +896,16 @@ void ShenandoahVerifier::verify_at_safepoint(ShenandoahGeneration* generation,
log_debug(gc)("Safepoint verification finished heap usage verification");
+ ShenandoahGeneration* generation;
if (_heap->mode()->is_generational()) {
+ generation = _heap->gc_generation();
+ guarantee(generation != nullptr, "Need to know which generation to verify.");
+ shenandoah_assert_generations_reconciled();
+ } else {
+ generation = nullptr;
+ }
+
+ if (generation != nullptr) {
ShenandoahHeapLocker lock(_heap->lock());
switch (remembered) {
@@ -932,7 +952,11 @@ void ShenandoahVerifier::verify_at_safepoint(ShenandoahGeneration* generation,
// Internal heap region checks
if (ShenandoahVerifyLevel >= 1) {
ShenandoahVerifyHeapRegionClosure cl(label, regions);
- generation->heap_region_iterate(&cl);
+ if (generation != nullptr) {
+ generation->heap_region_iterate(&cl);
+ } else {
+ _heap->heap_region_iterate(&cl);
+ }
}
log_debug(gc)("Safepoint verification finished heap region closure verification");
@@ -956,7 +980,7 @@ void ShenandoahVerifier::verify_at_safepoint(ShenandoahGeneration* generation,
// This verifies what application can see, since it only cares about reachable objects.
size_t count_reachable = 0;
if (ShenandoahVerifyLevel >= 2) {
- ShenandoahVerifierReachableTask task(generation, _verification_bit_map, ld, label, options);
+ ShenandoahVerifierReachableTask task(_verification_bit_map, ld, label, options);
_heap->workers()->run_task(&task);
count_reachable = task.processed();
}
@@ -975,8 +999,8 @@ void ShenandoahVerifier::verify_at_safepoint(ShenandoahGeneration* generation,
(marked == _verify_marked_complete ||
marked == _verify_marked_complete_except_references ||
marked == _verify_marked_complete_satb_empty)) {
- guarantee(generation->is_mark_complete(), "Marking context should be complete");
- ShenandoahVerifierMarkedRegionTask task(generation, _verification_bit_map, ld, label, options);
+ guarantee(_heap->gc_generation()->is_mark_complete(), "Marking context should be complete");
+ ShenandoahVerifierMarkedRegionTask task(_verification_bit_map, ld, label, options);
_heap->workers()->run_task(&task);
count_marked = task.processed();
} else {
@@ -991,7 +1015,7 @@ void ShenandoahVerifier::verify_at_safepoint(ShenandoahGeneration* generation,
if (ShenandoahVerifyLevel >= 4 && marked == _verify_marked_complete && liveness == _verify_liveness_complete) {
for (size_t i = 0; i < _heap->num_regions(); i++) {
ShenandoahHeapRegion* r = _heap->get_region(i);
- if (!generation->contains(r)) {
+ if (generation != nullptr && !generation->contains(r)) {
continue;
}
@@ -1018,15 +1042,16 @@ void ShenandoahVerifier::verify_at_safepoint(ShenandoahGeneration* generation,
}
log_debug(gc)("Safepoint verification finished accumulation of liveness data");
+
+
log_info(gc)("Verify %s, Level %zd (%zu reachable, %zu marked)",
label, ShenandoahVerifyLevel, count_reachable, count_marked);
FREE_C_HEAP_ARRAY(ShenandoahLivenessData, ld);
}
-void ShenandoahVerifier::verify_generic(ShenandoahGeneration* generation, VerifyOption vo) {
+void ShenandoahVerifier::verify_generic(VerifyOption vo) {
verify_at_safepoint(
- generation,
"Generic Verification",
_verify_remembered_disable, // do not verify remembered set
_verify_forwarded_allow, // conservatively allow forwarded
@@ -1039,7 +1064,7 @@ void ShenandoahVerifier::verify_generic(ShenandoahGeneration* generation, Verify
);
}
-void ShenandoahVerifier::verify_before_concmark(ShenandoahGeneration* generation) {
+void ShenandoahVerifier::verify_before_concmark() {
VerifyRememberedSet verify_remembered_set = _verify_remembered_before_marking;
if (_heap->mode()->is_generational() &&
!_heap->old_generation()->is_mark_complete()) {
@@ -1047,7 +1072,6 @@ void ShenandoahVerifier::verify_before_concmark(ShenandoahGeneration* generation
verify_remembered_set = _verify_remembered_disable;
}
verify_at_safepoint(
- generation,
"Before Mark",
verify_remembered_set,
// verify read-only remembered set from bottom() to top()
@@ -1061,9 +1085,8 @@ void ShenandoahVerifier::verify_before_concmark(ShenandoahGeneration* generation
);
}
-void ShenandoahVerifier::verify_after_concmark(ShenandoahGeneration* generation) {
+void ShenandoahVerifier::verify_after_concmark() {
verify_at_safepoint(
- generation,
"After Mark",
_verify_remembered_disable, // do not verify remembered set
_verify_forwarded_none, // no forwarded references
@@ -1076,9 +1099,8 @@ void ShenandoahVerifier::verify_after_concmark(ShenandoahGeneration* generation)
);
}
-void ShenandoahVerifier::verify_after_concmark_with_promotions(ShenandoahGeneration* generation) {
+void ShenandoahVerifier::verify_after_concmark_with_promotions() {
verify_at_safepoint(
- generation,
"After Mark",
_verify_remembered_disable, // do not verify remembered set
_verify_forwarded_none, // no forwarded references
@@ -1092,9 +1114,8 @@ void ShenandoahVerifier::verify_after_concmark_with_promotions(ShenandoahGenerat
);
}
-void ShenandoahVerifier::verify_before_evacuation(ShenandoahGeneration* generation) {
+void ShenandoahVerifier::verify_before_evacuation() {
verify_at_safepoint(
- generation,
"Before Evacuation",
_verify_remembered_disable, // do not verify remembered set
_verify_forwarded_none, // no forwarded references
@@ -1108,14 +1129,13 @@ void ShenandoahVerifier::verify_before_evacuation(ShenandoahGeneration* generati
);
}
-void ShenandoahVerifier::verify_before_update_refs(ShenandoahGeneration* generation) {
+void ShenandoahVerifier::verify_before_update_refs() {
VerifyRememberedSet verify_remembered_set = _verify_remembered_before_updating_references;
if (_heap->mode()->is_generational() &&
!_heap->old_generation()->is_mark_complete()) {
verify_remembered_set = _verify_remembered_disable;
}
verify_at_safepoint(
- generation,
"Before Updating References",
verify_remembered_set, // verify read-write remembered set
_verify_forwarded_allow, // forwarded references allowed
@@ -1129,9 +1149,8 @@ void ShenandoahVerifier::verify_before_update_refs(ShenandoahGeneration* generat
}
// We have not yet cleanup (reclaimed) the collection set
-void ShenandoahVerifier::verify_after_update_refs(ShenandoahGeneration* generation) {
+void ShenandoahVerifier::verify_after_update_refs() {
verify_at_safepoint(
- generation,
"After Updating References",
_verify_remembered_disable, // do not verify remembered set
_verify_forwarded_none, // no forwarded references
@@ -1144,9 +1163,8 @@ void ShenandoahVerifier::verify_after_update_refs(ShenandoahGeneration* generati
);
}
-void ShenandoahVerifier::verify_after_degenerated(ShenandoahGeneration* generation) {
+void ShenandoahVerifier::verify_after_degenerated() {
verify_at_safepoint(
- generation,
"After Degenerated GC",
_verify_remembered_disable, // do not verify remembered set
_verify_forwarded_none, // all objects are non-forwarded
@@ -1159,9 +1177,8 @@ void ShenandoahVerifier::verify_after_degenerated(ShenandoahGeneration* generati
);
}
-void ShenandoahVerifier::verify_before_fullgc(ShenandoahGeneration* generation) {
+void ShenandoahVerifier::verify_before_fullgc() {
verify_at_safepoint(
- generation,
"Before Full GC",
_verify_remembered_disable, // do not verify remembered set
_verify_forwarded_allow, // can have forwarded objects
@@ -1174,9 +1191,8 @@ void ShenandoahVerifier::verify_before_fullgc(ShenandoahGeneration* generation)
);
}
-void ShenandoahVerifier::verify_after_fullgc(ShenandoahGeneration* generation) {
+void ShenandoahVerifier::verify_after_fullgc() {
verify_at_safepoint(
- generation,
"After Full GC",
_verify_remembered_after_full_gc, // verify read-write remembered set
_verify_forwarded_none, // all objects are non-forwarded
@@ -1241,14 +1257,14 @@ class ShenandoahVerifyInToSpaceClosure : public BasicOopIterateClosure {
void do_oop(oop* p) override { do_oop_work(p); }
};
-void ShenandoahVerifier::verify_roots_in_to_space(ShenandoahGeneration* generation) {
+void ShenandoahVerifier::verify_roots_in_to_space() {
ShenandoahVerifyInToSpaceClosure cl;
- ShenandoahRootVerifier::roots_do(&cl, generation);
+ ShenandoahRootVerifier::roots_do(&cl);
}
-void ShenandoahVerifier::verify_roots_no_forwarded(ShenandoahGeneration* generation) {
+void ShenandoahVerifier::verify_roots_no_forwarded() {
ShenandoahVerifyNoForwarded cl;
- ShenandoahRootVerifier::roots_do(&cl, generation);
+ ShenandoahRootVerifier::roots_do(&cl);
}
template
@@ -1284,6 +1300,7 @@ class ShenandoahVerifyRemSetClosure : public BasicOopIterateClosure {
template
void ShenandoahVerifier::help_verify_region_rem_set(Scanner* scanner, ShenandoahHeapRegion* r,
HeapWord* registration_watermark, const char* message) {
+ shenandoah_assert_generations_reconciled();
ShenandoahOldGeneration* old_gen = _heap->old_generation();
assert(old_gen->is_mark_complete() || old_gen->is_parsable(), "Sanity");
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp
index e49990fdc620d..aba6379e0223c 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp
@@ -196,8 +196,7 @@ class ShenandoahVerifier : public CHeapObj {
};
private:
- void verify_at_safepoint(ShenandoahGeneration* generation,
- const char* label,
+ void verify_at_safepoint(const char* label,
VerifyRememberedSet remembered,
VerifyForwarded forwarded,
VerifyMarked marked,
@@ -211,20 +210,20 @@ class ShenandoahVerifier : public CHeapObj {
ShenandoahVerifier(ShenandoahHeap* heap, MarkBitMap* verification_bitmap) :
_heap(heap), _verification_bit_map(verification_bitmap) {};
- void verify_before_concmark(ShenandoahGeneration* generation);
- void verify_after_concmark(ShenandoahGeneration* generation);
- void verify_after_concmark_with_promotions(ShenandoahGeneration* generation);
- void verify_before_evacuation(ShenandoahGeneration* generation);
- void verify_before_update_refs(ShenandoahGeneration* generation);
- void verify_after_update_refs(ShenandoahGeneration* generation);
- void verify_before_fullgc(ShenandoahGeneration* generation);
- void verify_after_fullgc(ShenandoahGeneration* generation);
- void verify_after_degenerated(ShenandoahGeneration* generation);
- void verify_generic(ShenandoahGeneration* generation, VerifyOption option);
+ void verify_before_concmark();
+ void verify_after_concmark();
+ void verify_after_concmark_with_promotions();
+ void verify_before_evacuation();
+ void verify_before_update_refs();
+ void verify_after_update_refs();
+ void verify_before_fullgc();
+ void verify_after_fullgc();
+ void verify_after_degenerated();
+ void verify_generic(VerifyOption option);
// Roots should only contain to-space oops
- void verify_roots_in_to_space(ShenandoahGeneration* generation);
- void verify_roots_no_forwarded(ShenandoahGeneration* generation);
+ void verify_roots_in_to_space();
+ void verify_roots_no_forwarded();
// Check that generation usages are accurate before rebuilding free set
void verify_before_rebuilding_free_set();
diff --git a/src/hotspot/share/gc/z/zNUMA.hpp b/src/hotspot/share/gc/z/zNUMA.hpp
index 838a114c210ef..de74086b10ab0 100644
--- a/src/hotspot/share/gc/z/zNUMA.hpp
+++ b/src/hotspot/share/gc/z/zNUMA.hpp
@@ -53,8 +53,6 @@ class ZNUMA : public AllStatic {
static size_t calculate_share(uint32_t numa_id, size_t total, size_t granule = ZGranuleSize, uint32_t ignore_count = 0);
static const char* to_string();
-
- static int numa_id_to_node(uint32_t numa_id);
};
#endif // SHARE_GC_Z_ZNUMA_HPP
diff --git a/src/hotspot/share/gc/z/zPhysicalMemoryManager.cpp b/src/hotspot/share/gc/z/zPhysicalMemoryManager.cpp
index 2e7a97028ff31..1a38efb89fd5b 100644
--- a/src/hotspot/share/gc/z/zPhysicalMemoryManager.cpp
+++ b/src/hotspot/share/gc/z/zPhysicalMemoryManager.cpp
@@ -108,7 +108,7 @@ void ZPhysicalMemoryManager::try_enable_uncommit(size_t min_capacity, size_t max
// Test if uncommit is supported by the operating system by committing
// and then uncommitting a granule.
const ZVirtualMemory vmem(zoffset(0), ZGranuleSize);
- if (!commit(vmem, 0) || !uncommit(vmem)) {
+ if (!commit(vmem, (uint32_t)-1) || !uncommit(vmem)) {
log_info_p(gc, init)("Uncommit: Implicitly Disabled (Not supported by operating system)");
FLAG_SET_ERGO(ZUncommit, false);
return;
@@ -293,7 +293,7 @@ void ZPhysicalMemoryManager::map(const ZVirtualMemory& vmem, uint32_t numa_id) c
// Setup NUMA preferred for large pages
if (ZNUMA::is_enabled() && ZLargePages::is_explicit()) {
- os::numa_make_local((char*)addr, size, ZNUMA::numa_id_to_node(numa_id));
+ os::numa_make_local((char*)addr, size, (int)numa_id);
}
}
diff --git a/src/hotspot/share/interpreter/abstractInterpreter.cpp b/src/hotspot/share/interpreter/abstractInterpreter.cpp
index b6a2255b4686c..640e3ab3fff93 100644
--- a/src/hotspot/share/interpreter/abstractInterpreter.cpp
+++ b/src/hotspot/share/interpreter/abstractInterpreter.cpp
@@ -258,8 +258,7 @@ bool AbstractInterpreter::is_not_reached(const methodHandle& method, int bci) {
case Bytecodes::_invokedynamic: {
assert(invoke_bc.has_index_u4(code), "sanity");
int method_index = invoke_bc.get_index_u4(code);
- bool is_resolved = cpool->resolved_indy_entry_at(method_index)->is_resolved();
- return !is_resolved;
+ return !cpool->resolved_indy_entry_at(method_index)->is_resolved();
}
case Bytecodes::_invokevirtual: // fall-through
case Bytecodes::_invokeinterface: // fall-through
diff --git a/src/hotspot/share/interpreter/bytecodeStream.hpp b/src/hotspot/share/interpreter/bytecodeStream.hpp
index 412951691c536..89d97053b45e2 100644
--- a/src/hotspot/share/interpreter/bytecodeStream.hpp
+++ b/src/hotspot/share/interpreter/bytecodeStream.hpp
@@ -100,23 +100,8 @@ class BaseBytecodeStream: StackObj {
void set_next_bci(int bci) { assert(0 <= bci && bci <= method()->code_size(), "illegal bci"); _next_bci = bci; }
// Bytecode-specific attributes
- int get_offset_s2() const { return bytecode().get_offset_s2(raw_code()); }
- int get_offset_s4() const { return bytecode().get_offset_s4(raw_code()); }
-
- // These methods are not safe to use before or during verification as they may
- // have large offsets and cause overflows
- int dest() const {
- int min_offset = -1 * max_method_code_size;
- int offset = bytecode().get_offset_s2(raw_code());
- guarantee(offset >= min_offset && offset <= max_method_code_size, "must be");
- return bci() + offset;
- }
- int dest_w() const {
- int min_offset = -1 * max_method_code_size;
- int offset = bytecode().get_offset_s4(raw_code());
- guarantee(offset >= min_offset && offset <= max_method_code_size, "must be");
- return bci() + offset;
- }
+ int dest() const { return bci() + bytecode().get_offset_s2(raw_code()); }
+ int dest_w() const { return bci() + bytecode().get_offset_s4(raw_code()); }
// One-byte indices.
u1 get_index_u1() const { assert_raw_index_size(1); return *(jubyte*)(bcp()+1); }
diff --git a/src/hotspot/share/jfr/periodic/sampling/jfrCPUTimeThreadSampler.cpp b/src/hotspot/share/jfr/periodic/sampling/jfrCPUTimeThreadSampler.cpp
index 031dfb7e8ad51..7507b9c994efb 100644
--- a/src/hotspot/share/jfr/periodic/sampling/jfrCPUTimeThreadSampler.cpp
+++ b/src/hotspot/share/jfr/periodic/sampling/jfrCPUTimeThreadSampler.cpp
@@ -82,7 +82,14 @@ JfrCPUTimeTraceQueue::~JfrCPUTimeTraceQueue() {
bool JfrCPUTimeTraceQueue::enqueue(JfrCPUTimeSampleRequest& request) {
assert(JavaThread::current()->jfr_thread_local()->is_cpu_time_jfr_enqueue_locked(), "invariant");
assert(&JavaThread::current()->jfr_thread_local()->cpu_time_jfr_queue() == this, "invariant");
- _data[_head++] = request;
+ u4 elementIndex;
+ do {
+ elementIndex = AtomicAccess::load_acquire(&_head);
+ if (elementIndex >= _capacity) {
+ return false;
+ }
+ } while (AtomicAccess::cmpxchg(&_head, elementIndex, elementIndex + 1) != elementIndex);
+ _data[elementIndex] = request;
return true;
}
@@ -94,19 +101,19 @@ JfrCPUTimeSampleRequest& JfrCPUTimeTraceQueue::at(u4 index) {
static volatile u4 _lost_samples_sum = 0;
u4 JfrCPUTimeTraceQueue::size() const {
- return _head;
+ return AtomicAccess::load_acquire(&_head);
}
void JfrCPUTimeTraceQueue::set_size(u4 size) {
- _head = size;
+ AtomicAccess::release_store(&_head, size);
}
u4 JfrCPUTimeTraceQueue::capacity() const {
- return _capacity;
+ return AtomicAccess::load_acquire(&_capacity);
}
void JfrCPUTimeTraceQueue::set_capacity(u4 capacity) {
- if (capacity == _capacity) {
+ if (capacity == AtomicAccess::load(&_capacity)) {
return;
}
_head = 0;
@@ -119,15 +126,15 @@ void JfrCPUTimeTraceQueue::set_capacity(u4 capacity) {
} else {
_data = nullptr;
}
- _capacity = capacity;
+ AtomicAccess::release_store(&_capacity, capacity);
}
bool JfrCPUTimeTraceQueue::is_empty() const {
- return _head == 0;
+ return AtomicAccess::load_acquire(&_head) == 0;
}
u4 JfrCPUTimeTraceQueue::lost_samples() const {
- return _lost_samples;
+ return AtomicAccess::load(&_lost_samples);
}
void JfrCPUTimeTraceQueue::increment_lost_samples() {
@@ -136,7 +143,7 @@ void JfrCPUTimeTraceQueue::increment_lost_samples() {
}
void JfrCPUTimeTraceQueue::increment_lost_samples_due_to_queue_full() {
- _lost_samples_due_to_queue_full++;
+ AtomicAccess::inc(&_lost_samples_due_to_queue_full);
}
u4 JfrCPUTimeTraceQueue::get_and_reset_lost_samples() {
@@ -144,9 +151,7 @@ u4 JfrCPUTimeTraceQueue::get_and_reset_lost_samples() {
}
u4 JfrCPUTimeTraceQueue::get_and_reset_lost_samples_due_to_queue_full() {
- u4 lost = _lost_samples_due_to_queue_full;
- _lost_samples_due_to_queue_full = 0;
- return lost;
+ return AtomicAccess::xchg(&_lost_samples_due_to_queue_full, (u4)0);
}
void JfrCPUTimeTraceQueue::init() {
@@ -154,7 +159,7 @@ void JfrCPUTimeTraceQueue::init() {
}
void JfrCPUTimeTraceQueue::clear() {
- _head = 0;
+ AtomicAccess::release_store(&_head, (u4)0);
}
void JfrCPUTimeTraceQueue::resize_if_needed() {
@@ -162,8 +167,9 @@ void JfrCPUTimeTraceQueue::resize_if_needed() {
if (lost_samples_due_to_queue_full == 0) {
return;
}
- if (_capacity < CPU_TIME_QUEUE_MAX_CAPACITY) {
- float ratio = (float)lost_samples_due_to_queue_full / (float)_capacity;
+ u4 capacity = AtomicAccess::load(&_capacity);
+ if (capacity < CPU_TIME_QUEUE_MAX_CAPACITY) {
+ float ratio = (float)lost_samples_due_to_queue_full / (float)capacity;
int factor = 1;
if (ratio > 8) { // idea is to quickly scale the queue in the worst case
factor = ratio;
@@ -175,7 +181,7 @@ void JfrCPUTimeTraceQueue::resize_if_needed() {
factor = 2;
}
if (factor > 1) {
- u4 new_capacity = MIN2(CPU_TIME_QUEUE_MAX_CAPACITY, _capacity * factor);
+ u4 new_capacity = MIN2(CPU_TIME_QUEUE_MAX_CAPACITY, capacity * factor);
set_capacity(new_capacity);
}
}
diff --git a/src/hotspot/share/jfr/periodic/sampling/jfrCPUTimeThreadSampler.hpp b/src/hotspot/share/jfr/periodic/sampling/jfrCPUTimeThreadSampler.hpp
index 48fe28d22f079..e7c915fc8bed8 100644
--- a/src/hotspot/share/jfr/periodic/sampling/jfrCPUTimeThreadSampler.hpp
+++ b/src/hotspot/share/jfr/periodic/sampling/jfrCPUTimeThreadSampler.hpp
@@ -43,24 +43,19 @@ struct JfrCPUTimeSampleRequest {
// Fixed size async-signal-safe SPSC linear queue backed by an array.
// Designed to be only used under lock and read linearly
-// The lock in question is the tri-state CPU time JFR lock in JfrThreadLocal
-// This allows us to skip most of the atomic accesses and memory barriers,
-// holding a lock acts as a memory barrier
-// Only the _lost_samples property is atomic, as it can be accessed even after
-// acquiring the lock failed.
-// Important to note is that the queue is also only accessed under lock in signal
-// handlers.
class JfrCPUTimeTraceQueue {
+ // the default queue capacity, scaled if the sampling period is smaller than 10ms
+ // when the thread is started
+ static const u4 CPU_TIME_QUEUE_CAPACITY = 500;
+
JfrCPUTimeSampleRequest* _data;
- u4 _capacity;
+ volatile u4 _capacity;
// next unfilled index
- u4 _head;
+ volatile u4 _head;
- // the only property accessible without a lock
volatile u4 _lost_samples;
-
- u4 _lost_samples_due_to_queue_full;
+ volatile u4 _lost_samples_due_to_queue_full;
static const u4 CPU_TIME_QUEUE_INITIAL_CAPACITY = 20;
static const u4 CPU_TIME_QUEUE_MAX_CAPACITY = 2000;
@@ -87,7 +82,6 @@ class JfrCPUTimeTraceQueue {
u4 lost_samples() const;
- // the only method callable without holding a lock
void increment_lost_samples();
void increment_lost_samples_due_to_queue_full();
diff --git a/src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp b/src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp
index 309ae961808d5..5163bc7f6a5cb 100644
--- a/src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp
+++ b/src/hotspot/share/jfr/recorder/repository/jfrEmergencyDump.cpp
@@ -458,7 +458,6 @@ const char* JfrEmergencyDump::chunk_path(const char* repository_path) {
*/
static void release_locks(Thread* thread) {
assert(thread != nullptr, "invariant");
- assert(!thread->is_Java_thread() || JavaThread::cast(thread)->thread_state() == _thread_in_vm, "invariant");
#ifdef ASSERT
Mutex* owned_lock = thread->owned_locks();
@@ -520,14 +519,13 @@ static void release_locks(Thread* thread) {
class JavaThreadInVMAndNative : public StackObj {
private:
- JavaThread* _jt;
+ JavaThread* const _jt;
JavaThreadState _original_state;
public:
- JavaThreadInVMAndNative(Thread* t) : _jt(nullptr),
+ JavaThreadInVMAndNative(Thread* t) : _jt(t->is_Java_thread() ? JavaThread::cast(t) : nullptr),
_original_state(_thread_max_state) {
- if (t != nullptr && t->is_Java_thread()) {
- _jt = JavaThread::cast(t);
+ if (_jt != nullptr) {
_original_state = _jt->thread_state();
if (_original_state != _thread_in_vm) {
_jt->set_thread_state(_thread_in_vm);
@@ -537,7 +535,6 @@ class JavaThreadInVMAndNative : public StackObj {
~JavaThreadInVMAndNative() {
if (_original_state != _thread_max_state) {
- assert(_jt != nullptr, "invariant");
_jt->set_thread_state(_original_state);
}
}
@@ -577,13 +574,11 @@ static bool guard_reentrancy() {
Thread* const thread = Thread::current_or_null_safe();
const traceid tid = thread != nullptr ? JFR_JVM_THREAD_ID(thread) : max_julong;
if (AtomicAccess::cmpxchg(&_jfr_shutdown_tid, shutdown_tid, tid) != shutdown_tid) {
- JavaThreadInVMAndNative jtivm(thread);
if (thread != nullptr) {
+ JavaThreadInVMAndNative jtivm(thread);
release_locks(thread);
}
log_info(jfr, system)("A jfr emergency dump is already in progress, waiting for thread id " UINT64_FORMAT_X, AtomicAccess::load(&_jfr_shutdown_tid));
- // Transition to a safe safepoint state for the infinite sleep. A nop for non-java threads.
- jtivm.transition_to_native();
os::infinite_sleep(); // stay here until we exit normally or crash.
ShouldNotReachHere();
}
diff --git a/src/hotspot/share/logging/logTag.hpp b/src/hotspot/share/logging/logTag.hpp
index 3ad6a197d07d3..6d0bd117ad91d 100644
--- a/src/hotspot/share/logging/logTag.hpp
+++ b/src/hotspot/share/logging/logTag.hpp
@@ -41,7 +41,6 @@ class outputStream;
LOG_TAG(aot) \
LOG_TAG(arguments) \
LOG_TAG(array) \
- LOG_TAG(asan) \
LOG_TAG(attach) \
LOG_TAG(barrier) \
LOG_TAG(blocks) \
diff --git a/src/hotspot/share/memory/arena.cpp b/src/hotspot/share/memory/arena.cpp
index b9968083e0e85..db0bb8add21dc 100644
--- a/src/hotspot/share/memory/arena.cpp
+++ b/src/hotspot/share/memory/arena.cpp
@@ -39,26 +39,19 @@
// It is used very early in the vm initialization, in allocation
// code and other areas. For many calls, the current thread has not
// been created so we cannot use Mutex.
-static DeferredStatic GlobalChunkPoolMutex;
+static PlatformMutex* GlobalChunkPoolMutex = nullptr;
void Arena::initialize_chunk_pool() {
- GlobalChunkPoolMutex.initialize();
+ GlobalChunkPoolMutex = new PlatformMutex();
}
-ChunkPoolLocker::ChunkPoolLocker(LockStrategy ls) {
- if (ls == LockStrategy::Lock) {
- GlobalChunkPoolMutex->lock();
- _locked = true;
- } else {
- assert(ls == LockStrategy::Try, "must be");
- _locked = GlobalChunkPoolMutex->try_lock();
- }
+ChunkPoolLocker::ChunkPoolLocker() {
+ assert(GlobalChunkPoolMutex != nullptr, "must be initialized");
+ GlobalChunkPoolMutex->lock();
};
ChunkPoolLocker::~ChunkPoolLocker() {
- if (_locked) {
- GlobalChunkPoolMutex->unlock();
- }
+ GlobalChunkPoolMutex->unlock();
};
// Pre-defined default chunk sizes must be arena-aligned, see Chunk::operator new()
diff --git a/src/hotspot/share/memory/arena.hpp b/src/hotspot/share/memory/arena.hpp
index b4a0546babf83..e2169ee406e78 100644
--- a/src/hotspot/share/memory/arena.hpp
+++ b/src/hotspot/share/memory/arena.hpp
@@ -38,11 +38,8 @@
#define ARENA_ALIGN(x) (align_up((x), ARENA_AMALLOC_ALIGNMENT))
class ChunkPoolLocker : public StackObj {
- bool _locked;
public:
- enum class LockStrategy { Lock, Try };
-
- ChunkPoolLocker(LockStrategy ls = LockStrategy::Lock);
+ ChunkPoolLocker();
~ChunkPoolLocker();
};
diff --git a/src/hotspot/share/memory/universe.cpp b/src/hotspot/share/memory/universe.cpp
index 756619bff3386..424e43c5e83ce 100644
--- a/src/hotspot/share/memory/universe.cpp
+++ b/src/hotspot/share/memory/universe.cpp
@@ -955,7 +955,7 @@ void Universe::initialize_tlab() {
}
}
-ReservedHeapSpace Universe::reserve_heap(size_t heap_size, size_t alignment, size_t desired_page_size) {
+ReservedHeapSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
assert(alignment <= Arguments::conservative_max_heap_alignment(),
"actual alignment %zu must be within maximum heap alignment %zu",
@@ -966,21 +966,15 @@ ReservedHeapSpace Universe::reserve_heap(size_t heap_size, size_t alignment, siz
assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
"heap size is too big for compressed oops");
- size_t page_size;
- if (desired_page_size == 0) {
- if (UseLargePages) {
- page_size = os::large_page_size();
- } else {
- page_size = os::vm_page_size();
- }
+ size_t page_size = os::vm_page_size();
+ if (UseLargePages && is_aligned(alignment, os::large_page_size())) {
+ page_size = os::large_page_size();
} else {
// Parallel is the only collector that might opt out of using large pages
// for the heap.
- assert(UseParallelGC , "only Parallel");
- // Use caller provided value.
- page_size = desired_page_size;
+ assert(!UseLargePages || UseParallelGC , "Wrong alignment to use large pages");
}
- assert(is_aligned(heap_size, page_size), "inv");
+
// Now create the space.
ReservedHeapSpace rhs = HeapReserver::reserve(total_reserved, alignment, page_size, AllocateHeapAt);
diff --git a/src/hotspot/share/memory/universe.hpp b/src/hotspot/share/memory/universe.hpp
index 37ca965062e80..3b1f2523ed845 100644
--- a/src/hotspot/share/memory/universe.hpp
+++ b/src/hotspot/share/memory/universe.hpp
@@ -315,7 +315,7 @@ class Universe: AllStatic {
DEBUG_ONLY(static bool is_in_heap_or_null(const void* p) { return p == nullptr || is_in_heap(p); })
// Reserve Java heap and determine CompressedOops mode
- static ReservedHeapSpace reserve_heap(size_t heap_size, size_t alignment, size_t desired_page_size = 0);
+ static ReservedHeapSpace reserve_heap(size_t heap_size, size_t alignment);
// Global OopStorages
static OopStorage* vm_weak();
diff --git a/src/hotspot/share/nmt/mallocHeader.cpp b/src/hotspot/share/nmt/mallocHeader.cpp
index d88b5c790fbaa..2b59a2b66480f 100644
--- a/src/hotspot/share/nmt/mallocHeader.cpp
+++ b/src/hotspot/share/nmt/mallocHeader.cpp
@@ -26,7 +26,6 @@
#include "nmt/mallocHeader.inline.hpp"
#include "nmt/mallocSiteTable.hpp"
#include "nmt/memTag.hpp"
-#include "nmt/memTracker.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
@@ -37,7 +36,7 @@
// fitting into eight bits.
STATIC_ASSERT(sizeof(MemTag) == sizeof(uint8_t));
-void MallocHeader::print_block_on_error(outputStream* st, address bad_address, address block_address) const {
+void MallocHeader::print_block_on_error(outputStream* st, address bad_address) const {
assert(bad_address >= (address)this, "sanity");
// This function prints block information, including hex dump, in case of a detected
@@ -49,18 +48,6 @@ void MallocHeader::print_block_on_error(outputStream* st, address bad_address, a
st->print_cr("NMT Block at " PTR_FORMAT ", corruption at: " PTR_FORMAT ": ",
p2i(this), p2i(bad_address));
- if (MemTracker::tracking_level() == NMT_TrackingLevel::NMT_detail) {
- MallocHeader* mh = (MallocHeader*)block_address;
- NativeCallStack stack;
- if (MallocSiteTable::access_stack(stack, *mh)) {
- st->print_cr("allocated from:");
- stack.print_on(st);
- } else {
- st->print_cr("allocation-site cannot be shown since the marker is also corrupted.");
- }
- st->print_cr("");
- }
-
static const size_t min_dump_length = 256;
address from1 = align_down((address)this, sizeof(void*)) - (min_dump_length / 2);
address to1 = from1 + min_dump_length;
diff --git a/src/hotspot/share/nmt/mallocHeader.hpp b/src/hotspot/share/nmt/mallocHeader.hpp
index acfc74012682f..8472b5f8ce888 100644
--- a/src/hotspot/share/nmt/mallocHeader.hpp
+++ b/src/hotspot/share/nmt/mallocHeader.hpp
@@ -106,7 +106,7 @@ class MallocHeader {
// We discount sizes larger than these
static const size_t max_reasonable_malloc_size = LP64_ONLY(256 * G) NOT_LP64(3500 * M);
- void print_block_on_error(outputStream* st, address bad_address, address block_address) const;
+ void print_block_on_error(outputStream* st, address bad_address) const;
static uint16_t build_footer(uint8_t b1, uint8_t b2) { return (uint16_t)(((uint16_t)b1 << 8) | (uint16_t)b2); }
diff --git a/src/hotspot/share/nmt/mallocHeader.inline.hpp b/src/hotspot/share/nmt/mallocHeader.inline.hpp
index 7bc8a25028c51..8b1862332fc37 100644
--- a/src/hotspot/share/nmt/mallocHeader.inline.hpp
+++ b/src/hotspot/share/nmt/mallocHeader.inline.hpp
@@ -103,7 +103,7 @@ inline OutTypeParam MallocHeader::resolve_checked_impl(InTypeParam memblock) {
}
OutTypeParam header_pointer = (OutTypeParam)memblock - 1;
if (!header_pointer->check_block_integrity(msg, sizeof(msg), &corruption)) {
- header_pointer->print_block_on_error(tty, corruption != nullptr ? corruption : (address)header_pointer, (address)header_pointer);
+ header_pointer->print_block_on_error(tty, corruption != nullptr ? corruption : (address)header_pointer);
fatal("NMT has detected a memory corruption bug. Block at " PTR_FORMAT ": %s", p2i(memblock), msg);
}
return header_pointer;
diff --git a/src/hotspot/share/nmt/mallocSiteTable.cpp b/src/hotspot/share/nmt/mallocSiteTable.cpp
index 0150a25cae3a4..c9ddffce5ecb7 100644
--- a/src/hotspot/share/nmt/mallocSiteTable.cpp
+++ b/src/hotspot/share/nmt/mallocSiteTable.cpp
@@ -163,17 +163,13 @@ MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key, uint32_t*
// Access malloc site
MallocSite* MallocSiteTable::malloc_site(uint32_t marker) {
uint16_t bucket_idx = bucket_idx_from_marker(marker);
- if (bucket_idx >= table_size) {
- return nullptr;
- }
+ assert(bucket_idx < table_size, "Invalid bucket index");
const uint16_t pos_idx = pos_idx_from_marker(marker);
MallocSiteHashtableEntry* head = _table[bucket_idx];
for (size_t index = 0;
index < pos_idx && head != nullptr;
index++, head = (MallocSiteHashtableEntry*)head->next()) {}
- if (head == nullptr) {
- return nullptr;
- }
+ assert(head != nullptr, "Invalid position index");
return head->data();
}
diff --git a/src/hotspot/share/nmt/mallocTracker.cpp b/src/hotspot/share/nmt/mallocTracker.cpp
index a61a27db25d2f..75089dffc3014 100644
--- a/src/hotspot/share/nmt/mallocTracker.cpp
+++ b/src/hotspot/share/nmt/mallocTracker.cpp
@@ -65,11 +65,7 @@ void MallocMemorySnapshot::copy_to(MallocMemorySnapshot* s) {
// Use lock to make sure that mtChunks don't get deallocated while the
// copy is going on, because their size is adjusted using this
// buffer in make_adjustment().
- ChunkPoolLocker::LockStrategy ls = ChunkPoolLocker::LockStrategy::Lock;
- if (VMError::is_error_reported() && VMError::is_error_reported_in_current_thread()) {
- ls = ChunkPoolLocker::LockStrategy::Try;
- }
- ChunkPoolLocker cpl(ls);
+ ChunkPoolLocker lock;
s->_all_mallocs = _all_mallocs;
size_t total_size = 0;
size_t total_count = 0;
diff --git a/src/hotspot/share/nmt/nmtUsage.cpp b/src/hotspot/share/nmt/nmtUsage.cpp
index 9e6fc3e183bac..3a9a232a36eea 100644
--- a/src/hotspot/share/nmt/nmtUsage.cpp
+++ b/src/hotspot/share/nmt/nmtUsage.cpp
@@ -30,7 +30,6 @@
#include "nmt/nmtUsage.hpp"
#include "nmt/threadStackTracker.hpp"
#include "runtime/mutexLocker.hpp"
-#include "utilities/vmError.hpp"
// Enabled all options for snapshot.
const NMTUsageOptions NMTUsage::OptionsAll = { true, true, true };
@@ -59,11 +58,7 @@ void NMTUsage::update_malloc_usage() {
// Lock needed to keep values in sync, total area size
// is deducted from mtChunk in the end to give correct values.
{
- ChunkPoolLocker::LockStrategy ls = ChunkPoolLocker::LockStrategy::Lock;
- if (VMError::is_error_reported() && VMError::is_error_reported_in_current_thread()) {
- ls = ChunkPoolLocker::LockStrategy::Try;
- }
- ChunkPoolLocker cpl(ls);
+ ChunkPoolLocker lock;
ms = MallocMemorySummary::as_snapshot();
}
diff --git a/src/hotspot/share/oops/constantPool.cpp b/src/hotspot/share/oops/constantPool.cpp
index b072c7f26ec53..5d5c05482156b 100644
--- a/src/hotspot/share/oops/constantPool.cpp
+++ b/src/hotspot/share/oops/constantPool.cpp
@@ -538,23 +538,18 @@ void ConstantPool::remove_resolved_klass_if_non_deterministic(int cp_index) {
assert(ArchiveBuilder::current()->is_in_buffer_space(this), "must be");
assert(tag_at(cp_index).is_klass(), "must be resolved");
+ Klass* k = resolved_klass_at(cp_index);
bool can_archive;
- Klass* k = nullptr;
- if (CDSConfig::is_dumping_preimage_static_archive()) {
+ if (k == nullptr) {
+ // We'd come here if the referenced class has been excluded via
+ // SystemDictionaryShared::is_excluded_class(). As a result, ArchiveBuilder
+ // has cleared the resolved_klasses()->at(...) pointer to null. Thus, we
+ // need to revert the tag to JVM_CONSTANT_UnresolvedClass.
can_archive = false;
} else {
- k = resolved_klass_at(cp_index);
- if (k == nullptr) {
- // We'd come here if the referenced class has been excluded via
- // SystemDictionaryShared::is_excluded_class(). As a result, ArchiveBuilder
- // has cleared the resolved_klasses()->at(...) pointer to null. Thus, we
- // need to revert the tag to JVM_CONSTANT_UnresolvedClass.
- can_archive = false;
- } else {
- ConstantPool* src_cp = ArchiveBuilder::current()->get_source_addr(this);
- can_archive = AOTConstantPoolResolver::is_resolution_deterministic(src_cp, cp_index);
- }
+ ConstantPool* src_cp = ArchiveBuilder::current()->get_source_addr(this);
+ can_archive = AOTConstantPoolResolver::is_resolution_deterministic(src_cp, cp_index);
}
if (!can_archive) {
diff --git a/src/hotspot/share/oops/cpCache.cpp b/src/hotspot/share/oops/cpCache.cpp
index f60229dbfffc2..941ceac8de12b 100644
--- a/src/hotspot/share/oops/cpCache.cpp
+++ b/src/hotspot/share/oops/cpCache.cpp
@@ -430,25 +430,26 @@ void ConstantPoolCache::remove_resolved_field_entries_if_non_deterministic() {
bool archived = false;
bool resolved = rfi->is_resolved(Bytecodes::_getfield) ||
rfi->is_resolved(Bytecodes::_putfield);
- if (resolved && !CDSConfig::is_dumping_preimage_static_archive()
- && AOTConstantPoolResolver::is_resolution_deterministic(src_cp, cp_index)) {
+ if (resolved && AOTConstantPoolResolver::is_resolution_deterministic(src_cp, cp_index)) {
rfi->mark_and_relocate();
archived = true;
} else {
rfi->remove_unshareable_info();
}
- LogStreamHandle(Trace, aot, resolve) log;
- if (log.is_enabled()) {
- ResourceMark rm;
- int klass_cp_index = cp->uncached_klass_ref_index_at(cp_index);
- Symbol* klass_name = cp->klass_name_at(klass_cp_index);
- Symbol* name = cp->uncached_name_ref_at(cp_index);
- Symbol* signature = cp->uncached_signature_ref_at(cp_index);
- log.print("%s field CP entry [%3d]: %s => %s.%s:%s",
- (archived ? "archived" : "reverted"),
- cp_index,
- cp->pool_holder()->name()->as_C_string(),
- klass_name->as_C_string(), name->as_C_string(), signature->as_C_string());
+ if (resolved) {
+ LogStreamHandle(Trace, aot, resolve) log;
+ if (log.is_enabled()) {
+ ResourceMark rm;
+ int klass_cp_index = cp->uncached_klass_ref_index_at(cp_index);
+ Symbol* klass_name = cp->klass_name_at(klass_cp_index);
+ Symbol* name = cp->uncached_name_ref_at(cp_index);
+ Symbol* signature = cp->uncached_signature_ref_at(cp_index);
+ log.print("%s field CP entry [%3d]: %s => %s.%s:%s",
+ (archived ? "archived" : "reverted"),
+ cp_index,
+ cp->pool_holder()->name()->as_C_string(),
+ klass_name->as_C_string(), name->as_C_string(), signature->as_C_string());
+ }
}
ArchiveBuilder::alloc_stats()->record_field_cp_entry(archived, resolved && !archived);
}
@@ -469,31 +470,32 @@ void ConstantPoolCache::remove_resolved_method_entries_if_non_deterministic() {
// Just for safety -- this should not happen, but do not archive if we ever see this.
resolved &= !(rme->is_resolved(Bytecodes::_invokestatic));
- if (resolved && !CDSConfig::is_dumping_preimage_static_archive()
- && can_archive_resolved_method(src_cp, rme)) {
+ if (resolved && can_archive_resolved_method(src_cp, rme)) {
rme->mark_and_relocate(src_cp);
archived = true;
} else {
rme->remove_unshareable_info();
}
- LogStreamHandle(Trace, aot, resolve) log;
- if (log.is_enabled()) {
- ResourceMark rm;
- int klass_cp_index = cp->uncached_klass_ref_index_at(cp_index);
- Symbol* klass_name = cp->klass_name_at(klass_cp_index);
- Symbol* name = cp->uncached_name_ref_at(cp_index);
- Symbol* signature = cp->uncached_signature_ref_at(cp_index);
- log.print("%s%s method CP entry [%3d]: %s %s.%s:%s",
- (archived ? "archived" : "reverted"),
- (rme->is_resolved(Bytecodes::_invokeinterface) ? " interface" : ""),
- cp_index,
- cp->pool_holder()->name()->as_C_string(),
- klass_name->as_C_string(), name->as_C_string(), signature->as_C_string());
- if (archived) {
- Klass* resolved_klass = cp->resolved_klass_at(klass_cp_index);
- log.print(" => %s%s",
- resolved_klass->name()->as_C_string(),
- (rme->is_resolved(Bytecodes::_invokestatic) ? " *** static" : ""));
+ if (resolved) {
+ LogStreamHandle(Trace, aot, resolve) log;
+ if (log.is_enabled()) {
+ ResourceMark rm;
+ int klass_cp_index = cp->uncached_klass_ref_index_at(cp_index);
+ Symbol* klass_name = cp->klass_name_at(klass_cp_index);
+ Symbol* name = cp->uncached_name_ref_at(cp_index);
+ Symbol* signature = cp->uncached_signature_ref_at(cp_index);
+ log.print("%s%s method CP entry [%3d]: %s %s.%s:%s",
+ (archived ? "archived" : "reverted"),
+ (rme->is_resolved(Bytecodes::_invokeinterface) ? " interface" : ""),
+ cp_index,
+ cp->pool_holder()->name()->as_C_string(),
+ klass_name->as_C_string(), name->as_C_string(), signature->as_C_string());
+ if (archived) {
+ Klass* resolved_klass = cp->resolved_klass_at(klass_cp_index);
+ log.print(" => %s%s",
+ resolved_klass->name()->as_C_string(),
+ (rme->is_resolved(Bytecodes::_invokestatic) ? " *** static" : ""));
+ }
}
ArchiveBuilder::alloc_stats()->record_method_cp_entry(archived, resolved && !archived);
}
@@ -508,28 +510,29 @@ void ConstantPoolCache::remove_resolved_indy_entries_if_non_deterministic() {
int cp_index = rei->constant_pool_index();
bool archived = false;
bool resolved = rei->is_resolved();
- if (resolved && !CDSConfig::is_dumping_preimage_static_archive()
- && AOTConstantPoolResolver::is_resolution_deterministic(src_cp, cp_index)) {
+ if (resolved && AOTConstantPoolResolver::is_resolution_deterministic(src_cp, cp_index)) {
rei->mark_and_relocate();
archived = true;
} else {
rei->remove_unshareable_info();
}
- LogStreamHandle(Trace, aot, resolve) log;
- if (log.is_enabled()) {
- ResourceMark rm;
- int bsm = cp->bootstrap_method_ref_index_at(cp_index);
- int bsm_ref = cp->method_handle_index_at(bsm);
- Symbol* bsm_name = cp->uncached_name_ref_at(bsm_ref);
- Symbol* bsm_signature = cp->uncached_signature_ref_at(bsm_ref);
- Symbol* bsm_klass = cp->klass_name_at(cp->uncached_klass_ref_index_at(bsm_ref));
- log.print("%s indy CP entry [%3d]: %s (%d)",
- (archived ? "archived" : "reverted"),
- cp_index, cp->pool_holder()->name()->as_C_string(), i);
- log.print(" %s %s.%s:%s", (archived ? "=>" : " "), bsm_klass->as_C_string(),
- bsm_name->as_C_string(), bsm_signature->as_C_string());
+ if (resolved) {
+ LogStreamHandle(Trace, aot, resolve) log;
+ if (log.is_enabled()) {
+ ResourceMark rm;
+ int bsm = cp->bootstrap_method_ref_index_at(cp_index);
+ int bsm_ref = cp->method_handle_index_at(bsm);
+ Symbol* bsm_name = cp->uncached_name_ref_at(bsm_ref);
+ Symbol* bsm_signature = cp->uncached_signature_ref_at(bsm_ref);
+ Symbol* bsm_klass = cp->klass_name_at(cp->uncached_klass_ref_index_at(bsm_ref));
+ log.print("%s indy CP entry [%3d]: %s (%d)",
+ (archived ? "archived" : "reverted"),
+ cp_index, cp->pool_holder()->name()->as_C_string(), i);
+ log.print(" %s %s.%s:%s", (archived ? "=>" : " "), bsm_klass->as_C_string(),
+ bsm_name->as_C_string(), bsm_signature->as_C_string());
+ }
+ ArchiveBuilder::alloc_stats()->record_indy_cp_entry(archived, resolved && !archived);
}
- ArchiveBuilder::alloc_stats()->record_indy_cp_entry(archived, resolved && !archived);
}
}
diff --git a/src/hotspot/share/opto/callnode.cpp b/src/hotspot/share/opto/callnode.cpp
index ad6548a649e88..ef1ebc5cef949 100644
--- a/src/hotspot/share/opto/callnode.cpp
+++ b/src/hotspot/share/opto/callnode.cpp
@@ -72,7 +72,7 @@ void StartNode::calling_convention(BasicType* sig_bt, VMRegPair *parm_regs, uint
//------------------------------Registers--------------------------------------
const RegMask &StartNode::in_RegMask(uint) const {
- return RegMask::EMPTY;
+ return RegMask::Empty;
}
//------------------------------match------------------------------------------
@@ -82,7 +82,7 @@ Node *StartNode::match( const ProjNode *proj, const Matcher *match ) {
case TypeFunc::Control:
case TypeFunc::I_O:
case TypeFunc::Memory:
- return new MachProjNode(this,proj->_con,RegMask::EMPTY,MachProjNode::unmatched_proj);
+ return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
case TypeFunc::FramePtr:
return new MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
case TypeFunc::ReturnAdr:
@@ -777,12 +777,12 @@ Node *CallNode::match( const ProjNode *proj, const Matcher *match ) {
case TypeFunc::Control:
case TypeFunc::I_O:
case TypeFunc::Memory:
- return new MachProjNode(this,proj->_con,RegMask::EMPTY,MachProjNode::unmatched_proj);
+ return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
case TypeFunc::Parms+1: // For LONG & DOUBLE returns
assert(tf()->range()->field_at(TypeFunc::Parms+1) == Type::HALF, "");
// 2nd half of doubles and longs
- return new MachProjNode(this,proj->_con, RegMask::EMPTY, (uint)OptoReg::Bad);
+ return new MachProjNode(this,proj->_con, RegMask::Empty, (uint)OptoReg::Bad);
case TypeFunc::Parms: { // Normal returns
uint ideal_reg = tf()->range()->field_at(TypeFunc::Parms)->ideal_reg();
@@ -798,14 +798,14 @@ Node *CallNode::match( const ProjNode *proj, const Matcher *match ) {
if(ideal_reg >= Op_VecA && ideal_reg <= Op_VecZ) {
if(OptoReg::is_valid(regs.second())) {
for (OptoReg::Name r = regs.first(); r <= regs.second(); r = OptoReg::add(r, 1)) {
- rm.insert(r);
+ rm.Insert(r);
}
}
}
}
if( OptoReg::is_valid(regs.second()) )
- rm.insert(regs.second());
+ rm.Insert( regs.second() );
return new MachProjNode(this,proj->_con,rm,ideal_reg);
}
@@ -1492,14 +1492,12 @@ void SafePointNode::dump_spec(outputStream *st) const {
#endif
const RegMask &SafePointNode::in_RegMask(uint idx) const {
- if (idx < TypeFunc::Parms) {
- return RegMask::EMPTY;
- }
+ if( idx < TypeFunc::Parms ) return RegMask::Empty;
// Values outside the domain represent debug info
return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}
const RegMask &SafePointNode::out_RegMask() const {
- return RegMask::EMPTY;
+ return RegMask::Empty;
}
@@ -1610,7 +1608,7 @@ const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const {
}
const RegMask &SafePointScalarObjectNode::out_RegMask() const {
- return RegMask::EMPTY;
+ return RegMask::Empty;
}
uint SafePointScalarObjectNode::match_edge(uint idx) const {
@@ -1661,7 +1659,7 @@ const RegMask &SafePointScalarMergeNode::in_RegMask(uint idx) const {
}
const RegMask &SafePointScalarMergeNode::out_RegMask() const {
- return RegMask::EMPTY;
+ return RegMask::Empty;
}
uint SafePointScalarMergeNode::match_edge(uint idx) const {
diff --git a/src/hotspot/share/opto/cfgnode.cpp b/src/hotspot/share/opto/cfgnode.cpp
index 0293f42d79123..ef912ff471ac3 100644
--- a/src/hotspot/share/opto/cfgnode.cpp
+++ b/src/hotspot/share/opto/cfgnode.cpp
@@ -1014,7 +1014,7 @@ bool RegionNode::optimize_trichotomy(PhaseIterGVN* igvn) {
}
const RegMask &RegionNode::out_RegMask() const {
- return RegMask::EMPTY;
+ return RegMask::Empty;
}
#ifndef PRODUCT
@@ -2859,15 +2859,13 @@ bool PhiNode::is_tripcount(BasicType bt) const {
//------------------------------out_RegMask------------------------------------
const RegMask &PhiNode::in_RegMask(uint i) const {
- return i ? out_RegMask() : RegMask::EMPTY;
+ return i ? out_RegMask() : RegMask::Empty;
}
const RegMask &PhiNode::out_RegMask() const {
uint ideal_reg = _type->ideal_reg();
assert( ideal_reg != Node::NotAMachineReg, "invalid type at Phi" );
- if (ideal_reg == 0) {
- return RegMask::EMPTY;
- }
+ if( ideal_reg == 0 ) return RegMask::Empty;
assert(ideal_reg != Op_RegFlags, "flags register is not spillable");
return *(Compile::current()->matcher()->idealreg2spillmask[ideal_reg]);
}
@@ -2894,22 +2892,22 @@ Node* GotoNode::Identity(PhaseGVN* phase) {
}
const RegMask &GotoNode::out_RegMask() const {
- return RegMask::EMPTY;
+ return RegMask::Empty;
}
//=============================================================================
const RegMask &JumpNode::out_RegMask() const {
- return RegMask::EMPTY;
+ return RegMask::Empty;
}
//=============================================================================
const RegMask &JProjNode::out_RegMask() const {
- return RegMask::EMPTY;
+ return RegMask::Empty;
}
//=============================================================================
const RegMask &CProjNode::out_RegMask() const {
- return RegMask::EMPTY;
+ return RegMask::Empty;
}
diff --git a/src/hotspot/share/opto/cfgnode.hpp b/src/hotspot/share/opto/cfgnode.hpp
index 78ad085e03dbf..fffe00a4114c7 100644
--- a/src/hotspot/share/opto/cfgnode.hpp
+++ b/src/hotspot/share/opto/cfgnode.hpp
@@ -741,7 +741,7 @@ class BlackholeNode : public MultiNode {
// Fake the incoming arguments mask for blackholes: accept all registers
// and all stack slots. This would avoid any redundant register moves
// for blackhole inputs.
- return RegMask::ALL;
+ return RegMask::All;
}
#ifndef PRODUCT
virtual void format(PhaseRegAlloc* ra, outputStream* st) const;
diff --git a/src/hotspot/share/opto/chaitin.cpp b/src/hotspot/share/opto/chaitin.cpp
index 903203bd0944e..45a913506266e 100644
--- a/src/hotspot/share/opto/chaitin.cpp
+++ b/src/hotspot/share/opto/chaitin.cpp
@@ -49,11 +49,9 @@ void LRG::dump() const {
_mask.dump();
if( _msize_valid ) {
if( mask_size() == compute_mask_size() ) tty->print(", #%d ",_mask_size);
- else {
- tty->print(", #!!!_%d_vs_%d ", _mask_size, _mask.size());
- }
+ else tty->print(", #!!!_%d_vs_%d ",_mask_size,_mask.Size());
} else {
- tty->print(", #?(%d) ", _mask.size());
+ tty->print(", #?(%d) ",_mask.Size());
}
tty->print("EffDeg: ");
@@ -743,7 +741,7 @@ void PhaseChaitin::Register_Allocate() {
}
} else { // Misaligned; extract 2 bits
OptoReg::Name hi = lrg.reg(); // Get hi register
- lrg.remove(hi); // Yank from mask
+ lrg.Remove(hi); // Yank from mask
int lo = lrg.mask().find_first_elem(); // Find lo
set_pair(i, hi, lo);
}
@@ -775,7 +773,7 @@ void PhaseChaitin::de_ssa() {
Node *n = block->get_node(j);
// Pre-color to the zero live range, or pick virtual register
const RegMask &rm = n->out_RegMask();
- _lrg_map.map(n->_idx, !rm.is_empty() ? lr_counter++ : 0);
+ _lrg_map.map(n->_idx, !rm.is_Empty() ? lr_counter++ : 0);
}
}
@@ -796,7 +794,7 @@ void PhaseChaitin::mark_ssa() {
Node *n = block->get_node(j);
// Pre-color to the zero live range, or pick virtual register
const RegMask &rm = n->out_RegMask();
- _lrg_map.map(n->_idx, !rm.is_empty() ? n->_idx : 0);
+ _lrg_map.map(n->_idx, !rm.is_Empty() ? n->_idx : 0);
max_idx = (n->_idx > max_idx) ? n->_idx : max_idx;
}
}
@@ -881,7 +879,7 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
// Limit result register mask to acceptable registers
const RegMask &rm = n->out_RegMask();
- lrg.and_with(rm);
+ lrg.AND( rm );
uint ireg = n->ideal_reg();
assert( !n->bottom_type()->isa_oop_ptr() || ireg == Op_RegP,
@@ -937,7 +935,7 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
switch (ireg) {
case MachProjNode::fat_proj:
// Fat projections have size equal to number of registers killed
- lrg.set_num_regs(rm.size());
+ lrg.set_num_regs(rm.Size());
lrg.set_reg_pressure(lrg.num_regs());
lrg._fat_proj = 1;
lrg._is_bound = 1;
@@ -1128,7 +1126,7 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
// Later, AFTER aggressive, this live range will have to spill
// but the spiller handles slow-path calls very nicely.
} else {
- lrg.and_with(rm);
+ lrg.AND( rm );
}
// Check for bound register masks
@@ -1166,7 +1164,7 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
if (!is_vect && !n->is_SpillCopy() &&
(lrg._def == nullptr || lrg.is_multidef() || !lrg._def->is_SpillCopy()) &&
lrgmask.is_misaligned_pair()) {
- lrg.clear();
+ lrg.Clear();
}
// Check for maximum frequency value
@@ -1407,7 +1405,7 @@ void PhaseChaitin::Simplify( ) {
// Is 'reg' register legal for 'lrg'?
static bool is_legal_reg(LRG& lrg, OptoReg::Name reg) {
- if (lrg.mask().can_represent(reg) && lrg.mask().member(reg)) {
+ if (lrg.mask().can_represent(reg) && lrg.mask().Member(reg)) {
// RA uses OptoReg which represent the highest element of a registers set.
// For example, vectorX (128bit) on x86 uses [XMM,XMMb,XMMc,XMMd] set
// in which XMMd is used by RA to represent such vectors. A double value
@@ -1461,7 +1459,7 @@ static OptoReg::Name find_first_set(LRG& lrg, RegMask& mask) {
return assigned;
} else {
// Remove more for each iteration
- mask.remove(assigned - num_regs + 1); // Unmask the lowest reg
+ mask.Remove(assigned - num_regs + 1); // Unmask the lowest reg
mask.clear_to_sets(RegMask::SlotsPerVecA); // Align by SlotsPerVecA bits
assigned = mask.find_first_set(lrg, num_regs);
}
@@ -1512,7 +1510,7 @@ OptoReg::Name PhaseChaitin::bias_color(LRG& lrg) {
// Choose a color which is legal for him
ResourceMark rm(C->regmask_arena());
RegMask tempmask(lrg.mask(), C->regmask_arena());
- tempmask.and_with(lrgs(copy_lrg).mask());
+ tempmask.AND(lrgs(copy_lrg).mask());
tempmask.clear_to_sets(lrg.num_regs());
OptoReg::Name reg = find_first_set(lrg, tempmask);
if (OptoReg::is_valid(reg))
@@ -1535,9 +1533,9 @@ OptoReg::Name PhaseChaitin::bias_color(LRG& lrg) {
if( (++_alternate & 1) && OptoReg::is_valid(reg) ) {
// This 'Remove; find; Insert' idiom is an expensive way to find the
// SECOND element in the mask.
- lrg.remove(reg);
+ lrg.Remove(reg);
OptoReg::Name reg2 = lrg.mask().find_first_elem();
- lrg.insert(reg);
+ lrg.Insert(reg);
if (OptoReg::is_reg(reg2)) {
reg = reg2;
}
@@ -1547,8 +1545,8 @@ OptoReg::Name PhaseChaitin::bias_color(LRG& lrg) {
// Choose a color in the current chunk
OptoReg::Name PhaseChaitin::choose_color(LRG& lrg) {
- assert(C->in_preserve_stack_slots() == 0 || lrg.mask().is_offset() || lrg._is_bound || lrg.mask().is_bound1() || !lrg.mask().member(OptoReg::Name(_matcher._old_SP - 1)), "must not allocate stack0 (inside preserve area)");
- assert(C->out_preserve_stack_slots() == 0 || lrg.mask().is_offset() || lrg._is_bound || lrg.mask().is_bound1() || !lrg.mask().member(OptoReg::Name(_matcher._old_SP + 0)), "must not allocate stack0 (inside preserve area)");
+ assert(C->in_preserve_stack_slots() == 0 || lrg.mask().is_offset() || lrg._is_bound || lrg.mask().is_bound1() || !lrg.mask().Member(OptoReg::Name(_matcher._old_SP - 1)), "must not allocate stack0 (inside preserve area)");
+ assert(C->out_preserve_stack_slots() == 0 || lrg.mask().is_offset() || lrg._is_bound || lrg.mask().is_bound1() || !lrg.mask().Member(OptoReg::Name(_matcher._old_SP + 0)), "must not allocate stack0 (inside preserve area)");
if( lrg.num_regs() == 1 || // Common Case
!lrg._fat_proj ) // Aligned+adjacent pairs ok
@@ -1624,20 +1622,20 @@ uint PhaseChaitin::Select( ) {
// at retry_next_chunk.
if (nreg < LRG::SPILL_REG) {
#ifndef PRODUCT
- uint size = lrg->mask().size();
+ uint size = lrg->mask().Size();
ResourceMark rm(C->regmask_arena());
RegMask trace_mask(lrg->mask(), C->regmask_arena());
#endif
- lrg->subtract_inner(nlrg.mask());
+ lrg->SUBTRACT_inner(nlrg.mask());
#ifndef PRODUCT
- if (trace_spilling() && lrg->mask().size() != size) {
+ if (trace_spilling() && lrg->mask().Size() != size) {
ttyLocker ttyl;
tty->print("L%d ", lidx);
trace_mask.dump();
tty->print(" intersected L%d ", neighbor);
nlrg.mask().dump();
tty->print(" removed ");
- trace_mask.subtract(lrg->mask());
+ trace_mask.SUBTRACT(lrg->mask());
trace_mask.dump();
tty->print(" leaving ");
lrg->mask().dump();
@@ -1703,15 +1701,15 @@ uint PhaseChaitin::Select( ) {
} else {
assert(!lrg->_is_vector || n_regs <= RegMask::SlotsPerVecZ, "sanity");
}
- lrg->clear(); // Clear the mask
- lrg->insert(reg); // Set regmask to match selected reg
+ lrg->Clear(); // Clear the mask
+ lrg->Insert(reg); // Set regmask to match selected reg
// For vectors and pairs, also insert the low bit of the pair
// We always choose the high bit, then mask the low bits by register size
if (lrg->is_scalable() && OptoReg::is_stack(lrg->reg())) { // stack
n_regs = lrg->scalable_reg_slots();
}
for (int i = 1; i < n_regs; i++) {
- lrg->insert(OptoReg::add(reg, -i));
+ lrg->Insert(OptoReg::add(reg,-i));
}
lrg->set_mask_size(n_regs);
} else { // Else fatproj
diff --git a/src/hotspot/share/opto/chaitin.hpp b/src/hotspot/share/opto/chaitin.hpp
index b477c54fcae49..9b3f8123ac205 100644
--- a/src/hotspot/share/opto/chaitin.hpp
+++ b/src/hotspot/share/opto/chaitin.hpp
@@ -103,11 +103,11 @@ class LRG : public ResourceObj {
private:
RegMask _mask; // Allowed registers for this LRG
- uint _mask_size; // cache of _mask.size();
+ uint _mask_size; // cache of _mask.Size();
public:
- int compute_mask_size() const { return _mask.is_infinite_stack() ? INFINITE_STACK_SIZE : _mask.size(); }
+ int compute_mask_size() const { return _mask.is_infinite_stack() ? INFINITE_STACK_SIZE : _mask.Size(); }
void set_mask_size( int size ) {
- assert((size == (int)INFINITE_STACK_SIZE) || (size == (int)_mask.size()), "");
+ assert((size == (int)INFINITE_STACK_SIZE) || (size == (int)_mask.Size()), "");
_mask_size = size;
#ifdef ASSERT
_msize_valid=1;
@@ -128,17 +128,17 @@ class LRG : public ResourceObj {
// count of bits in the current mask.
int get_invalid_mask_size() const { return _mask_size; }
const RegMask &mask() const { return _mask; }
- void set_mask(const RegMask& rm) { _mask.assignFrom(rm); DEBUG_ONLY(_msize_valid = 0;) }
+ void set_mask( const RegMask &rm ) { _mask = rm; DEBUG_ONLY(_msize_valid=0;)}
void init_mask(Arena* arena) { new (&_mask) RegMask(arena); }
- void and_with( const RegMask &rm ) { _mask.and_with(rm); DEBUG_ONLY(_msize_valid=0;)}
- void subtract( const RegMask &rm ) { _mask.subtract(rm); DEBUG_ONLY(_msize_valid=0;)}
- void subtract_inner(const RegMask& rm) { _mask.subtract_inner(rm); DEBUG_ONLY(_msize_valid = 0;) }
- void clear() { _mask.clear() ; DEBUG_ONLY(_msize_valid=1); _mask_size = 0; }
- void set_all() { _mask.set_all(); DEBUG_ONLY(_msize_valid = 1); _mask_size = _mask.rm_size_in_bits(); }
+ void AND( const RegMask &rm ) { _mask.AND(rm); DEBUG_ONLY(_msize_valid=0;)}
+ void SUBTRACT( const RegMask &rm ) { _mask.SUBTRACT(rm); DEBUG_ONLY(_msize_valid=0;)}
+ void SUBTRACT_inner(const RegMask& rm) { _mask.SUBTRACT_inner(rm); DEBUG_ONLY(_msize_valid = 0;) }
+ void Clear() { _mask.Clear() ; DEBUG_ONLY(_msize_valid=1); _mask_size = 0; }
+ void Set_All() { _mask.Set_All(); DEBUG_ONLY(_msize_valid = 1); _mask_size = _mask.rm_size_in_bits(); }
bool rollover() { DEBUG_ONLY(_msize_valid = 1); _mask_size = _mask.rm_size_in_bits(); return _mask.rollover(); }
- void insert( OptoReg::Name reg ) { _mask.insert(reg); DEBUG_ONLY(_msize_valid=0;) }
- void remove( OptoReg::Name reg ) { _mask.remove(reg); DEBUG_ONLY(_msize_valid=0;) }
+ void Insert( OptoReg::Name reg ) { _mask.Insert(reg); DEBUG_ONLY(_msize_valid=0;) }
+ void Remove( OptoReg::Name reg ) { _mask.Remove(reg); DEBUG_ONLY(_msize_valid=0;) }
void clear_to_sets() { _mask.clear_to_sets(_num_regs); DEBUG_ONLY(_msize_valid=0;) }
private:
@@ -624,7 +624,7 @@ class PhaseChaitin : public PhaseRegAlloc {
void check_pressure_at_fatproj(uint fatproj_location, RegMask& fatproj_mask) {
// this pressure is only valid at this instruction, i.e. we don't need to lower
// the register pressure since the fat proj was never live before (going backwards)
- uint new_pressure = current_pressure() + fatproj_mask.size();
+ uint new_pressure = current_pressure() + fatproj_mask.Size();
if (new_pressure > final_pressure()) {
_final_pressure = new_pressure;
}
diff --git a/src/hotspot/share/opto/coalesce.cpp b/src/hotspot/share/opto/coalesce.cpp
index 82c1f7050c7fc..90a2dd0e152ec 100644
--- a/src/hotspot/share/opto/coalesce.cpp
+++ b/src/hotspot/share/opto/coalesce.cpp
@@ -118,7 +118,7 @@ void PhaseCoalesce::combine_these_two(Node *n1, Node *n2) {
// Merge in the IFG
_phc._ifg->Union( lr1, lr2 );
// Combine register restrictions
- lrg1->and_with(lrg2->mask());
+ lrg1->AND(lrg2->mask());
}
}
}
@@ -503,8 +503,8 @@ void PhaseConservativeCoalesce::union_helper( Node *lr1_node, Node *lr2_node, ui
lrgs(lr2).is_multidef() )
? NodeSentinel : src_def;
lrgs(lr2)._def = nullptr; // No def for lrg 2
- lrgs(lr2).clear(); // Force empty mask for LRG 2
- // lrgs(lr2)._size = 0; // Live-range 2 goes dead
+ lrgs(lr2).Clear(); // Force empty mask for LRG 2
+ //lrgs(lr2)._size = 0; // Live-range 2 goes dead
lrgs(lr1)._is_oop |= lrgs(lr2)._is_oop;
lrgs(lr2)._is_oop = 0; // In particular, not an oop for GC info
@@ -570,9 +570,9 @@ uint PhaseConservativeCoalesce::compute_separating_interferences(Node *dst_copy,
// If we attempt to coalesce across a bound def
if( lrgs(lidx).is_bound() ) {
// Do not let the coalesced LRG expect to get the bound color
- rm.subtract(lrgs(lidx).mask());
+ rm.SUBTRACT( lrgs(lidx).mask() );
// Recompute rm_size
- rm_size = rm.size();
+ rm_size = rm.Size();
//if( rm._flags ) rm_size += 1000000;
if( reg_degree >= rm_size ) return max_juint;
}
@@ -695,9 +695,9 @@ bool PhaseConservativeCoalesce::copy_copy(Node *dst_copy, Node *src_copy, Block
// intersecting their allowed register sets.
ResourceMark rm(C->regmask_arena());
RegMask mask(lrgs(lr1).mask(), C->regmask_arena());
- mask.and_with(lrgs(lr2).mask());
+ mask.AND(lrgs(lr2).mask());
// Number of bits free
- uint rm_size = mask.size();
+ uint rm_size = mask.Size();
if (UseFPUForSpilling && mask.is_infinite_stack() ) {
// Don't coalesce when frequency difference is large
diff --git a/src/hotspot/share/opto/connode.hpp b/src/hotspot/share/opto/connode.hpp
index 8cf3eea7570ea..4788858796015 100644
--- a/src/hotspot/share/opto/connode.hpp
+++ b/src/hotspot/share/opto/connode.hpp
@@ -43,8 +43,8 @@ class ConNode : public TypeNode {
}
virtual int Opcode() const;
virtual uint hash() const;
- virtual const RegMask& out_RegMask() const { return RegMask::EMPTY; }
- virtual const RegMask& in_RegMask(uint) const { return RegMask::EMPTY; }
+ virtual const RegMask &out_RegMask() const { return RegMask::Empty; }
+ virtual const RegMask &in_RegMask(uint) const { return RegMask::Empty; }
virtual Node* Ideal(PhaseGVN* phase, bool can_reshape) {
return Node::Ideal(phase, can_reshape);
diff --git a/src/hotspot/share/opto/divnode.cpp b/src/hotspot/share/opto/divnode.cpp
index 06ba1856941a3..823745ea8e7fd 100644
--- a/src/hotspot/share/opto/divnode.cpp
+++ b/src/hotspot/share/opto/divnode.cpp
@@ -1668,10 +1668,10 @@ Node *DivModINode::match( const ProjNode *proj, const Matcher *match ) {
uint ideal_reg = proj->ideal_reg();
RegMask rm;
if (proj->_con == div_proj_num) {
- rm.assignFrom(match->divI_proj_mask());
+ rm = match->divI_proj_mask();
} else {
assert(proj->_con == mod_proj_num, "must be div or mod projection");
- rm.assignFrom(match->modI_proj_mask());
+ rm = match->modI_proj_mask();
}
return new MachProjNode(this, proj->_con, rm, ideal_reg);
}
@@ -1683,10 +1683,10 @@ Node *DivModLNode::match( const ProjNode *proj, const Matcher *match ) {
uint ideal_reg = proj->ideal_reg();
RegMask rm;
if (proj->_con == div_proj_num) {
- rm.assignFrom(match->divL_proj_mask());
+ rm = match->divL_proj_mask();
} else {
assert(proj->_con == mod_proj_num, "must be div or mod projection");
- rm.assignFrom(match->modL_proj_mask());
+ rm = match->modL_proj_mask();
}
return new MachProjNode(this, proj->_con, rm, ideal_reg);
}
@@ -1721,10 +1721,10 @@ Node* UDivModINode::match( const ProjNode *proj, const Matcher *match ) {
uint ideal_reg = proj->ideal_reg();
RegMask rm;
if (proj->_con == div_proj_num) {
- rm.assignFrom(match->divI_proj_mask());
+ rm = match->divI_proj_mask();
} else {
assert(proj->_con == mod_proj_num, "must be div or mod projection");
- rm.assignFrom(match->modI_proj_mask());
+ rm = match->modI_proj_mask();
}
return new MachProjNode(this, proj->_con, rm, ideal_reg);
}
@@ -1736,10 +1736,10 @@ Node* UDivModLNode::match( const ProjNode *proj, const Matcher *match ) {
uint ideal_reg = proj->ideal_reg();
RegMask rm;
if (proj->_con == div_proj_num) {
- rm.assignFrom(match->divL_proj_mask());
+ rm = match->divL_proj_mask();
} else {
assert(proj->_con == mod_proj_num, "must be div or mod projection");
- rm.assignFrom(match->modL_proj_mask());
+ rm = match->modL_proj_mask();
}
return new MachProjNode(this, proj->_con, rm, ideal_reg);
}
diff --git a/src/hotspot/share/opto/escape.cpp b/src/hotspot/share/opto/escape.cpp
index a148b167ee301..cbf0666c00e4e 100644
--- a/src/hotspot/share/opto/escape.cpp
+++ b/src/hotspot/share/opto/escape.cpp
@@ -1296,8 +1296,9 @@ void ConnectionGraph::reduce_phi(PhiNode* ophi, GrowableArray &alloc_wo
castpps.push(use);
} else if (use->is_AddP() || use->is_Cmp()) {
others.push(use);
+ } else if (use->is_SafePoint()) {
+ // processed later
} else {
- // Safepoints to be processed later; other users aren't expected here
assert(use->is_SafePoint(), "Unexpected user of reducible Phi %d -> %d:%s:%d", ophi->_idx, use->_idx, use->Name(), use->outcnt());
}
}
diff --git a/src/hotspot/share/opto/gcm.cpp b/src/hotspot/share/opto/gcm.cpp
index 4a1553b1e0092..72c001a64c473 100644
--- a/src/hotspot/share/opto/gcm.cpp
+++ b/src/hotspot/share/opto/gcm.cpp
@@ -1449,9 +1449,8 @@ Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
// single register. Hoisting stretches the live range of the
// single register and may force spilling.
MachNode* mach = self->is_Mach() ? self->as_Mach() : nullptr;
- if (mach != nullptr && mach->out_RegMask().is_bound1() && !mach->out_RegMask().is_empty()) {
+ if (mach != nullptr && mach->out_RegMask().is_bound1() && !mach->out_RegMask().is_Empty())
in_latency = true;
- }
#ifndef PRODUCT
if (trace_opto_pipelining()) {
diff --git a/src/hotspot/share/opto/ifg.cpp b/src/hotspot/share/opto/ifg.cpp
index 1480e806f76a7..438209df8f823 100644
--- a/src/hotspot/share/opto/ifg.cpp
+++ b/src/hotspot/share/opto/ifg.cpp
@@ -55,7 +55,7 @@ void PhaseIFG::init( uint maxlrg ) {
for( uint i = 0; i < maxlrg; i++ ) {
_adjs[i].initialize(maxlrg);
_lrgs[i].init_mask(_arena);
- _lrgs[i].set_all();
+ _lrgs[i].Set_All();
}
}
@@ -655,7 +655,7 @@ bool PhaseChaitin::remove_node_if_not_used(Block* b, uint location, Node* n, uin
void PhaseChaitin::check_for_high_pressure_transition_at_fatproj(uint& block_reg_pressure, uint location, LRG& lrg, Pressure& pressure, const int op_regtype) {
ResourceMark rm(C->regmask_arena());
RegMask mask_tmp(lrg.mask(), C->regmask_arena());
- mask_tmp.and_with(*Matcher::idealreg2regmask[op_regtype]);
+ mask_tmp.AND(*Matcher::idealreg2regmask[op_regtype]);
pressure.check_pressure_at_fatproj(location, mask_tmp);
}
@@ -729,7 +729,7 @@ void PhaseChaitin::remove_bound_register_from_interfering_live_ranges(LRG& lrg,
}
// Remove bound register(s) from 'l's choices
- old.assignFrom(interfering_lrg.mask());
+ old = interfering_lrg.mask();
uint old_size = interfering_lrg.mask_size();
// Remove the bits from LRG 'mask' from LRG 'l' so 'l' no
@@ -738,21 +738,21 @@ void PhaseChaitin::remove_bound_register_from_interfering_live_ranges(LRG& lrg,
assert(!interfering_lrg._is_vector || !interfering_lrg._fat_proj, "sanity");
if (interfering_lrg.num_regs() > 1 && !interfering_lrg._fat_proj) {
- r2mask.assignFrom(mask);
+ r2mask = mask;
// Leave only aligned set of bits.
r2mask.smear_to_sets(interfering_lrg.num_regs());
// It includes vector case.
- interfering_lrg.subtract(r2mask);
+ interfering_lrg.SUBTRACT(r2mask);
interfering_lrg.compute_set_mask_size();
} else if (r_size != 1) {
// fat proj
- interfering_lrg.subtract(mask);
+ interfering_lrg.SUBTRACT(mask);
interfering_lrg.compute_set_mask_size();
} else {
// Common case: size 1 bound removal
OptoReg::Name r_reg = mask.find_first_elem();
- if (interfering_lrg.mask().member(r_reg)) {
- interfering_lrg.remove(r_reg);
+ if (interfering_lrg.mask().Member(r_reg)) {
+ interfering_lrg.Remove(r_reg);
interfering_lrg.set_mask_size(interfering_lrg.mask().is_infinite_stack() ? LRG::INFINITE_STACK_SIZE : old_size - 1);
}
}
@@ -933,7 +933,7 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
// Since rematerializable DEFs are not bound but the live range is,
// some uses must be bound. If we spill live range 'r', it can
// rematerialize at each use site according to its bindings.
- if (lrg.is_bound() && !n->rematerialize() && !lrg.mask().is_empty()) {
+ if (lrg.is_bound() && !n->rematerialize() && !lrg.mask().is_Empty()) {
remove_bound_register_from_interfering_live_ranges(lrg, &liveout, must_spill);
}
interfere_with_live(lid, &liveout);
diff --git a/src/hotspot/share/opto/ifnode.cpp b/src/hotspot/share/opto/ifnode.cpp
index 83e975b95a26e..b397c2c5852e4 100644
--- a/src/hotspot/share/opto/ifnode.cpp
+++ b/src/hotspot/share/opto/ifnode.cpp
@@ -82,7 +82,7 @@ const Type* IfNode::Value(PhaseGVN* phase) const {
}
const RegMask &IfNode::out_RegMask() const {
- return RegMask::EMPTY;
+ return RegMask::Empty;
}
//------------------------------split_if---------------------------------------
diff --git a/src/hotspot/share/opto/lcm.cpp b/src/hotspot/share/opto/lcm.cpp
index 53a503866fac1..fd7644f858760 100644
--- a/src/hotspot/share/opto/lcm.cpp
+++ b/src/hotspot/share/opto/lcm.cpp
@@ -855,12 +855,12 @@ void PhaseCFG::needed_for_next_call(Block* block, Node* this_call, VectorSet& ne
static void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe) {
// Fill in the kill mask for the call
for( OptoReg::Name r = OptoReg::Name(0); r < _last_Mach_Reg; r=OptoReg::add(r,1) ) {
- if (!regs.member(r)) { // Not already defined by the call
+ if( !regs.Member(r) ) { // Not already defined by the call
// Save-on-call register?
if ((save_policy[r] == 'C') ||
(save_policy[r] == 'A') ||
((save_policy[r] == 'E') && exclude_soe)) {
- proj->_rout.insert(r);
+ proj->_rout.Insert(r);
}
}
}
@@ -884,7 +884,7 @@ uint PhaseCFG::sched_call(Block* block, uint node_cnt, Node_List& worklist, Grow
// Schedule next to call
block->map_node(n, node_cnt++);
// Collect defined registers
- regs.or_with(n->out_RegMask());
+ regs.OR(n->out_RegMask());
// Check for scheduling the next control-definer
if( n->bottom_type() == Type::CONTROL )
// Warm up next pile of heuristic bits
@@ -907,12 +907,12 @@ uint PhaseCFG::sched_call(Block* block, uint node_cnt, Node_List& worklist, Grow
// Act as if the call defines the Frame Pointer.
// Certainly the FP is alive and well after the call.
- regs.insert(_matcher.c_frame_pointer());
+ regs.Insert(_matcher.c_frame_pointer());
// Set all registers killed and not already defined by the call.
uint r_cnt = mcall->tf()->range()->cnt();
int op = mcall->ideal_Opcode();
- MachProjNode* proj = new MachProjNode(mcall, r_cnt + 1, RegMask::EMPTY, MachProjNode::fat_proj);
+ MachProjNode *proj = new MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj );
map_node_to_block(proj, block);
block->insert_node(proj, node_cnt++);
@@ -1164,10 +1164,10 @@ bool PhaseCFG::schedule_local(Block* block, GrowableArray& ready_cnt, Vecto
if (n->is_Mach() && n->as_Mach()->has_call()) {
RegMask regs;
- regs.insert(_matcher.c_frame_pointer());
- regs.or_with(n->out_RegMask());
+ regs.Insert(_matcher.c_frame_pointer());
+ regs.OR(n->out_RegMask());
- MachProjNode* proj = new MachProjNode(n, 1, RegMask::EMPTY, MachProjNode::fat_proj);
+ MachProjNode *proj = new MachProjNode( n, 1, RegMask::Empty, MachProjNode::fat_proj );
map_node_to_block(proj, block);
block->insert_node(proj, phi_cnt++);
diff --git a/src/hotspot/share/opto/library_call.cpp b/src/hotspot/share/opto/library_call.cpp
index bd8a550b9ab76..ee3a3d3ba47bd 100644
--- a/src/hotspot/share/opto/library_call.cpp
+++ b/src/hotspot/share/opto/library_call.cpp
@@ -7273,7 +7273,7 @@ bool LibraryCallKit::inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id) {
const TypeInstPtr* tinst = _gvn.type(cipherBlockChaining_object)->isa_instptr();
assert(tinst != nullptr, "CBC obj is null");
assert(tinst->is_loaded(), "CBC obj is not loaded");
- ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AES_Crypt"));
+ ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded");
ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
@@ -7359,7 +7359,7 @@ bool LibraryCallKit::inline_electronicCodeBook_AESCrypt(vmIntrinsics::ID id) {
const TypeInstPtr* tinst = _gvn.type(electronicCodeBook_object)->isa_instptr();
assert(tinst != nullptr, "ECB obj is null");
assert(tinst->is_loaded(), "ECB obj is not loaded");
- ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AES_Crypt"));
+ ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded");
ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
@@ -7429,7 +7429,7 @@ bool LibraryCallKit::inline_counterMode_AESCrypt(vmIntrinsics::ID id) {
const TypeInstPtr* tinst = _gvn.type(counterMode_object)->isa_instptr();
assert(tinst != nullptr, "CTR obj is null");
assert(tinst->is_loaded(), "CTR obj is not loaded");
- ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AES_Crypt"));
+ ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded");
ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_AESCrypt);
@@ -7469,7 +7469,7 @@ Node * LibraryCallKit::get_key_start_from_aescrypt_object(Node *aescrypt_object)
// However, ppc64 vncipher processes MixColumns and requires the same round keys with encryption.
// The ppc64 and riscv64 stubs of encryption and decryption use the same round keys (sessionK[0]).
Node* objSessionK = load_field_from_object(aescrypt_object, "sessionK", "[[I");
- assert (objSessionK != nullptr, "wrong version of com.sun.crypto.provider.AES_Crypt");
+ assert (objSessionK != nullptr, "wrong version of com.sun.crypto.provider.AESCrypt");
if (objSessionK == nullptr) {
return (Node *) nullptr;
}
@@ -7477,7 +7477,7 @@ Node * LibraryCallKit::get_key_start_from_aescrypt_object(Node *aescrypt_object)
#else
Node* objAESCryptKey = load_field_from_object(aescrypt_object, "K", "[I");
#endif // PPC64
- assert (objAESCryptKey != nullptr, "wrong version of com.sun.crypto.provider.AES_Crypt");
+ assert (objAESCryptKey != nullptr, "wrong version of com.sun.crypto.provider.AESCrypt");
if (objAESCryptKey == nullptr) return (Node *) nullptr;
// now have the array, need to get the start address of the K array
@@ -7512,7 +7512,7 @@ Node* LibraryCallKit::inline_cipherBlockChaining_AESCrypt_predicate(bool decrypt
assert(tinst->is_loaded(), "CBCobj is not loaded");
// we want to do an instanceof comparison against the AESCrypt class
- ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AES_Crypt"));
+ ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
if (!klass_AESCrypt->is_loaded()) {
// if AESCrypt is not even loaded, we never take the intrinsic fast path
Node* ctrl = control();
@@ -7575,7 +7575,7 @@ Node* LibraryCallKit::inline_electronicCodeBook_AESCrypt_predicate(bool decrypti
assert(tinst->is_loaded(), "ECBobj is not loaded");
// we want to do an instanceof comparison against the AESCrypt class
- ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AES_Crypt"));
+ ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
if (!klass_AESCrypt->is_loaded()) {
// if AESCrypt is not even loaded, we never take the intrinsic fast path
Node* ctrl = control();
@@ -7635,7 +7635,7 @@ Node* LibraryCallKit::inline_counterMode_AESCrypt_predicate() {
assert(tinst->is_loaded(), "CTRobj is not loaded");
// we want to do an instanceof comparison against the AESCrypt class
- ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AES_Crypt"));
+ ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
if (!klass_AESCrypt->is_loaded()) {
// if AESCrypt is not even loaded, we never take the intrinsic fast path
Node* ctrl = control();
@@ -8608,7 +8608,7 @@ bool LibraryCallKit::inline_galoisCounterMode_AESCrypt() {
const TypeInstPtr* tinst = _gvn.type(gctr_object)->isa_instptr();
assert(tinst != nullptr, "GCTR obj is null");
assert(tinst->is_loaded(), "GCTR obj is not loaded");
- ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AES_Crypt"));
+ ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
assert(klass_AESCrypt->is_loaded(), "predicate checks that this class is loaded");
ciInstanceKlass* instklass_AESCrypt = klass_AESCrypt->as_instance_klass();
const TypeKlassPtr* aklass = TypeKlassPtr::make(instklass_AESCrypt);
@@ -8662,7 +8662,7 @@ Node* LibraryCallKit::inline_galoisCounterMode_AESCrypt_predicate() {
assert(tinst->is_loaded(), "GCTR obj is not loaded");
// we want to do an instanceof comparison against the AESCrypt class
- ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AES_Crypt"));
+ ciKlass* klass_AESCrypt = tinst->instance_klass()->find_klass(ciSymbol::make("com/sun/crypto/provider/AESCrypt"));
if (!klass_AESCrypt->is_loaded()) {
// if AESCrypt is not even loaded, we never take the intrinsic fast path
Node* ctrl = control();
diff --git a/src/hotspot/share/opto/loopUnswitch.cpp b/src/hotspot/share/opto/loopUnswitch.cpp
index f79afee31039c..b40a0492df511 100644
--- a/src/hotspot/share/opto/loopUnswitch.cpp
+++ b/src/hotspot/share/opto/loopUnswitch.cpp
@@ -522,9 +522,7 @@ IfTrueNode* PhaseIdealLoop::create_new_if_for_multiversion(IfTrueNode* multivers
// Hook region into slow_path, in stead of the multiversion_slow_proj.
// This also moves all other dependencies of the multiversion_slow_proj to the region.
- // The lazy_replace ensures that any get_ctrl that used to have multiversion_slow_proj
- // as their control are forwarded to the new region node as their control.
- lazy_replace(multiversion_slow_proj, region);
+ _igvn.replace_node(multiversion_slow_proj, region);
return new_if_true;
}
diff --git a/src/hotspot/share/opto/loopnode.cpp b/src/hotspot/share/opto/loopnode.cpp
index e8058edb4e5e5..4cb1862cbb939 100644
--- a/src/hotspot/share/opto/loopnode.cpp
+++ b/src/hotspot/share/opto/loopnode.cpp
@@ -5176,20 +5176,21 @@ void PhaseIdealLoop::build_and_optimize() {
continue;
}
Node* head = lpt->_head;
- if (!lpt->is_innermost()) continue;
+ if (!head->is_BaseCountedLoop() || !lpt->is_innermost()) continue;
// check for vectorized loops, any reassociation of invariants was already done
- if (head->is_CountedLoop() && head->as_CountedLoop()->is_unroll_only()) {
- continue;
- } else {
- AutoNodeBudget node_budget(this);
- lpt->reassociate_invariants(this);
+ if (head->is_CountedLoop()) {
+ if (head->as_CountedLoop()->is_unroll_only()) {
+ continue;
+ } else {
+ AutoNodeBudget node_budget(this);
+ lpt->reassociate_invariants(this);
+ }
}
// Because RCE opportunities can be masked by split_thru_phi,
// look for RCE candidates and inhibit split_thru_phi
// on just their loop-phi's for this pass of loop opts
if (SplitIfBlocks && do_split_ifs &&
- head->is_BaseCountedLoop() &&
head->as_BaseCountedLoop()->is_valid_counted_loop(head->as_BaseCountedLoop()->bt()) &&
(lpt->policy_range_check(this, true, T_LONG) ||
(head->is_CountedLoop() && lpt->policy_range_check(this, true, T_INT)))) {
diff --git a/src/hotspot/share/opto/machnode.cpp b/src/hotspot/share/opto/machnode.cpp
index e58befd8032d8..5da929e474860 100644
--- a/src/hotspot/share/opto/machnode.cpp
+++ b/src/hotspot/share/opto/machnode.cpp
@@ -525,7 +525,7 @@ bool MachNode::rematerialize() const {
uint idx = oper_input_base();
if (req() > idx) {
const RegMask &rm = in_RegMask(idx);
- if (!rm.is_empty() && rm.is_bound(ideal_reg())) {
+ if (!rm.is_Empty() && rm.is_bound(ideal_reg())) {
return false;
}
}
@@ -619,11 +619,8 @@ void MachNullCheckNode::save_label( Label** label, uint* block_num ) {
}
const RegMask &MachNullCheckNode::in_RegMask( uint idx ) const {
- if (idx == 0) {
- return RegMask::EMPTY;
- } else {
- return in(1)->as_Mach()->out_RegMask();
- }
+ if( idx == 0 ) return RegMask::Empty;
+ else return in(1)->as_Mach()->out_RegMask();
}
//=============================================================================
diff --git a/src/hotspot/share/opto/machnode.hpp b/src/hotspot/share/opto/machnode.hpp
index 30ac9181beccf..43e9a35df34b4 100644
--- a/src/hotspot/share/opto/machnode.hpp
+++ b/src/hotspot/share/opto/machnode.hpp
@@ -737,7 +737,7 @@ class MachNullCheckNode : public MachBranchNode {
virtual const class Type *bottom_type() const { return TypeTuple::IFBOTH; }
virtual uint ideal_reg() const { return NotAMachineReg; }
virtual const RegMask &in_RegMask(uint) const;
- virtual const RegMask& out_RegMask() const { return RegMask::EMPTY; }
+ virtual const RegMask &out_RegMask() const { return RegMask::Empty; }
#ifndef PRODUCT
virtual const char *Name() const { return "NullCheck"; }
virtual void format( PhaseRegAlloc *, outputStream *st ) const;
@@ -769,7 +769,7 @@ class MachProjNode : public ProjNode {
virtual int Opcode() const;
virtual const Type *bottom_type() const;
virtual const TypePtr *adr_type() const;
- virtual const RegMask& in_RegMask(uint) const { return RegMask::EMPTY; }
+ virtual const RegMask &in_RegMask(uint) const { return RegMask::Empty; }
virtual const RegMask &out_RegMask() const { return _rout; }
virtual uint ideal_reg() const { return _ideal_reg; }
// Need size_of() for virtual ProjNode::clone()
diff --git a/src/hotspot/share/opto/matcher.cpp b/src/hotspot/share/opto/matcher.cpp
index c63cefe7ac201..7d73487cf8840 100644
--- a/src/hotspot/share/opto/matcher.cpp
+++ b/src/hotspot/share/opto/matcher.cpp
@@ -176,12 +176,12 @@ void Matcher::match( ) {
if (C->failing()) {
return;
}
- assert(_return_addr_mask.is_empty(),
+ assert(_return_addr_mask.is_Empty(),
"return address mask must be empty initially");
- _return_addr_mask.insert(return_addr());
+ _return_addr_mask.Insert(return_addr());
#ifdef _LP64
// Pointers take 2 slots in 64-bit land
- _return_addr_mask.insert(OptoReg::add(return_addr(), 1));
+ _return_addr_mask.Insert(OptoReg::add(return_addr(),1));
#endif
// Map a Java-signature return type into return register-value
@@ -195,9 +195,9 @@ void Matcher::match( ) {
OptoRegPair regs = return_value(ireg);
// And mask for same
- _return_value_mask.assignFrom(RegMask(regs.first()));
+ _return_value_mask = RegMask(regs.first());
if( OptoReg::is_valid(regs.second()) )
- _return_value_mask.insert(regs.second());
+ _return_value_mask.Insert(regs.second());
}
// ---------------
@@ -261,7 +261,7 @@ void Matcher::match( ) {
assert( is_even(_in_arg_limit), "out_preserve must be even" );
for( i = 0; i < argcnt; i++ ) {
// Permit args to have no register
- _calling_convention_mask[i].clear();
+ _calling_convention_mask[i].Clear();
if( !vm_parm_regs[i].first()->is_valid() && !vm_parm_regs[i].second()->is_valid() ) {
_parm_regs[i].set_bad();
continue;
@@ -273,11 +273,11 @@ void Matcher::match( ) {
OptoReg::Name reg1 = warp_incoming_stk_arg(vm_parm_regs[i].first());
if( OptoReg::is_valid(reg1))
- _calling_convention_mask[i].insert(reg1);
+ _calling_convention_mask[i].Insert(reg1);
OptoReg::Name reg2 = warp_incoming_stk_arg(vm_parm_regs[i].second());
if( OptoReg::is_valid(reg2))
- _calling_convention_mask[i].insert(reg2);
+ _calling_convention_mask[i].Insert(reg2);
// Saved biased stack-slot register number
_parm_regs[i].set_pair(reg2, reg1);
@@ -422,11 +422,11 @@ static RegMask *init_input_masks( uint size, RegMask &ret_adr, RegMask &fp ) {
new (rms + i) RegMask(Compile::current()->comp_arena());
}
// Do all the pre-defined register masks
- rms[TypeFunc::Control ].assignFrom(RegMask::EMPTY);
- rms[TypeFunc::I_O ].assignFrom(RegMask::EMPTY);
- rms[TypeFunc::Memory ].assignFrom(RegMask::EMPTY);
- rms[TypeFunc::ReturnAdr].assignFrom(ret_adr);
- rms[TypeFunc::FramePtr ].assignFrom(fp);
+ rms[TypeFunc::Control ] = RegMask::Empty;
+ rms[TypeFunc::I_O ] = RegMask::Empty;
+ rms[TypeFunc::Memory ] = RegMask::Empty;
+ rms[TypeFunc::ReturnAdr] = ret_adr;
+ rms[TypeFunc::FramePtr ] = fp;
return rms;
}
@@ -471,15 +471,15 @@ void Matcher::init_first_stack_mask() {
assert(index == NOF_STACK_MASKS, "wrong size");
// At first, start with the empty mask
- C->FIRST_STACK_mask().clear();
+ C->FIRST_STACK_mask().Clear();
// Add in the incoming argument area
OptoReg::Name init_in = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
for (OptoReg::Name i = init_in; i < _in_arg_limit; i = OptoReg::add(i, 1)) {
- C->FIRST_STACK_mask().insert(i);
+ C->FIRST_STACK_mask().Insert(i);
}
// Add in all bits past the outgoing argument area
- C->FIRST_STACK_mask().set_all_from(_out_arg_limit);
+ C->FIRST_STACK_mask().Set_All_From(_out_arg_limit);
// Make spill masks. Registers for their class, plus FIRST_STACK_mask.
RegMask aligned_stack_mask(C->FIRST_STACK_mask(), C->comp_arena());
@@ -488,44 +488,44 @@ void Matcher::init_first_stack_mask() {
assert(aligned_stack_mask.is_infinite_stack(), "should be infinite stack");
RegMask scalable_stack_mask(aligned_stack_mask, C->comp_arena());
- idealreg2spillmask[Op_RegP]->assignFrom(*idealreg2regmask[Op_RegP]);
+ *idealreg2spillmask[Op_RegP] = *idealreg2regmask[Op_RegP];
#ifdef _LP64
- idealreg2spillmask[Op_RegN]->assignFrom(*idealreg2regmask[Op_RegN]);
- idealreg2spillmask[Op_RegN]->or_with(C->FIRST_STACK_mask());
- idealreg2spillmask[Op_RegP]->or_with(aligned_stack_mask);
+ *idealreg2spillmask[Op_RegN] = *idealreg2regmask[Op_RegN];
+ idealreg2spillmask[Op_RegN]->OR(C->FIRST_STACK_mask());
+ idealreg2spillmask[Op_RegP]->OR(aligned_stack_mask);
#else
- idealreg2spillmask[Op_RegP]->or_with(C->FIRST_STACK_mask());
+ idealreg2spillmask[Op_RegP]->OR(C->FIRST_STACK_mask());
#endif
- idealreg2spillmask[Op_RegI]->assignFrom(*idealreg2regmask[Op_RegI]);
- idealreg2spillmask[Op_RegI]->or_with(C->FIRST_STACK_mask());
- idealreg2spillmask[Op_RegL]->assignFrom(*idealreg2regmask[Op_RegL]);
- idealreg2spillmask[Op_RegL]->or_with(aligned_stack_mask);
- idealreg2spillmask[Op_RegF]->assignFrom(*idealreg2regmask[Op_RegF]);
- idealreg2spillmask[Op_RegF]->or_with(C->FIRST_STACK_mask());
- idealreg2spillmask[Op_RegD]->assignFrom(*idealreg2regmask[Op_RegD]);
- idealreg2spillmask[Op_RegD]->or_with(aligned_stack_mask);
+ *idealreg2spillmask[Op_RegI] = *idealreg2regmask[Op_RegI];
+ idealreg2spillmask[Op_RegI]->OR(C->FIRST_STACK_mask());
+ *idealreg2spillmask[Op_RegL] = *idealreg2regmask[Op_RegL];
+ idealreg2spillmask[Op_RegL]->OR(aligned_stack_mask);
+ *idealreg2spillmask[Op_RegF] = *idealreg2regmask[Op_RegF];
+ idealreg2spillmask[Op_RegF]->OR(C->FIRST_STACK_mask());
+ *idealreg2spillmask[Op_RegD] = *idealreg2regmask[Op_RegD];
+ idealreg2spillmask[Op_RegD]->OR(aligned_stack_mask);
if (Matcher::has_predicated_vectors()) {
- idealreg2spillmask[Op_RegVectMask]->assignFrom(*idealreg2regmask[Op_RegVectMask]);
- idealreg2spillmask[Op_RegVectMask]->or_with(aligned_stack_mask);
+ *idealreg2spillmask[Op_RegVectMask] = *idealreg2regmask[Op_RegVectMask];
+ idealreg2spillmask[Op_RegVectMask]->OR(aligned_stack_mask);
} else {
- idealreg2spillmask[Op_RegVectMask]->assignFrom(RegMask::EMPTY);
+ *idealreg2spillmask[Op_RegVectMask] = RegMask::Empty;
}
if (Matcher::vector_size_supported(T_BYTE,4)) {
- idealreg2spillmask[Op_VecS]->assignFrom(*idealreg2regmask[Op_VecS]);
- idealreg2spillmask[Op_VecS]->or_with(C->FIRST_STACK_mask());
+ *idealreg2spillmask[Op_VecS] = *idealreg2regmask[Op_VecS];
+ idealreg2spillmask[Op_VecS]->OR(C->FIRST_STACK_mask());
} else {
- idealreg2spillmask[Op_VecS]->assignFrom(RegMask::EMPTY);
+ *idealreg2spillmask[Op_VecS] = RegMask::Empty;
}
if (Matcher::vector_size_supported(T_FLOAT,2)) {
// For VecD we need dual alignment and 8 bytes (2 slots) for spills.
// RA guarantees such alignment since it is needed for Double and Long values.
- idealreg2spillmask[Op_VecD]->assignFrom(*idealreg2regmask[Op_VecD]);
- idealreg2spillmask[Op_VecD]->or_with(aligned_stack_mask);
+ *idealreg2spillmask[Op_VecD] = *idealreg2regmask[Op_VecD];
+ idealreg2spillmask[Op_VecD]->OR(aligned_stack_mask);
} else {
- idealreg2spillmask[Op_VecD]->assignFrom(RegMask::EMPTY);
+ *idealreg2spillmask[Op_VecD] = RegMask::Empty;
}
if (Matcher::vector_size_supported(T_FLOAT,4)) {
@@ -538,45 +538,45 @@ void Matcher::init_first_stack_mask() {
// otherwise vector spills could stomp over stack slots in caller frame.
OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecX); k++) {
- aligned_stack_mask.remove(in);
+ aligned_stack_mask.Remove(in);
in = OptoReg::add(in, -1);
}
- aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecX);
- assert(aligned_stack_mask.is_infinite_stack(), "should be infinite stack");
- idealreg2spillmask[Op_VecX]->assignFrom(*idealreg2regmask[Op_VecX]);
- idealreg2spillmask[Op_VecX]->or_with(aligned_stack_mask);
+ aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecX);
+ assert(aligned_stack_mask.is_infinite_stack(), "should be infinite stack");
+ *idealreg2spillmask[Op_VecX] = *idealreg2regmask[Op_VecX];
+ idealreg2spillmask[Op_VecX]->OR(aligned_stack_mask);
} else {
- idealreg2spillmask[Op_VecX]->assignFrom(RegMask::EMPTY);
+ *idealreg2spillmask[Op_VecX] = RegMask::Empty;
}
if (Matcher::vector_size_supported(T_FLOAT,8)) {
// For VecY we need octo alignment and 32 bytes (8 slots) for spills.
OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecY); k++) {
- aligned_stack_mask.remove(in);
+ aligned_stack_mask.Remove(in);
in = OptoReg::add(in, -1);
}
- aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecY);
- assert(aligned_stack_mask.is_infinite_stack(), "should be infinite stack");
- idealreg2spillmask[Op_VecY]->assignFrom(*idealreg2regmask[Op_VecY]);
- idealreg2spillmask[Op_VecY]->or_with(aligned_stack_mask);
+ aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecY);
+ assert(aligned_stack_mask.is_infinite_stack(), "should be infinite stack");
+ *idealreg2spillmask[Op_VecY] = *idealreg2regmask[Op_VecY];
+ idealreg2spillmask[Op_VecY]->OR(aligned_stack_mask);
} else {
- idealreg2spillmask[Op_VecY]->assignFrom(RegMask::EMPTY);
+ *idealreg2spillmask[Op_VecY] = RegMask::Empty;
}
if (Matcher::vector_size_supported(T_FLOAT,16)) {
// For VecZ we need enough alignment and 64 bytes (16 slots) for spills.
OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecZ); k++) {
- aligned_stack_mask.remove(in);
+ aligned_stack_mask.Remove(in);
in = OptoReg::add(in, -1);
}
- aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecZ);
- assert(aligned_stack_mask.is_infinite_stack(), "should be infinite stack");
- idealreg2spillmask[Op_VecZ]->assignFrom(*idealreg2regmask[Op_VecZ]);
- idealreg2spillmask[Op_VecZ]->or_with(aligned_stack_mask);
+ aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecZ);
+ assert(aligned_stack_mask.is_infinite_stack(), "should be infinite stack");
+ *idealreg2spillmask[Op_VecZ] = *idealreg2regmask[Op_VecZ];
+ idealreg2spillmask[Op_VecZ]->OR(aligned_stack_mask);
} else {
- idealreg2spillmask[Op_VecZ]->assignFrom(RegMask::EMPTY);
+ *idealreg2spillmask[Op_VecZ] = RegMask::Empty;
}
if (Matcher::supports_scalable_vector()) {
@@ -586,31 +586,31 @@ void Matcher::init_first_stack_mask() {
// Exclude last input arg stack slots to avoid spilling vector register there,
// otherwise RegVectMask spills could stomp over stack slots in caller frame.
for (; (in >= init_in) && (k < scalable_predicate_reg_slots()); k++) {
- scalable_stack_mask.remove(in);
+ scalable_stack_mask.Remove(in);
in = OptoReg::add(in, -1);
}
// For RegVectMask
scalable_stack_mask.clear_to_sets(scalable_predicate_reg_slots());
assert(scalable_stack_mask.is_infinite_stack(), "should be infinite stack");
- idealreg2spillmask[Op_RegVectMask]->assignFrom(*idealreg2regmask[Op_RegVectMask]);
- idealreg2spillmask[Op_RegVectMask]->or_with(scalable_stack_mask);
+ *idealreg2spillmask[Op_RegVectMask] = *idealreg2regmask[Op_RegVectMask];
+ idealreg2spillmask[Op_RegVectMask]->OR(scalable_stack_mask);
}
// Exclude last input arg stack slots to avoid spilling vector register there,
// otherwise vector spills could stomp over stack slots in caller frame.
for (; (in >= init_in) && (k < scalable_vector_reg_size(T_FLOAT)); k++) {
- scalable_stack_mask.remove(in);
+ scalable_stack_mask.Remove(in);
in = OptoReg::add(in, -1);
}
// For VecA
- scalable_stack_mask.clear_to_sets(RegMask::SlotsPerVecA);
- assert(scalable_stack_mask.is_infinite_stack(), "should be infinite stack");
- idealreg2spillmask[Op_VecA]->assignFrom(*idealreg2regmask[Op_VecA]);
- idealreg2spillmask[Op_VecA]->or_with(scalable_stack_mask);
+ scalable_stack_mask.clear_to_sets(RegMask::SlotsPerVecA);
+ assert(scalable_stack_mask.is_infinite_stack(), "should be infinite stack");
+ *idealreg2spillmask[Op_VecA] = *idealreg2regmask[Op_VecA];
+ idealreg2spillmask[Op_VecA]->OR(scalable_stack_mask);
} else {
- idealreg2spillmask[Op_VecA]->assignFrom(RegMask::EMPTY);
+ *idealreg2spillmask[Op_VecA] = RegMask::Empty;
}
if (UseFPUForSpilling) {
@@ -618,20 +618,20 @@ void Matcher::init_first_stack_mask() {
// symmetric and that the registers involved are the same size.
// On sparc for instance we may have to use 64 bit moves will
// kill 2 registers when used with F0-F31.
- idealreg2spillmask[Op_RegI]->or_with(*idealreg2regmask[Op_RegF]);
- idealreg2spillmask[Op_RegF]->or_with(*idealreg2regmask[Op_RegI]);
+ idealreg2spillmask[Op_RegI]->OR(*idealreg2regmask[Op_RegF]);
+ idealreg2spillmask[Op_RegF]->OR(*idealreg2regmask[Op_RegI]);
#ifdef _LP64
- idealreg2spillmask[Op_RegN]->or_with(*idealreg2regmask[Op_RegF]);
- idealreg2spillmask[Op_RegL]->or_with(*idealreg2regmask[Op_RegD]);
- idealreg2spillmask[Op_RegD]->or_with(*idealreg2regmask[Op_RegL]);
- idealreg2spillmask[Op_RegP]->or_with(*idealreg2regmask[Op_RegD]);
+ idealreg2spillmask[Op_RegN]->OR(*idealreg2regmask[Op_RegF]);
+ idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
+ idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
+ idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegD]);
#else
- idealreg2spillmask[Op_RegP]->or_with(*idealreg2regmask[Op_RegF]);
+ idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegF]);
#ifdef ARM
// ARM has support for moving 64bit values between a pair of
// integer registers and a double register
- idealreg2spillmask[Op_RegL]->or_with(*idealreg2regmask[Op_RegD]);
- idealreg2spillmask[Op_RegD]->or_with(*idealreg2regmask[Op_RegL]);
+ idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
+ idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
#endif
#endif
}
@@ -639,40 +639,40 @@ void Matcher::init_first_stack_mask() {
// Make up debug masks. Any spill slot plus callee-save (SOE) registers.
// Caller-save (SOC, AS) registers are assumed to be trashable by the various
// inline-cache fixup routines.
- idealreg2debugmask[Op_RegN]->assignFrom(*idealreg2spillmask[Op_RegN]);
- idealreg2debugmask[Op_RegI]->assignFrom(*idealreg2spillmask[Op_RegI]);
- idealreg2debugmask[Op_RegL]->assignFrom(*idealreg2spillmask[Op_RegL]);
- idealreg2debugmask[Op_RegF]->assignFrom(*idealreg2spillmask[Op_RegF]);
- idealreg2debugmask[Op_RegD]->assignFrom(*idealreg2spillmask[Op_RegD]);
- idealreg2debugmask[Op_RegP]->assignFrom(*idealreg2spillmask[Op_RegP]);
- idealreg2debugmask[Op_RegVectMask]->assignFrom(*idealreg2spillmask[Op_RegVectMask]);
-
- idealreg2debugmask[Op_VecA]->assignFrom(*idealreg2spillmask[Op_VecA]);
- idealreg2debugmask[Op_VecS]->assignFrom(*idealreg2spillmask[Op_VecS]);
- idealreg2debugmask[Op_VecD]->assignFrom(*idealreg2spillmask[Op_VecD]);
- idealreg2debugmask[Op_VecX]->assignFrom(*idealreg2spillmask[Op_VecX]);
- idealreg2debugmask[Op_VecY]->assignFrom(*idealreg2spillmask[Op_VecY]);
- idealreg2debugmask[Op_VecZ]->assignFrom(*idealreg2spillmask[Op_VecZ]);
+ *idealreg2debugmask [Op_RegN] = *idealreg2spillmask[Op_RegN];
+ *idealreg2debugmask [Op_RegI] = *idealreg2spillmask[Op_RegI];
+ *idealreg2debugmask [Op_RegL] = *idealreg2spillmask[Op_RegL];
+ *idealreg2debugmask [Op_RegF] = *idealreg2spillmask[Op_RegF];
+ *idealreg2debugmask [Op_RegD] = *idealreg2spillmask[Op_RegD];
+ *idealreg2debugmask [Op_RegP] = *idealreg2spillmask[Op_RegP];
+ *idealreg2debugmask [Op_RegVectMask] = *idealreg2spillmask[Op_RegVectMask];
+
+ *idealreg2debugmask [Op_VecA] = *idealreg2spillmask[Op_VecA];
+ *idealreg2debugmask [Op_VecS] = *idealreg2spillmask[Op_VecS];
+ *idealreg2debugmask [Op_VecD] = *idealreg2spillmask[Op_VecD];
+ *idealreg2debugmask [Op_VecX] = *idealreg2spillmask[Op_VecX];
+ *idealreg2debugmask [Op_VecY] = *idealreg2spillmask[Op_VecY];
+ *idealreg2debugmask [Op_VecZ] = *idealreg2spillmask[Op_VecZ];
// Prevent stub compilations from attempting to reference
// callee-saved (SOE) registers from debug info
bool exclude_soe = !Compile::current()->is_method_compilation();
RegMask* caller_save_mask = exclude_soe ? &caller_save_regmask_exclude_soe : &caller_save_regmask;
- idealreg2debugmask[Op_RegN]->subtract(*caller_save_mask);
- idealreg2debugmask[Op_RegI]->subtract(*caller_save_mask);
- idealreg2debugmask[Op_RegL]->subtract(*caller_save_mask);
- idealreg2debugmask[Op_RegF]->subtract(*caller_save_mask);
- idealreg2debugmask[Op_RegD]->subtract(*caller_save_mask);
- idealreg2debugmask[Op_RegP]->subtract(*caller_save_mask);
- idealreg2debugmask[Op_RegVectMask]->subtract(*caller_save_mask);
-
- idealreg2debugmask[Op_VecA]->subtract(*caller_save_mask);
- idealreg2debugmask[Op_VecS]->subtract(*caller_save_mask);
- idealreg2debugmask[Op_VecD]->subtract(*caller_save_mask);
- idealreg2debugmask[Op_VecX]->subtract(*caller_save_mask);
- idealreg2debugmask[Op_VecY]->subtract(*caller_save_mask);
- idealreg2debugmask[Op_VecZ]->subtract(*caller_save_mask);
+ idealreg2debugmask[Op_RegN]->SUBTRACT(*caller_save_mask);
+ idealreg2debugmask[Op_RegI]->SUBTRACT(*caller_save_mask);
+ idealreg2debugmask[Op_RegL]->SUBTRACT(*caller_save_mask);
+ idealreg2debugmask[Op_RegF]->SUBTRACT(*caller_save_mask);
+ idealreg2debugmask[Op_RegD]->SUBTRACT(*caller_save_mask);
+ idealreg2debugmask[Op_RegP]->SUBTRACT(*caller_save_mask);
+ idealreg2debugmask[Op_RegVectMask]->SUBTRACT(*caller_save_mask);
+
+ idealreg2debugmask[Op_VecA]->SUBTRACT(*caller_save_mask);
+ idealreg2debugmask[Op_VecS]->SUBTRACT(*caller_save_mask);
+ idealreg2debugmask[Op_VecD]->SUBTRACT(*caller_save_mask);
+ idealreg2debugmask[Op_VecX]->SUBTRACT(*caller_save_mask);
+ idealreg2debugmask[Op_VecY]->SUBTRACT(*caller_save_mask);
+ idealreg2debugmask[Op_VecZ]->SUBTRACT(*caller_save_mask);
}
//---------------------------is_save_on_entry----------------------------------
@@ -702,9 +702,8 @@ void Matcher::Fixup_Save_On_Entry( ) {
RegMask *ret_rms = init_input_masks( ret_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
// Returns have 0 or 1 returned values depending on call signature.
// Return register is specified by return_value in the AD file.
- if (ret_edge_cnt > TypeFunc::Parms) {
- ret_rms[TypeFunc::Parms + 0].assignFrom(_return_value_mask);
- }
+ if (ret_edge_cnt > TypeFunc::Parms)
+ ret_rms[TypeFunc::Parms+0] = _return_value_mask;
// Input RegMask array shared by all ForwardExceptions
uint forw_exc_edge_cnt = TypeFunc::Parms;
@@ -716,10 +715,10 @@ void Matcher::Fixup_Save_On_Entry( ) {
// Rethrow takes exception oop only, but in the argument 0 slot.
OptoReg::Name reg = find_receiver();
if (reg >= 0) {
- reth_rms[TypeFunc::Parms].assignFrom(mreg2regmask[reg]);
+ reth_rms[TypeFunc::Parms] = mreg2regmask[reg];
#ifdef _LP64
// Need two slots for ptrs in 64-bit land
- reth_rms[TypeFunc::Parms].insert(OptoReg::add(OptoReg::Name(reg), 1));
+ reth_rms[TypeFunc::Parms].Insert(OptoReg::add(OptoReg::Name(reg), 1));
#endif
}
@@ -738,8 +737,8 @@ void Matcher::Fixup_Save_On_Entry( ) {
for( i=1; i < root->req(); i++ ) {
MachReturnNode *m = root->in(i)->as_MachReturn();
if( m->ideal_Opcode() == Op_TailCall ) {
- tail_call_rms[TypeFunc::Parms + 0].assignFrom(m->MachNode::in_RegMask(TypeFunc::Parms + 0));
- tail_call_rms[TypeFunc::Parms + 1].assignFrom(m->MachNode::in_RegMask(TypeFunc::Parms + 1));
+ tail_call_rms[TypeFunc::Parms+0] = m->MachNode::in_RegMask(TypeFunc::Parms+0);
+ tail_call_rms[TypeFunc::Parms+1] = m->MachNode::in_RegMask(TypeFunc::Parms+1);
break;
}
}
@@ -751,8 +750,8 @@ void Matcher::Fixup_Save_On_Entry( ) {
for( i=1; i < root->req(); i++ ) {
MachReturnNode *m = root->in(i)->as_MachReturn();
if( m->ideal_Opcode() == Op_TailJump ) {
- tail_jump_rms[TypeFunc::Parms + 0].assignFrom(m->MachNode::in_RegMask(TypeFunc::Parms + 0));
- tail_jump_rms[TypeFunc::Parms + 1].assignFrom(m->MachNode::in_RegMask(TypeFunc::Parms + 1));
+ tail_jump_rms[TypeFunc::Parms+0] = m->MachNode::in_RegMask(TypeFunc::Parms+0);
+ tail_jump_rms[TypeFunc::Parms+1] = m->MachNode::in_RegMask(TypeFunc::Parms+1);
break;
}
}
@@ -785,14 +784,14 @@ void Matcher::Fixup_Save_On_Entry( ) {
if( is_save_on_entry(i) ) {
// Add the save-on-entry to the mask array
- ret_rms [ ret_edge_cnt].assignFrom(mreg2regmask[i]);
- reth_rms [ reth_edge_cnt].assignFrom(mreg2regmask[i]);
- tail_call_rms[tail_call_edge_cnt].assignFrom(mreg2regmask[i]);
- tail_jump_rms[tail_jump_edge_cnt].assignFrom(mreg2regmask[i]);
- forw_exc_rms [ forw_exc_edge_cnt].assignFrom(mreg2regmask[i]);
+ ret_rms [ ret_edge_cnt] = mreg2regmask[i];
+ reth_rms [ reth_edge_cnt] = mreg2regmask[i];
+ tail_call_rms[tail_call_edge_cnt] = mreg2regmask[i];
+ tail_jump_rms[tail_jump_edge_cnt] = mreg2regmask[i];
+ forw_exc_rms [ forw_exc_edge_cnt] = mreg2regmask[i];
// Halts need the SOE registers, but only in the stack as debug info.
// A just-prior uncommon-trap or deoptimization will use the SOE regs.
- halt_rms [ halt_edge_cnt].assignFrom(*idealreg2spillmask[_register_save_type[i]]);
+ halt_rms [ halt_edge_cnt] = *idealreg2spillmask[_register_save_type[i]];
Node *mproj;
@@ -803,12 +802,12 @@ void Matcher::Fixup_Save_On_Entry( ) {
_register_save_type[i+1] == Op_RegF &&
is_save_on_entry(i+1) ) {
// Add other bit for double
- ret_rms [ ret_edge_cnt].insert(OptoReg::Name(i+1));
- reth_rms [ reth_edge_cnt].insert(OptoReg::Name(i+1));
- tail_call_rms[tail_call_edge_cnt].insert(OptoReg::Name(i+1));
- tail_jump_rms[tail_jump_edge_cnt].insert(OptoReg::Name(i+1));
- forw_exc_rms [ forw_exc_edge_cnt].insert(OptoReg::Name(i+1));
- halt_rms [ halt_edge_cnt].insert(OptoReg::Name(i+1));
+ ret_rms [ ret_edge_cnt].Insert(OptoReg::Name(i+1));
+ reth_rms [ reth_edge_cnt].Insert(OptoReg::Name(i+1));
+ tail_call_rms[tail_call_edge_cnt].Insert(OptoReg::Name(i+1));
+ tail_jump_rms[tail_jump_edge_cnt].Insert(OptoReg::Name(i+1));
+ forw_exc_rms [ forw_exc_edge_cnt].Insert(OptoReg::Name(i+1));
+ halt_rms [ halt_edge_cnt].Insert(OptoReg::Name(i+1));
mproj = new MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegD );
proj_cnt += 2; // Skip 2 for doubles
}
@@ -816,12 +815,12 @@ void Matcher::Fixup_Save_On_Entry( ) {
_register_save_type[i-1] == Op_RegF &&
_register_save_type[i ] == Op_RegF &&
is_save_on_entry(i-1) ) {
- ret_rms [ ret_edge_cnt].assignFrom(RegMask::EMPTY);
- reth_rms [ reth_edge_cnt].assignFrom(RegMask::EMPTY);
- tail_call_rms[tail_call_edge_cnt].assignFrom(RegMask::EMPTY);
- tail_jump_rms[tail_jump_edge_cnt].assignFrom(RegMask::EMPTY);
- forw_exc_rms [ forw_exc_edge_cnt].assignFrom(RegMask::EMPTY);
- halt_rms [ halt_edge_cnt].assignFrom(RegMask::EMPTY);
+ ret_rms [ ret_edge_cnt] = RegMask::Empty;
+ reth_rms [ reth_edge_cnt] = RegMask::Empty;
+ tail_call_rms[tail_call_edge_cnt] = RegMask::Empty;
+ tail_jump_rms[tail_jump_edge_cnt] = RegMask::Empty;
+ forw_exc_rms [ forw_exc_edge_cnt] = RegMask::Empty;
+ halt_rms [ halt_edge_cnt] = RegMask::Empty;
mproj = C->top();
}
// Is this a RegI low half of a RegL? Double up 2 adjacent RegI's
@@ -831,12 +830,12 @@ void Matcher::Fixup_Save_On_Entry( ) {
_register_save_type[i+1] == Op_RegI &&
is_save_on_entry(i+1) ) {
// Add other bit for long
- ret_rms [ ret_edge_cnt].insert(OptoReg::Name(i+1));
- reth_rms [ reth_edge_cnt].insert(OptoReg::Name(i+1));
- tail_call_rms[tail_call_edge_cnt].insert(OptoReg::Name(i+1));
- tail_jump_rms[tail_jump_edge_cnt].insert(OptoReg::Name(i+1));
- forw_exc_rms [ forw_exc_edge_cnt].insert(OptoReg::Name(i+1));
- halt_rms [ halt_edge_cnt].insert(OptoReg::Name(i+1));
+ ret_rms [ ret_edge_cnt].Insert(OptoReg::Name(i+1));
+ reth_rms [ reth_edge_cnt].Insert(OptoReg::Name(i+1));
+ tail_call_rms[tail_call_edge_cnt].Insert(OptoReg::Name(i+1));
+ tail_jump_rms[tail_jump_edge_cnt].Insert(OptoReg::Name(i+1));
+ forw_exc_rms [ forw_exc_edge_cnt].Insert(OptoReg::Name(i+1));
+ halt_rms [ halt_edge_cnt].Insert(OptoReg::Name(i+1));
mproj = new MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegL );
proj_cnt += 2; // Skip 2 for longs
}
@@ -844,12 +843,12 @@ void Matcher::Fixup_Save_On_Entry( ) {
_register_save_type[i-1] == Op_RegI &&
_register_save_type[i ] == Op_RegI &&
is_save_on_entry(i-1) ) {
- ret_rms [ ret_edge_cnt].assignFrom(RegMask::EMPTY);
- reth_rms [ reth_edge_cnt].assignFrom(RegMask::EMPTY);
- tail_call_rms[tail_call_edge_cnt].assignFrom(RegMask::EMPTY);
- tail_jump_rms[tail_jump_edge_cnt].assignFrom(RegMask::EMPTY);
- forw_exc_rms [ forw_exc_edge_cnt].assignFrom(RegMask::EMPTY);
- halt_rms [ halt_edge_cnt].assignFrom(RegMask::EMPTY);
+ ret_rms [ ret_edge_cnt] = RegMask::Empty;
+ reth_rms [ reth_edge_cnt] = RegMask::Empty;
+ tail_call_rms[tail_call_edge_cnt] = RegMask::Empty;
+ tail_jump_rms[tail_jump_edge_cnt] = RegMask::Empty;
+ forw_exc_rms [ forw_exc_edge_cnt] = RegMask::Empty;
+ halt_rms [ halt_edge_cnt] = RegMask::Empty;
mproj = C->top();
} else {
// Make a projection for it off the Start
@@ -876,34 +875,34 @@ void Matcher::init_spill_mask( Node *ret ) {
if( idealreg2regmask[Op_RegI] ) return; // One time only init
OptoReg::c_frame_pointer = c_frame_pointer();
- c_frame_ptr_mask.assignFrom(RegMask(c_frame_pointer()));
+ c_frame_ptr_mask = RegMask(c_frame_pointer());
#ifdef _LP64
// pointers are twice as big
- c_frame_ptr_mask.insert(OptoReg::add(c_frame_pointer(), 1));
+ c_frame_ptr_mask.Insert(OptoReg::add(c_frame_pointer(),1));
#endif
// Start at OptoReg::stack0()
- STACK_ONLY_mask.clear();
+ STACK_ONLY_mask.Clear();
// STACK_ONLY_mask is all stack bits
- STACK_ONLY_mask.set_all_from(OptoReg::stack2reg(0));
+ STACK_ONLY_mask.Set_All_From(OptoReg::stack2reg(0));
for (OptoReg::Name i = OptoReg::Name(0); i < OptoReg::Name(_last_Mach_Reg);
i = OptoReg::add(i, 1)) {
// Copy the register names over into the shared world.
// SharedInfo::regName[i] = regName[i];
// Handy RegMasks per machine register
- mreg2regmask[i].insert(i);
+ mreg2regmask[i].Insert(i);
// Set up regmasks used to exclude save-on-call (and always-save) registers from debug masks.
if (_register_save_policy[i] == 'C' ||
_register_save_policy[i] == 'A') {
- caller_save_regmask.insert(i);
+ caller_save_regmask.Insert(i);
}
// Exclude save-on-entry registers from debug masks for stub compilations.
if (_register_save_policy[i] == 'C' ||
_register_save_policy[i] == 'A' ||
_register_save_policy[i] == 'E') {
- caller_save_regmask_exclude_soe.insert(i);
+ caller_save_regmask_exclude_soe.Insert(i);
}
}
@@ -1241,8 +1240,8 @@ MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) {
}
// Do all the pre-defined non-Empty register masks
- msfpt->_in_rms[TypeFunc::ReturnAdr].assignFrom(_return_addr_mask);
- msfpt->_in_rms[TypeFunc::FramePtr ].assignFrom(c_frame_ptr_mask);
+ msfpt->_in_rms[TypeFunc::ReturnAdr] = _return_addr_mask;
+ msfpt->_in_rms[TypeFunc::FramePtr ] = c_frame_ptr_mask;
// Place first outgoing argument can possibly be put.
OptoReg::Name begin_out_arg_area = OptoReg::add(_new_SP, C->out_preserve_stack_slots());
@@ -1316,17 +1315,17 @@ MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) {
OptoReg::Name reg_snd = OptoReg::as_OptoReg(second);
assert (reg_fst <= reg_snd, "fst=%d snd=%d", reg_fst, reg_snd);
for (OptoReg::Name r = reg_fst; r <= reg_snd; r++) {
- rm->insert(r);
+ rm->Insert(r);
}
}
// Grab first register, adjust stack slots and insert in mask.
OptoReg::Name reg1 = warp_outgoing_stk_arg(first, begin_out_arg_area, out_arg_limit_per_call );
if (OptoReg::is_valid(reg1))
- rm->insert(reg1);
+ rm->Insert( reg1 );
// Grab second register (if any), adjust stack slots and insert in mask.
OptoReg::Name reg2 = warp_outgoing_stk_arg(second, begin_out_arg_area, out_arg_limit_per_call );
if (OptoReg::is_valid(reg2))
- rm->insert(reg2);
+ rm->Insert( reg2 );
} // End of for all arguments
}
@@ -1343,11 +1342,11 @@ MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) {
// is excluded on the max-per-method basis, debug info cannot land in
// this killed area.
uint r_cnt = mcall->tf()->range()->cnt();
- MachProjNode* proj = new MachProjNode(mcall, r_cnt + 10000, RegMask::EMPTY, MachProjNode::fat_proj);
+ MachProjNode *proj = new MachProjNode( mcall, r_cnt+10000, RegMask::Empty, MachProjNode::fat_proj );
for (int i = begin_out_arg_area; i < out_arg_limit_per_call; i++) {
- proj->_rout.insert(OptoReg::Name(i));
+ proj->_rout.Insert(OptoReg::Name(i));
}
- if (!proj->_rout.is_empty()) {
+ if (!proj->_rout.is_Empty()) {
push_projection(proj);
}
}
diff --git a/src/hotspot/share/opto/matcher.hpp b/src/hotspot/share/opto/matcher.hpp
index e4396b423ac0e..0b609b70ab5e9 100644
--- a/src/hotspot/share/opto/matcher.hpp
+++ b/src/hotspot/share/opto/matcher.hpp
@@ -408,14 +408,14 @@ class Matcher : public PhaseTransform {
static int inline_cache_reg_encode();
// Register for DIVI projection of divmodI
- static const RegMask& divI_proj_mask();
+ static RegMask divI_proj_mask();
// Register for MODI projection of divmodI
- static const RegMask& modI_proj_mask();
+ static RegMask modI_proj_mask();
// Register for DIVL projection of divmodL
- static const RegMask& divL_proj_mask();
+ static RegMask divL_proj_mask();
// Register for MODL projection of divmodL
- static const RegMask& modL_proj_mask();
+ static RegMask modL_proj_mask();
// Use hardware DIV instruction when it is faster than
// a code which use multiply for division by constant.
diff --git a/src/hotspot/share/opto/memnode.cpp b/src/hotspot/share/opto/memnode.cpp
index 9187ef1a36150..2080b7cbeb521 100644
--- a/src/hotspot/share/opto/memnode.cpp
+++ b/src/hotspot/share/opto/memnode.cpp
@@ -4325,7 +4325,7 @@ Node *MemBarNode::match( const ProjNode *proj, const Matcher *m ) {
switch (proj->_con) {
case TypeFunc::Control:
case TypeFunc::Memory:
- return new MachProjNode(this, proj->_con, RegMask::EMPTY, MachProjNode::unmatched_proj);
+ return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
}
ShouldNotReachHere();
return nullptr;
@@ -4572,7 +4572,7 @@ const RegMask &InitializeNode::in_RegMask(uint idx) const {
// This edge should be set to top, by the set_complete. But be conservative.
if (idx == InitializeNode::RawAddress)
return *(Compile::current()->matcher()->idealreg2spillmask[in(idx)->ideal_reg()]);
- return RegMask::EMPTY;
+ return RegMask::Empty;
}
Node* InitializeNode::memory(uint alias_idx) {
@@ -5784,7 +5784,7 @@ void MergeMemNode::set_base_memory(Node *new_base) {
//------------------------------out_RegMask------------------------------------
const RegMask &MergeMemNode::out_RegMask() const {
- return RegMask::EMPTY;
+ return RegMask::Empty;
}
//------------------------------dump_spec--------------------------------------
diff --git a/src/hotspot/share/opto/mempointer.cpp b/src/hotspot/share/opto/mempointer.cpp
index 68abaffe6429a..a63ba8ef7010b 100644
--- a/src/hotspot/share/opto/mempointer.cpp
+++ b/src/hotspot/share/opto/mempointer.cpp
@@ -112,7 +112,7 @@ void MemPointerParser::canonicalize_raw_summands() {
}
}
// Keep summands with non-zero scale.
- if (!scaleI.is_zero() && !scaleL.is_zero()) {
+ if (!scaleI.is_zero() && !scaleL.is_NaN()) {
_raw_summands.at_put(pos_put++, MemPointerRawSummand(variable, scaleI, scaleL, int_group));
}
}
diff --git a/src/hotspot/share/opto/mulnode.cpp b/src/hotspot/share/opto/mulnode.cpp
index 1e757c2be8110..4691c6a45b063 100644
--- a/src/hotspot/share/opto/mulnode.cpp
+++ b/src/hotspot/share/opto/mulnode.cpp
@@ -963,7 +963,7 @@ static bool const_shift_count(PhaseGVN* phase, const Node* shift_node, int* coun
}
// Returns whether the shift amount is constant. If so, sets real_shift and masked_shift.
-static bool mask_shift_amount(PhaseGVN* phase, const Node* shift_node, uint nBits, int& real_shift, uint& masked_shift) {
+static bool mask_shift_amount(PhaseGVN* phase, const Node* shift_node, uint nBits, int& real_shift, int& masked_shift) {
if (const_shift_count(phase, shift_node, &real_shift)) {
masked_shift = real_shift & (nBits - 1);
return true;
@@ -972,23 +972,23 @@ static bool mask_shift_amount(PhaseGVN* phase, const Node* shift_node, uint nBit
}
// Convenience for when we don't care about the real amount
-static bool mask_shift_amount(PhaseGVN* phase, const Node* shift_node, uint nBits, uint& masked_shift) {
+static bool mask_shift_amount(PhaseGVN* phase, const Node* shift_node, uint nBits, int& masked_shift) {
int real_shift;
return mask_shift_amount(phase, shift_node, nBits, real_shift, masked_shift);
}
// Use this in ::Ideal only with shiftNode == this!
// Returns the masked shift amount if constant or 0 if not constant.
-static uint mask_and_replace_shift_amount(PhaseGVN* phase, Node* shift_node, uint nBits) {
+static int mask_and_replace_shift_amount(PhaseGVN* phase, Node* shift_node, uint nBits) {
int real_shift;
- uint masked_shift;
+ int masked_shift;
if (mask_shift_amount(phase, shift_node, nBits, real_shift, masked_shift)) {
if (masked_shift == 0) {
// Let Identity() handle 0 shift count.
return 0;
}
- if (real_shift != (int)masked_shift) {
+ if (real_shift != masked_shift) {
PhaseIterGVN* igvn = phase->is_IterGVN();
if (igvn != nullptr) {
igvn->_worklist.push(shift_node);
@@ -1013,15 +1013,15 @@ static uint mask_and_replace_shift_amount(PhaseGVN* phase, Node* shift_node, uin
// There are 2 cases:
// if con_outer + con_inner >= nbits => 0
// if con_outer + con_inner < nbits => X << (con_outer + con_inner)
-static Node* collapse_nested_shift_left(PhaseGVN* phase, const Node* outer_shift, uint con_outer, BasicType bt) {
+static Node* collapse_nested_shift_left(PhaseGVN* phase, const Node* outer_shift, int con_outer, BasicType bt) {
assert(bt == T_LONG || bt == T_INT, "Unexpected type");
const Node* inner_shift = outer_shift->in(1);
if (inner_shift->Opcode() != Op_LShift(bt)) {
return nullptr;
}
- uint nbits = bits_per_java_integer(bt);
- uint con_inner;
+ int nbits = static_cast(bits_per_java_integer(bt));
+ int con_inner;
if (!mask_shift_amount(phase, inner_shift, nbits, con_inner)) {
return nullptr;
}
@@ -1046,81 +1046,86 @@ static Node* collapse_nested_shift_left(PhaseGVN* phase, const Node* outer_shift
//------------------------------Identity---------------------------------------
Node* LShiftINode::Identity(PhaseGVN* phase) {
- return IdentityIL(phase, T_INT);
+ int count = 0;
+ if (const_shift_count(phase, this, &count) && (count & (BitsPerJavaInteger - 1)) == 0) {
+ // Shift by a multiple of 32 does nothing
+ return in(1);
+ }
+ return this;
}
-Node* LShiftNode::IdealIL(PhaseGVN* phase, bool can_reshape, BasicType bt) {
- uint con = mask_and_replace_shift_amount(phase, this, bits_per_java_integer(bt));
+//------------------------------Ideal------------------------------------------
+// If the right input is a constant, and the left input is an add of a
+// constant, flatten the tree: (X+con1)< X< X << (con1 + con2)
+Node *LShiftINode::Ideal(PhaseGVN *phase, bool can_reshape) {
+ int con = mask_and_replace_shift_amount(phase, this, BitsPerJavaInteger);
if (con == 0) {
return nullptr;
}
- // If the right input is a constant, and the left input is an add of a
- // constant, flatten the tree: (X+con1)< X<Opcode();
- if (add1_op == Op_Add(bt)) { // Left input is an add?
- assert(add1 != add1->in(1), "dead loop in LShiftINode::Ideal");
+ if( add1_op == Op_AddI ) { // Left input is an add?
+ assert( add1 != add1->in(1), "dead loop in LShiftINode::Ideal" );
// Transform is legal, but check for profit. Avoid breaking 'i2s'
// and 'i2b' patterns which typically fold into 'StoreC/StoreB'.
- if (bt != T_INT || con < 16) {
+ if( con < 16 ) {
// Left input is an add of the same number?
- if (con != (bits_per_java_integer(bt) - 1) && add1->in(1) == add1->in(2)) {
+ if (add1->in(1) == add1->in(2)) {
// Convert "(x + x) << c0" into "x << (c0 + 1)"
- // In general, this optimization cannot be applied for c0 == 31 (for LShiftI) since
+ // In general, this optimization cannot be applied for c0 == 31 since
// 2x << 31 != x << 32 = x << 0 = x (e.g. x = 1: 2 << 31 = 0 != 1)
- // or c0 != 63 (for LShiftL) because:
- // (x + x) << 63 = 2x << 63, while
- // (x + x) << 63 --transform--> x << 64 = x << 0 = x (!= 2x << 63, for example for x = 1)
- // According to the Java spec, chapter 15.19, we only consider the six lowest-order bits of the right-hand operand
- // (i.e. "right-hand operand" & 0b111111). Therefore, x << 64 is the same as x << 0 (64 = 0b10000000 & 0b0111111 = 0).
- return LShiftNode::make(add1->in(1), phase->intcon(con + 1), bt);
+ return new LShiftINode(add1->in(1), phase->intcon(con + 1));
}
// Left input is an add of a constant?
- const TypeInteger* t12 = phase->type(add1->in(2))->isa_integer(bt);
- if (t12 != nullptr && t12->is_con()) { // Left input is an add of a con?
+ const TypeInt *t12 = phase->type(add1->in(2))->isa_int();
+ if( t12 && t12->is_con() ){ // Left input is an add of a con?
// Compute X << con0
- Node* lsh = phase->transform(LShiftNode::make(add1->in(1), in(2), bt));
+ Node *lsh = phase->transform( new LShiftINode( add1->in(1), in(2) ) );
// Compute X<integercon(java_shift_left(t12->get_con_as_long(bt), con, bt), bt), bt);
+ return new AddINode( lsh, phase->intcon(t12->get_con() << con));
}
}
}
// Check for "(x >> C1) << C2"
- if (add1_op == Op_RShift(bt) || add1_op == Op_URShift(bt)) {
+ if (add1_op == Op_RShiftI || add1_op == Op_URShiftI) {
int add1Con = 0;
const_shift_count(phase, add1, &add1Con);
// Special case C1 == C2, which just masks off low bits
- if (add1Con > 0 && con == (uint)add1Con) {
+ if (add1Con > 0 && con == add1Con) {
// Convert to "(x & -(1 << C2))"
- return MulNode::make_and(add1->in(1), phase->integercon(java_negate(java_shift_left(1, con, bt), bt), bt), bt);
+ return new AndINode(add1->in(1), phase->intcon(java_negate(jint(1 << con))));
} else {
// Wait until the right shift has been sharpened to the correct count
- if (add1Con > 0 && (uint)add1Con < bits_per_java_integer(bt)) {
+ if (add1Con > 0 && add1Con < BitsPerJavaInteger) {
// As loop parsing can produce LShiftI nodes, we should wait until the graph is fully formed
// to apply optimizations, otherwise we can inadvertently stop vectorization opportunities.
if (phase->is_IterGVN()) {
- if (con > (uint)add1Con) {
+ if (con > add1Con) {
// Creates "(x << (C2 - C1)) & -(1 << C2)"
- Node* lshift = phase->transform(LShiftNode::make(add1->in(1), phase->intcon(con - add1Con), bt));
- return MulNode::make_and(lshift, phase->integercon(java_negate(java_shift_left(1, con, bt), bt), bt), bt);
+ Node* lshift = phase->transform(new LShiftINode(add1->in(1), phase->intcon(con - add1Con)));
+ return new AndINode(lshift, phase->intcon(java_negate(jint(1 << con))));
} else {
- assert(con < (uint)add1Con, "must be (%d < %d)", con, add1Con);
+ assert(con < add1Con, "must be (%d < %d)", con, add1Con);
// Creates "(x >> (C1 - C2)) & -(1 << C2)"
// Handle logical and arithmetic shifts
Node* rshift;
- if (add1_op == Op_RShift(bt)) {
- rshift = phase->transform(RShiftNode::make(add1->in(1), phase->intcon(add1Con - con), bt));
+ if (add1_op == Op_RShiftI) {
+ rshift = phase->transform(new RShiftINode(add1->in(1), phase->intcon(add1Con - con)));
} else {
- rshift = phase->transform(URShiftNode::make(add1->in(1), phase->intcon(add1Con - con), bt));
+ rshift = phase->transform(new URShiftINode(add1->in(1), phase->intcon(add1Con - con)));
}
- return MulNode::make_and(rshift, phase->integercon(java_negate(java_shift_left(1, con, bt)), bt), bt);
+ return new AndINode(rshift, phase->intcon(java_negate(jint(1 << con))));
}
} else {
phase->record_for_igvn(this);
@@ -1130,29 +1135,29 @@ Node* LShiftNode::IdealIL(PhaseGVN* phase, bool can_reshape, BasicType bt) {
}
// Check for "((x >> C1) & Y) << C2"
- if (add1_op == Op_And(bt)) {
- Node* add2 = add1->in(1);
+ if (add1_op == Op_AndI) {
+ Node *add2 = add1->in(1);
int add2_op = add2->Opcode();
- if (add2_op == Op_RShift(bt) || add2_op == Op_URShift(bt)) {
+ if (add2_op == Op_RShiftI || add2_op == Op_URShiftI) {
// Special case C1 == C2, which just masks off low bits
if (add2->in(2) == in(2)) {
// Convert to "(x & (Y << C2))"
- Node* y_sh = phase->transform(LShiftNode::make(add1->in(2), phase->intcon(con), bt));
- return MulNode::make_and(add2->in(1), y_sh, bt);
+ Node* y_sh = phase->transform(new LShiftINode(add1->in(2), phase->intcon(con)));
+ return new AndINode(add2->in(1), y_sh);
}
int add2Con = 0;
const_shift_count(phase, add2, &add2Con);
- if (add2Con > 0 && (uint)add2Con < bits_per_java_integer(bt)) {
+ if (add2Con > 0 && add2Con < BitsPerJavaInteger) {
if (phase->is_IterGVN()) {
// Convert to "((x >> C1) << C2) & (Y << C2)"
// Make "(x >> C1) << C2", which will get folded away by the rule above
- Node* x_sh = phase->transform(LShiftNode::make(add2, phase->intcon(con), bt));
+ Node* x_sh = phase->transform(new LShiftINode(add2, phase->intcon(con)));
// Make "Y << C2", which will simplify when Y is a constant
- Node* y_sh = phase->transform(LShiftNode::make(add1->in(2), phase->intcon(con), bt));
+ Node* y_sh = phase->transform(new LShiftINode(add1->in(2), phase->intcon(con)));
- return MulNode::make_and(x_sh, y_sh, bt);
+ return new AndINode(x_sh, y_sh);
} else {
phase->record_for_igvn(this);
}
@@ -1162,16 +1167,14 @@ Node* LShiftNode::IdealIL(PhaseGVN* phase, bool can_reshape, BasicType bt) {
// Check for ((x & ((1<<(32-c0))-1)) << c0) which ANDs off high bits
// before shifting them away.
- const jlong bits_mask = max_unsigned_integer(bt) >> con;
- assert(bt != T_INT || bits_mask == right_n_bits(bits_per_java_integer(bt)-con), "inconsistent");
- if (add1_op == Op_And(bt) &&
- phase->type(add1->in(2)) == TypeInteger::make(bits_mask, bt)) {
- return LShiftNode::make(add1->in(1), in(2), bt);
- }
+ const jint bits_mask = right_n_bits(BitsPerJavaInteger-con);
+ if( add1_op == Op_AndI &&
+ phase->type(add1->in(2)) == TypeInt::make( bits_mask ) )
+ return new LShiftINode( add1->in(1), in(2) );
- // Collapse nested left-shifts with constant rhs:
+ // Performs:
// (X << con1) << con2 ==> X << (con1 + con2)
- Node* doubleShift = collapse_nested_shift_left(phase, this, con, bt);
+ Node* doubleShift = collapse_nested_shift_left(phase, this, con, T_INT);
if (doubleShift != nullptr) {
return doubleShift;
}
@@ -1179,103 +1182,237 @@ Node* LShiftNode::IdealIL(PhaseGVN* phase, bool can_reshape, BasicType bt) {
return nullptr;
}
-//------------------------------Ideal------------------------------------------
-Node* LShiftINode::Ideal(PhaseGVN *phase, bool can_reshape) {
- return IdealIL(phase, can_reshape, T_INT);
-}
-
-const Type* LShiftNode::ValueIL(PhaseGVN* phase, BasicType bt) const {
- const Type* t1 = phase->type(in(1));
- const Type* t2 = phase->type(in(2));
+//------------------------------Value------------------------------------------
+// A LShiftINode shifts its input2 left by input1 amount.
+const Type* LShiftINode::Value(PhaseGVN* phase) const {
+ const Type *t1 = phase->type( in(1) );
+ const Type *t2 = phase->type( in(2) );
// Either input is TOP ==> the result is TOP
- if (t1 == Type::TOP) {
- return Type::TOP;
- }
- if (t2 == Type::TOP) {
- return Type::TOP;
- }
+ if( t1 == Type::TOP ) return Type::TOP;
+ if( t2 == Type::TOP ) return Type::TOP;
// Left input is ZERO ==> the result is ZERO.
- if (t1 == TypeInteger::zero(bt)) {
- return TypeInteger::zero(bt);
- }
+ if( t1 == TypeInt::ZERO ) return TypeInt::ZERO;
// Shift by zero does nothing
- if (t2 == TypeInt::ZERO) {
- return t1;
- }
+ if( t2 == TypeInt::ZERO ) return t1;
// Either input is BOTTOM ==> the result is BOTTOM
- if ((t1 == TypeInteger::bottom(bt)) || (t2 == TypeInt::INT) ||
- (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM)) {
- return TypeInteger::bottom(bt);
- }
+ if( (t1 == TypeInt::INT) || (t2 == TypeInt::INT) ||
+ (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM) )
+ return TypeInt::INT;
- const TypeInteger* r1 = t1->is_integer(bt); // Handy access
- const TypeInt* r2 = t2->is_int(); // Handy access
+ const TypeInt *r1 = t1->is_int(); // Handy access
+ const TypeInt *r2 = t2->is_int(); // Handy access
- if (!r2->is_con()) {
- return TypeInteger::bottom(bt);
- }
+ if (!r2->is_con())
+ return TypeInt::INT;
uint shift = r2->get_con();
- shift &= bits_per_java_integer(bt) - 1; // semantics of Java shifts
- // Shift by a multiple of 32/64 does nothing:
- if (shift == 0) {
- return t1;
- }
+ shift &= BitsPerJavaInteger-1; // semantics of Java shifts
+ // Shift by a multiple of 32 does nothing:
+ if (shift == 0) return t1;
// If the shift is a constant, shift the bounds of the type,
// unless this could lead to an overflow.
if (!r1->is_con()) {
- jlong lo = r1->lo_as_long(), hi = r1->hi_as_long();
-#ifdef ASSERT
- if (bt == T_INT) {
- jint lo_int = r1->is_int()->_lo, hi_int = r1->is_int()->_hi;
- assert((java_shift_right(java_shift_left(lo, shift, bt), shift, bt) == lo) == (((lo_int << shift) >> shift) == lo_int), "inconsistent");
- assert((java_shift_right(java_shift_left(hi, shift, bt), shift, bt) == hi) == (((hi_int << shift) >> shift) == hi_int), "inconsistent");
- }
-#endif
- if (java_shift_right(java_shift_left(lo, shift, bt), shift, bt) == lo &&
- java_shift_right(java_shift_left(hi, shift, bt), shift, bt) == hi) {
+ jint lo = r1->_lo, hi = r1->_hi;
+ if (((lo << shift) >> shift) == lo &&
+ ((hi << shift) >> shift) == hi) {
// No overflow. The range shifts up cleanly.
- return TypeInteger::make(java_shift_left(lo, shift, bt),
- java_shift_left(hi, shift, bt),
- MAX2(r1->_widen, r2->_widen), bt);
+ return TypeInt::make((jint)lo << (jint)shift,
+ (jint)hi << (jint)shift,
+ MAX2(r1->_widen,r2->_widen));
}
- return TypeInteger::bottom(bt);
+ return TypeInt::INT;
}
- return TypeInteger::make(java_shift_left(r1->get_con_as_long(bt), shift, bt), bt);
-}
-
-//------------------------------Value------------------------------------------
-const Type* LShiftINode::Value(PhaseGVN* phase) const {
- return ValueIL(phase, T_INT);
+ return TypeInt::make( (jint)r1->get_con() << (jint)shift );
}
-Node* LShiftNode::IdentityIL(PhaseGVN* phase, BasicType bt) {
+//=============================================================================
+//------------------------------Identity---------------------------------------
+Node* LShiftLNode::Identity(PhaseGVN* phase) {
int count = 0;
- if (const_shift_count(phase, this, &count) && (count & (bits_per_java_integer(bt) - 1)) == 0) {
- // Shift by a multiple of 32/64 does nothing
+ if (const_shift_count(phase, this, &count) && (count & (BitsPerJavaLong - 1)) == 0) {
+ // Shift by a multiple of 64 does nothing
return in(1);
}
return this;
}
-//=============================================================================
-//------------------------------Identity---------------------------------------
-Node* LShiftLNode::Identity(PhaseGVN* phase) {
- return IdentityIL(phase, T_LONG);
-}
-
//------------------------------Ideal------------------------------------------
-Node* LShiftLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
- return IdealIL(phase, can_reshape, T_LONG);
+// If the right input is a constant, and the left input is an add of a
+// constant, flatten the tree: (X+con1)<<con2 ==> X<<con2 + con1<<con2
+Node *LShiftLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+ int con = mask_and_replace_shift_amount(phase, this, BitsPerJavaLong);
+ if (con == 0) {
+ return nullptr;
+ }
+
+ // Left input is an add?
+ Node *add1 = in(1);
+ int add1_op = add1->Opcode();
+ if( add1_op == Op_AddL ) { // Left input is an add?
+ // Avoid dead data cycles from dead loops
+ assert( add1 != add1->in(1), "dead loop in LShiftLNode::Ideal" );
+
+ // Left input is an add of the same number?
+ if (con != (BitsPerJavaLong - 1) && add1->in(1) == add1->in(2)) {
+ // Convert "(x + x) << c0" into "x << (c0 + 1)"
+ // Can only be applied if c0 != 63 because:
+ // (x + x) << 63 = 2x << 63, while
+ // (x + x) << 63 --transform--> x << 64 = x << 0 = x (!= 2x << 63, for example for x = 1)
+ // According to the Java spec, chapter 15.19, we only consider the six lowest-order bits of the right-hand operand
+      // (i.e. "right-hand operand" & 0b111111). Therefore, x << 64 is the same as x << 0 (64 = 0b1000000 & 0b0111111 = 0).
+ return new LShiftLNode(add1->in(1), phase->intcon(con + 1));
+ }
+
+ // Left input is an add of a constant?
+ const TypeLong *t12 = phase->type(add1->in(2))->isa_long();
+ if( t12 && t12->is_con() ){ // Left input is an add of a con?
+ // Compute X << con0
+ Node *lsh = phase->transform( new LShiftLNode( add1->in(1), in(2) ) );
+      // Compute X<<con0 + (con1<<con0)
+      return new AddLNode( lsh, phase->longcon(t12->get_con() << con));
+ }
+ }
+
+ // Check for "(x >> C1) << C2"
+ if (add1_op == Op_RShiftL || add1_op == Op_URShiftL) {
+ int add1Con = 0;
+ const_shift_count(phase, add1, &add1Con);
+
+ // Special case C1 == C2, which just masks off low bits
+ if (add1Con > 0 && con == add1Con) {
+ // Convert to "(x & -(1 << C2))"
+ return new AndLNode(add1->in(1), phase->longcon(java_negate(jlong(CONST64(1) << con))));
+ } else {
+ // Wait until the right shift has been sharpened to the correct count
+ if (add1Con > 0 && add1Con < BitsPerJavaLong) {
+ // As loop parsing can produce LShiftI nodes, we should wait until the graph is fully formed
+ // to apply optimizations, otherwise we can inadvertently stop vectorization opportunities.
+ if (phase->is_IterGVN()) {
+ if (con > add1Con) {
+ // Creates "(x << (C2 - C1)) & -(1 << C2)"
+ Node* lshift = phase->transform(new LShiftLNode(add1->in(1), phase->intcon(con - add1Con)));
+ return new AndLNode(lshift, phase->longcon(java_negate(jlong(CONST64(1) << con))));
+ } else {
+ assert(con < add1Con, "must be (%d < %d)", con, add1Con);
+ // Creates "(x >> (C1 - C2)) & -(1 << C2)"
+
+ // Handle logical and arithmetic shifts
+ Node* rshift;
+ if (add1_op == Op_RShiftL) {
+ rshift = phase->transform(new RShiftLNode(add1->in(1), phase->intcon(add1Con - con)));
+ } else {
+ rshift = phase->transform(new URShiftLNode(add1->in(1), phase->intcon(add1Con - con)));
+ }
+
+ return new AndLNode(rshift, phase->longcon(java_negate(jlong(CONST64(1) << con))));
+ }
+ } else {
+ phase->record_for_igvn(this);
+ }
+ }
+ }
+ }
+
+ // Check for "((x >> C1) & Y) << C2"
+ if (add1_op == Op_AndL) {
+ Node* add2 = add1->in(1);
+ int add2_op = add2->Opcode();
+ if (add2_op == Op_RShiftL || add2_op == Op_URShiftL) {
+ // Special case C1 == C2, which just masks off low bits
+ if (add2->in(2) == in(2)) {
+ // Convert to "(x & (Y << C2))"
+ Node* y_sh = phase->transform(new LShiftLNode(add1->in(2), phase->intcon(con)));
+ return new AndLNode(add2->in(1), y_sh);
+ }
+
+ int add2Con = 0;
+ const_shift_count(phase, add2, &add2Con);
+ if (add2Con > 0 && add2Con < BitsPerJavaLong) {
+ if (phase->is_IterGVN()) {
+ // Convert to "((x >> C1) << C2) & (Y << C2)"
+
+ // Make "(x >> C1) << C2", which will get folded away by the rule above
+ Node* x_sh = phase->transform(new LShiftLNode(add2, phase->intcon(con)));
+ // Make "Y << C2", which will simplify when Y is a constant
+ Node* y_sh = phase->transform(new LShiftLNode(add1->in(2), phase->intcon(con)));
+
+ return new AndLNode(x_sh, y_sh);
+ } else {
+ phase->record_for_igvn(this);
+ }
+ }
+ }
+ }
+
+ // Check for ((x & ((CONST64(1)<<(64-c0))-1)) << c0) which ANDs off high bits
+ // before shifting them away.
+ const jlong bits_mask = jlong(max_julong >> con);
+ if( add1_op == Op_AndL &&
+ phase->type(add1->in(2)) == TypeLong::make( bits_mask ) )
+ return new LShiftLNode( add1->in(1), in(2) );
+
+ // Performs:
+ // (X << con1) << con2 ==> X << (con1 + con2)
+ Node* doubleShift = collapse_nested_shift_left(phase, this, con, T_LONG);
+ if (doubleShift != nullptr) {
+ return doubleShift;
+ }
+
+ return nullptr;
}
//------------------------------Value------------------------------------------
+// A LShiftLNode shifts its input2 left by input1 amount.
const Type* LShiftLNode::Value(PhaseGVN* phase) const {
- return ValueIL(phase, T_LONG);
+ const Type *t1 = phase->type( in(1) );
+ const Type *t2 = phase->type( in(2) );
+ // Either input is TOP ==> the result is TOP
+ if( t1 == Type::TOP ) return Type::TOP;
+ if( t2 == Type::TOP ) return Type::TOP;
+
+ // Left input is ZERO ==> the result is ZERO.
+ if( t1 == TypeLong::ZERO ) return TypeLong::ZERO;
+ // Shift by zero does nothing
+ if( t2 == TypeInt::ZERO ) return t1;
+
+ // Either input is BOTTOM ==> the result is BOTTOM
+ if( (t1 == TypeLong::LONG) || (t2 == TypeInt::INT) ||
+ (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM) )
+ return TypeLong::LONG;
+
+ const TypeLong *r1 = t1->is_long(); // Handy access
+ const TypeInt *r2 = t2->is_int(); // Handy access
+
+ if (!r2->is_con())
+ return TypeLong::LONG;
+
+ uint shift = r2->get_con();
+ shift &= BitsPerJavaLong - 1; // semantics of Java shifts
+ // Shift by a multiple of 64 does nothing:
+ if (shift == 0) return t1;
+
+ // If the shift is a constant, shift the bounds of the type,
+ // unless this could lead to an overflow.
+ if (!r1->is_con()) {
+ jlong lo = r1->_lo, hi = r1->_hi;
+ if (((lo << shift) >> shift) == lo &&
+ ((hi << shift) >> shift) == hi) {
+ // No overflow. The range shifts up cleanly.
+ return TypeLong::make((jlong)lo << (jint)shift,
+ (jlong)hi << (jint)shift,
+ MAX2(r1->_widen,r2->_widen));
+ }
+ return TypeLong::LONG;
+ }
+
+ return TypeLong::make( (jlong)r1->get_con() << (jint)shift );
}
RShiftNode* RShiftNode::make(Node* in1, Node* in2, BasicType bt) {
@@ -1512,18 +1649,6 @@ const Type* RShiftLNode::Value(PhaseGVN* phase) const {
return ValueIL(phase, T_LONG);
}
-URShiftNode* URShiftNode::make(Node* in1, Node* in2, BasicType bt) {
- switch (bt) {
- case T_INT:
- return new URShiftINode(in1, in2);
- case T_LONG:
- return new URShiftLNode(in1, in2);
- default:
- fatal("Not implemented for %s", type2name(bt));
- }
- return nullptr;
-}
-
//=============================================================================
//------------------------------Identity---------------------------------------
Node* URShiftINode::Identity(PhaseGVN* phase) {
@@ -1559,7 +1684,7 @@ Node* URShiftINode::Identity(PhaseGVN* phase) {
}
//------------------------------Ideal------------------------------------------
-Node* URShiftINode::Ideal(PhaseGVN* phase, bool can_reshape) {
+Node *URShiftINode::Ideal(PhaseGVN *phase, bool can_reshape) {
int con = mask_and_replace_shift_amount(phase, this, BitsPerJavaInteger);
if (con == 0) {
return nullptr;
@@ -1723,7 +1848,7 @@ Node* URShiftLNode::Identity(PhaseGVN* phase) {
}
//------------------------------Ideal------------------------------------------
-Node* URShiftLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
+Node *URShiftLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
int con = mask_and_replace_shift_amount(phase, this, BitsPerJavaLong);
if (con == 0) {
return nullptr;
diff --git a/src/hotspot/share/opto/mulnode.hpp b/src/hotspot/share/opto/mulnode.hpp
index 1e19e8ec5cd70..b736c17b30034 100644
--- a/src/hotspot/share/opto/mulnode.hpp
+++ b/src/hotspot/share/opto/mulnode.hpp
@@ -260,14 +260,10 @@ inline Node* make_and(Node* a, Node* b) {
class LShiftNode : public Node {
public:
- LShiftNode(Node* in1, Node* in2) : Node(nullptr,in1,in2) {
+ LShiftNode(Node *in1, Node *in2) : Node(nullptr,in1,in2) {
init_class_id(Class_LShift);
}
- const Type* ValueIL(PhaseGVN* phase, BasicType bt) const;
- Node* IdentityIL(PhaseGVN* phase, BasicType bt);
- Node* IdealIL(PhaseGVN* phase, bool can_reshape, BasicType bt);
-
static LShiftNode* make(Node* in1, Node* in2, BasicType bt);
};
@@ -275,12 +271,12 @@ class LShiftNode : public Node {
// Logical shift left
class LShiftINode : public LShiftNode {
public:
- LShiftINode(Node* in1, Node* in2) : LShiftNode(in1,in2) {}
+ LShiftINode(Node *in1, Node *in2) : LShiftNode(in1,in2) {}
virtual int Opcode() const;
virtual Node* Identity(PhaseGVN* phase);
- virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
+ virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual const Type* Value(PhaseGVN* phase) const;
- const Type* bottom_type() const { return TypeInt::INT; }
+ const Type *bottom_type() const { return TypeInt::INT; }
virtual uint ideal_reg() const { return Op_RegI; }
};
@@ -291,9 +287,9 @@ class LShiftLNode : public LShiftNode {
LShiftLNode(Node *in1, Node *in2) : LShiftNode(in1,in2) {}
virtual int Opcode() const;
virtual Node* Identity(PhaseGVN* phase);
- virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
+ virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual const Type* Value(PhaseGVN* phase) const;
- const Type* bottom_type() const { return TypeLong::LONG; }
+ const Type *bottom_type() const { return TypeLong::LONG; }
virtual uint ideal_reg() const { return Op_RegL; }
};
@@ -362,17 +358,11 @@ class RShiftLNode : public RShiftNode {
virtual uint ideal_reg() const { return Op_RegL; }
};
-class URShiftNode : public Node {
-public:
- URShiftNode(Node* in1, Node* in2) : Node(nullptr, in1, in2) {}
- static URShiftNode* make(Node* in1, Node* in2, BasicType bt);
-};
-
//------------------------------URShiftBNode-----------------------------------
// Logical shift right
-class URShiftBNode : public URShiftNode {
+class URShiftBNode : public Node {
public:
- URShiftBNode(Node* in1, Node* in2) : URShiftNode(in1,in2) {
+ URShiftBNode( Node *in1, Node *in2 ) : Node(nullptr,in1,in2) {
ShouldNotReachHere(); // only vector variant is used
}
virtual int Opcode() const;
@@ -380,9 +370,9 @@ class URShiftBNode : public URShiftNode {
//------------------------------URShiftSNode-----------------------------------
// Logical shift right
-class URShiftSNode : public URShiftNode {
+class URShiftSNode : public Node {
public:
- URShiftSNode(Node* in1, Node* in2) : URShiftNode(in1,in2) {
+ URShiftSNode( Node *in1, Node *in2 ) : Node(nullptr,in1,in2) {
ShouldNotReachHere(); // only vector variant is used
}
virtual int Opcode() const;
@@ -390,27 +380,27 @@ class URShiftSNode : public URShiftNode {
//------------------------------URShiftINode-----------------------------------
// Logical shift right
-class URShiftINode : public URShiftNode {
+class URShiftINode : public Node {
public:
- URShiftINode(Node* in1, Node* in2) : URShiftNode(in1,in2) {}
+ URShiftINode( Node *in1, Node *in2 ) : Node(nullptr,in1,in2) {}
virtual int Opcode() const;
virtual Node* Identity(PhaseGVN* phase);
- virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
+ virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual const Type* Value(PhaseGVN* phase) const;
- const Type* bottom_type() const { return TypeInt::INT; }
+ const Type *bottom_type() const { return TypeInt::INT; }
virtual uint ideal_reg() const { return Op_RegI; }
};
//------------------------------URShiftLNode-----------------------------------
// Logical shift right
-class URShiftLNode : public URShiftNode {
+class URShiftLNode : public Node {
public:
- URShiftLNode(Node* in1, Node* in2) : URShiftNode(in1,in2) {}
+ URShiftLNode( Node *in1, Node *in2 ) : Node(nullptr,in1,in2) {}
virtual int Opcode() const;
virtual Node* Identity(PhaseGVN* phase);
- virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
+ virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual const Type* Value(PhaseGVN* phase) const;
- const Type* bottom_type() const { return TypeLong::LONG; }
+ const Type *bottom_type() const { return TypeLong::LONG; }
virtual uint ideal_reg() const { return Op_RegL; }
};
diff --git a/src/hotspot/share/opto/multnode.cpp b/src/hotspot/share/opto/multnode.cpp
index 4d8d1f4246c2a..f429d5daac076 100644
--- a/src/hotspot/share/opto/multnode.cpp
+++ b/src/hotspot/share/opto/multnode.cpp
@@ -36,7 +36,7 @@
//=============================================================================
//------------------------------MultiNode--------------------------------------
const RegMask &MultiNode::out_RegMask() const {
- return RegMask::EMPTY;
+ return RegMask::Empty;
}
Node *MultiNode::match( const ProjNode *proj, const Matcher *m ) { return proj->clone(); }
@@ -185,7 +185,7 @@ const Type* ProjNode::Value(PhaseGVN* phase) const {
//------------------------------out_RegMask------------------------------------
// Pass the buck uphill
const RegMask &ProjNode::out_RegMask() const {
- return RegMask::EMPTY;
+ return RegMask::Empty;
}
//------------------------------ideal_reg--------------------------------------
diff --git a/src/hotspot/share/opto/node.cpp b/src/hotspot/share/opto/node.cpp
index 497d0d1aeb0bd..cca98bd8abaf2 100644
--- a/src/hotspot/share/opto/node.cpp
+++ b/src/hotspot/share/opto/node.cpp
@@ -2800,12 +2800,12 @@ uint Node::match_edge(uint idx) const {
// Register classes are defined for specific machines
const RegMask &Node::out_RegMask() const {
ShouldNotCallThis();
- return RegMask::EMPTY;
+ return RegMask::Empty;
}
const RegMask &Node::in_RegMask(uint) const {
ShouldNotCallThis();
- return RegMask::EMPTY;
+ return RegMask::Empty;
}
void Node_Array::grow(uint i) {
diff --git a/src/hotspot/share/opto/node.hpp b/src/hotspot/share/opto/node.hpp
index 9ce9e705eec27..c2b3c4fb0ad25 100644
--- a/src/hotspot/share/opto/node.hpp
+++ b/src/hotspot/share/opto/node.hpp
@@ -2086,7 +2086,6 @@ Op_IL(Sub)
Op_IL(Mul)
Op_IL(URShift)
Op_IL(LShift)
-Op_IL(RShift)
Op_IL(Xor)
Op_IL(Cmp)
Op_IL(Div)
diff --git a/src/hotspot/share/opto/postaloc.cpp b/src/hotspot/share/opto/postaloc.cpp
index c961340e71acf..56d3ba6bbe002 100644
--- a/src/hotspot/share/opto/postaloc.cpp
+++ b/src/hotspot/share/opto/postaloc.cpp
@@ -173,7 +173,7 @@ int PhaseChaitin::use_prior_register( Node *n, uint idx, Node *def, Block *curre
const LRG &def_lrg = lrgs(_lrg_map.live_range_id(def));
OptoReg::Name def_reg = def_lrg.reg();
const RegMask &use_mask = n->in_RegMask(idx);
- bool can_use = use_mask.member(def_reg);
+ bool can_use = use_mask.Member(def_reg);
if (!RegMask::is_vector(def->ideal_reg())) {
// Check for a copy to or from a misaligned pair.
// It is workaround for a sparc with misaligned pairs.
@@ -678,7 +678,7 @@ void PhaseChaitin::post_allocate_copy_removal() {
int n_regs = RegMask::num_registers(def_ideal_reg, lrgs(_lrg_map.live_range_id(def)));
for (int l = 1; l < n_regs; l++) {
OptoReg::Name ureg_lo = OptoReg::add(ureg,-l);
- bool is_adjacent = lrgs(useidx).mask().member(ureg_lo);
+ bool is_adjacent = lrgs(useidx).mask().Member(ureg_lo);
assert(is_adjacent || OptoReg::is_reg(ureg_lo),
"only registers can be non-adjacent");
if (value[ureg_lo] == nullptr && is_adjacent) { // Nearly always adjacent
@@ -762,13 +762,13 @@ void PhaseChaitin::post_allocate_copy_removal() {
// If the value occupies a register pair, record same info
// in both registers.
OptoReg::Name nreg_lo = OptoReg::add(nreg,-1);
- bool is_adjacent = lrgs(lidx).mask().member(nreg_lo);
+ bool is_adjacent = lrgs(lidx).mask().Member(nreg_lo);
assert(is_adjacent || OptoReg::is_reg(nreg_lo), "only registers can be non-adjacent");
if (!is_adjacent) { // Nearly always adjacent
// Sparc occasionally has non-adjacent pairs.
// Find the actual other value
- RegMask tmp(lrgs(lidx).mask());
- tmp.remove(nreg);
+ RegMask tmp = lrgs(lidx).mask();
+ tmp.Remove(nreg);
nreg_lo = tmp.find_first_elem();
}
if (value[nreg] != val || value[nreg_lo] != val) {
diff --git a/src/hotspot/share/opto/reg_split.cpp b/src/hotspot/share/opto/reg_split.cpp
index 96f87fe6947d6..327c30b152e72 100644
--- a/src/hotspot/share/opto/reg_split.cpp
+++ b/src/hotspot/share/opto/reg_split.cpp
@@ -476,7 +476,7 @@ bool PhaseChaitin::prompt_use( Block *b, uint lidx ) {
return true; // Found 1st use!
}
}
- if (!n->out_RegMask().is_empty()) {
+ if (!n->out_RegMask().is_Empty()) {
return false;
}
}
@@ -1038,7 +1038,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
// bound use if we can't rematerialize the def, or if we need the
// split to form a misaligned pair.
if (!umask.is_infinite_stack() &&
- (int)umask.size() <= lrgs(useidx).num_regs() &&
+ (int)umask.Size() <= lrgs(useidx).num_regs() &&
(!def->rematerialize() ||
(!is_vect && umask.is_misaligned_pair()))) {
// These need a Split regardless of overlap or pressure
@@ -1128,7 +1128,7 @@ uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
if( n->is_SpillCopy() ) {
ResourceMark rm(C->regmask_arena());
RegMask tmp_rm(umask, C->regmask_arena());
- tmp_rm.subtract(Matcher::STACK_ONLY_mask);
+ tmp_rm.SUBTRACT(Matcher::STACK_ONLY_mask);
if( dmask.overlap(tmp_rm) ) {
if( def != n->in(inpidx) ) {
n->set_req(inpidx, def);
diff --git a/src/hotspot/share/opto/regmask.cpp b/src/hotspot/share/opto/regmask.cpp
index dcbc4dbac8e79..57cf13a8b3184 100644
--- a/src/hotspot/share/opto/regmask.cpp
+++ b/src/hotspot/share/opto/regmask.cpp
@@ -47,9 +47,9 @@ void OptoReg::dump(int r, outputStream *st) {
//=============================================================================
-const RegMask RegMask::EMPTY;
+const RegMask RegMask::Empty;
-const RegMask RegMask::ALL(
+const RegMask RegMask::All(
# define BODY(I) -1,
FORALL_BODY
# undef BODY
@@ -126,7 +126,7 @@ void RegMask::clear_to_pairs() {
}
bool RegMask::is_misaligned_pair() const {
- return size() == 2 && !is_aligned_pairs();
+ return Size() == 2 && !is_aligned_pairs();
}
bool RegMask::is_aligned_pairs() const {
@@ -227,7 +227,7 @@ bool RegMask::is_bound(uint ireg) const {
// for current regmask, where reg is the highest number.
bool RegMask::is_valid_reg(OptoReg::Name reg, const int size) const {
for (int i = 0; i < size; i++) {
- if (!member(reg - i)) {
+ if (!Member(reg - i)) {
return false;
}
}
diff --git a/src/hotspot/share/opto/regmask.hpp b/src/hotspot/share/opto/regmask.hpp
index 453fbb45d33b7..67e160940ccf2 100644
--- a/src/hotspot/share/opto/regmask.hpp
+++ b/src/hotspot/share/opto/regmask.hpp
@@ -299,6 +299,39 @@ class RegMask {
}
}
+ // Make us a copy of src
+ void copy(const RegMask& src) {
+ assert(_offset == src._offset, "offset mismatch");
+ _hwm = src._hwm;
+ _lwm = src._lwm;
+
+ // Copy base mask
+ memcpy(_rm_word, src._rm_word, sizeof(uintptr_t) * RM_SIZE_IN_WORDS);
+ _infinite_stack = src._infinite_stack;
+
+ // Copy extension
+ if (src._rm_word_ext != nullptr) {
+ assert(src._rm_size_in_words > RM_SIZE_IN_WORDS, "sanity");
+ assert(_original_ext_address == &_rm_word_ext, "clone sanity check");
+ grow(src._rm_size_in_words, false);
+ memcpy(_rm_word_ext, src._rm_word_ext,
+ sizeof(uintptr_t) * (src._rm_size_in_words - RM_SIZE_IN_WORDS));
+ }
+
+ // If the source is smaller than us, we need to set the gap according to
+ // the sources infinite_stack flag.
+ if (src._rm_size_in_words < _rm_size_in_words) {
+ int value = 0;
+ if (src.is_infinite_stack()) {
+ value = 0xFF;
+ _hwm = rm_word_max_index();
+ }
+ set_range(src._rm_size_in_words, value, _rm_size_in_words - src._rm_size_in_words);
+ }
+
+ assert(valid_watermarks(), "post-condition");
+ }
+
// Make the watermarks as tight as possible.
void trim_watermarks() {
if (_hwm < _lwm) {
@@ -416,62 +449,31 @@ class RegMask {
RegMask(OptoReg::Name reg,
Arena* arena DEBUG_ONLY(COMMA bool read_only = false))
: RegMask(arena DEBUG_ONLY(COMMA read_only)) {
- insert(reg);
+ Insert(reg);
}
explicit RegMask(OptoReg::Name reg) : RegMask(reg, nullptr) {}
- // Make us represent the same set of registers as src.
- void assignFrom(const RegMask& src) {
- assert(_offset == src._offset, "offset mismatch");
- _hwm = src._hwm;
- _lwm = src._lwm;
-
- // Copy base mask
- memcpy(_rm_word, src._rm_word, sizeof(uintptr_t) * RM_SIZE_IN_WORDS);
- _infinite_stack = src._infinite_stack;
-
- // Copy extension
- if (src._rm_word_ext != nullptr) {
- assert(src._rm_size_in_words > RM_SIZE_IN_WORDS, "sanity");
- assert(_original_ext_address == &_rm_word_ext, "clone sanity check");
- grow(src._rm_size_in_words, false);
- memcpy(_rm_word_ext, src._rm_word_ext,
- sizeof(uintptr_t) * (src._rm_size_in_words - RM_SIZE_IN_WORDS));
- }
+ // ----------------------------------------
+ // Deep copying constructors and assignment
+ // ----------------------------------------
- // If the source is smaller than us, we need to set the gap according to
- // the sources infinite_stack flag.
- if (src._rm_size_in_words < _rm_size_in_words) {
- int value = 0;
- if (src.is_infinite_stack()) {
- value = 0xFF;
- _hwm = rm_word_max_index();
- }
- set_range(src._rm_size_in_words, value, _rm_size_in_words - src._rm_size_in_words);
- }
-
- assert(valid_watermarks(), "post-condition");
- }
-
- // Construct from other register mask (deep copy) and register an arena
- // for potential register mask extension. Passing nullptr as arena disables
- // extension.
RegMask(const RegMask& rm, Arena* arena)
: _arena(arena), _rm_size_in_words(RM_SIZE_IN_WORDS), _offset(rm._offset) {
- assignFrom(rm);
+ copy(rm);
}
- // Copy constructor (deep copy). By default does not allow extension.
- explicit RegMask(const RegMask& rm) : RegMask(rm, nullptr) {}
+ RegMask(const RegMask& rm) : RegMask(rm, nullptr) {}
- // Disallow copy assignment (use assignFrom instead)
- RegMask& operator=(const RegMask&) = delete;
+ RegMask& operator=(const RegMask& rm) {
+ copy(rm);
+ return *this;
+ }
// ----------------
// End deep copying
// ----------------
- bool member(OptoReg::Name reg) const {
+ bool Member(OptoReg::Name reg) const {
reg = reg - offset_bits();
if (reg < 0) {
return false;
@@ -484,7 +486,7 @@ class RegMask {
}
// Empty mask check. Ignores registers included through the infinite_stack flag.
- bool is_empty() const {
+ bool is_Empty() const {
assert(valid_watermarks(), "sanity");
for (unsigned i = _lwm; i <= _hwm; i++) {
if (rm_word(i) != 0) {
@@ -640,7 +642,7 @@ class RegMask {
bool is_UP() const;
// Clear a register mask. Does not clear any offset.
- void clear() {
+ void Clear() {
_lwm = rm_word_max_index();
_hwm = 0;
set_range(0, 0, _rm_size_in_words);
@@ -649,13 +651,13 @@ class RegMask {
}
// Fill a register mask with 1's
- void set_all() {
+ void Set_All() {
assert(_offset == 0, "offset non-zero");
- set_all_from_offset();
+ Set_All_From_Offset();
}
// Fill a register mask with 1's from the current offset.
- void set_all_from_offset() {
+ void Set_All_From_Offset() {
_lwm = 0;
_hwm = rm_word_max_index();
set_range(0, 0xFF, _rm_size_in_words);
@@ -664,7 +666,7 @@ class RegMask {
}
// Fill a register mask with 1's starting from the given register.
- void set_all_from(OptoReg::Name reg) {
+ void Set_All_From(OptoReg::Name reg) {
reg = reg - offset_bits();
assert(reg != OptoReg::Bad, "sanity");
assert(reg != OptoReg::Special, "sanity");
@@ -687,7 +689,7 @@ class RegMask {
}
// Insert register into mask
- void insert(OptoReg::Name reg) {
+ void Insert(OptoReg::Name reg) {
reg = reg - offset_bits();
assert(reg != OptoReg::Bad, "sanity");
assert(reg != OptoReg::Special, "sanity");
@@ -704,7 +706,7 @@ class RegMask {
}
// Remove register from mask
- void remove(OptoReg::Name reg) {
+ void Remove(OptoReg::Name reg) {
reg = reg - offset_bits();
assert(reg >= 0, "register outside mask");
assert(reg < (int)rm_size_in_bits(), "register outside mask");
@@ -712,8 +714,8 @@ class RegMask {
rm_word(r >> LogBitsPerWord) &= ~(uintptr_t(1) << (r & WORD_BIT_MASK));
}
- // Or 'rm' into 'this'
- void or_with(const RegMask& rm) {
+ // OR 'rm' into 'this'
+ void OR(const RegMask &rm) {
assert(_offset == rm._offset, "offset mismatch");
assert(valid_watermarks() && rm.valid_watermarks(), "sanity");
grow(rm._rm_size_in_words);
@@ -734,8 +736,8 @@ class RegMask {
assert(valid_watermarks(), "sanity");
}
- // And 'rm' into 'this'
- void and_with(const RegMask& rm) {
+ // AND 'rm' into 'this'
+ void AND(const RegMask &rm) {
assert(_offset == rm._offset, "offset mismatch");
assert(valid_watermarks() && rm.valid_watermarks(), "sanity");
grow(rm._rm_size_in_words);
@@ -766,7 +768,7 @@ class RegMask {
}
// Subtract 'rm' from 'this'.
- void subtract(const RegMask& rm) {
+ void SUBTRACT(const RegMask &rm) {
assert(_offset == rm._offset, "offset mismatch");
assert(valid_watermarks() && rm.valid_watermarks(), "sanity");
grow(rm._rm_size_in_words);
@@ -789,7 +791,7 @@ class RegMask {
// Subtract 'rm' from 'this', but ignore everything in 'rm' that does not
// overlap with us and do not modify our infinite_stack flag. Supports masks of
// differing offsets. Does not support 'rm' with the infinite_stack flag set.
- void subtract_inner(const RegMask& rm) {
+ void SUBTRACT_inner(const RegMask& rm) {
assert(valid_watermarks() && rm.valid_watermarks(), "sanity");
assert(!rm.is_infinite_stack(), "not supported");
// Various translations due to differing offsets
@@ -819,12 +821,12 @@ class RegMask {
return false;
}
_offset += _rm_size_in_words;
- set_all_from_offset();
+ Set_All_From_Offset();
return true;
}
// Compute size of register mask: number of bits
- uint size() const {
+ uint Size() const {
uint sum = 0;
assert(valid_watermarks(), "sanity");
for (unsigned i = _lwm; i <= _hwm; i++) {
@@ -893,8 +895,8 @@ class RegMask {
void dump_hex(outputStream* st = tty) const; // Print a mask (raw hex)
#endif
- static const RegMask EMPTY; // Common empty mask
- static const RegMask ALL; // Common all mask
+ static const RegMask Empty; // Common empty mask
+ static const RegMask All; // Common all mask
bool can_represent(OptoReg::Name reg, unsigned int size = 1) const {
reg = reg - offset_bits();
diff --git a/src/hotspot/share/opto/rootnode.cpp b/src/hotspot/share/opto/rootnode.cpp
index 60167c5436a13..4ced13abdb1f6 100644
--- a/src/hotspot/share/opto/rootnode.cpp
+++ b/src/hotspot/share/opto/rootnode.cpp
@@ -88,5 +88,5 @@ const Type* HaltNode::Value(PhaseGVN* phase) const {
}
const RegMask &HaltNode::out_RegMask() const {
- return RegMask::EMPTY;
+ return RegMask::Empty;
}
diff --git a/src/hotspot/share/opto/superword.cpp b/src/hotspot/share/opto/superword.cpp
index 6ab1ff37de9fd..c0f005048ec66 100644
--- a/src/hotspot/share/opto/superword.cpp
+++ b/src/hotspot/share/opto/superword.cpp
@@ -2500,8 +2500,6 @@ static bool can_subword_truncate(Node* in, const Type* type) {
case Op_RotateRight:
case Op_RotateLeft:
case Op_PopCountI:
- case Op_ReverseBytesS:
- case Op_ReverseBytesUS:
case Op_ReverseBytesI:
case Op_ReverseI:
case Op_CountLeadingZerosI:
diff --git a/src/hotspot/share/prims/jvmtiAgentList.cpp b/src/hotspot/share/prims/jvmtiAgentList.cpp
index 41fc9c0f3594c..8da5b75be4611 100644
--- a/src/hotspot/share/prims/jvmtiAgentList.cpp
+++ b/src/hotspot/share/prims/jvmtiAgentList.cpp
@@ -196,11 +196,6 @@ void JvmtiAgentList::load_xrun_agents() {
// Invokes Agent_OnAttach for agents loaded dynamically during runtime.
void JvmtiAgentList::load_agent(const char* agent_name, bool is_absolute_path,
const char* options, outputStream* st) {
- if (JvmtiEnvBase::get_phase() != JVMTI_PHASE_LIVE) {
- st->print_cr("Dynamic agent loading is only permitted in the live phase");
- return;
- }
-
JvmtiAgent* const agent = new JvmtiAgent(agent_name, options, is_absolute_path, /* dynamic agent */ true);
if (agent->load(st)) {
add(agent);
diff --git a/src/hotspot/share/prims/jvmtiExport.cpp b/src/hotspot/share/prims/jvmtiExport.cpp
index 0884fce2ff7ac..fa6ede86cd9a4 100644
--- a/src/hotspot/share/prims/jvmtiExport.cpp
+++ b/src/hotspot/share/prims/jvmtiExport.cpp
@@ -879,6 +879,7 @@ class JvmtiClassFileLoadHookPoster : public StackObj {
JvmtiThreadState * _state;
Klass* _class_being_redefined;
JvmtiClassLoadKind _load_kind;
+ bool _has_been_modified;
public:
inline JvmtiClassFileLoadHookPoster(Symbol* h_name, Handle class_loader,
@@ -895,6 +896,7 @@ class JvmtiClassFileLoadHookPoster : public StackObj {
_curr_data = *data_ptr;
_curr_env = nullptr;
_cached_class_file_ptr = cache_ptr;
+ _has_been_modified = false;
_state = JvmtiExport::get_jvmti_thread_state(_thread);
if (_state != nullptr) {
@@ -933,6 +935,8 @@ class JvmtiClassFileLoadHookPoster : public StackObj {
copy_modified_data();
}
+ bool has_been_modified() { return _has_been_modified; }
+
private:
void post_all_envs() {
if (_load_kind != jvmti_class_load_kind_retransform) {
@@ -979,6 +983,7 @@ class JvmtiClassFileLoadHookPoster : public StackObj {
}
if (new_data != nullptr) {
// this agent has modified class data.
+ _has_been_modified = true;
if (caching_needed && *_cached_class_file_ptr == nullptr) {
// data has been changed by the new retransformable agent
// and it hasn't already been cached, cache it
@@ -1053,18 +1058,18 @@ bool JvmtiExport::_should_post_class_file_load_hook = false;
int JvmtiExport::_should_notify_object_alloc = 0;
// this entry is for class file load hook on class load, redefine and retransform
-void JvmtiExport::post_class_file_load_hook(Symbol* h_name,
+bool JvmtiExport::post_class_file_load_hook(Symbol* h_name,
Handle class_loader,
Handle h_protection_domain,
unsigned char **data_ptr,
unsigned char **end_ptr,
JvmtiCachedClassFileData **cache_ptr) {
if (JvmtiEnv::get_phase() < JVMTI_PHASE_PRIMORDIAL) {
- return;
+ return false;
}
if (JavaThread::current()->should_hide_jvmti_events()) {
- return;
+ return false;
}
JvmtiClassFileLoadHookPoster poster(h_name, class_loader,
@@ -1072,6 +1077,7 @@ void JvmtiExport::post_class_file_load_hook(Symbol* h_name,
data_ptr, end_ptr,
cache_ptr);
poster.post();
+ return poster.has_been_modified();
}
void JvmtiExport::report_unsupported(bool on) {
diff --git a/src/hotspot/share/prims/jvmtiExport.hpp b/src/hotspot/share/prims/jvmtiExport.hpp
index 8906d6b81df6c..062057c70abf8 100644
--- a/src/hotspot/share/prims/jvmtiExport.hpp
+++ b/src/hotspot/share/prims/jvmtiExport.hpp
@@ -377,10 +377,11 @@ class JvmtiExport : public AllStatic {
static bool is_early_phase() NOT_JVMTI_RETURN_(false);
static bool has_early_class_hook_env() NOT_JVMTI_RETURN_(false);
static bool has_early_vmstart_env() NOT_JVMTI_RETURN_(false);
- static void post_class_file_load_hook(Symbol* h_name, Handle class_loader,
+ // Return true if the class was modified by the hook.
+ static bool post_class_file_load_hook(Symbol* h_name, Handle class_loader,
Handle h_protection_domain,
unsigned char **data_ptr, unsigned char **end_ptr,
- JvmtiCachedClassFileData **cache_ptr) NOT_JVMTI_RETURN;
+ JvmtiCachedClassFileData **cache_ptr) NOT_JVMTI_RETURN_(false);
static void post_native_method_bind(Method* method, address* function_ptr) NOT_JVMTI_RETURN;
static void post_compiled_method_load(JvmtiEnv* env, nmethod *nm) NOT_JVMTI_RETURN;
static void post_compiled_method_load(nmethod *nm) NOT_JVMTI_RETURN;
diff --git a/src/hotspot/share/runtime/arguments.cpp b/src/hotspot/share/runtime/arguments.cpp
index 0d92f22af79d4..8b703cb442a74 100644
--- a/src/hotspot/share/runtime/arguments.cpp
+++ b/src/hotspot/share/runtime/arguments.cpp
@@ -548,7 +548,6 @@ static SpecialFlag const special_jvm_flags[] = {
{ "ZGenerational", JDK_Version::jdk(23), JDK_Version::jdk(24), JDK_Version::undefined() },
{ "ZMarkStackSpaceLimit", JDK_Version::undefined(), JDK_Version::jdk(25), JDK_Version::undefined() },
{ "G1UpdateBufferSize", JDK_Version::undefined(), JDK_Version::jdk(26), JDK_Version::jdk(27) },
- { "ShenandoahPacing", JDK_Version::jdk(25), JDK_Version::jdk(26), JDK_Version::jdk(27) },
#if defined(AARCH64)
{ "NearCpool", JDK_Version::undefined(), JDK_Version::jdk(25), JDK_Version::undefined() },
#endif
@@ -1521,12 +1520,6 @@ void Arguments::set_heap_size() {
!FLAG_IS_DEFAULT(InitialRAMPercentage) ||
!FLAG_IS_DEFAULT(MaxRAM);
- if (CompilerConfig::should_set_client_emulation_mode_flags() &&
- FLAG_IS_DEFAULT(MaxRAM)) {
- // Reduce the maximum available memory if client emulation mode is enabled.
- FLAG_SET_DEFAULT(MaxRAM, 1ULL*G);
- }
-
if (has_ram_limit) {
if (!FLAG_IS_DEFAULT(MaxRAM)) {
// The user has configured MaxRAM, use that instead of physical memory
diff --git a/src/hotspot/share/runtime/continuationEntry.hpp b/src/hotspot/share/runtime/continuationEntry.hpp
index 490293f5b118f..8361f2f912be6 100644
--- a/src/hotspot/share/runtime/continuationEntry.hpp
+++ b/src/hotspot/share/runtime/continuationEntry.hpp
@@ -39,7 +39,6 @@ class RegisterMap;
// Metadata stored in the continuation entry frame
class ContinuationEntry {
- friend class VMStructs;
friend class JVMCIVMStructs;
ContinuationEntryPD _pd;
#ifdef ASSERT
diff --git a/src/hotspot/share/runtime/deoptimization.cpp b/src/hotspot/share/runtime/deoptimization.cpp
index daafcaea61bfe..853c6554022f9 100644
--- a/src/hotspot/share/runtime/deoptimization.cpp
+++ b/src/hotspot/share/runtime/deoptimization.cpp
@@ -1804,11 +1804,10 @@ void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason
deoptimize_single_frame(thread, fr, reason);
}
-address Deoptimization::deoptimize_for_missing_exception_handler(nmethod* nm, bool make_not_entrant) {
+#if INCLUDE_JVMCI
+address Deoptimization::deoptimize_for_missing_exception_handler(nmethod* nm) {
// there is no exception handler for this pc => deoptimize
- if (make_not_entrant) {
- nm->make_not_entrant(nmethod::InvalidationReason::MISSING_EXCEPTION_HANDLER);
- }
+ nm->make_not_entrant(nmethod::InvalidationReason::MISSING_EXCEPTION_HANDLER);
// Use Deoptimization::deoptimize for all of its side-effects:
// gathering traps statistics, logging...
@@ -1822,15 +1821,6 @@ address Deoptimization::deoptimize_for_missing_exception_handler(nmethod* nm, bo
frame runtime_frame = thread->last_frame();
frame caller_frame = runtime_frame.sender(®_map);
assert(caller_frame.cb()->as_nmethod_or_null() == nm, "expect top frame compiled method");
-
- Deoptimization::deoptimize(thread, caller_frame, Deoptimization::Reason_not_compiled_exception_handler);
-
- if (!nm->is_compiled_by_jvmci()) {
- return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
- }
-
-#if INCLUDE_JVMCI
- // JVMCI support
vframe* vf = vframe::new_vframe(&caller_frame, ®_map, thread);
compiledVFrame* cvf = compiledVFrame::cast(vf);
ScopeDesc* imm_scope = cvf->scope();
@@ -1846,15 +1836,16 @@ address Deoptimization::deoptimize_for_missing_exception_handler(nmethod* nm, bo
}
}
+ Deoptimization::deoptimize(thread, caller_frame, Deoptimization::Reason_not_compiled_exception_handler);
MethodData* trap_mdo = get_method_data(thread, methodHandle(thread, nm->method()), true);
if (trap_mdo != nullptr) {
trap_mdo->inc_trap_count(Deoptimization::Reason_not_compiled_exception_handler);
}
-#endif
return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
}
+#endif
void Deoptimization::deoptimize_frame_internal(JavaThread* thread, intptr_t* id, DeoptReason reason) {
assert(thread == Thread::current() ||
@@ -2757,10 +2748,10 @@ const char* Deoptimization::_trap_reason_name[] = {
"unstable_if",
"unstable_fused_if",
"receiver_constraint",
- "not_compiled_exception_handler",
"short_running_loop" JVMCI_ONLY("_or_aliasing"),
#if INCLUDE_JVMCI
"transfer_to_interpreter",
+ "not_compiled_exception_handler",
"unresolved",
"jsr_mismatch",
#endif
diff --git a/src/hotspot/share/runtime/deoptimization.hpp b/src/hotspot/share/runtime/deoptimization.hpp
index d168d9c8af682..5d97e2056adcc 100644
--- a/src/hotspot/share/runtime/deoptimization.hpp
+++ b/src/hotspot/share/runtime/deoptimization.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -117,11 +117,11 @@ class Deoptimization : AllStatic {
Reason_unstable_if, // a branch predicted always false was taken
Reason_unstable_fused_if, // fused two ifs that had each one untaken branch. One is now taken.
Reason_receiver_constraint, // receiver subtype check failed
- Reason_not_compiled_exception_handler, // missing compiled exception handler
Reason_short_running_long_loop, // profile reports loop runs for small number of iterations
#if INCLUDE_JVMCI
Reason_aliasing = Reason_short_running_long_loop, // optimistic assumption about aliasing failed
Reason_transfer_to_interpreter, // explicit transferToInterpreter()
+ Reason_not_compiled_exception_handler,
Reason_unresolved,
Reason_jsr_mismatch,
#endif
@@ -184,8 +184,8 @@ class Deoptimization : AllStatic {
// Deoptimizes a frame lazily. Deopt happens on return to the frame.
static void deoptimize(JavaThread* thread, frame fr, DeoptReason reason = Reason_constraint);
- static address deoptimize_for_missing_exception_handler(nmethod* nm, bool make_not_entrant);
#if INCLUDE_JVMCI
+ static address deoptimize_for_missing_exception_handler(nmethod* nm);
static oop get_cached_box(AutoBoxObjectValue* bv, frame* fr, RegisterMap* reg_map, bool& cache_init_error, TRAPS);
#endif
diff --git a/src/hotspot/share/runtime/globals.hpp b/src/hotspot/share/runtime/globals.hpp
index 238517197b2c4..513edaf658816 100644
--- a/src/hotspot/share/runtime/globals.hpp
+++ b/src/hotspot/share/runtime/globals.hpp
@@ -502,7 +502,7 @@ const int ObjectAlignmentInBytes = 8;
"If > 0, provokes an error after VM initialization; the value " \
"determines which error to provoke. See controlled_crash() " \
"in vmError.cpp.") \
- range(0, 18) \
+ range(0, 17) \
\
develop(uint, TestCrashInErrorHandler, 0, \
"If > 0, provokes an error inside VM error handler (a secondary " \
diff --git a/src/hotspot/share/runtime/javaThread.cpp b/src/hotspot/share/runtime/javaThread.cpp
index 36544cf1118ea..8bb8095878f3a 100644
--- a/src/hotspot/share/runtime/javaThread.cpp
+++ b/src/hotspot/share/runtime/javaThread.cpp
@@ -535,6 +535,7 @@ JavaThread::JavaThread(MemTag mem_tag) :
set_requires_cross_modify_fence(false);
pd_initialize();
+ assert(deferred_card_mark().is_empty(), "Default MemRegion ctor");
}
JavaThread* JavaThread::create_attaching_thread() {
@@ -1358,6 +1359,9 @@ void JavaThread::pop_jni_handle_block() {
}
void JavaThread::oops_do_no_frames(OopClosure* f, NMethodClosure* cf) {
+ // Verify that the deferred card marks have been flushed.
+ assert(deferred_card_mark().is_empty(), "Should be empty during GC");
+
// Traverse the GCHandles
Thread::oops_do_no_frames(f, cf);
diff --git a/src/hotspot/share/runtime/javaThread.hpp b/src/hotspot/share/runtime/javaThread.hpp
index a6a00bfbd033e..c8be1594a69fa 100644
--- a/src/hotspot/share/runtime/javaThread.hpp
+++ b/src/hotspot/share/runtime/javaThread.hpp
@@ -149,6 +149,11 @@ class JavaThread: public Thread {
oop _vm_result_oop; // oop result is GC-preserved
Metadata* _vm_result_metadata; // non-oop result
+ // See ReduceInitialCardMarks: this holds the precise space interval of
+ // the most recent slow path allocation for which compiled code has
+ // elided card-marks for performance along the fast-path.
+ MemRegion _deferred_card_mark;
+
ObjectMonitor* volatile _current_pending_monitor; // ObjectMonitor this thread is waiting to lock
bool _current_pending_monitor_is_from_java; // locking is from Java code
ObjectMonitor* volatile _current_waiting_monitor; // ObjectMonitor on which this thread called Object.wait()
@@ -771,6 +776,9 @@ class JavaThread: public Thread {
void set_vm_result_metadata(Metadata* x) { _vm_result_metadata = x; }
+ MemRegion deferred_card_mark() const { return _deferred_card_mark; }
+ void set_deferred_card_mark(MemRegion mr) { _deferred_card_mark = mr; }
+
// Is thread in scope of an InternalOOMEMark?
bool is_in_internal_oome_mark() const { return _is_in_internal_oome_mark; }
void set_is_in_internal_oome_mark(bool b) { _is_in_internal_oome_mark = b; }
diff --git a/src/hotspot/share/runtime/objectMonitor.cpp b/src/hotspot/share/runtime/objectMonitor.cpp
index f6d569b1b7a50..142324fec7a80 100644
--- a/src/hotspot/share/runtime/objectMonitor.cpp
+++ b/src/hotspot/share/runtime/objectMonitor.cpp
@@ -1863,10 +1863,10 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
// could be profitable.
//
// TODO-FIXME: change the following logic to a loop of the form
- // while (!timeout && !interrupted && node.TState == TS_WAIT) park()
+ // while (!timeout && !interrupted && _notified == 0) park()
int ret = OS_OK;
- bool was_notified = false;
+ int WasNotified = 0;
// Need to check interrupt state whilst still _thread_in_vm
bool interrupted = interruptible && current->is_interrupted(false);
@@ -1882,7 +1882,7 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
ThreadBlockInVMPreprocess tbivs(current, csos, true /* allow_suspend */);
if (interrupted || HAS_PENDING_EXCEPTION) {
// Intentionally empty
- } else if (node.TState == ObjectWaiter::TS_WAIT) {
+ } else if (!node._notified) {
if (millis <= 0) {
current->_ParkEvent->park();
} else {
@@ -1910,6 +1910,7 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
Thread::SpinAcquire(&_wait_set_lock);
if (node.TState == ObjectWaiter::TS_WAIT) {
dequeue_specific_waiter(&node); // unlink from wait_set
+ assert(!node._notified, "invariant");
node.TState = ObjectWaiter::TS_RUN;
}
Thread::SpinRelease(&_wait_set_lock);
@@ -1922,7 +1923,7 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
guarantee(node.TState != ObjectWaiter::TS_WAIT, "invariant");
OrderAccess::loadload();
if (has_successor(current)) clear_successor();
- was_notified = node.TState == ObjectWaiter::TS_ENTER;
+ WasNotified = node._notified;
// Reentry phase -- reacquire the monitor.
// re-enter contended monitor after object.wait().
@@ -1935,7 +1936,7 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
if (JvmtiExport::should_post_monitor_waited()) {
JvmtiExport::post_monitor_waited(current, this, ret == OS_TIMEOUT);
- if (was_notified && has_successor(current)) {
+ if (node._notified && has_successor(current)) {
// In this part of the monitor wait-notify-reenter protocol it
// is possible (and normal) for another thread to do a fastpath
// monitor enter-exit while this thread is still trying to get
@@ -2002,7 +2003,7 @@ void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
}
// check if the notification happened
- if (!was_notified) {
+ if (!WasNotified) {
// no, it could be timeout or Thread.interrupt() or both
// check for interrupt event, otherwise it is timeout
if (interruptible && current->is_interrupted(true) && !HAS_PENDING_EXCEPTION) {
@@ -2025,6 +2026,7 @@ bool ObjectMonitor::notify_internal(JavaThread* current) {
ObjectWaiter* iterator = dequeue_waiter();
if (iterator != nullptr) {
guarantee(iterator->TState == ObjectWaiter::TS_WAIT, "invariant");
+ guarantee(!iterator->_notified, "invariant");
if (iterator->is_vthread()) {
oop vthread = iterator->vthread();
@@ -2046,6 +2048,7 @@ bool ObjectMonitor::notify_internal(JavaThread* current) {
inc_unmounted_vthreads();
}
+ iterator->_notified = true;
iterator->_notifier_tid = JFR_THREAD_ID(current);
did_notify = true;
add_to_entry_list(current, iterator);
@@ -2207,6 +2210,7 @@ bool ObjectMonitor::vthread_wait_reenter(JavaThread* current, ObjectWaiter* node
Thread::SpinAcquire(&_wait_set_lock);
if (node->TState == ObjectWaiter::TS_WAIT) {
dequeue_specific_waiter(node); // unlink from wait_set
+ assert(!node->_notified, "invariant");
node->TState = ObjectWaiter::TS_RUN;
}
Thread::SpinRelease(&_wait_set_lock);
@@ -2512,6 +2516,7 @@ ObjectWaiter::ObjectWaiter(JavaThread* current) {
_notifier_tid = 0;
_recursions = 0;
TState = TS_RUN;
+ _notified = false;
_is_wait = false;
_at_reenter = false;
_interrupted = false;
diff --git a/src/hotspot/share/runtime/objectMonitor.hpp b/src/hotspot/share/runtime/objectMonitor.hpp
index 77919d9995586..058e0317ec189 100644
--- a/src/hotspot/share/runtime/objectMonitor.hpp
+++ b/src/hotspot/share/runtime/objectMonitor.hpp
@@ -52,6 +52,7 @@ class ObjectWaiter : public CHeapObj {
uint64_t _notifier_tid;
int _recursions;
volatile TStates TState;
+ volatile bool _notified;
bool _is_wait;
bool _at_reenter;
bool _interrupted;
@@ -66,8 +67,9 @@ class ObjectWaiter : public CHeapObj {
uint8_t state() const { return TState; }
ObjectMonitor* monitor() const { return _monitor; }
bool is_wait() const { return _is_wait; }
+ bool notified() const { return _notified; }
bool at_reenter() const { return _at_reenter; }
- bool at_monitorenter() const { return !_is_wait || TState != TS_WAIT; }
+ bool at_monitorenter() const { return !_is_wait || _at_reenter || _notified; }
oop vthread() const;
void wait_reenter_begin(ObjectMonitor *mon);
void wait_reenter_end(ObjectMonitor *mon);
diff --git a/src/hotspot/share/runtime/perfMemory.cpp b/src/hotspot/share/runtime/perfMemory.cpp
index 9594149333e6e..a75a41e95a94c 100644
--- a/src/hotspot/share/runtime/perfMemory.cpp
+++ b/src/hotspot/share/runtime/perfMemory.cpp
@@ -114,7 +114,9 @@ void PerfMemory::initialize() {
// the warning is issued only in debug mode in order to avoid
// additional output to the stdout or stderr output streams.
//
- log_debug(perf)("could not create PerfData Memory region, reverting to malloc");
+ if (PrintMiscellaneous && Verbose) {
+ warning("Could not create PerfData Memory region, reverting to malloc");
+ }
_prologue = NEW_C_HEAP_OBJ(PerfDataPrologue, mtInternal);
}
@@ -248,7 +250,10 @@ char* PerfMemory::get_perfdata_file_path() {
if(!Arguments::copy_expand_pid(PerfDataSaveFile, strlen(PerfDataSaveFile),
dest_file, JVM_MAXPATHLEN)) {
FREE_C_HEAP_ARRAY(char, dest_file);
- log_debug(perf)("invalid performance data file path name specified, fall back to a default name");
+ if (PrintMiscellaneous && Verbose) {
+ warning("Invalid performance data file path name specified, "\
+ "fall back to a default name");
+ }
} else {
return dest_file;
}
diff --git a/src/hotspot/share/runtime/serviceThread.cpp b/src/hotspot/share/runtime/serviceThread.cpp
index 03168842e3634..9a0bfe03ac3be 100644
--- a/src/hotspot/share/runtime/serviceThread.cpp
+++ b/src/hotspot/share/runtime/serviceThread.cpp
@@ -45,7 +45,7 @@
#include "services/lowMemoryDetector.hpp"
#include "services/threadIdTable.hpp"
-JavaThread* ServiceThread::_instance = nullptr;
+DEBUG_ONLY(JavaThread* ServiceThread::_instance = nullptr;)
JvmtiDeferredEvent* ServiceThread::_jvmti_event = nullptr;
// The service thread has it's own static deferred event queue.
// Events can be posted before JVMTI vm_start, so it's too early to call JvmtiThreadState::state_for
@@ -62,7 +62,7 @@ void ServiceThread::initialize() {
JavaThread::vm_exit_on_osthread_failure(thread);
JavaThread::start_internal_daemon(THREAD, thread, thread_oop, NearMaxPriority);
- _instance = thread;
+ DEBUG_ONLY(_instance = thread;)
}
static void cleanup_oopstorages() {
diff --git a/src/hotspot/share/runtime/serviceThread.hpp b/src/hotspot/share/runtime/serviceThread.hpp
index cfce8603cd5a2..f65847ece0069 100644
--- a/src/hotspot/share/runtime/serviceThread.hpp
+++ b/src/hotspot/share/runtime/serviceThread.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -35,7 +35,7 @@ class JvmtiDeferredEvent;
class ServiceThread : public JavaThread {
private:
- static JavaThread* _instance;
+ DEBUG_ONLY(static JavaThread* _instance;)
static JvmtiDeferredEvent* _jvmti_event;
static JvmtiDeferredEventQueue _jvmti_service_queue;
@@ -44,7 +44,6 @@ class ServiceThread : public JavaThread {
public:
static void initialize();
- static bool has_started() { return _instance != nullptr; }
// Hide this thread from external view.
bool is_hidden_from_external_view() const { return true; }
diff --git a/src/hotspot/share/runtime/sharedRuntime.cpp b/src/hotspot/share/runtime/sharedRuntime.cpp
index 35bc3f5f1beaf..efc47dd11c643 100644
--- a/src/hotspot/share/runtime/sharedRuntime.cpp
+++ b/src/hotspot/share/runtime/sharedRuntime.cpp
@@ -791,8 +791,7 @@ address SharedRuntime::compute_compiled_exc_handler(nmethod* nm, address ret_pc,
if (t != nullptr) {
return nm->code_begin() + t->pco();
} else {
- bool make_not_entrant = true;
- return Deoptimization::deoptimize_for_missing_exception_handler(nm, make_not_entrant);
+ return Deoptimization::deoptimize_for_missing_exception_handler(nm);
}
}
#endif // INCLUDE_JVMCI
@@ -848,15 +847,6 @@ address SharedRuntime::compute_compiled_exc_handler(nmethod* nm, address ret_pc,
ExceptionHandlerTable table(nm);
HandlerTableEntry *t = table.entry_for(catch_pco, handler_bci, scope_depth);
-
- // If the compiler did not anticipate a recursive exception, resulting in an exception
- // thrown from the catch bci, then the compiled exception handler might be missing.
- // This is rare. Just deoptimize and let the interpreter handle it.
- if (t == nullptr && recursive_exception_occurred) {
- bool make_not_entrant = false;
- return Deoptimization::deoptimize_for_missing_exception_handler(nm, make_not_entrant);
- }
-
if (t == nullptr && (nm->is_compiled_by_c1() || handler_bci != -1)) {
// Allow abbreviated catch tables. The idea is to allow a method
// to materialize its exceptions without committing to the exact
diff --git a/src/hotspot/share/runtime/sharedRuntime.hpp b/src/hotspot/share/runtime/sharedRuntime.hpp
index f4ff51504f0e5..93cd92b3a32df 100644
--- a/src/hotspot/share/runtime/sharedRuntime.hpp
+++ b/src/hotspot/share/runtime/sharedRuntime.hpp
@@ -384,6 +384,10 @@ class SharedRuntime: AllStatic {
// deopt blob
static void generate_deopt_blob(void);
+ static bool handle_ic_miss_helper_internal(Handle receiver, nmethod* caller_nm, const frame& caller_frame,
+ methodHandle callee_method, Bytecodes::Code bc, CallInfo& call_info,
+ bool& needs_ic_stub_refill, TRAPS);
+
public:
static DeoptimizationBlob* deopt_blob(void) { return _deopt_blob; }
@@ -545,6 +549,7 @@ class SharedRuntime: AllStatic {
// A compiled caller has just called the interpreter, but compiled code
// exists. Patch the caller so he no longer calls into the interpreter.
static void fixup_callers_callsite(Method* moop, address ret_pc);
+ static bool should_fixup_call_destination(address destination, address entry_point, address caller_pc, Method* moop, CodeBlob* cb);
// Slow-path Locking and Unlocking
static void complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* current);
diff --git a/src/hotspot/share/runtime/threads.cpp b/src/hotspot/share/runtime/threads.cpp
index 299905ff0a2a3..ffe1a86cda57e 100644
--- a/src/hotspot/share/runtime/threads.cpp
+++ b/src/hotspot/share/runtime/threads.cpp
@@ -97,7 +97,6 @@
#include "runtime/trimNativeHeap.hpp"
#include "runtime/vm_version.hpp"
#include "runtime/vmOperations.hpp"
-#include "sanitizers/address.hpp"
#include "services/attachListener.hpp"
#include "services/management.hpp"
#include "services/threadIdTable.hpp"
@@ -344,11 +343,6 @@ static void call_initPhase3(TRAPS) {
void Threads::initialize_java_lang_classes(JavaThread* main_thread, TRAPS) {
TraceTime timer("Initialize java.lang classes", TRACETIME_LOG(Info, startuptime));
- // This is before the execution of the very first Java bytecode.
- if (CDSConfig::is_using_aot_linked_classes()) {
- AOTLinkedClassBulkLoader::link_classes(THREAD);
- }
-
initialize_class(vmSymbols::java_lang_String(), CHECK);
// Inject CompactStrings value after the static initializers for String ran.
@@ -703,10 +697,6 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
// No more stub generation allowed after that point.
StubCodeDesc::freeze();
-#ifdef ADDRESS_SANITIZER
- Asan::initialize();
-#endif
-
// Set flag that basic initialization has completed. Used by exceptions and various
// debug stuff, that does not work until all basic classes have been initialized.
set_init_completed();
@@ -752,10 +742,6 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
// and other cleanups. Needs to start before the compilers start posting events.
ServiceThread::initialize();
- if (CDSConfig::is_using_aot_linked_classes()) {
- nmethod::post_delayed_compiled_method_load_events();
- }
-
// Start the monitor deflation thread:
MonitorDeflationThread::initialize();
@@ -788,7 +774,7 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
if (CDSConfig::is_using_aot_linked_classes()) {
SystemDictionary::restore_archived_method_handle_intrinsics();
- AOTLinkedClassBulkLoader::init_javabase_classes(THREAD);
+ AOTLinkedClassBulkLoader::link_or_init_javabase_classes(THREAD);
}
// Start string deduplication thread if requested.
@@ -807,7 +793,7 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
call_initPhase2(CHECK_JNI_ERR);
if (CDSConfig::is_using_aot_linked_classes()) {
- AOTLinkedClassBulkLoader::init_non_javabase_classes(THREAD);
+ AOTLinkedClassBulkLoader::link_or_init_non_javabase_classes(THREAD);
}
#ifndef PRODUCT
HeapShared::initialize_test_class_from_archive(THREAD);
diff --git a/src/hotspot/share/runtime/vmStructs.cpp b/src/hotspot/share/runtime/vmStructs.cpp
index 85f921ef3e33c..dee0a5d4eb7d4 100644
--- a/src/hotspot/share/runtime/vmStructs.cpp
+++ b/src/hotspot/share/runtime/vmStructs.cpp
@@ -616,7 +616,6 @@
nonstatic_field(JavaThread, _active_handles, JNIHandleBlock*) \
nonstatic_field(JavaThread, _monitor_owner_id, int64_t) \
volatile_nonstatic_field(JavaThread, _terminated, JavaThread::TerminatedTypes) \
- nonstatic_field(JavaThread, _cont_entry, ContinuationEntry*) \
nonstatic_field(Thread, _osthread, OSThread*) \
\
/************/ \
@@ -797,8 +796,7 @@
nonstatic_field(Mutex, _name, const char*) \
static_field(Mutex, _mutex_array, Mutex**) \
static_field(Mutex, _num_mutex, int) \
- volatile_nonstatic_field(Mutex, _owner, Thread*) \
- static_field(ContinuationEntry, _return_pc, address)
+ volatile_nonstatic_field(Mutex, _owner, Thread*)
//--------------------------------------------------------------------------------
// VM_TYPES
@@ -1272,7 +1270,6 @@
declare_toplevel_type(FileMapHeader) \
declare_toplevel_type(CDSFileMapRegion) \
declare_toplevel_type(UpcallStub::FrameData) \
- declare_toplevel_type(ContinuationEntry) \
\
/************/ \
/* GC types */ \
@@ -1581,8 +1578,8 @@
declare_constant(Deoptimization::Reason_unstable_if) \
declare_constant(Deoptimization::Reason_unstable_fused_if) \
declare_constant(Deoptimization::Reason_receiver_constraint) \
- declare_constant(Deoptimization::Reason_not_compiled_exception_handler) \
NOT_ZERO(JVMCI_ONLY(declare_constant(Deoptimization::Reason_transfer_to_interpreter))) \
+ NOT_ZERO(JVMCI_ONLY(declare_constant(Deoptimization::Reason_not_compiled_exception_handler))) \
NOT_ZERO(JVMCI_ONLY(declare_constant(Deoptimization::Reason_unresolved))) \
NOT_ZERO(JVMCI_ONLY(declare_constant(Deoptimization::Reason_jsr_mismatch))) \
declare_constant(Deoptimization::Reason_tenured) \
diff --git a/src/hotspot/share/sanitizers/address.cpp b/src/hotspot/share/sanitizers/address.cpp
deleted file mode 100644
index b050039b1b821..0000000000000
--- a/src/hotspot/share/sanitizers/address.cpp
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifdef ADDRESS_SANITIZER
-
-#include "logging/log.hpp"
-#include "sanitizers/address.hpp"
-#include "utilities/globalDefinitions.hpp"
-#include "utilities/vmError.hpp"
-
-#include
-#include
-
-typedef void (*callback_setter_t) (void (*callback)(const char *));
-static callback_setter_t g_callback_setter = nullptr;
-static const char* g_report = nullptr;
-
-extern "C" void asan_error_callback(const char* report_text) {
- // Please keep things very short and simple here and use as little
- // as possible of any hotspot infrastructure. However shaky the JVM,
- // we should always at least get the ASAN report on stderr.
-
- // Note: this is threadsafe since ASAN synchronizes error reports
- g_report = report_text;
-
- // First, print off the bare error to stderr
- fprintf(stderr, "JVM caught ASAN Error\n");
- fprintf(stderr, "%s\n", report_text);
-
- // Then, let normal JVM error handling run its due course.
- fatal("ASAN Error");
-}
-
-void Asan::initialize() {
-
- // For documentation of __asan_set_error_report_callback() see asan_interface.h .
- g_callback_setter = (callback_setter_t) dlsym(RTLD_DEFAULT, "__asan_set_error_report_callback");
- if (g_callback_setter == nullptr) {
- log_info(asan)("*** Failed to install JVM callback for ASAN. ASAN errors will not generate hs-err files. ***");
- return;
- }
-
- g_callback_setter(asan_error_callback);
- log_info(asan)("JVM callback for ASAN errors successfully installed");
-
- // Controlling core dump behavior:
- //
- // In hotspot, CreateCoredumpOnCrash decides whether to create a core dump (on Posix, whether to
- // end the process with abort(3) or exit(3)).
- //
- // Core generation in the default ASAN reporter is controlled by two options:
- // - "abort_on_error=0" (default) - end with exit(3), "abort_on_error=1" end with abort(3)
- // - "disable_coredump=1" (default) disables cores by imposing a near-zero core soft limit.
- // By default both options are set to prevent cores. That default makes sense since ASAN cores
- // can get very large (due to the shadow map) and very numerous (ASAN is typically ran for
- // large-scale integration tests, not targeted micro-tests).
- //
- // In hotspot ASAN builds, we replace the default ASAN reporter. The soft limit imposed by
- // "disable_coredump=1" is still in effect. But "abort_on_error" is not honored. Since we'd
- // like to exhibit exactly the same behavior as the standard ASAN error reporter, we disable
- // core files if ASAN would inhibit them (we just switch off CreateCoredumpOnCrash).
- //
- // Thus:
- // abort_on_error disable_coredump core file?
- // 0 0 No (enforced by ergo-setting CreateCoredumpOnCrash=0)
- // (*) 0 1 No (enforced by ASAN-imposed soft limit)
- // 1 0 Yes, unless -XX:-CreateCoredumpOnCrash set on command line
- // 1 1 No (enforced by ASAN-imposed soft limit)
- // (*) is the default if no ASAN options are specified.
-
- const char* const asan_options = getenv("ASAN_OPTIONS");
- const bool asan_inhibits_cores = (asan_options == nullptr) ||
- (::strstr(asan_options, "abort_on_error=1") == nullptr) ||
- (::strstr(asan_options, "disable_coredump=0") == nullptr);
- if (asan_inhibits_cores) {
- if (CreateCoredumpOnCrash) {
- log_info(asan)("CreateCoredumpOnCrash overruled by%s asan options. Core generation disabled.",
- asan_options != nullptr ? "" : " default setting for");
- log_info(asan)("Use 'ASAN_OPTIONS=abort_on_error=1:disable_coredump=0:unmap_shadow_on_exit=1' "
- "to enable core generation.");
- }
- FLAG_SET_ERGO(CreateCoredumpOnCrash, false);
- }
-}
-
-bool Asan::had_error() {
- return g_report != nullptr;
-}
-
-void Asan::report(outputStream* st) {
- if (had_error()) {
- // Use raw print here to avoid truncation.
- st->print_raw(g_report);
- st->cr();
- st->cr();
- }
-}
-
-#endif // ADDRESS_SANITIZER
diff --git a/src/hotspot/share/sanitizers/address.hpp b/src/hotspot/share/sanitizers/address.hpp
index 109aa59dac07b..5186053f1c9e4 100644
--- a/src/hotspot/share/sanitizers/address.hpp
+++ b/src/hotspot/share/sanitizers/address.hpp
@@ -26,8 +26,6 @@
#define SHARE_SANITIZERS_ADDRESS_HPP
#ifdef ADDRESS_SANITIZER
-#include "memory/allStatic.hpp"
-
#include
#endif
@@ -76,14 +74,4 @@
} while (false)
#endif
-class outputStream;
-
-#ifdef ADDRESS_SANITIZER
-struct Asan : public AllStatic {
- static void initialize();
- static bool had_error();
- static void report(outputStream* st);
-};
-#endif
-
#endif // SHARE_SANITIZERS_ADDRESS_HPP
diff --git a/src/hotspot/share/services/diagnosticCommand.hpp b/src/hotspot/share/services/diagnosticCommand.hpp
index 2364b0ce4cd72..001d89a5aef22 100644
--- a/src/hotspot/share/services/diagnosticCommand.hpp
+++ b/src/hotspot/share/services/diagnosticCommand.hpp
@@ -356,9 +356,7 @@ class ThreadDumpDCmd : public DCmdWithParser {
ThreadDumpDCmd(outputStream* output, bool heap);
static const char* name() { return "Thread.print"; }
static const char* description() {
- return "Print all platform threads, and mounted virtual threads, "
- "with stack traces. The Thread.dump_to_file command will "
- "print all threads to a file.";
+ return "Print all threads with stacktraces.";
}
static const char* impact() {
return "Medium: Depends on the number of threads.";
@@ -770,8 +768,7 @@ class ThreadDumpToFileDCmd : public DCmdWithParser {
return "Thread.dump_to_file";
}
static const char *description() {
- return "Dump all threads, with stack traces, "
- "to a file in plain text or JSON format.";
+ return "Dump threads, with stack traces, to a file in plain text or JSON format.";
}
static const char* impact() {
return "Medium: Depends on the number of threads.";
diff --git a/src/hotspot/share/utilities/globalDefinitions.hpp b/src/hotspot/share/utilities/globalDefinitions.hpp
index 68900f8bc8693..51ea80a0150af 100644
--- a/src/hotspot/share/utilities/globalDefinitions.hpp
+++ b/src/hotspot/share/utilities/globalDefinitions.hpp
@@ -26,7 +26,6 @@
#define SHARE_UTILITIES_GLOBALDEFINITIONS_HPP
#include "classfile_constants.h"
-#include "utilities/checkedCast.hpp"
#include "utilities/compilerWarnings.hpp"
#include "utilities/debug.hpp"
#include "utilities/forbiddenFunctions.hpp"
@@ -1254,21 +1253,13 @@ JAVA_INTEGER_SHIFT_OP(>>, java_shift_right_unsigned, jlong, julong)
#undef JAVA_INTEGER_SHIFT_OP
-inline jlong java_negate(jlong v, BasicType bt) {
- if (bt == T_INT) {
- return java_negate(checked_cast(v));
- }
- assert(bt == T_LONG, "int or long only");
- return java_negate(v);
-}
-
// Some convenient bit shift operations that accepts a BasicType as the last
// argument. These avoid potential mistakes with overloaded functions only
// distinguished by lhs argument type.
#define JAVA_INTEGER_SHIFT_BASIC_TYPE(FUNC) \
inline jlong FUNC(jlong lhs, jint rhs, BasicType bt) { \
if (bt == T_INT) { \
- return FUNC(checked_cast(lhs), rhs); \
+ return FUNC((jint) lhs, rhs); \
} \
assert(bt == T_LONG, "unsupported basic type"); \
return FUNC(lhs, rhs); \
diff --git a/src/hotspot/share/utilities/vmError.cpp b/src/hotspot/share/utilities/vmError.cpp
index e0cbb60c74422..0fbd8ed4259ac 100644
--- a/src/hotspot/share/utilities/vmError.cpp
+++ b/src/hotspot/share/utilities/vmError.cpp
@@ -60,7 +60,6 @@
#include "runtime/vm_version.hpp"
#include "runtime/vmOperations.hpp"
#include "runtime/vmThread.hpp"
-#include "sanitizers/address.hpp"
#include "sanitizers/ub.hpp"
#include "utilities/debug.hpp"
#include "utilities/decoder.hpp"
@@ -911,16 +910,7 @@ void VMError::report(outputStream* st, bool _verbose) {
STEP_IF("printing date and time", _verbose)
os::print_date_and_time(st, buf, sizeof(buf));
-#ifdef ADDRESS_SANITIZER
- STEP_IF("printing ASAN error information", _verbose && Asan::had_error())
- st->cr();
- st->print_cr("------------------ A S A N ----------------");
- st->cr();
- Asan::report(st);
- st->cr();
-#endif // ADDRESS_SANITIZER
-
- STEP_IF("printing thread", _verbose)
+ STEP_IF("printing thread", _verbose)
st->cr();
st->print_cr("--------------- T H R E A D ---------------");
st->cr();
@@ -2196,14 +2186,6 @@ void VMError::controlled_crash(int how) {
fatal("Force crash with a nested ThreadsListHandle.");
}
}
- case 18: {
- // Trigger an error that should cause ASAN to report a double free or use-after-free.
- // Please note that this is not 100% bullet-proof since it assumes that this block
- // is not immediately repurposed by some other thread after free.
- void* const p = os::malloc(4096, mtTest);
- os::free(p);
- os::free(p);
- }
default:
// If another number is given, give a generic crash.
fatal("Crashing with number %d", how);
diff --git a/src/java.base/share/classes/com/sun/crypto/provider/AESCipher.java b/src/java.base/share/classes/com/sun/crypto/provider/AESCipher.java
index 329f367717a95..12359cba7d12a 100644
--- a/src/java.base/share/classes/com/sun/crypto/provider/AESCipher.java
+++ b/src/java.base/share/classes/com/sun/crypto/provider/AESCipher.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -49,7 +49,7 @@
*
* @author Valerie Peng
*
- * @see AES_Crypt
+ * @see AESCrypt
* @see CipherBlockChaining
* @see ElectronicCodeBook
* @see CipherFeedback
@@ -174,7 +174,7 @@ static void checkKeySize(Key key, int fixedKeySize)
* PKCS5Padding.
*/
protected AESCipher(int keySize) {
- core = new CipherCore(new AES_Crypt(), AESConstants.AES_BLOCK_SIZE);
+ core = new CipherCore(new AESCrypt(), AESConstants.AES_BLOCK_SIZE);
fixedKeySize = keySize;
}
@@ -504,7 +504,7 @@ protected int engineDoFinal(byte[] input, int inputOffset, int inputLen,
protected int engineGetKeySize(Key key) throws InvalidKeyException {
byte[] encoded = key.getEncoded();
Arrays.fill(encoded, (byte)0);
- if (!AES_Crypt.isKeySizeValid(encoded.length)) {
+ if (!AESCrypt.isKeySizeValid(encoded.length)) {
throw new InvalidKeyException("Invalid AES key length: " +
encoded.length + " bytes");
}
diff --git a/src/java.base/share/classes/com/sun/crypto/provider/AESCrypt.java b/src/java.base/share/classes/com/sun/crypto/provider/AESCrypt.java
new file mode 100644
index 0000000000000..9bbc8c1676485
--- /dev/null
+++ b/src/java.base/share/classes/com/sun/crypto/provider/AESCrypt.java
@@ -0,0 +1,1437 @@
+/*
+ * Copyright (c) 2002, 2022, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/* $Id: Rijndael.java,v 1.6 2000/02/10 01:31:41 gelderen Exp $
+ *
+ * Copyright (C) 1995-2000 The Cryptix Foundation Limited.
+ * All rights reserved.
+ *
+ * Use, modification, copying and distribution of this softwareas is subject
+ * the terms and conditions of the Cryptix General Licence. You should have
+ * received a copy of the Cryptix General Licence along with this library;
+ * if not, you can download a copy from http://www.cryptix.org/ .
+ */
+
+package com.sun.crypto.provider;
+
+import java.security.InvalidKeyException;
+import java.security.MessageDigest;
+import java.util.Arrays;
+
+import jdk.internal.vm.annotation.IntrinsicCandidate;
+
+/**
+ * Rijndael --pronounced Reindaal-- is a symmetric cipher with a 128-bit
+ * block size and variable key-size (128-, 192- and 256-bit).
+ *
*
*
*
@@ -327,6 +327,10 @@
* @spec https://standards.ieee.org/ieee/754/6210/
* IEEE Standard for Floating-Point Arithmetic
*
+ * @author Josh Bloch
+ * @author Mike Cowlishaw
+ * @author Joseph D. Darcy
+ * @author Sergey V. Kuksenko
* @since 1.1
*/
public class BigDecimal extends Number implements Comparable {
@@ -1775,6 +1779,7 @@ public BigDecimal divide(BigDecimal divisor, RoundingMode roundingMode) {
* terminating decimal expansion, including dividing by zero
* @return {@code this / divisor}
* @since 1.5
+ * @author Joseph D. Darcy
*/
public BigDecimal divide(BigDecimal divisor) {
/*
@@ -1943,6 +1948,7 @@ public BigDecimal divideToIntegralValue(BigDecimal divisor) {
* @throws ArithmeticException if {@code mc.precision} {@literal >} 0 and the result
* requires a precision of more than {@code mc.precision} digits.
* @since 1.5
+ * @author Joseph D. Darcy
*/
public BigDecimal divideToIntegralValue(BigDecimal divisor, MathContext mc) {
if (mc.precision == 0 || // exact result
@@ -2113,7 +2119,7 @@ public BigDecimal[] divideAndRemainder(BigDecimal divisor, MathContext mc) {
* with rounding according to the context settings.
*
*
The preferred scale of the returned result is equal to
- * {@code Math.ceilDiv(this.scale(), 2)}. The value of the returned result is
+ * {@code this.scale()/2}. The value of the returned result is
* always within one ulp of the exact decimal value for the
* precision in question. If the rounding mode is {@link
* RoundingMode#HALF_UP HALF_UP}, {@link RoundingMode#HALF_DOWN
@@ -2174,7 +2180,7 @@ public BigDecimal sqrt(MathContext mc) {
// The code below favors relative simplicity over checking
// for special cases that could run faster.
- final int preferredScale = Math.ceilDiv(this.scale, 2);
+ final int preferredScale = this.scale/2;
BigDecimal result;
if (mc.roundingMode == RoundingMode.UNNECESSARY || mc.precision == 0) { // Exact result requested
diff --git a/src/java.base/share/classes/java/math/MathContext.java b/src/java.base/share/classes/java/math/MathContext.java
index f80fcc3e076e2..d0c1cb4a5a9ee 100644
--- a/src/java.base/share/classes/java/math/MathContext.java
+++ b/src/java.base/share/classes/java/math/MathContext.java
@@ -51,6 +51,8 @@
* @spec https://standards.ieee.org/ieee/754/6210/
* IEEE Standard for Floating-Point Arithmetic
*
+ * @author Mike Cowlishaw
+ * @author Joseph D. Darcy
* @since 1.5
*/
diff --git a/src/java.base/share/classes/java/math/RoundingMode.java b/src/java.base/share/classes/java/math/RoundingMode.java
index 4188c781cab8a..e66a64e143f98 100644
--- a/src/java.base/share/classes/java/math/RoundingMode.java
+++ b/src/java.base/share/classes/java/math/RoundingMode.java
@@ -115,6 +115,9 @@
* IEEE Standard for Floating-Point Arithmetic
* @jls 15.4 Floating-point Expressions
*
+ * @author Josh Bloch
+ * @author Mike Cowlishaw
+ * @author Joseph D. Darcy
* @since 1.5
*/
@SuppressWarnings("deprecation") // Legacy rounding mode constants in BigDecimal
diff --git a/src/java.base/share/classes/java/nio/ByteOrder.java b/src/java.base/share/classes/java/nio/ByteOrder.java
index ab6876448bea2..96f2317b956d0 100644
--- a/src/java.base/share/classes/java/nio/ByteOrder.java
+++ b/src/java.base/share/classes/java/nio/ByteOrder.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -35,19 +35,28 @@
* @since 1.4
*/
-public enum ByteOrder {
- /**
- * Constant denoting little-endian byte order. In this order, the bytes of
- * a multibyte value are ordered from least significant to most
- * significant.
- */
- LITTLE_ENDIAN,
+public final class ByteOrder {
+
+ private final String name;
+
+ private ByteOrder(String name) {
+ this.name = name;
+ }
+
/**
* Constant denoting big-endian byte order. In this order, the bytes of a
* multibyte value are ordered from most significant to least significant.
*/
- BIG_ENDIAN;
+ public static final ByteOrder BIG_ENDIAN
+ = new ByteOrder("BIG_ENDIAN");
+ /**
+ * Constant denoting little-endian byte order. In this order, the bytes of
+ * a multibyte value are ordered from least significant to most
+ * significant.
+ */
+ public static final ByteOrder LITTLE_ENDIAN
+ = new ByteOrder("LITTLE_ENDIAN");
// Retrieve the native byte order. It's used early during bootstrap, and
// must be initialized after BIG_ENDIAN and LITTLE_ENDIAN.
@@ -69,4 +78,18 @@ public enum ByteOrder {
public static ByteOrder nativeOrder() {
return NATIVE_ORDER;
}
+
+ /**
+ * Constructs a string describing this object.
+ *
+ *
This method returns the string
+ * {@code "BIG_ENDIAN"} for {@link #BIG_ENDIAN} and
+ * {@code "LITTLE_ENDIAN"} for {@link #LITTLE_ENDIAN}.
+ *
+ * @return The specified string
+ */
+ public String toString() {
+ return name;
+ }
+
}
diff --git a/src/java.base/share/classes/java/nio/channels/GatheringByteChannel.java b/src/java.base/share/classes/java/nio/channels/GatheringByteChannel.java
index e2e97562dee79..4e3b0cf136d2a 100644
--- a/src/java.base/share/classes/java/nio/channels/GatheringByteChannel.java
+++ b/src/java.base/share/classes/java/nio/channels/GatheringByteChannel.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -76,14 +76,11 @@ public interface GatheringByteChannel
* the final position of each updated buffer, except the last updated
* buffer, is guaranteed to be equal to that buffer's limit.
*
- *
For many types of channels, a write operation will return only after
+ *
Unless otherwise specified, a write operation will return only after
* writing all of the r requested bytes. Some types of channels,
* depending upon their state, may write only some of the bytes or possibly
- * none at all. A socket channel in {@linkplain
- * SelectableChannel#isBlocking non-blocking mode}, for example, cannot
- * write any more bytes than are free in the socket's output buffer. The
- * write method may need to be invoked more than once to ensure that all
- * {@linkplain ByteBuffer#hasRemaining remaining} bytes are written.
+ * none at all. A socket channel in non-blocking mode, for example, cannot
+ * write any more bytes than are free in the socket's output buffer.
*
*
This method may be invoked at any time. If another thread has
* already initiated a write operation upon this channel, however, then an
diff --git a/src/java.base/share/classes/java/nio/channels/WritableByteChannel.java b/src/java.base/share/classes/java/nio/channels/WritableByteChannel.java
index 5284c72b37b00..ef8efa5037c42 100644
--- a/src/java.base/share/classes/java/nio/channels/WritableByteChannel.java
+++ b/src/java.base/share/classes/java/nio/channels/WritableByteChannel.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2005, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -65,14 +65,11 @@ public interface WritableByteChannel
* Upon return the buffer's position will be equal to
* p {@code +} n; its limit will not have changed.
*
- *
For many types of channels, a write operation will return only after
+ *
Unless otherwise specified, a write operation will return only after
* writing all of the r requested bytes. Some types of channels,
* depending upon their state, may write only some of the bytes or possibly
- * none at all. A socket channel in {@linkplain
- * SelectableChannel#isBlocking non-blocking mode}, for example, cannot
- * write any more bytes than are free in the socket's output buffer. The
- * write method may need to be invoked more than once to ensure that all
- * {@linkplain ByteBuffer#hasRemaining remaining} bytes are written.
+ * none at all. A socket channel in non-blocking mode, for example, cannot
+ * write any more bytes than are free in the socket's output buffer.
*
*
This method may be invoked at any time. If another thread has
* already initiated a write operation upon this channel, however, then an
diff --git a/src/java.base/share/classes/java/time/Duration.java b/src/java.base/share/classes/java/time/Duration.java
index 7b3289a1f5957..23577a8a63495 100644
--- a/src/java.base/share/classes/java/time/Duration.java
+++ b/src/java.base/share/classes/java/time/Duration.java
@@ -138,37 +138,6 @@ public final class Duration
* Constant for a duration of zero.
*/
public static final Duration ZERO = new Duration(0, 0);
- /**
- * The minimum supported {@code Duration}, which is {@link Long#MIN_VALUE}
- * seconds.
- *
- * @apiNote This constant represents the smallest possible instance of
- * {@code Duration}. Since {@code Duration} is directed, the smallest
- * possible duration is negative.
- *
- * The constant is intended to be used as a sentinel value or in tests.
- * Care should be taken when performing arithmetic on {@code MIN} as there
- * is a high risk that {@link ArithmeticException} or {@link DateTimeException}
- * will be thrown.
- *
- * @since 26
- */
- public static final Duration MIN = new Duration(Long.MIN_VALUE, 0);
- /**
- * The maximum supported {@code Duration}, which is {@link Long#MAX_VALUE}
- * seconds and {@code 999,999,999} nanoseconds.
- *
- * @apiNote This constant represents the largest possible instance of
- * {@code Duration}.
- *
- * The constant is intended to be used as a sentinel value or in tests.
- * Care should be taken when performing arithmetic on {@code MAX} as there
- * is a high risk that {@link ArithmeticException} or {@link DateTimeException}
- * will be thrown.
- *
- * @since 26
- */
- public static final Duration MAX = new Duration(Long.MAX_VALUE, 999_999_999);
/**
* Serialization version.
*/
diff --git a/src/java.base/share/classes/java/time/temporal/ChronoUnit.java b/src/java.base/share/classes/java/time/temporal/ChronoUnit.java
index 6e944b296daa8..8f94e061d4d48 100644
--- a/src/java.base/share/classes/java/time/temporal/ChronoUnit.java
+++ b/src/java.base/share/classes/java/time/temporal/ChronoUnit.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -184,9 +184,10 @@ public enum ChronoUnit implements TemporalUnit {
* Artificial unit that represents the concept of forever.
* This is primarily used with {@link TemporalField} to represent unbounded fields
* such as the year or era.
- * The estimated duration of this unit is artificially defined as {@link Duration#MAX}.
+ * The estimated duration of this unit is artificially defined as the largest duration
+ * supported by {@link Duration}.
*/
- FOREVER("Forever", Duration.MAX);
+ FOREVER("Forever", Duration.ofSeconds(Long.MAX_VALUE, 999_999_999));
private final String name;
private final Duration duration;
diff --git a/src/java.base/share/classes/java/util/concurrent/CompletableFuture.java b/src/java.base/share/classes/java/util/concurrent/CompletableFuture.java
index 1338f2fd8043d..7503c154ddbea 100644
--- a/src/java.base/share/classes/java/util/concurrent/CompletableFuture.java
+++ b/src/java.base/share/classes/java/util/concurrent/CompletableFuture.java
@@ -1904,8 +1904,8 @@ private Object waitingGet(boolean interruptible) {
while ((r = result) == null) {
if (q == null) {
q = new Signaller(interruptible, 0L, 0L);
- if (Thread.currentThread() instanceof ForkJoinWorkerThread wt)
- ForkJoinPool.helpAsyncBlocker(wt.pool, q);
+ if (Thread.currentThread() instanceof ForkJoinWorkerThread)
+ ForkJoinPool.helpAsyncBlocker(defaultExecutor(), q);
}
else if (!queued)
queued = tryPushStack(q);
@@ -1950,8 +1950,8 @@ else if (nanos <= 0L)
break;
else if (q == null) {
q = new Signaller(true, nanos, deadline);
- if (Thread.currentThread() instanceof ForkJoinWorkerThread wt)
- ForkJoinPool.helpAsyncBlocker(wt.pool, q);
+ if (Thread.currentThread() instanceof ForkJoinWorkerThread)
+ ForkJoinPool.helpAsyncBlocker(defaultExecutor(), q);
}
else if (!queued)
queued = tryPushStack(q);
diff --git a/src/java.base/share/classes/jdk/internal/math/DoubleConsts.java b/src/java.base/share/classes/jdk/internal/math/DoubleConsts.java
index 168e99d4ef5ee..d3a271fdd0709 100644
--- a/src/java.base/share/classes/jdk/internal/math/DoubleConsts.java
+++ b/src/java.base/share/classes/jdk/internal/math/DoubleConsts.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,8 @@
/**
* This class contains additional constants documenting limits of the
* {@code double} type.
+ *
+ * @author Joseph D. Darcy
*/
public class DoubleConsts {
diff --git a/src/java.base/share/classes/jdk/internal/math/FloatConsts.java b/src/java.base/share/classes/jdk/internal/math/FloatConsts.java
index 2bd484e99f39d..fd304c7871a7d 100644
--- a/src/java.base/share/classes/jdk/internal/math/FloatConsts.java
+++ b/src/java.base/share/classes/jdk/internal/math/FloatConsts.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,8 @@
/**
* This class contains additional constants documenting limits of the
* {@code float} type.
+ *
+ * @author Joseph D. Darcy
*/
public class FloatConsts {
diff --git a/src/java.base/share/classes/sun/security/util/DerValue.java b/src/java.base/share/classes/sun/security/util/DerValue.java
index ec8b482b07dc3..19e7083180b2e 100644
--- a/src/java.base/share/classes/sun/security/util/DerValue.java
+++ b/src/java.base/share/classes/sun/security/util/DerValue.java
@@ -859,22 +859,6 @@ public String getUniversalString() throws IOException {
return readStringInternal(tag_UniversalString, new UTF_32BE());
}
- /**
- * Checks that the BMPString does not contain any surrogate characters,
- * which are outside the Basic Multilingual Plane.
- *
- * @throws IOException if illegal characters are detected
- */
- public void validateBMPString() throws IOException {
- String bmpString = getBMPString();
- for (int i = 0; i < bmpString.length(); i++) {
- if (Character.isSurrogate(bmpString.charAt(i))) {
- throw new IOException(
- "Illegal character in BMPString, index: " + i);
- }
- }
- }
-
/**
* Reads the ASN.1 NULL value
*/
diff --git a/src/java.base/share/classes/sun/security/x509/AVA.java b/src/java.base/share/classes/sun/security/x509/AVA.java
index 214ae71828846..915421c76f2f4 100644
--- a/src/java.base/share/classes/sun/security/x509/AVA.java
+++ b/src/java.base/share/classes/sun/security/x509/AVA.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1996, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1996, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,13 +28,10 @@
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.Reader;
-import java.nio.charset.Charset;
import java.text.Normalizer;
import java.util.*;
-import static java.nio.charset.StandardCharsets.ISO_8859_1;
import static java.nio.charset.StandardCharsets.UTF_8;
-import static java.nio.charset.StandardCharsets.UTF_16BE;
import sun.security.util.*;
import sun.security.pkcs.PKCS9Attribute;
@@ -592,10 +589,6 @@ private static boolean trailingSpace(Reader in) throws IOException {
throw new IOException("AVA, extra bytes = "
+ derval.data.available());
}
-
- if (value.tag == DerValue.tag_BMPString) {
- value.validateBMPString();
- }
}
AVA(DerInputStream in) throws IOException {
@@ -720,8 +713,7 @@ public String toRFC2253String(Map oidMap) {
* NOTE: this implementation only emits DirectoryStrings of the
* types returned by isDerString().
*/
- String valStr =
- new String(value.getDataBytes(), getCharset(value, false));
+ String valStr = new String(value.getDataBytes(), UTF_8);
/*
* 2.4 (cont): If the UTF-8 string does not have any of the
@@ -840,8 +832,7 @@ public String toRFC2253CanonicalString() {
* NOTE: this implementation only emits DirectoryStrings of the
* types returned by isDerString().
*/
- String valStr =
- new String(value.getDataBytes(), getCharset(value, true));
+ String valStr = new String(value.getDataBytes(), UTF_8);
/*
* 2.4 (cont): If the UTF-8 string does not have any of the
@@ -936,39 +927,6 @@ private static boolean isDerString(DerValue value, boolean canonical) {
}
}
- /*
- * Returns the charset that should be used to decode each DN string type.
- *
- * This method ensures that multi-byte (UTF8String and BMPString) types
- * are decoded using the correct charset and the String forms represent
- * the correct characters. For 8-bit ASCII-based types (PrintableString
- * and IA5String), we return ISO_8859_1 rather than ASCII, so that the
- * complete range of characters can be represented, as many certificates
- * do not comply with the Internationalized Domain Name ACE format.
- *
- * NOTE: this method only supports DirectoryStrings of the types returned
- * by isDerString().
- */
- private static Charset getCharset(DerValue value, boolean canonical) {
- if (canonical) {
- return switch (value.tag) {
- case DerValue.tag_PrintableString -> ISO_8859_1;
- case DerValue.tag_UTF8String -> UTF_8;
- default -> throw new Error("unexpected tag: " + value.tag);
- };
- }
-
- return switch (value.tag) {
- case DerValue.tag_PrintableString,
- DerValue.tag_T61String,
- DerValue.tag_IA5String,
- DerValue.tag_GeneralString -> ISO_8859_1;
- case DerValue.tag_BMPString -> UTF_16BE;
- case DerValue.tag_UTF8String -> UTF_8;
- default -> throw new Error("unexpected tag: " + value.tag);
- };
- }
-
boolean hasRFC2253Keyword() {
return AVAKeyword.hasKeyword(oid, RFC2253);
}
diff --git a/src/java.base/share/classes/sun/security/x509/AlgorithmId.java b/src/java.base/share/classes/sun/security/x509/AlgorithmId.java
index 8d2c761a011cf..7d525a9add7bf 100644
--- a/src/java.base/share/classes/sun/security/x509/AlgorithmId.java
+++ b/src/java.base/share/classes/sun/security/x509/AlgorithmId.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1996, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1996, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -127,35 +127,10 @@ public AlgorithmId(ObjectIdentifier oid, AlgorithmParameters algparams) {
public AlgorithmId(ObjectIdentifier oid, DerValue params)
throws IOException {
this.algid = oid;
-
- if (params == null) {
- this.encodedParams = null;
- this.algParams = null;
- return;
- }
-
- /*
- * If the parameters field explicitly contains an ASN.1 NULL, treat it as
- * "no parameters" rather than storing a literal NULL encoding.
- *
- * This canonicalization ensures consistent encoding/decoding behavior:
- * - Algorithms that omit parameters and those that encode explicit NULL
- * are treated equivalently (encodedParams == null).
- */
- if (params.tag == DerValue.tag_Null) {
- if (params.length() != 0) {
- throw new IOException("Invalid ASN.1 NULL in AlgorithmId parameters: " +
- "non-zero length");
- }
- // Canonicalize to "no parameters" representation for consistency
- this.encodedParams = null;
- this.algParams = null;
- return;
+ if (params != null) {
+ encodedParams = params.toByteArray();
+ decodeParams();
}
-
- // Normal case: non-NULL params -> store and decode
- this.encodedParams = params.toByteArray();
- decodeParams();
}
protected void decodeParams() throws IOException {
@@ -188,10 +163,38 @@ public void encode(DerOutputStream out) {
bytes.putOID(algid);
if (encodedParams == null) {
- if (OIDS_REQUIRING_NULL.contains(algid.toString())) {
+ // MessageDigest algorithms usually have a NULL parameters even
+ // if most RFCs suggested absent.
+ // RSA key and signature algorithms requires the NULL parameters
+ // to be present, see A.1 and A.2.4 of RFC 8017.
+ if (algid.equals(RSAEncryption_oid)
+ || algid.equals(MD2_oid)
+ || algid.equals(MD5_oid)
+ || algid.equals(SHA_oid)
+ || algid.equals(SHA224_oid)
+ || algid.equals(SHA256_oid)
+ || algid.equals(SHA384_oid)
+ || algid.equals(SHA512_oid)
+ || algid.equals(SHA512_224_oid)
+ || algid.equals(SHA512_256_oid)
+ || algid.equals(SHA3_224_oid)
+ || algid.equals(SHA3_256_oid)
+ || algid.equals(SHA3_384_oid)
+ || algid.equals(SHA3_512_oid)
+ || algid.equals(SHA1withRSA_oid)
+ || algid.equals(SHA224withRSA_oid)
+ || algid.equals(SHA256withRSA_oid)
+ || algid.equals(SHA384withRSA_oid)
+ || algid.equals(SHA512withRSA_oid)
+ || algid.equals(SHA512$224withRSA_oid)
+ || algid.equals(SHA512$256withRSA_oid)
+ || algid.equals(MD2withRSA_oid)
+ || algid.equals(MD5withRSA_oid)
+ || algid.equals(SHA3_224withRSA_oid)
+ || algid.equals(SHA3_256withRSA_oid)
+ || algid.equals(SHA3_384withRSA_oid)
+ || algid.equals(SHA3_512withRSA_oid)) {
bytes.putNull();
- } else {
- // Parameters omitted
}
} else {
bytes.writeBytes(encodedParams);
@@ -643,54 +646,30 @@ private static ConcurrentHashMap collectOIDAliases() {
public static final ObjectIdentifier MGF1_oid =
ObjectIdentifier.of(KnownOIDs.MGF1);
- /* Set of OIDs that must explicitly encode a NULL parameter in AlgorithmIdentifier.
- * References:
- - RFC 8017 (PKCS #1) §A.1, §A.2.4: RSA key and signature algorithms
- - RFC 9879 (HMAC) §4: HMAC algorithm identifiers
- - RFC 9688 (HMAC with SHA-3) §4.3: HMAC-SHA3 algorithms MUST omit parameters
- */
- private static final Set OIDS_REQUIRING_NULL = Set.of(
- // MessageDigest algorithms usually have a NULL parameters even
- // if most RFCs suggested absent.
- KnownOIDs.MD2.value(),
- KnownOIDs.MD5.value(),
- KnownOIDs.SHA_1.value(),
- KnownOIDs.SHA_224.value(),
- KnownOIDs.SHA_256.value(),
- KnownOIDs.SHA_384.value(),
- KnownOIDs.SHA_512.value(),
- KnownOIDs.SHA_512$224.value(),
- KnownOIDs.SHA_512$256.value(),
- KnownOIDs.SHA3_224.value(),
- KnownOIDs.SHA3_256.value(),
- KnownOIDs.SHA3_384.value(),
- KnownOIDs.SHA3_512.value(),
-
- //--- RSA key and signature algorithms (RFC 8017 §A.1, §A.2.4)
- KnownOIDs.RSA.value(),
- KnownOIDs.SHA1withRSA.value(),
- KnownOIDs.SHA224withRSA.value(),
- KnownOIDs.SHA256withRSA.value(),
- KnownOIDs.SHA384withRSA.value(),
- KnownOIDs.SHA512withRSA.value(),
- KnownOIDs.SHA512$224withRSA.value(),
- KnownOIDs.SHA512$256withRSA.value(),
- KnownOIDs.MD2withRSA.value(),
- KnownOIDs.MD5withRSA.value(),
- KnownOIDs.SHA3_224withRSA.value(),
- KnownOIDs.SHA3_256withRSA.value(),
- KnownOIDs.SHA3_384withRSA.value(),
- KnownOIDs.SHA3_512withRSA.value(),
-
- // HMACs per RFC 9879 (Section 4): these require explicit NULL parameters
- // Note: HMAC-SHA3 algorithms (RFC 9688 §4.3) MUST omit parameters,
- // so they are intentionally excluded from this list.
- KnownOIDs.HmacSHA1.value(),
- KnownOIDs.HmacSHA224.value(),
- KnownOIDs.HmacSHA256.value(),
- KnownOIDs.HmacSHA384.value(),
- KnownOIDs.HmacSHA512.value(),
- KnownOIDs.HmacSHA512$224.value(),
- KnownOIDs.HmacSHA512$256.value()
- );
+ public static final ObjectIdentifier SHA1withRSA_oid =
+ ObjectIdentifier.of(KnownOIDs.SHA1withRSA);
+ public static final ObjectIdentifier SHA224withRSA_oid =
+ ObjectIdentifier.of(KnownOIDs.SHA224withRSA);
+ public static final ObjectIdentifier SHA256withRSA_oid =
+ ObjectIdentifier.of(KnownOIDs.SHA256withRSA);
+ public static final ObjectIdentifier SHA384withRSA_oid =
+ ObjectIdentifier.of(KnownOIDs.SHA384withRSA);
+ public static final ObjectIdentifier SHA512withRSA_oid =
+ ObjectIdentifier.of(KnownOIDs.SHA512withRSA);
+ public static final ObjectIdentifier SHA512$224withRSA_oid =
+ ObjectIdentifier.of(KnownOIDs.SHA512$224withRSA);
+ public static final ObjectIdentifier SHA512$256withRSA_oid =
+ ObjectIdentifier.of(KnownOIDs.SHA512$256withRSA);
+ public static final ObjectIdentifier MD2withRSA_oid =
+ ObjectIdentifier.of(KnownOIDs.MD2withRSA);
+ public static final ObjectIdentifier MD5withRSA_oid =
+ ObjectIdentifier.of(KnownOIDs.MD5withRSA);
+ public static final ObjectIdentifier SHA3_224withRSA_oid =
+ ObjectIdentifier.of(KnownOIDs.SHA3_224withRSA);
+ public static final ObjectIdentifier SHA3_256withRSA_oid =
+ ObjectIdentifier.of(KnownOIDs.SHA3_256withRSA);
+ public static final ObjectIdentifier SHA3_384withRSA_oid =
+ ObjectIdentifier.of(KnownOIDs.SHA3_384withRSA);
+ public static final ObjectIdentifier SHA3_512withRSA_oid =
+ ObjectIdentifier.of(KnownOIDs.SHA3_512withRSA);
}
diff --git a/src/java.base/share/legal/aes.md b/src/java.base/share/legal/aes.md
new file mode 100644
index 0000000000000..6d0ee2e2bb4a5
--- /dev/null
+++ b/src/java.base/share/legal/aes.md
@@ -0,0 +1,36 @@
+## Cryptix AES v3.2.0
+
+### Cryptix General License
+
+
+Cryptix General License
+
+Copyright (c) 1995-2005 The Cryptix Foundation Limited.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ 1. Redistributions of source code must retain the copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE CRYPTIX FOUNDATION LIMITED AND
+CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
+INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE CRYPTIX FOUNDATION LIMITED OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
diff --git a/src/java.base/share/native/libverify/check_code.c b/src/java.base/share/native/libverify/check_code.c
index 32df102dcb3a9..7266ac8f93c26 100644
--- a/src/java.base/share/native/libverify/check_code.c
+++ b/src/java.base/share/native/libverify/check_code.c
@@ -395,8 +395,7 @@ static jboolean is_superclass(context_type *, fullinfo_type);
static void initialize_exception_table(context_type *);
static int instruction_length(unsigned char *iptr, unsigned char *end);
-static jboolean isLegalOffset(context_type *, int bci, int offset);
-static jboolean isLegalTarget(context_type *, int target);
+static jboolean isLegalTarget(context_type *, int offset);
static void verify_constant_pool_type(context_type *, int, unsigned);
static void initialize_dataflow(context_type *);
@@ -1155,9 +1154,9 @@ verify_opcode_operands(context_type *context, unsigned int inumber, int offset)
case JVM_OPC_goto: {
/* Set the ->operand to be the instruction number of the target. */
int jump = (((signed char)(code[offset+1])) << 8) + code[offset+2];
- if (!isLegalOffset(context, offset, jump))
- CCerror(context, "Illegal target of jump or branch");
int target = offset + jump;
+ if (!isLegalTarget(context, target))
+ CCerror(context, "Illegal target of jump or branch");
this_idata->operand.i = code_data[target];
break;
}
@@ -1171,9 +1170,9 @@ verify_opcode_operands(context_type *context, unsigned int inumber, int offset)
int jump = (((signed char)(code[offset+1])) << 24) +
(code[offset+2] << 16) + (code[offset+3] << 8) +
(code[offset + 4]);
- if (!isLegalOffset(context, offset, jump))
- CCerror(context, "Illegal target of jump or branch");
int target = offset + jump;
+ if (!isLegalTarget(context, target))
+ CCerror(context, "Illegal target of jump or branch");
this_idata->operand.i = code_data[target];
break;
}
@@ -1212,16 +1211,13 @@ verify_opcode_operands(context_type *context, unsigned int inumber, int offset)
}
}
saved_operand = NEW(int, keys + 2);
- int jump = _ck_ntohl(lpc[0]);
- if (!isLegalOffset(context, offset, jump))
+ if (!isLegalTarget(context, offset + _ck_ntohl(lpc[0])))
CCerror(context, "Illegal default target in switch");
- int target = offset + jump;
- saved_operand[keys + 1] = code_data[target];
+ saved_operand[keys + 1] = code_data[offset + _ck_ntohl(lpc[0])];
for (k = keys, lptr = &lpc[3]; --k >= 0; lptr += delta) {
- jump = _ck_ntohl(lptr[0]);
- if (!isLegalOffset(context, offset, jump))
+ int target = offset + _ck_ntohl(lptr[0]);
+ if (!isLegalTarget(context, target))
CCerror(context, "Illegal branch in tableswitch");
- target = offset + jump;
saved_operand[k + 1] = code_data[target];
}
saved_operand[0] = keys + 1; /* number of successors */
@@ -1750,24 +1746,11 @@ static int instruction_length(unsigned char *iptr, unsigned char *end)
/* Given the target of a branch, make sure that it's a legal target. */
static jboolean
-isLegalTarget(context_type *context, int target)
-{
- int code_length = context->code_length;
- int *code_data = context->code_data;
- return (target >= 0 && target < code_length && code_data[target] >= 0);
-}
-
-/* Given a bci and offset, make sure the offset is valid and the target is legal */
-static jboolean
-isLegalOffset(context_type *context, int bci, int offset)
+isLegalTarget(context_type *context, int offset)
{
int code_length = context->code_length;
int *code_data = context->code_data;
- int max_offset = 65535; // JVMS 4.11
- int min_offset = -65535;
- if (offset < min_offset || offset > max_offset) return JNI_FALSE;
- int target = bci + offset;
- return (target >= 0 && target < code_length && code_data[target] >= 0);
+ return (offset >= 0 && offset < code_length && code_data[offset] >= 0);
}
diff --git a/src/java.base/windows/classes/java/lang/ProcessImpl.java b/src/java.base/windows/classes/java/lang/ProcessImpl.java
index 78180cce678a7..7f7c1e75013c0 100644
--- a/src/java.base/windows/classes/java/lang/ProcessImpl.java
+++ b/src/java.base/windows/classes/java/lang/ProcessImpl.java
@@ -199,6 +199,7 @@ private static String[] getTokensFromCommand(String command) {
}
private static final int VERIFICATION_CMD_BAT = 0;
+ private static final int VERIFICATION_WIN32 = 1;
private static final int VERIFICATION_WIN32_SAFE = 2; // inside quotes not allowed
private static final int VERIFICATION_LEGACY = 3;
// See Command shell overview for documentation of special characters.
@@ -383,6 +384,12 @@ private boolean isExe(String executablePath) {
return (upName.endsWith(".EXE") || upName.indexOf('.') < 0);
}
+ // Old version that can be bypassed
+ private boolean isShellFile(String executablePath) {
+ String upPath = executablePath.toUpperCase(Locale.ROOT);
+ return (upPath.endsWith(".CMD") || upPath.endsWith(".BAT"));
+ }
+
private String quoteString(String arg) {
StringBuilder argbuf = new StringBuilder(arg.length() + 2);
return argbuf.append('"').append(arg).append('"').toString();
@@ -465,10 +472,12 @@ private ProcessImpl(String cmd[],
// Quotation protects from interpretation of the [path] argument as
// start of longer path with spaces. Quotation has no influence to
// [.exe] extension heuristic.
- boolean isShell = !isExe(executablePath);
+ boolean isShell = allowAmbiguousCommands ? isShellFile(executablePath)
+ : !isExe(executablePath);
cmdstr = createCommandLine(
// We need the extended verification procedures
- isShell ? VERIFICATION_CMD_BAT : VERIFICATION_WIN32_SAFE,
+ isShell ? VERIFICATION_CMD_BAT
+ : (allowAmbiguousCommands ? VERIFICATION_WIN32 : VERIFICATION_WIN32_SAFE),
quoteString(executablePath),
cmd);
}
diff --git a/src/java.desktop/share/classes/java/awt/GridBagConstraints.java b/src/java.desktop/share/classes/java/awt/GridBagConstraints.java
index 0008f5ac7807d..30f8bc4bf50a1 100644
--- a/src/java.desktop/share/classes/java/awt/GridBagConstraints.java
+++ b/src/java.desktop/share/classes/java/awt/GridBagConstraints.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1995, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1995, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -575,7 +575,7 @@ public class GridBagConstraints implements Cloneable, java.io.Serializable {
private static final long serialVersionUID = -1000070633030801713L;
/**
- * Creates a {@code GridBagConstraints} object with
+ * Creates a {@code GridBagConstraint} object with
* all of its fields set to their default value.
*/
public GridBagConstraints () {
diff --git a/src/java.desktop/share/classes/java/awt/font/NumericShaper.java b/src/java.desktop/share/classes/java/awt/font/NumericShaper.java
index 99b59cc2e0ec3..ae50703611264 100644
--- a/src/java.desktop/share/classes/java/awt/font/NumericShaper.java
+++ b/src/java.desktop/share/classes/java/awt/font/NumericShaper.java
@@ -346,19 +346,6 @@ private static Range indexToRange(int index) {
return index < NUM_KEYS ? Range.values()[index] : null;
}
- private static int toRangeHash(Set ranges) {
- int m = 0;
- for (Range range : ranges) {
- int index = range.ordinal();
- if (index < NUM_KEYS) {
- m |= 1 << index;
- } else {
- m |= (1 << NUM_KEYS) + index;
- }
- }
- return m;
- }
-
private static int toRangeMask(Set ranges) {
int m = 0;
for (Range range : ranges) {
@@ -589,7 +576,7 @@ private boolean inRange(int c) {
// and a linear probe is ok.
private static int ctCache = 0;
- private static final int ctCacheLimit = contexts.length - 2;
+ private static int ctCacheLimit = contexts.length - 2;
// warning, synchronize access to this as it modifies state
private static int getContextKey(char c) {
@@ -1523,9 +1510,6 @@ public static NumericShaper getContextualShaper(Set ranges,
private NumericShaper(int key, int mask) {
this.key = key;
this.mask = mask;
- if (((this.mask & ARABIC) != 0) && ((this.mask & EASTERN_ARABIC) != 0)) {
- this.mask &= ~ARABIC;
- }
}
private NumericShaper(Range defaultContext, Set ranges) {
@@ -1811,7 +1795,15 @@ private void shapeContextually(char[] text, int start, int count, Range ctxKey)
* @see java.lang.Object#hashCode
*/
public int hashCode() {
- return (rangeSet != null) ? Range.toRangeHash(rangeSet) : (mask & ~CONTEXTUAL_MASK);
+ int hash = mask;
+ if (rangeSet != null) {
+ // Use the CONTEXTUAL_MASK bit only for the enum-based
+ // NumericShaper. A deserialized NumericShaper might have
+ // bit masks.
+ hash &= CONTEXTUAL_MASK;
+ hash ^= rangeSet.hashCode();
+ }
+ return hash;
}
/**
diff --git a/src/java.desktop/share/classes/java/awt/image/BandedSampleModel.java b/src/java.desktop/share/classes/java/awt/image/BandedSampleModel.java
index bad9abc61308a..bd955e35870ae 100644
--- a/src/java.desktop/share/classes/java/awt/image/BandedSampleModel.java
+++ b/src/java.desktop/share/classes/java/awt/image/BandedSampleModel.java
@@ -141,9 +141,12 @@ public BandedSampleModel(int dataType,
* @param h the height of the resulting {@code BandedSampleModel}
* @return a new {@code BandedSampleModel} with the specified
* width and height.
- * @throws IllegalArgumentException if the product of {@code w}
- * and {@code h} is greater than {@code Integer.MAX_VALUE}
- * or {@code w} or {@code h} is not greater than 0.
+ * @throws IllegalArgumentException if {@code w} or
+ * {@code h} equals either
+ * {@code Integer.MAX_VALUE} or
+ * {@code Integer.MIN_VALUE}
+ * @throws IllegalArgumentException if {@code dataType} is not
+ * one of the supported data types
*/
public SampleModel createCompatibleSampleModel(int w, int h) {
int[] bandOffs;
@@ -169,8 +172,8 @@ public SampleModel createCompatibleSampleModel(int w, int h) {
* of the original BandedSampleModel/DataBuffer combination.
* @throws RasterFormatException if the number of bands is greater than
* the number of banks in this sample model.
- * @throws IllegalArgumentException if the number of bands is not greater than 0
- * @throws ArrayIndexOutOfBoundsException if any of the bank indices is out of bounds
+ * @throws IllegalArgumentException if {@code dataType} is not
+ * one of the supported data types
*/
public SampleModel createSubsetSampleModel(int[] bands) {
if (bands.length > bankIndices.length)
diff --git a/src/java.desktop/share/classes/java/awt/image/renderable/ContextualRenderedImageFactory.java b/src/java.desktop/share/classes/java/awt/image/renderable/ContextualRenderedImageFactory.java
index 8df3895a0216a..94a2aa14bf5b2 100644
--- a/src/java.desktop/share/classes/java/awt/image/renderable/ContextualRenderedImageFactory.java
+++ b/src/java.desktop/share/classes/java/awt/image/renderable/ContextualRenderedImageFactory.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2008, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -41,7 +41,7 @@
* ContextualRenderedImageFactory provides an interface for the
* functionality that may differ between instances of
* RenderableImageOp. Thus different operations on RenderableImages
- * may be performed by a single class such as RenderableImageOp through
+ * may be performed by a single class such as RenderedImageOp through
* the use of multiple instances of ContextualRenderedImageFactory.
* The name ContextualRenderedImageFactory is commonly shortened to
* "CRIF."
diff --git a/src/java.desktop/share/classes/java/awt/image/renderable/RenderableImageOp.java b/src/java.desktop/share/classes/java/awt/image/renderable/RenderableImageOp.java
index f2f0d458ba1c5..19bf3b3932334 100644
--- a/src/java.desktop/share/classes/java/awt/image/renderable/RenderableImageOp.java
+++ b/src/java.desktop/share/classes/java/awt/image/renderable/RenderableImageOp.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -58,7 +58,7 @@ public class RenderableImageOp implements RenderableImage {
/**
- * Constructs a {@code RenderableImageOp} given a
+ * Constructs a RenderedImageOp given a
* ContextualRenderedImageFactory object, and
* a ParameterBlock containing RenderableImage sources and other
* parameters. Any RenderedImage sources referenced by the
diff --git a/src/java.desktop/share/classes/javax/swing/ScrollPaneLayout.java b/src/java.desktop/share/classes/javax/swing/ScrollPaneLayout.java
index 50cb25fe4f828..0b8d8576f1469 100644
--- a/src/java.desktop/share/classes/javax/swing/ScrollPaneLayout.java
+++ b/src/java.desktop/share/classes/javax/swing/ScrollPaneLayout.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -38,8 +38,8 @@
/**
- * The layout manager used by {@code JScrollPane}.
- * {@code ScrollPaneLayout} is
+ * The layout manager used by JScrollPane.
+ * JScrollPaneLayout is
* responsible for nine components: a viewport, two scrollbars,
* a row header, a column header, and four "corner" components.
*
diff --git a/src/java.desktop/share/classes/sun/java2d/marlin/Curve.java b/src/java.desktop/share/classes/sun/java2d/marlin/Curve.java
index 9d2c8dc2a72aa..2ce0cd4672cd3 100644
--- a/src/java.desktop/share/classes/sun/java2d/marlin/Curve.java
+++ b/src/java.desktop/share/classes/sun/java2d/marlin/Curve.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -144,9 +144,7 @@ int yPoints(final double[] ts, final int off, final double y)
// finds points where the first and second derivative are
// perpendicular. This happens when g(t) = f'(t)*f''(t) == 0 (where
// * is a dot product). Unfortunately, we have to solve a cubic.
- private int perpendiculardfddf(final double[] pts, final int off,
- final double A, final double B)
- {
+ private int perpendiculardfddf(final double[] pts, final int off) {
assert pts.length >= off + 4;
// these are the coefficients of some multiple of g(t) (not g(t),
@@ -157,7 +155,7 @@ private int perpendiculardfddf(final double[] pts, final int off,
final double c = 2.0d * (dax * cx + day * cy) + dbx * dbx + dby * dby;
final double d = dbx * cx + dby * cy;
- return Helpers.cubicRootsInAB(a, b, c, d, pts, off, A, B);
+ return Helpers.cubicRootsInAB(a, b, c, d, pts, off, 0.0d, 1.0d);
}
// Tries to find the roots of the function ROC(t)-w in [0, 1). It uses
@@ -173,43 +171,35 @@ private int perpendiculardfddf(final double[] pts, final int off,
// at most 4 sub-intervals of (0,1). ROC has asymptotes at inflection
// points, so roc-w can have at least 6 roots. This shouldn't be a
// problem for what we're trying to do (draw a nice looking curve).
- int rootsOfROCMinusW(final double[] roots, final int off, final double w2,
- final double A, final double B)
- {
+ int rootsOfROCMinusW(final double[] roots, final int off, final double w2, final double err) {
// no OOB exception, because by now off<=6, and roots.length >= 10
assert off <= 6 && roots.length >= 10;
int ret = off;
- final int end = off + perpendiculardfddf(roots, off, A, B);
- Helpers.isort(roots, off, end);
+ final int end = off + perpendiculardfddf(roots, off);
roots[end] = 1.0d; // always check interval end points
- double t0 = 0.0d;
- double ft0 = eliminateInf(ROCsq(t0) - w2);
- double t1, ft1;
+ double t0 = 0.0d, ft0 = ROCsq(t0) - w2;
for (int i = off; i <= end; i++) {
- t1 = roots[i];
- ft1 = eliminateInf(ROCsq(t1) - w2);
+ double t1 = roots[i], ft1 = ROCsq(t1) - w2;
if (ft0 == 0.0d) {
roots[ret++] = t0;
} else if (ft1 * ft0 < 0.0d) { // have opposite signs
// (ROC(t)^2 == w^2) == (ROC(t) == w) is true because
// ROC(t) >= 0 for all t.
- roots[ret++] = falsePositionROCsqMinusX(t0, t1, ft0, ft1, w2, A); // A = err
+ roots[ret++] = falsePositionROCsqMinusX(t0, t1, w2, err);
}
t0 = t1;
ft0 = ft1;
}
+
return ret - off;
}
- private final static double MAX_ROC_SQ = 1e20;
-
- private static double eliminateInf(final double x2) {
- // limit the value of x to avoid numerical problems (smaller step):
- // must handle NaN and +Infinity:
- return (x2 <= MAX_ROC_SQ) ? x2 : MAX_ROC_SQ;
+ private static double eliminateInf(final double x) {
+ return (x == Double.POSITIVE_INFINITY ? Double.MAX_VALUE :
+ (x == Double.NEGATIVE_INFINITY ? Double.MIN_VALUE : x));
}
// A slight modification of the false position algorithm on wikipedia.
@@ -220,18 +210,17 @@ private static double eliminateInf(final double x2) {
// and turn out. Same goes for the newton's method
// algorithm in Helpers.java
private double falsePositionROCsqMinusX(final double t0, final double t1,
- final double ft0, final double ft1,
final double w2, final double err)
{
final int iterLimit = 100;
int side = 0;
- double s = t0, fs = eliminateInf(ft0);
- double t = t1, ft = eliminateInf(ft1);
+ double t = t1, ft = eliminateInf(ROCsq(t) - w2);
+ double s = t0, fs = eliminateInf(ROCsq(s) - w2);
double r = s, fr;
- for (int i = 0; i < iterLimit && Math.abs(t - s) > err; i++) {
+ for (int i = 0; i < iterLimit && Math.abs(t - s) > err * Math.abs(t + s); i++) {
r = (fs * t - ft * s) / (fs - ft);
- fr = eliminateInf(ROCsq(r) - w2);
+ fr = ROCsq(r) - w2;
if (sameSign(fr, ft)) {
ft = fr; t = r;
if (side < 0) {
@@ -252,7 +241,7 @@ private double falsePositionROCsqMinusX(final double t0, final double t1,
break;
}
}
- return (Math.abs(ft) <= Math.abs(fs)) ? t : s;
+ return r;
}
private static boolean sameSign(final double x, final double y) {
@@ -267,9 +256,9 @@ private double ROCsq(final double t) {
final double dy = t * (t * day + dby) + cy;
final double ddx = 2.0d * dax * t + dbx;
final double ddy = 2.0d * day * t + dby;
- final double dx2dy2 = dx * dx + dy * dy; // positive
- final double dxddyddxdy = dx * ddy - dy * ddx;
- // may return +Infinity if dxddyddxdy = 0 or NaN if 0/0:
- return (dx2dy2 * dx2dy2 * dx2dy2) / (dxddyddxdy * dxddyddxdy); // both positive
+ final double dx2dy2 = dx * dx + dy * dy;
+ final double ddx2ddy2 = ddx * ddx + ddy * ddy;
+ final double ddxdxddydy = ddx * dx + ddy * dy;
+ return dx2dy2 * ((dx2dy2 * dx2dy2) / (dx2dy2 * ddx2ddy2 - ddxdxddydy * ddxdxddydy));
}
}
diff --git a/src/java.desktop/share/classes/sun/java2d/marlin/DMarlinRenderingEngine.java b/src/java.desktop/share/classes/sun/java2d/marlin/DMarlinRenderingEngine.java
index f829872a8a843..66eb9334e8630 100644
--- a/src/java.desktop/share/classes/sun/java2d/marlin/DMarlinRenderingEngine.java
+++ b/src/java.desktop/share/classes/sun/java2d/marlin/DMarlinRenderingEngine.java
@@ -564,7 +564,7 @@ void strokeTo(final RendererContext rdrCtx,
}
private static boolean nearZero(final double num) {
- return Math.abs(num) < 2.0d * Helpers.ulp(num);
+ return Math.abs(num) < 2.0d * Math.ulp(num);
}
abstract static class NormalizingPathIterator implements PathIterator {
diff --git a/src/java.desktop/share/classes/sun/java2d/marlin/Helpers.java b/src/java.desktop/share/classes/sun/java2d/marlin/Helpers.java
index 926533cdb2bd6..0aed05ab506fb 100644
--- a/src/java.desktop/share/classes/sun/java2d/marlin/Helpers.java
+++ b/src/java.desktop/share/classes/sun/java2d/marlin/Helpers.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,19 +31,12 @@
final class Helpers implements MarlinConst {
- private final static double T_ERR = 1e-4;
- private final static double T_A = T_ERR;
- private final static double T_B = 1.0 - T_ERR;
-
private static final double EPS = 1e-9d;
private Helpers() {
throw new Error("This is a non instantiable class");
}
- /** use lower precision like former Pisces and Marlin (float-precision) */
- static double ulp(final double value) { return Math.ulp((float)value); }
-
static boolean within(final double x, final double y) {
return within(x, y, EPS);
}
@@ -329,10 +322,10 @@ static int findSubdivPoints(final Curve c, final double[] pts,
// now we must subdivide at points where one of the offset curves will have
// a cusp. This happens at ts where the radius of curvature is equal to w.
- ret += c.rootsOfROCMinusW(ts, ret, w2, T_A, T_B);
+ ret += c.rootsOfROCMinusW(ts, ret, w2, 0.0001d);
- ret = filterOutNotInAB(ts, 0, ret, T_A, T_B);
- isort(ts, 0, ret);
+ ret = filterOutNotInAB(ts, 0, ret, 0.0001d, 0.9999d);
+ isort(ts, ret);
return ret;
}
@@ -361,7 +354,7 @@ static int findClipPoints(final Curve curve, final double[] pts,
if ((outCodeOR & OUTCODE_BOTTOM) != 0) {
ret += curve.yPoints(ts, ret, clipRect[1]);
}
- isort(ts, 0, ret);
+ isort(ts, ret);
return ret;
}
@@ -381,11 +374,11 @@ static void subdivide(final double[] src,
}
}
- static void isort(final double[] a, final int off, final int len) {
- for (int i = off + 1, j; i < len; i++) {
+ static void isort(final double[] a, final int len) {
+ for (int i = 1, j; i < len; i++) {
final double ai = a[i];
j = i - 1;
- for (; j >= off && a[j] > ai; j--) {
+ for (; j >= 0 && a[j] > ai; j--) {
a[j + 1] = a[j];
}
a[j + 1] = ai;
diff --git a/src/java.desktop/share/classes/sun/java2d/marlin/Stroker.java b/src/java.desktop/share/classes/sun/java2d/marlin/Stroker.java
index 1c257bc13d972..59f93ed7d6db7 100644
--- a/src/java.desktop/share/classes/sun/java2d/marlin/Stroker.java
+++ b/src/java.desktop/share/classes/sun/java2d/marlin/Stroker.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -886,8 +886,8 @@ private int computeOffsetCubic(final double[] pts, final int off,
// if p1 == p2 && p3 == p4: draw line from p1->p4, unless p1 == p4,
// in which case ignore if p1 == p2
- final boolean p1eqp2 = Helpers.withinD(dx1, dy1, 6.0d * Helpers.ulp(y2));
- final boolean p3eqp4 = Helpers.withinD(dx4, dy4, 6.0d * Helpers.ulp(y4));
+ final boolean p1eqp2 = Helpers.withinD(dx1, dy1, 6.0d * Math.ulp(y2));
+ final boolean p3eqp4 = Helpers.withinD(dx4, dy4, 6.0d * Math.ulp(y4));
if (p1eqp2 && p3eqp4) {
return getLineOffsets(x1, y1, x4, y4, leftOff, rightOff);
@@ -905,7 +905,7 @@ private int computeOffsetCubic(final double[] pts, final int off,
final double l1sq = dx1 * dx1 + dy1 * dy1;
final double l4sq = dx4 * dx4 + dy4 * dy4;
- if (Helpers.within(dotsq, l1sq * l4sq, 4.0d * Helpers.ulp(dotsq))) {
+ if (Helpers.within(dotsq, l1sq * l4sq, 4.0d * Math.ulp(dotsq))) {
return getLineOffsets(x1, y1, x4, y4, leftOff, rightOff);
}
@@ -1078,8 +1078,8 @@ private int computeOffsetQuad(final double[] pts, final int off,
// equal if they're very close to each other.
// if p1 == p2 or p2 == p3: draw line from p1->p3
- final boolean p1eqp2 = Helpers.withinD(dx12, dy12, 6.0d * Helpers.ulp(y2));
- final boolean p2eqp3 = Helpers.withinD(dx23, dy23, 6.0d * Helpers.ulp(y3));
+ final boolean p1eqp2 = Helpers.withinD(dx12, dy12, 6.0d * Math.ulp(y2));
+ final boolean p2eqp3 = Helpers.withinD(dx23, dy23, 6.0d * Math.ulp(y3));
if (p1eqp2 || p2eqp3) {
return getLineOffsets(x1, y1, x3, y3, leftOff, rightOff);
@@ -1091,7 +1091,7 @@ private int computeOffsetQuad(final double[] pts, final int off,
final double l1sq = dx12 * dx12 + dy12 * dy12;
final double l3sq = dx23 * dx23 + dy23 * dy23;
- if (Helpers.within(dotsq, l1sq * l3sq, 4.0d * Helpers.ulp(dotsq))) {
+ if (Helpers.within(dotsq, l1sq * l3sq, 4.0d * Math.ulp(dotsq))) {
return getLineOffsets(x1, y1, x3, y3, leftOff, rightOff);
}
diff --git a/src/java.desktop/share/conf/psfontj2d.properties b/src/java.desktop/share/conf/psfontj2d.properties
index 8030a82bc4f9f..9efe88644283e 100644
--- a/src/java.desktop/share/conf/psfontj2d.properties
+++ b/src/java.desktop/share/conf/psfontj2d.properties
@@ -59,6 +59,7 @@ avantgarde_book_oblique=avantgarde_book_oblique
avantgarde_demi_oblique=avantgarde_demi_oblique
#
itcavantgarde=avantgarde_book
+itcavantgarde=avantgarde_book
itcavantgarde_demi=avantgarde_demi
itcavantgarde_oblique=avantgarde_book_oblique
itcavantgarde_demi_oblique=avantgarde_demi_oblique
diff --git a/src/java.desktop/share/native/libsplashscreen/splashscreen_gif.c b/src/java.desktop/share/native/libsplashscreen/splashscreen_gif.c
index 4f2cfca8dd093..cbdad61f78e83 100644
--- a/src/java.desktop/share/native/libsplashscreen/splashscreen_gif.c
+++ b/src/java.desktop/share/native/libsplashscreen/splashscreen_gif.c
@@ -279,9 +279,7 @@ SplashDecodeGif(Splash * splash, GifFileType * gif)
ImageRect dstRect;
rgbquad_t fillColor = 0; // 0 is transparent
- if (colorMap &&
- colorMap->Colors &&
- transparentColor < 0) {
+ if (transparentColor < 0) {
fillColor= MAKE_QUAD_GIF(
colorMap->Colors[gif->SBackGroundColor], 0xff);
}
diff --git a/src/java.desktop/unix/native/libawt_xawt/awt/gtk3_interface.c b/src/java.desktop/unix/native/libawt_xawt/awt/gtk3_interface.c
index e5b2dfa6db98d..916880873c63c 100644
--- a/src/java.desktop/unix/native/libawt_xawt/awt/gtk3_interface.c
+++ b/src/java.desktop/unix/native/libawt_xawt/awt/gtk3_interface.c
@@ -276,7 +276,10 @@ GtkApi* gtk3_load(JNIEnv *env, const char* lib_name)
fp_gtk_check_version = dl_symbol("gtk_check_version");
/* GLib */
- fp_glib_check_version = dl_symbol("glib_check_version");
+ fp_glib_check_version = dlsym(gtk3_libhandle, "glib_check_version");
+ if (!fp_glib_check_version) {
+ dlerror();
+ }
fp_g_free = dl_symbol("g_free");
fp_g_object_unref = dl_symbol("g_object_unref");
diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsCheckBoxMenuItemUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsCheckBoxMenuItemUI.java
index 3a2578b3e0bc4..f28ae2a93264b 100644
--- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsCheckBoxMenuItemUI.java
+++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsCheckBoxMenuItemUI.java
@@ -76,20 +76,19 @@ protected void paintBackground(Graphics g, JMenuItem menuItem,
super.paintBackground(g, menuItem, bgColor);
}
- @Override
+ /**
+ * Paint MenuItem.
+ */
protected void paintMenuItem(Graphics g, JComponent c,
Icon checkIcon, Icon arrowIcon,
Color background, Color foreground,
int defaultTextIconGap) {
if (WindowsMenuItemUI.isVistaPainting()) {
- WindowsMenuItemUI.paintMenuItem(accessor, g, c,
- checkIcon, arrowIcon,
- background, foreground,
- disabledForeground,
- acceleratorSelectionForeground,
- acceleratorForeground,
- defaultTextIconGap,
- menuItem, getPropertyPrefix());
+ WindowsMenuItemUI.paintMenuItem(accessor, g, c, checkIcon,
+ arrowIcon, background, foreground,
+ disabledForeground, acceleratorSelectionForeground,
+ acceleratorForeground, defaultTextIconGap,
+ menuItem, getPropertyPrefix());
return;
}
super.paintMenuItem(g, c, checkIcon, arrowIcon, background,
diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsMenuItemUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsMenuItemUI.java
index 041bdb5adaaf5..a9b09085ad1a6 100644
--- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsMenuItemUI.java
+++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsMenuItemUI.java
@@ -29,11 +29,16 @@
import java.awt.Font;
import java.awt.FontMetrics;
import java.awt.Graphics;
+import java.awt.Insets;
import java.awt.Rectangle;
import java.beans.PropertyChangeEvent;
import java.beans.PropertyChangeListener;
+import java.util.Enumeration;
+import javax.swing.AbstractButton;
+import javax.swing.ButtonGroup;
import javax.swing.ButtonModel;
+import javax.swing.DefaultButtonModel;
import javax.swing.Icon;
import javax.swing.JComponent;
import javax.swing.JMenu;
@@ -127,6 +132,27 @@ public void propertyChange(PropertyChangeEvent e) {
menuItem.addPropertyChangeListener(changeListener);
}
+ protected void installDefaults() {
+ super.installDefaults();
+ String prefix = getPropertyPrefix();
+
+ if (acceleratorSelectionForeground == null ||
+ acceleratorSelectionForeground instanceof UIResource) {
+ acceleratorSelectionForeground =
+ UIManager.getColor(prefix + ".acceleratorSelectionForeground");
+ }
+ if (acceleratorForeground == null ||
+ acceleratorForeground instanceof UIResource) {
+ acceleratorForeground =
+ UIManager.getColor(prefix + ".acceleratorForeground");
+ }
+ if (disabledForeground == null ||
+ disabledForeground instanceof UIResource) {
+ disabledForeground =
+ UIManager.getColor(prefix + ".disabledForeground");
+ }
+ }
+
/**
* {@inheritDoc}
*/
@@ -139,19 +165,15 @@ protected void uninstallListeners() {
changeListener = null;
}
- @Override
protected void paintMenuItem(Graphics g, JComponent c,
Icon checkIcon, Icon arrowIcon,
Color background, Color foreground,
int defaultTextIconGap) {
if (WindowsMenuItemUI.isVistaPainting()) {
- WindowsMenuItemUI.paintMenuItem(accessor, g, c,
- checkIcon, arrowIcon,
- background, foreground,
- disabledForeground,
- acceleratorSelectionForeground,
- acceleratorForeground,
- defaultTextIconGap, menuItem,
+ WindowsMenuItemUI.paintMenuItem(accessor, g, c, checkIcon,
+ arrowIcon, background, foreground,
+ disabledForeground, acceleratorSelectionForeground,
+ acceleratorForeground, defaultTextIconGap, menuItem,
getPropertyPrefix());
return;
}
@@ -160,16 +182,12 @@ protected void paintMenuItem(Graphics g, JComponent c,
}
static void paintMenuItem(WindowsMenuItemUIAccessor accessor, Graphics g,
- JComponent c,
- Icon checkIcon, Icon arrowIcon,
+ JComponent c, Icon checkIcon, Icon arrowIcon,
Color background, Color foreground,
Color disabledForeground,
Color acceleratorSelectionForeground,
Color acceleratorForeground,
- int defaultTextIconGap, JMenuItem menuItem,
- String prefix) {
- assert c == menuItem : "menuItem passed as 'c' must be the same";
-
+ int defaultTextIconGap, JMenuItem menuItem, String prefix) {
// Save original graphics font and color
Font holdf = g.getFont();
Color holdc = g.getColor();
diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsMenuUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsMenuUI.java
index 130b09227cc20..754b394d4ac11 100644
--- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsMenuUI.java
+++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsMenuUI.java
@@ -131,20 +131,18 @@ protected void installDefaults() {
hotTrackingOn = (obj instanceof Boolean) ? (Boolean)obj : true;
}
- @Override
+ /**
+ * Paint MenuItem.
+ */
protected void paintMenuItem(Graphics g, JComponent c,
- Icon checkIcon, Icon arrowIcon,
- Color background, Color foreground,
- int defaultTextIconGap) {
- assert c == menuItem : "menuItem passed as 'c' must be the same";
+ Icon checkIcon, Icon arrowIcon,
+ Color background, Color foreground,
+ int defaultTextIconGap) {
if (WindowsMenuItemUI.isVistaPainting()) {
- WindowsMenuItemUI.paintMenuItem(accessor, g, c,
- checkIcon, arrowIcon,
+ WindowsMenuItemUI.paintMenuItem(accessor, g, c, checkIcon, arrowIcon,
background, foreground,
- disabledForeground,
- acceleratorSelectionForeground,
- acceleratorForeground,
- defaultTextIconGap, menuItem,
+ disabledForeground, acceleratorSelectionForeground,
+ acceleratorForeground, defaultTextIconGap, menuItem,
getPropertyPrefix());
return;
}
diff --git a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsRadioButtonMenuItemUI.java b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsRadioButtonMenuItemUI.java
index 78768c29ab3b7..06ef5db23a1b2 100644
--- a/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsRadioButtonMenuItemUI.java
+++ b/src/java.desktop/windows/classes/com/sun/java/swing/plaf/windows/WindowsRadioButtonMenuItemUI.java
@@ -76,20 +76,19 @@ protected void paintBackground(Graphics g, JMenuItem menuItem,
super.paintBackground(g, menuItem, bgColor);
}
- @Override
+ /**
+ * Paint MenuItem.
+ */
protected void paintMenuItem(Graphics g, JComponent c,
Icon checkIcon, Icon arrowIcon,
Color background, Color foreground,
int defaultTextIconGap) {
if (WindowsMenuItemUI.isVistaPainting()) {
- WindowsMenuItemUI.paintMenuItem(accessor, g, c,
- checkIcon, arrowIcon,
- background, foreground,
- disabledForeground,
- acceleratorSelectionForeground,
- acceleratorForeground,
- defaultTextIconGap,
- menuItem, getPropertyPrefix());
+ WindowsMenuItemUI.paintMenuItem(accessor, g, c, checkIcon,
+ arrowIcon, background, foreground,
+ disabledForeground, acceleratorSelectionForeground,
+ acceleratorForeground, defaultTextIconGap,
+ menuItem, getPropertyPrefix());
return;
}
super.paintMenuItem(g, c, checkIcon, arrowIcon, background,
diff --git a/src/java.net.http/share/classes/jdk/internal/net/http/Http1Request.java b/src/java.net.http/share/classes/jdk/internal/net/http/Http1Request.java
index 8d28b664036c8..815b6bad20c0c 100644
--- a/src/java.net.http/share/classes/jdk/internal/net/http/Http1Request.java
+++ b/src/java.net.http/share/classes/jdk/internal/net/http/Http1Request.java
@@ -290,8 +290,7 @@ List headers() {
}
String uriString = requestURI();
StringBuilder sb = new StringBuilder(64);
- String method = request.method();
- sb.append(method)
+ sb.append(request.method())
.append(' ')
.append(uriString)
.append(" HTTP/1.1\r\n");
@@ -301,15 +300,11 @@ List headers() {
systemHeadersBuilder.setHeader("Host", hostString());
}
+ // GET, HEAD and DELETE with no request body should not set the Content-Length header
if (requestPublisher != null) {
contentLength = requestPublisher.contentLength();
if (contentLength == 0) {
- // PUT and POST with no request body should set the Content-Length header
- // even when the content is empty.
- // Other methods defined in RFC 9110 should not send the header in that case.
- if ("POST".equals(method) || "PUT".equals(method)) {
- systemHeadersBuilder.setHeader("Content-Length", "0");
- }
+ systemHeadersBuilder.setHeader("Content-Length", "0");
} else if (contentLength > 0) {
systemHeadersBuilder.setHeader("Content-Length", Long.toString(contentLength));
streaming = false;
diff --git a/src/java.net.http/share/classes/jdk/internal/net/http/common/Utils.java b/src/java.net.http/share/classes/jdk/internal/net/http/common/Utils.java
index a02506cff5c76..b14d76d8dbadb 100644
--- a/src/java.net.http/share/classes/jdk/internal/net/http/common/Utils.java
+++ b/src/java.net.http/share/classes/jdk/internal/net/http/common/Utils.java
@@ -174,20 +174,15 @@ private static boolean hostnameVerificationDisabledValue() {
public static final int SLICE_THRESHOLD = 32;
/**
- * The capacity of ephemeral {@link ByteBuffer}s allocated to pass data to and from the client.
- * It is ensured to have a value between 1 and 2^14 (16,384).
+ * Allocated buffer size. Must never be higher than 16K. But can be lower
+ * if smaller allocation units preferred. HTTP/2 mandates that all
+ * implementations support frame payloads of at least 16K.
*/
+ private static final int DEFAULT_BUFSIZE = 16 * 1024;
+
public static final int BUFSIZE = getIntegerNetProperty(
- "jdk.httpclient.bufsize", 1,
- // We cap at 2^14 (16,384) for two main reasons:
- // - The initial frame size is 2^14 (RFC 9113)
- // - SSL record layer fragments data in chunks of 2^14 bytes or less (RFC 5246)
- 1 << 14,
- // We choose 2^14 (16,384) as the default, because:
- // 1. It maximizes throughput within the limits described above
- // 2. It is small enough to not create a GC bottleneck when it is partially filled
- 1 << 14,
- true);
+ "jdk.httpclient.bufsize", DEFAULT_BUFSIZE
+ );
public static final BiPredicate ACCEPT_ALL = (x,y) -> true;
diff --git a/src/java.net.http/share/classes/module-info.java b/src/java.net.http/share/classes/module-info.java
index 48f23953ad088..392385136b084 100644
--- a/src/java.net.http/share/classes/module-info.java
+++ b/src/java.net.http/share/classes/module-info.java
@@ -48,9 +48,7 @@
* depending on the context. These restrictions cannot be overridden by this property.
*
*
{@systemProperty jdk.httpclient.bufsize} (default: 16384 bytes or 16 kB)
- * The capacity of internal ephemeral buffers allocated to pass data to and from the
- * client, in bytes. Valid values are in the range [1, 2^14 (16384)].
- * If an invalid value is provided, the default value is used.
+ * The size to use for internal allocated buffers in bytes.
*
*
{@systemProperty jdk.httpclient.connectionPoolSize} (default: 0)
* The maximum number of connections to keep in the HTTP/1.1 keep alive cache. A value of 0
diff --git a/src/java.xml/share/classes/com/sun/org/apache/xerces/internal/jaxp/DocumentBuilderFactoryImpl.java b/src/java.xml/share/classes/com/sun/org/apache/xerces/internal/jaxp/DocumentBuilderFactoryImpl.java
index bc8e93b4f0b92..385b8e294395d 100644
--- a/src/java.xml/share/classes/com/sun/org/apache/xerces/internal/jaxp/DocumentBuilderFactoryImpl.java
+++ b/src/java.xml/share/classes/com/sun/org/apache/xerces/internal/jaxp/DocumentBuilderFactoryImpl.java
@@ -41,7 +41,7 @@
/**
* @author Rajiv Mordani
* @author Edwin Goei
- * @LastModified: June 2025
+ * @LastModified: May 2025
*/
public class DocumentBuilderFactoryImpl extends DocumentBuilderFactory {
/** These are DocumentBuilderFactory attributes not DOM attributes */
@@ -59,24 +59,11 @@ public class DocumentBuilderFactoryImpl extends DocumentBuilderFactory {
XMLSecurityManager fSecurityManager;
XMLSecurityPropertyManager fSecurityPropertyMgr;
- /**
- * Creates a new {@code DocumentBuilderFactory} instance.
- */
public DocumentBuilderFactoryImpl() {
- this(null, null);
- }
-
- /**
- * Creates a new {@code DocumentBuilderFactory} instance with a {@code XMLSecurityManager}
- * and {@code XMLSecurityPropertyManager}.
- * @param xsm the {@code XMLSecurityManager}
- * @param xspm the {@code XMLSecurityPropertyManager}
- */
- public DocumentBuilderFactoryImpl(XMLSecurityManager xsm, XMLSecurityPropertyManager xspm) {
JdkXmlConfig config = JdkXmlConfig.getInstance(false);
// security (property) managers updated with current system properties
- fSecurityManager = (xsm == null) ? config.getXMLSecurityManager(true) : xsm;
- fSecurityPropertyMgr = (xspm == null) ? config.getXMLSecurityPropertyManager(true) : xspm;
+ fSecurityManager = config.getXMLSecurityManager(true);
+ fSecurityPropertyMgr = config.getXMLSecurityPropertyManager(true);
}
/**
diff --git a/src/java.xml/share/classes/com/sun/org/apache/xpath/internal/jaxp/XPathFactoryImpl.java b/src/java.xml/share/classes/com/sun/org/apache/xpath/internal/jaxp/XPathFactoryImpl.java
index 2f4d2ade54530..1288f1dbac32a 100644
--- a/src/java.xml/share/classes/com/sun/org/apache/xpath/internal/jaxp/XPathFactoryImpl.java
+++ b/src/java.xml/share/classes/com/sun/org/apache/xpath/internal/jaxp/XPathFactoryImpl.java
@@ -35,7 +35,7 @@
*
* @author Ramesh Mandava
*
- * @LastModified: June 2025
+ * @LastModified: May 2025
*/
public class XPathFactoryImpl extends XPathFactory {
@@ -72,7 +72,6 @@ public class XPathFactoryImpl extends XPathFactory {
* The XML security manager
*/
private XMLSecurityManager _xmlSecMgr;
- private XMLSecurityPropertyManager _xmlSecPropMgr;
/**
* javax.xml.xpath.XPathFactory implementation.
@@ -81,7 +80,6 @@ public XPathFactoryImpl() {
JdkXmlConfig config = JdkXmlConfig.getInstance(false);
_xmlSecMgr = config.getXMLSecurityManager(true);
_featureManager = config.getXMLFeatures(true);
- _xmlSecPropMgr = config.getXMLSecurityPropertyManager(true);
}
/**
@@ -131,7 +129,7 @@ public boolean isObjectModelSupported(String objectModel) {
*/
public javax.xml.xpath.XPath newXPath() {
return new XPathImpl(xPathVariableResolver, xPathFunctionResolver,
- !_isNotSecureProcessing, _featureManager, _xmlSecMgr, _xmlSecPropMgr);
+ !_isNotSecureProcessing, _featureManager, _xmlSecMgr);
}
/**
@@ -185,7 +183,6 @@ public void setFeature(String name, boolean value)
if (value && _featureManager != null) {
_featureManager.setFeature(JdkXmlFeatures.XmlFeature.ENABLE_EXTENSION_FUNCTION,
JdkProperty.State.FSP, false);
- _xmlSecMgr.setSecureProcessing(value);
}
// all done processing feature
@@ -341,7 +338,8 @@ public void setProperty(String name, String value) {
throw new NullPointerException(fmsg);
}
- if (JdkXmlUtils.setProperty(_xmlSecMgr, _xmlSecPropMgr, name, value)) {
+ if (_xmlSecMgr != null &&
+ _xmlSecMgr.setLimit(name, JdkProperty.State.APIPROPERTY, value)) {
return;
}
diff --git a/src/java.xml/share/classes/com/sun/org/apache/xpath/internal/jaxp/XPathImpl.java b/src/java.xml/share/classes/com/sun/org/apache/xpath/internal/jaxp/XPathImpl.java
index c2faf90ce2e1e..53099ad078ec4 100644
--- a/src/java.xml/share/classes/com/sun/org/apache/xpath/internal/jaxp/XPathImpl.java
+++ b/src/java.xml/share/classes/com/sun/org/apache/xpath/internal/jaxp/XPathImpl.java
@@ -36,7 +36,6 @@
import jdk.xml.internal.JdkXmlConfig;
import jdk.xml.internal.JdkXmlFeatures;
import jdk.xml.internal.XMLSecurityManager;
-import jdk.xml.internal.XMLSecurityPropertyManager;
import org.w3c.dom.Document;
import org.xml.sax.InputSource;
@@ -51,7 +50,7 @@
* New methods: evaluateExpression
* Refactored to share code with XPathExpressionImpl.
*
- * @LastModified: June 2025
+ * @LastModified: May 2025
*/
public class XPathImpl extends XPathImplUtil implements javax.xml.xpath.XPath {
@@ -63,13 +62,12 @@ public class XPathImpl extends XPathImplUtil implements javax.xml.xpath.XPath {
XPathImpl(XPathVariableResolver vr, XPathFunctionResolver fr) {
this(vr, fr, false,
JdkXmlConfig.getInstance(false).getXMLFeatures(false),
- JdkXmlConfig.getInstance(false).getXMLSecurityManager(false),
- JdkXmlConfig.getInstance(false).getXMLSecurityPropertyManager(false));
+ JdkXmlConfig.getInstance(false).getXMLSecurityManager(false));
}
XPathImpl(XPathVariableResolver vr, XPathFunctionResolver fr,
boolean featureSecureProcessing, JdkXmlFeatures featureManager,
- XMLSecurityManager xmlSecMgr, XMLSecurityPropertyManager xmlSecPropMgr) {
+ XMLSecurityManager xmlSecMgr) {
this.origVariableResolver = this.variableResolver = vr;
this.origFunctionResolver = this.functionResolver = fr;
this.featureSecureProcessing = featureSecureProcessing;
@@ -77,7 +75,6 @@ public class XPathImpl extends XPathImplUtil implements javax.xml.xpath.XPath {
overrideDefaultParser = featureManager.getFeature(
JdkXmlFeatures.XmlFeature.JDK_OVERRIDE_PARSER);
this.xmlSecMgr = xmlSecMgr;
- this.xmlSecPropMgr = xmlSecPropMgr;
}
diff --git a/src/java.xml/share/classes/com/sun/org/apache/xpath/internal/jaxp/XPathImplUtil.java b/src/java.xml/share/classes/com/sun/org/apache/xpath/internal/jaxp/XPathImplUtil.java
index 3de72f3f68bb2..a92090900facb 100644
--- a/src/java.xml/share/classes/com/sun/org/apache/xpath/internal/jaxp/XPathImplUtil.java
+++ b/src/java.xml/share/classes/com/sun/org/apache/xpath/internal/jaxp/XPathImplUtil.java
@@ -31,7 +31,6 @@
import com.sun.org.apache.xpath.internal.objects.XObject;
import com.sun.org.apache.xpath.internal.res.XPATHErrorResources;
import java.io.IOException;
-import javax.xml.XMLConstants;
import javax.xml.namespace.QName;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
@@ -45,7 +44,6 @@
import jdk.xml.internal.JdkXmlFeatures;
import jdk.xml.internal.JdkXmlUtils;
import jdk.xml.internal.XMLSecurityManager;
-import jdk.xml.internal.XMLSecurityPropertyManager;
import org.w3c.dom.Document;
import org.w3c.dom.Node;
import org.w3c.dom.traversal.NodeIterator;
@@ -56,7 +54,7 @@
* This class contains several utility methods used by XPathImpl and
* XPathExpressionImpl
*
- * @LastModified: June 2025
+ * @LastModified: Apr 2025
*/
class XPathImplUtil {
XPathFunctionResolver functionResolver;
@@ -69,7 +67,6 @@ class XPathImplUtil {
boolean featureSecureProcessing = false;
JdkXmlFeatures featureManager;
XMLSecurityManager xmlSecMgr;
- XMLSecurityPropertyManager xmlSecPropMgr;
/**
* Evaluate an XPath context using the internal XPath engine
@@ -131,12 +128,7 @@ Document getDocument(InputSource source)
//
// so we really have to create a fresh DocumentBuilder every time we need one
// - KK
- DocumentBuilderFactory dbf = JdkXmlUtils.getDOMFactory(
- overrideDefaultParser, xmlSecMgr, xmlSecPropMgr);
- if (xmlSecMgr != null && xmlSecMgr.isSecureProcessingSet()) {
- dbf.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING,
- xmlSecMgr.isSecureProcessing());
- }
+ DocumentBuilderFactory dbf = JdkXmlUtils.getDOMFactory(overrideDefaultParser);
return dbf.newDocumentBuilder().parse(source);
} catch (ParserConfigurationException | SAXException | IOException e) {
throw new XPathExpressionException (e);
diff --git a/src/java.xml/share/classes/jdk/xml/internal/JdkXmlUtils.java b/src/java.xml/share/classes/jdk/xml/internal/JdkXmlUtils.java
index 9e718b264e447..93b63a746f15e 100644
--- a/src/java.xml/share/classes/jdk/xml/internal/JdkXmlUtils.java
+++ b/src/java.xml/share/classes/jdk/xml/internal/JdkXmlUtils.java
@@ -445,20 +445,6 @@ public static Document getDOMDocument() {
* @return a DocumentBuilderFactory instance.
*/
public static DocumentBuilderFactory getDOMFactory(boolean overrideDefaultParser) {
- return getDOMFactory(overrideDefaultParser, null, null);
- }
-
- /**
- * {@return a DocumentBuilderFactory instance}
- *
- * @param overrideDefaultParser a flag indicating whether the system-default
- * implementation may be overridden. If the system property of the
- * DOM factory ID is set, override is always allowed.
- * @param xsm XMLSecurityManager
- * @param xspm XMLSecurityPropertyManager
- */
- public static DocumentBuilderFactory getDOMFactory(boolean overrideDefaultParser,
- XMLSecurityManager xsm, XMLSecurityPropertyManager xspm) {
boolean override = overrideDefaultParser;
String spDOMFactory = SecuritySupport.getJAXPSystemProperty(DOM_FACTORY_ID);
@@ -467,7 +453,7 @@ public static DocumentBuilderFactory getDOMFactory(boolean overrideDefaultParser
}
DocumentBuilderFactory dbf
= !override
- ? new DocumentBuilderFactoryImpl(xsm, xspm)
+ ? new DocumentBuilderFactoryImpl()
: DocumentBuilderFactory.newInstance();
dbf.setNamespaceAware(true);
// false is the default setting. This step here is for compatibility
diff --git a/src/java.xml/share/classes/jdk/xml/internal/XMLSecurityManager.java b/src/java.xml/share/classes/jdk/xml/internal/XMLSecurityManager.java
index a1687c420c3ff..5ca4073e20f2f 100644
--- a/src/java.xml/share/classes/jdk/xml/internal/XMLSecurityManager.java
+++ b/src/java.xml/share/classes/jdk/xml/internal/XMLSecurityManager.java
@@ -244,12 +244,6 @@ public static enum Processor {
*/
boolean secureProcessing;
- /**
- * Flag indicating the secure processing is set explicitly through factories'
- * setFeature method and then the setSecureProcessing method
- */
- boolean secureProcessingSet;
-
/**
* States that determine if properties are set explicitly
*/
@@ -346,7 +340,6 @@ private NotFoundAction toActionType(String resolve) {
* Setting FEATURE_SECURE_PROCESSING explicitly
*/
public void setSecureProcessing(boolean secure) {
- secureProcessingSet = true;
secureProcessing = secure;
for (Limit limit : Limit.values()) {
if (secure) {
@@ -365,15 +358,6 @@ public boolean isSecureProcessing() {
return secureProcessing;
}
- /**
- * Returns the state indicating whether the Secure Processing is set explicitly,
- * via factories' setFeature and then this class' setSecureProcessing method.
- * @return the state indicating whether the Secure Processing is set explicitly
- */
- public boolean isSecureProcessingSet() {
- return secureProcessingSet;
- }
-
/**
* Finds a limit's new name with the given property name.
* @param propertyName the property name specified
diff --git a/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/ExhaustivenessComputer.java b/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/ExhaustivenessComputer.java
deleted file mode 100644
index 7a8067ce983f8..0000000000000
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/ExhaustivenessComputer.java
+++ /dev/null
@@ -1,604 +0,0 @@
-/*
- * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package com.sun.tools.javac.comp;
-
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Set;
-import com.sun.tools.javac.code.*;
-import com.sun.tools.javac.tree.*;
-import com.sun.tools.javac.util.*;
-
-import com.sun.tools.javac.code.Symbol.*;
-import com.sun.tools.javac.tree.JCTree.*;
-
-import com.sun.tools.javac.code.Kinds.Kind;
-import com.sun.tools.javac.code.Type.TypeVar;
-import java.util.Arrays;
-import java.util.Iterator;
-import java.util.function.Predicate;
-import java.util.stream.Collectors;
-
-import static java.util.stream.Collectors.groupingBy;
-
-/** A class to compute exhaustiveness of set of switch cases.
- *
- *
This is NOT part of any supported API.
- * If you write code that depends on this, you do so at your own risk.
- * This code and its internal interfaces are subject to change or
- * deletion without notice.
- */
-public class ExhaustivenessComputer {
- protected static final Context.Key exhaustivenessKey = new Context.Key<>();
-
- private final Symtab syms;
- private final Types types;
- private final Check chk;
- private final Infer infer;
-
- public static ExhaustivenessComputer instance(Context context) {
- ExhaustivenessComputer instance = context.get(exhaustivenessKey);
- if (instance == null)
- instance = new ExhaustivenessComputer(context);
- return instance;
- }
-
- @SuppressWarnings("this-escape")
- protected ExhaustivenessComputer(Context context) {
- context.put(exhaustivenessKey, this);
- syms = Symtab.instance(context);
- types = Types.instance(context);
- chk = Check.instance(context);
- infer = Infer.instance(context);
- }
-
- public boolean exhausts(JCExpression selector, List cases) {
- Set patternSet = new HashSet<>();
- Map> enum2Constants = new HashMap<>();
- Set