diff --git a/doc/building.html b/doc/building.html
index e0cca9534bd..034cd77d9a7 100644
--- a/doc/building.html
+++ b/doc/building.html
@@ -495,6 +495,11 @@
Advanced Make Control Variables
Running Tests
Most of the JDK tests are using the JTReg test framework. Make sure that your configuration knows where to find your installation of JTReg. If this is not picked up automatically, use the --with-jtreg=<path to jtreg home>
option to point to the JTReg framework. Note that this option should point to the JTReg home, i.e. the top directory, containing lib/jtreg.jar
etc.
The Adoption Group provides recent builds of jtreg here. Download the latest .tar.gz
file, unpack it, and point --with-jtreg
to the jtreg
directory that you just unpacked.
+Building of Hotspot Gtest suite requires the source code of Google Test framework. The top directory, which contains both googletest
and googlemock
directories, should be specified via --with-gtest
. The supported version of Google Test is 1.8.1, whose source code can be obtained:
+
+- by downloading and unpacking the source bundle from here
+- or by checking out
release-1.8.1
tag of googletest
project: git clone -b release-1.8.1 https://github.com/google/googletest
+
To execute the most basic tests (tier 1), use:
make run-test-tier1
For more details on how to run tests, please see the Testing the JDK document.
diff --git a/doc/building.md b/doc/building.md
index 850f6f07da2..3990a9b9d71 100644
--- a/doc/building.md
+++ b/doc/building.md
@@ -829,6 +829,14 @@ https://ci.adoptopenjdk.net/view/Dependencies/job/jtreg/lastSuccessfulBuild/arti
Download the latest `.tar.gz` file, unpack it, and point `--with-jtreg` to the
`jtreg` directory that you just unpacked.
+Building of Hotspot Gtest suite requires the source code of Google Test framework.
+The top directory, which contains both `googletest` and `googlemock`
+directories, should be specified via `--with-gtest`.
+The supported version of Google Test is 1.8.1, whose source code can be obtained:
+
+ * by downloading and unpacking the source bundle from [here](https://github.com/google/googletest/releases/tag/release-1.8.1)
+ * or by checking out `release-1.8.1` tag of `googletest` project: `git clone -b release-1.8.1 https://github.com/google/googletest`
+
To execute the most basic tests (tier 1), use:
```
make run-test-tier1
diff --git a/make/Main.gmk b/make/Main.gmk
index bbe41cbc5c0..14ae6213653 100644
--- a/make/Main.gmk
+++ b/make/Main.gmk
@@ -652,7 +652,7 @@ $(eval $(call SetupTarget, test-image-hotspot-jtreg-graal, \
DEPS := build-test-hotspot-jtreg-graal, \
))
-ifeq ($(BUILD_GTEST), true)
+ifneq ($(GTEST_FRAMEWORK_SRC), )
$(eval $(call SetupTarget, test-image-hotspot-gtest, \
MAKEFILE := hotspot/test/GtestImage, \
DEPS := hotspot, \
@@ -1109,7 +1109,7 @@ ifneq ($(JVM_TEST_IMAGE_TARGETS), )
test-image: $(JVM_TEST_IMAGE_TARGETS)
else
test-image: test-image-hotspot-jtreg-native
- ifeq ($(BUILD_GTEST), true)
+ ifneq ($(GTEST_FRAMEWORK_SRC), )
test-image: test-image-hotspot-gtest
endif
diff --git a/make/RunTestsPrebuiltSpec.gmk b/make/RunTestsPrebuiltSpec.gmk
index 33953c2b7d9..30cb6d85946 100644
--- a/make/RunTestsPrebuiltSpec.gmk
+++ b/make/RunTestsPrebuiltSpec.gmk
@@ -82,7 +82,6 @@ SHELL := $(BASH) $(BASH_ARGS)
# Set some reasonable defaults for features
DEBUG_LEVEL := release
HOTSPOT_DEBUG_LEVEL := release
-BUILD_GTEST := true
BUILD_FAILURE_HANDLER := true
################################################################################
diff --git a/make/autoconf/buildjdk-spec.gmk.in b/make/autoconf/buildjdk-spec.gmk.in
index 07d2b0b1503..7134e34bcee 100644
--- a/make/autoconf/buildjdk-spec.gmk.in
+++ b/make/autoconf/buildjdk-spec.gmk.in
@@ -91,9 +91,6 @@ DISABLE_WARNING_PREFIX := @BUILD_CC_DISABLE_WARNING_PREFIX@
# Save speed and disk space by not enabling debug symbols for the buildjdk
ENABLE_DEBUG_SYMBOLS := false
-# Control whether Hotspot builds gtest tests
-BUILD_GTEST := false
-
JVM_VARIANTS := server
JVM_VARIANT_MAIN := server
JVM_FEATURES_server := cds compiler1 compiler2 g1gc serialgc
diff --git a/make/autoconf/configure.ac b/make/autoconf/configure.ac
index 4c95dd362ef..16e4bcfebaa 100644
--- a/make/autoconf/configure.ac
+++ b/make/autoconf/configure.ac
@@ -234,7 +234,6 @@ LIB_SETUP_LIBRARIES
JVM_FEATURES_PARSE_OPTIONS
JVM_FEATURES_SETUP
-HOTSPOT_ENABLE_DISABLE_GTEST
HOTSPOT_SETUP_MISC
###############################################################################
diff --git a/make/autoconf/hotspot.m4 b/make/autoconf/hotspot.m4
index f4dd27126d1..43653c115be 100644
--- a/make/autoconf/hotspot.m4
+++ b/make/autoconf/hotspot.m4
@@ -111,28 +111,6 @@ AC_DEFUN_ONCE([HOTSPOT_SETUP_JVM_VARIANTS],
AC_SUBST(JVM_VARIANT_MAIN)
])
-###############################################################################
-# Check if gtest should be built
-#
-AC_DEFUN_ONCE([HOTSPOT_ENABLE_DISABLE_GTEST],
-[
- GTEST_AVAILABLE=true
-
- AC_MSG_CHECKING([if Hotspot gtest test source is present])
- if test -e "${TOPDIR}/test/hotspot/gtest"; then
- AC_MSG_RESULT([yes])
- else
- AC_MSG_RESULT([no, cannot build gtest])
- GTEST_AVAILABLE=false
- fi
-
- UTIL_ARG_ENABLE(NAME: hotspot-gtest, DEFAULT: auto,
- RESULT: BUILD_GTEST, AVAILABLE: $GTEST_AVAILABLE,
- DEFAULT_DESC: [enabled if possible to build],
- DESC: [enable building of the Hotspot unit tests])
- AC_SUBST(BUILD_GTEST)
-])
-
###############################################################################
# Misc hotspot setup that does not fit elsewhere.
#
@@ -162,4 +140,7 @@ AC_DEFUN_ONCE([HOTSPOT_SETUP_MISC],
# --with-cpu-port is no longer supported
UTIL_DEPRECATED_ARG_WITH(with-cpu-port)
+
+ # in jdk15 hotspot-gtest was replaced with --with-gtest
+ UTIL_DEPRECATED_ARG_ENABLE(hotspot-gtest)
])
diff --git a/make/autoconf/lib-tests.m4 b/make/autoconf/lib-tests.m4
index 4512291a4f0..ff41329afb5 100644
--- a/make/autoconf/lib-tests.m4
+++ b/make/autoconf/lib-tests.m4
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -55,6 +55,40 @@ AC_DEFUN_ONCE([LIB_TESTS_SETUP_GRAALUNIT],
AC_SUBST(GRAALUNIT_LIB)
])
+###############################################################################
+#
+# Setup and check for gtest framework source files
+#
+AC_DEFUN_ONCE([LIB_TESTS_SETUP_GTEST],
+[
+ AC_ARG_WITH(gtest, [AS_HELP_STRING([--with-gtest],
+ [specify prefix directory for the gtest framework])])
+
+ if test "x${with_gtest}" != x; then
+ AC_MSG_CHECKING([for gtest])
+ if test "x${with_gtest}" = xno; then
+ AC_MSG_RESULT([no, disabled])
+ elif test "x${with_gtest}" = xyes; then
+ AC_MSG_RESULT([no, error])
+ AC_MSG_ERROR([--with-gtest must have a value])
+ else
+ if ! test -s "${with_gtest}/googletest/include/gtest/gtest.h"; then
+ AC_MSG_RESULT([no])
+ AC_MSG_ERROR([Can't find 'googletest/include/gtest/gtest.h' under ${with_gtest} given with the --with-gtest option.])
+ elif ! test -s "${with_gtest}/googlemock/include/gmock/gmock.h"; then
+ AC_MSG_RESULT([no])
+ AC_MSG_ERROR([Can't find 'googlemock/include/gmock/gmock.h' under ${with_gtest} given with the --with-gtest option.])
+ else
+ GTEST_FRAMEWORK_SRC=${with_gtest}
+ AC_MSG_RESULT([$GTEST_FRAMEWORK_SRC])
+ UTIL_FIXUP_PATH([GTEST_FRAMEWORK_SRC])
+ fi
+ fi
+ fi
+
+ AC_SUBST(GTEST_FRAMEWORK_SRC)
+])
+
###############################################################################
#
# Setup and check the Java Microbenchmark Harness
diff --git a/make/autoconf/libraries.m4 b/make/autoconf/libraries.m4
index 0002dba1604..5120918aed2 100644
--- a/make/autoconf/libraries.m4
+++ b/make/autoconf/libraries.m4
@@ -102,6 +102,7 @@ AC_DEFUN_ONCE([LIB_SETUP_LIBRARIES],
LIB_SETUP_BUNDLED_LIBS
LIB_SETUP_MISC_LIBS
LIB_TESTS_SETUP_GRAALUNIT
+ LIB_TESTS_SETUP_GTEST
BASIC_JDKLIB_LIBS=""
if test "x$TOOLCHAIN_TYPE" != xmicrosoft; then
diff --git a/make/autoconf/spec.gmk.in b/make/autoconf/spec.gmk.in
index 98c5132832e..ed4e860e01c 100644
--- a/make/autoconf/spec.gmk.in
+++ b/make/autoconf/spec.gmk.in
@@ -292,9 +292,6 @@ JVM_FEATURES_custom := @JVM_FEATURES_custom@
VALID_JVM_FEATURES := @VALID_JVM_FEATURES@
VALID_JVM_VARIANTS := @VALID_JVM_VARIANTS@
-# Control whether Hotspot builds gtest tests
-BUILD_GTEST := @BUILD_GTEST@
-
# Allow overriding the default hotspot library path
HOTSPOT_OVERRIDE_LIBPATH := @HOTSPOT_OVERRIDE_LIBPATH@
@@ -392,6 +389,8 @@ JMH_JOPT_SIMPLE_JAR := @JMH_JOPT_SIMPLE_JAR@
JMH_COMMONS_MATH_JAR := @JMH_COMMONS_MATH_JAR@
JMH_VERSION := @JMH_VERSION@
+GTEST_FRAMEWORK_SRC := @GTEST_FRAMEWORK_SRC@
+
# Source file for cacerts
CACERTS_FILE=@CACERTS_FILE@
diff --git a/make/conf/jib-profiles.js b/make/conf/jib-profiles.js
index befee0d95b8..a070909f397 100644
--- a/make/conf/jib-profiles.js
+++ b/make/conf/jib-profiles.js
@@ -404,7 +404,7 @@ var getJibProfilesProfiles = function (input, common, data) {
"linux-x64": {
target_os: "linux",
target_cpu: "x64",
- dependencies: ["devkit", "graphviz", "pandoc", "graalunit_lib"],
+ dependencies: ["devkit", "gtest", "graphviz", "pandoc", "graalunit_lib"],
configure_args: concat(common.configure_args_64bit,
"--enable-full-docs", "--with-zlib=system",
(isWsl(input) ? [ "--host=x86_64-unknown-linux-gnu",
@@ -416,7 +416,7 @@ var getJibProfilesProfiles = function (input, common, data) {
target_os: "linux",
target_cpu: "x86",
build_cpu: "x64",
- dependencies: ["devkit"],
+ dependencies: ["devkit", "gtest"],
configure_args: concat(common.configure_args_32bit,
"--with-jvm-variants=minimal,server", "--with-zlib=system"),
},
@@ -424,7 +424,7 @@ var getJibProfilesProfiles = function (input, common, data) {
"macosx-x64": {
target_os: "macosx",
target_cpu: "x64",
- dependencies: ["devkit", "pandoc", "graalunit_lib"],
+ dependencies: ["devkit", "gtest", "pandoc", "graalunit_lib"],
configure_args: concat(common.configure_args_64bit, "--with-zlib=system",
"--with-macosx-version-max=10.9.0"),
},
@@ -432,7 +432,7 @@ var getJibProfilesProfiles = function (input, common, data) {
"windows-x64": {
target_os: "windows",
target_cpu: "x64",
- dependencies: ["devkit", "pandoc", "graalunit_lib"],
+ dependencies: ["devkit", "gtest", "pandoc", "graalunit_lib"],
configure_args: concat(common.configure_args_64bit),
},
@@ -440,7 +440,7 @@ var getJibProfilesProfiles = function (input, common, data) {
target_os: "windows",
target_cpu: "x86",
build_cpu: "x64",
- dependencies: ["devkit"],
+ dependencies: ["devkit", "gtest"],
configure_args: concat(common.configure_args_32bit),
},
@@ -448,7 +448,7 @@ var getJibProfilesProfiles = function (input, common, data) {
target_os: "linux",
target_cpu: "aarch64",
build_cpu: "x64",
- dependencies: ["devkit", "build_devkit", "pandoc"],
+ dependencies: ["devkit", "gtest", "build_devkit", "pandoc"],
configure_args: [
"--openjdk-target=aarch64-linux-gnu",
"--disable-jvm-feature-jvmci",
@@ -461,7 +461,7 @@ var getJibProfilesProfiles = function (input, common, data) {
target_os: "linux",
target_cpu: "arm",
build_cpu: "x64",
- dependencies: ["devkit", "build_devkit"],
+ dependencies: ["devkit", "gtest", "build_devkit"],
configure_args: [
"--openjdk-target=arm-linux-gnueabihf", "--with-freetype=bundled",
"--with-abi-profile=arm-vfp-hflt", "--disable-warnings-as-errors"
@@ -472,7 +472,7 @@ var getJibProfilesProfiles = function (input, common, data) {
target_os: "linux",
target_cpu: "ppc64le",
build_cpu: "x64",
- dependencies: ["devkit", "build_devkit"],
+ dependencies: ["devkit", "gtest", "build_devkit"],
configure_args: [
"--openjdk-target=ppc64le-linux-gnu", "--with-freetype=bundled",
"--disable-warnings-as-errors"
@@ -483,7 +483,7 @@ var getJibProfilesProfiles = function (input, common, data) {
target_os: "linux",
target_cpu: "s390x",
build_cpu: "x64",
- dependencies: ["devkit", "build_devkit"],
+ dependencies: ["devkit", "gtest", "build_devkit"],
configure_args: [
"--openjdk-target=s390x-linux-gnu", "--with-freetype=bundled",
"--disable-warnings-as-errors"
@@ -538,7 +538,7 @@ var getJibProfilesProfiles = function (input, common, data) {
"linux-x64-zero": {
target_os: "linux",
target_cpu: "x64",
- dependencies: ["devkit"],
+ dependencies: ["devkit", "gtest"],
configure_args: concat(common.configure_args_64bit, [
"--with-zlib=system",
"--with-jvm-variants=zero",
@@ -550,7 +550,7 @@ var getJibProfilesProfiles = function (input, common, data) {
target_os: "linux",
target_cpu: "x86",
build_cpu: "x64",
- dependencies: ["devkit"],
+ dependencies: ["devkit", "gtest"],
configure_args: concat(common.configure_args_32bit, [
"--with-zlib=system",
"--with-jvm-variants=zero",
@@ -573,7 +573,7 @@ var getJibProfilesProfiles = function (input, common, data) {
"linux-x64-debug-nopch": {
target_os: "linux",
target_cpu: "x64",
- dependencies: ["devkit"],
+ dependencies: ["devkit", "gtest"],
configure_args: concat(common.configure_args_64bit,
"--with-zlib=system", "--disable-precompiled-headers"),
},
@@ -1148,6 +1148,12 @@ var getJibProfilesDependencies = function (input, common) {
configure_args: "--with-graalunit-lib=" + input.get("graalunit_lib", "install_path"),
environment_name: "GRAALUNIT_LIB"
},
+
+ gtest: {
+ organization: common.organization,
+ ext: "tar.gz",
+ revision: "1.8.1"
+ },
};
return dependencies;
diff --git a/make/data/jdwp/jdwp.spec b/make/data/jdwp/jdwp.spec
index a3377b61a09..48d1aab3c15 100644
--- a/make/data/jdwp/jdwp.spec
+++ b/make/data/jdwp/jdwp.spec
@@ -461,16 +461,9 @@ JDWP "Java(tm) Debug Wire Protocol"
"PopFrames command can be used "
"to pop frames with obsolete methods."
""
- "Unless the canUnrestrictedlyRedefineClasses capability is present the following "
- "redefinitions are restricted: "
- "
"
- "- changing the schema (the fields)
"
- "- changing the hierarchy (superclasses, interfaces)
"
- "- deleting a method
"
- "- changing class modifiers
"
- "- changing method modifiers
"
- "- changing the
NestHost
, NestMembers
, or Record
class attributes "
- "
"
+ "Unless the canUnrestrictedlyRedefineClasses capability is present "
+ "the redefinition must follow the restrictions described in "
+ "JVM TI RedefineClasses."
""
"Requires canRedefineClasses capability - see "
"CapabilitiesNew. "
diff --git a/make/hotspot/lib/CompileGtest.gmk b/make/hotspot/lib/CompileGtest.gmk
index 238956ab45a..bf3a9e2ba24 100644
--- a/make/hotspot/lib/CompileGtest.gmk
+++ b/make/hotspot/lib/CompileGtest.gmk
@@ -27,7 +27,6 @@ $(eval $(call IncludeCustomExtension, hotspot/lib/CompileGtest.gmk))
GTEST_TEST_SRC += $(TOPDIR)/test/hotspot/gtest
GTEST_LAUNCHER_SRC := $(TOPDIR)/test/hotspot/gtest/gtestLauncher.cpp
-GTEST_FRAMEWORK_SRC := $(TOPDIR)/test/fmw/gtest
# On Windows, there are no internal debug symbols so must set copying to true
# to get any at all.
@@ -39,6 +38,36 @@ endif
################################################################################
+# Disabling all warnings in gtest source code
+
+$(eval $(call SetupNativeCompilation, BUILD_GTEST_LIBGTEST, \
+ NAME := gtest, \
+ TYPE := STATIC_LIBRARY, \
+ TOOLCHAIN := TOOLCHAIN_LINK_CXX, \
+ OUTPUT_DIR := $(JVM_OUTPUTDIR)/libgtest, \
+ OBJECT_DIR := $(JVM_OUTPUTDIR)/libgtest/objs, \
+ SRC := \
+ $(GTEST_FRAMEWORK_SRC)/googletest/src \
+ $(GTEST_FRAMEWORK_SRC)/googlemock/src, \
+ INCLUDE_FILES := gtest-all.cc gmock-all.cc, \
+ CFLAGS := $(filter-out $(WARNING_CFLAGS_JVM), $(JVM_CFLAGS)) \
+ -w \
+ -I$(GTEST_FRAMEWORK_SRC)/googletest \
+ -I$(GTEST_FRAMEWORK_SRC)/googletest/include \
+ -I$(GTEST_FRAMEWORK_SRC)/googlemock \
+ -I$(GTEST_FRAMEWORK_SRC)/googlemock/include, \
+ CFLAGS_windows := -EHsc, \
+ CFLAGS_macosx := -DGTEST_OS_MAC=1, \
+ OPTIMIZATION := $(JVM_OPTIMIZATION), \
+ COPY_DEBUG_SYMBOLS := $(GTEST_COPY_DEBUG_SYMBOLS), \
+ ZIP_EXTERNAL_DEBUG_SYMBOLS := false, \
+ STRIP_SYMBOLS := false, \
+))
+
+TARGETS += $(BUILD_GTEST_LIBGTEST)
+
+################################################################################
+
ifeq ($(call isTargetOs, windows), true)
GTEST_JVM_MAPFILE := $(JVM_MAPFILE)
else
@@ -64,15 +93,10 @@ $(eval $(call SetupNativeCompilation, BUILD_GTEST_LIBJVM, \
EXCLUDES := $(JVM_EXCLUDES), \
EXCLUDE_FILES := gtestLauncher.cpp, \
EXCLUDE_PATTERNS := $(JVM_EXCLUDE_PATTERNS), \
- EXTRA_FILES := \
- $(GTEST_FRAMEWORK_SRC)/googletest/src/gtest-all.cc \
- $(GTEST_FRAMEWORK_SRC)/googlemock/src/gmock-all.cc, \
EXTRA_OBJECT_FILES := $(filter-out %/operator_new$(OBJ_SUFFIX), \
$(BUILD_LIBJVM_ALL_OBJS)), \
CFLAGS := $(JVM_CFLAGS) \
- -I$(GTEST_FRAMEWORK_SRC)/googletest \
-I$(GTEST_FRAMEWORK_SRC)/googletest/include \
- -I$(GTEST_FRAMEWORK_SRC)/googlemock \
-I$(GTEST_FRAMEWORK_SRC)/googlemock/include \
$(addprefix -I,$(GTEST_TEST_SRC)), \
CFLAGS_windows := -EHsc, \
@@ -83,7 +107,10 @@ $(eval $(call SetupNativeCompilation, BUILD_GTEST_LIBJVM, \
undef switch format-nonliteral tautological-undefined-compare \
self-assign-overloaded, \
LDFLAGS := $(JVM_LDFLAGS), \
+ LDFLAGS_unix := -L$(JVM_OUTPUTDIR)/libgtest, \
LIBS := $(JVM_LIBS), \
+ LIBS_unix := -lgtest, \
+ LIBS_windows := $(JVM_OUTPUTDIR)/libgtest/gtest.lib, \
OPTIMIZATION := $(JVM_OPTIMIZATION), \
MAPFILE := $(GTEST_JVM_MAPFILE), \
USE_MAPFILE_FOR_SYMBOLS := true, \
@@ -91,9 +118,11 @@ $(eval $(call SetupNativeCompilation, BUILD_GTEST_LIBJVM, \
ZIP_EXTERNAL_DEBUG_SYMBOLS := false, \
STRIP_SYMBOLS := false, \
PRECOMPILED_HEADER := $(JVM_PRECOMPILED_HEADER), \
- PRECOMPILED_HEADER_EXCLUDE := gtest-all.cc gmock-all.cc gtestMain.cpp, \
+ PRECOMPILED_HEADER_EXCLUDE := gtestMain.cpp, \
))
+$(BUILD_GTEST_LIBJVM) : $(BUILD_GTEST_LIBGTEST)
+
TARGETS += $(BUILD_GTEST_LIBJVM)
################################################################################
diff --git a/make/hotspot/lib/CompileLibraries.gmk b/make/hotspot/lib/CompileLibraries.gmk
index 9f68d7033ad..6b1e6529cd0 100644
--- a/make/hotspot/lib/CompileLibraries.gmk
+++ b/make/hotspot/lib/CompileLibraries.gmk
@@ -33,7 +33,7 @@ include HotspotCommon.gmk
include lib/CompileJvm.gmk
-ifeq ($(BUILD_GTEST), true)
+ifneq ($(GTEST_FRAMEWORK_SRC), )
include lib/CompileGtest.gmk
endif
diff --git a/make/modules/java.base/lib/CoreLibraries.gmk b/make/modules/java.base/lib/CoreLibraries.gmk
index e5c54212347..28c7357adc5 100644
--- a/make/modules/java.base/lib/CoreLibraries.gmk
+++ b/make/modules/java.base/lib/CoreLibraries.gmk
@@ -168,7 +168,7 @@ TARGETS += $(BUILD_LIBJIMAGE)
##########################################################################################
ifeq ($(call isTargetOs, macosx), true)
- LIBJLI_EXCLUDE_FILES += java_md_solinux.c
+ LIBJLI_EXCLUDE_FILES += java_md.c
endif
ifeq ($(call isTargetOs, windows), true)
diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad
index eedc825c7ca..466ecaec0f9 100644
--- a/src/hotspot/cpu/aarch64/aarch64.ad
+++ b/src/hotspot/cpu/aarch64/aarch64.ad
@@ -1361,17 +1361,12 @@ source %{
// traverse when searching from a card mark membar for the merge mem
// feeding a trailing membar or vice versa
-// predicates controlling emit of ldr/ldar and associated dmb
+// predicates controlling emit of ldr/ldar
bool unnecessary_acquire(const Node *barrier)
{
assert(barrier->is_MemBar(), "expecting a membar");
- if (UseBarriersForVolatile) {
- // we need to plant a dmb
- return false;
- }
-
MemBarNode* mb = barrier->as_MemBar();
if (mb->trailing_load()) {
@@ -1390,26 +1385,15 @@ bool unnecessary_acquire(const Node *barrier)
bool needs_acquiring_load(const Node *n)
{
assert(n->is_Load(), "expecting a load");
- if (UseBarriersForVolatile) {
- // we use a normal load and a dmb
- return false;
- }
-
LoadNode *ld = n->as_Load();
-
return ld->is_acquire();
}
bool unnecessary_release(const Node *n)
{
assert((n->is_MemBar() &&
- n->Opcode() == Op_MemBarRelease),
- "expecting a release membar");
-
- if (UseBarriersForVolatile) {
- // we need to plant a dmb
- return false;
- }
+ n->Opcode() == Op_MemBarRelease),
+ "expecting a release membar");
MemBarNode *barrier = n->as_MemBar();
if (!barrier->leading()) {
@@ -1437,11 +1421,6 @@ bool unnecessary_release(const Node *n)
bool unnecessary_volatile(const Node *n)
{
// assert n->is_MemBar();
- if (UseBarriersForVolatile) {
- // we need to plant a dmb
- return false;
- }
-
MemBarNode *mbvol = n->as_MemBar();
bool release = mbvol->trailing_store();
@@ -1458,18 +1437,12 @@ bool unnecessary_volatile(const Node *n)
return release;
}
-// predicates controlling emit of str/stlr and associated dmbs
+// predicates controlling emit of str/stlr
bool needs_releasing_store(const Node *n)
{
// assert n->is_Store();
- if (UseBarriersForVolatile) {
- // we use a normal store and dmb combination
- return false;
- }
-
StoreNode *st = n->as_Store();
-
return st->trailing_membar() != NULL;
}
@@ -1480,10 +1453,6 @@ bool needs_releasing_store(const Node *n)
bool needs_acquiring_load_exclusive(const Node *n)
{
assert(is_CAS(n->Opcode(), true), "expecting a compare and swap");
- if (UseBarriersForVolatile) {
- return false;
- }
-
LoadStoreNode* ldst = n->as_LoadStore();
if (is_CAS(n->Opcode(), false)) {
assert(ldst->trailing_membar() != NULL, "expected trailing membar");
diff --git a/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp
index 312002e851a..43598039050 100644
--- a/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -1411,9 +1411,8 @@ void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
// membar it's possible for a simple Dekker test to fail if loads
// use LD;DMB but stores use STLR. This can happen if C2 compiles
// the stores in one method and C1 compiles the loads in another.
- if (! UseBarriersForVolatile) {
+ if (!is_c1_or_interpreter_only()) {
__ membar();
}
-
__ volatile_load_mem_reg(address, result, info);
}
diff --git a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp
index 9bdaf586505..7a37c7caf10 100644
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp
@@ -683,23 +683,11 @@ address ShenandoahBarrierSetAssembler::generate_shenandoah_lrb(StubCodeGenerator
StubCodeMark mark(cgen, "StubRoutines", "shenandoah_lrb");
address start = __ pc();
- Label work, done;
+ Label slow_path;
__ mov(rscratch2, ShenandoahHeap::in_cset_fast_test_addr());
__ lsr(rscratch1, r0, ShenandoahHeapRegion::region_size_bytes_shift_jint());
__ ldrb(rscratch2, Address(rscratch2, rscratch1));
- __ tbnz(rscratch2, 0, work);
- __ ret(lr);
- __ bind(work);
-
- Label slow_path;
- __ ldr(rscratch1, Address(r0, oopDesc::mark_offset_in_bytes()));
- __ eon(rscratch1, rscratch1, zr);
- __ ands(zr, rscratch1, markWord::lock_mask_in_place);
- __ br(Assembler::NE, slow_path);
-
- // Decode forwarded object.
- __ orr(rscratch1, rscratch1, markWord::marked_value);
- __ eon(r0, rscratch1, zr);
+ __ tbnz(rscratch2, 0, slow_path);
__ ret(lr);
__ bind(slow_path);
@@ -718,7 +706,6 @@ address ShenandoahBarrierSetAssembler::generate_shenandoah_lrb(StubCodeGenerator
__ mov(r0, rscratch1);
__ leave(); // required for proper stackwalking of RuntimeStub frame
- __ bind(done);
__ ret(lr);
return start;
diff --git a/src/hotspot/cpu/aarch64/globalDefinitions_aarch64.hpp b/src/hotspot/cpu/aarch64/globalDefinitions_aarch64.hpp
index 9a1c547b3b5..698a3c200be 100644
--- a/src/hotspot/cpu/aarch64/globalDefinitions_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/globalDefinitions_aarch64.hpp
@@ -56,8 +56,6 @@ const bool CCallingConventionRequiresIntsAsLongs = false;
#define SUPPORT_RESERVED_STACK_AREA
-#define PREFERRED_METASPACE_ALIGNMENT
-
#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS true
#endif // CPU_AARCH64_GLOBALDEFINITIONS_AARCH64_HPP
diff --git a/src/hotspot/cpu/aarch64/globals_aarch64.hpp b/src/hotspot/cpu/aarch64/globals_aarch64.hpp
index f35a42cae2a..d34327a9d82 100644
--- a/src/hotspot/cpu/aarch64/globals_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/globals_aarch64.hpp
@@ -85,9 +85,6 @@ define_pd_global(intx, InlineSmallCode, 1000);
\
product(bool, NearCpool, true, \
"constant pool is close to instructions") \
- \
- product(bool, UseBarriersForVolatile, false, \
- "Use memory barriers to implement volatile accesses") \
product(bool, UseNeon, false, \
"Use Neon for CRC32 computation") \
product(bool, UseCRC32, false, \
diff --git a/src/hotspot/cpu/aarch64/jniFastGetField_aarch64.cpp b/src/hotspot/cpu/aarch64/jniFastGetField_aarch64.cpp
index c31c3c2dd49..edbc3f93587 100644
--- a/src/hotspot/cpu/aarch64/jniFastGetField_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/jniFastGetField_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2004, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -80,23 +80,12 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
__ ldrw(rcounter, safepoint_counter_addr);
__ tbnz(rcounter, 0, slow);
- if (!UseBarriersForVolatile) {
- // Field may be volatile. See other usages of this flag.
- __ membar(MacroAssembler::AnyAny);
- __ mov(robj, c_rarg1);
- } else if (JvmtiExport::can_post_field_access()) {
+ // It doesn't need to issue a full barrier here even if the field
+ // is volatile, since it has already used "ldar" for it.
+ if (JvmtiExport::can_post_field_access()) {
// Using barrier to order wrt. JVMTI check and load of result.
__ membar(Assembler::LoadLoad);
- __ mov(robj, c_rarg1);
- } else {
- // Using address dependency to order wrt. load of result.
- __ eor(robj, c_rarg1, rcounter);
- __ eor(robj, robj, rcounter); // obj, since
- // robj ^ rcounter ^ rcounter == robj
- // robj is address dependent on rcounter.
- }
- if (JvmtiExport::can_post_field_access()) {
// Check to see if a field access watch has been set before we
// take the fast path.
unsigned long offset2;
@@ -105,6 +94,14 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
offset2);
__ ldrw(result, Address(result, offset2));
__ cbnzw(result, slow);
+
+ __ mov(robj, c_rarg1);
+ } else {
+ // Using address dependency to order wrt. load of result.
+ __ eor(robj, c_rarg1, rcounter);
+ __ eor(robj, robj, rcounter); // obj, since
+ // robj ^ rcounter ^ rcounter == robj
+ // robj is address dependent on rcounter.
}
// Both robj and rscratch1 are clobbered by try_resolve_jobject_in_native.
diff --git a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
index a8ba062f932..19564abbd01 100644
--- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -2487,7 +2487,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
// membar it's possible for a simple Dekker test to fail if loads
// use LDR;DMB but stores use STLR. This can happen if C2 compiles
// the stores in one method and we interpret the loads in another.
- if (! UseBarriersForVolatile) {
+    if (!is_c1_or_interpreter_only()) {
Label notVolatile;
__ tbz(raw_flags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
__ membar(MacroAssembler::AnyAny);
@@ -3083,7 +3083,7 @@ void TemplateTable::fast_accessfield(TosState state)
// membar it's possible for a simple Dekker test to fail if loads
// use LDR;DMB but stores use STLR. This can happen if C2 compiles
// the stores in one method and we interpret the loads in another.
- if (! UseBarriersForVolatile) {
+ if (!is_c1_or_interpreter_only()) {
Label notVolatile;
__ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
__ membar(MacroAssembler::AnyAny);
@@ -3145,7 +3145,7 @@ void TemplateTable::fast_xaccess(TosState state)
// membar it's possible for a simple Dekker test to fail if loads
// use LDR;DMB but stores use STLR. This can happen if C2 compiles
// the stores in one method and we interpret the loads in another.
- if (! UseBarriersForVolatile) {
+ if (!is_c1_or_interpreter_only()) {
Label notVolatile;
__ ldrw(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
ConstantPoolCacheEntry::flags_offset())));
diff --git a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
index 515f8736ea5..7d477c04b27 100644
--- a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
@@ -220,7 +220,7 @@ void VM_Version::get_processor_features() {
// ThunderX
if (_cpu == CPU_CAVIUM && (_model == 0xA1)) {
- if (_variant == 0) _features |= CPU_DMB_ATOMICS;
+ guarantee(_variant != 0, "Pre-release hardware no longer supported.");
if (FLAG_IS_DEFAULT(AvoidUnalignedAccesses)) {
FLAG_SET_DEFAULT(AvoidUnalignedAccesses, true);
}
@@ -420,10 +420,6 @@ void VM_Version::get_processor_features() {
FLAG_SET_DEFAULT(UseUnalignedAccesses, true);
}
- if (FLAG_IS_DEFAULT(UseBarriersForVolatile)) {
- UseBarriersForVolatile = (_features & CPU_DMB_ATOMICS) != 0;
- }
-
if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
FLAG_SET_DEFAULT(UsePopCountInstruction, true);
}
diff --git a/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp b/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp
index a4cf9bc49d8..91bba66fa34 100644
--- a/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, 2019, Red Hat Inc. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -98,7 +98,6 @@ class VM_Version : public Abstract_VM_Version {
CPU_LSE = (1<<8),
CPU_STXR_PREFETCH= (1 << 29),
CPU_A53MAC = (1 << 30),
- CPU_DMB_ATOMICS = (1 << 31),
};
static int cpu_family() { return _cpu; }
diff --git a/src/hotspot/cpu/arm/methodHandles_arm.cpp b/src/hotspot/cpu/arm/methodHandles_arm.cpp
index b69efa7bbcc..027387577e5 100644
--- a/src/hotspot/cpu/arm/methodHandles_arm.cpp
+++ b/src/hotspot/cpu/arm/methodHandles_arm.cpp
@@ -488,6 +488,7 @@ void trace_method_handle_stub(const char* adaptername,
if (last_sp != saved_sp && last_sp != NULL)
tty->print_cr("*** last_sp=" INTPTR_FORMAT, p2i(last_sp));
if (Verbose) {
+ ResourceMark rm;
tty->print(" reg dump: ");
int i;
for (i = 0; i < trace_mh_nregs; i++) {
@@ -497,44 +498,43 @@ void trace_method_handle_stub(const char* adaptername,
tty->print(" %s: " INTPTR_FORMAT, reg_name, p2i((void *)saved_regs[i]));
}
tty->cr();
- }
-
- if (Verbose) {
- // dump last frame (from JavaThread::print_frame_layout)
-
- // Note: code is robust but the dumped informationm may not be
- // 100% correct, particularly with respect to the dumped
- // "unextended_sp". Getting it right for all trace_method_handle
- // call paths is not worth the complexity/risk. The correct slot
- // will be identified by *Rsender_sp anyway in the dump.
- JavaThread* p = JavaThread::active();
- ResourceMark rm;
- PRESERVE_EXCEPTION_MARK;
- FrameValues values;
-
- intptr_t* dump_fp = (intptr_t *) saved_bp;
- address dump_pc = (address) saved_regs[trace_mh_nregs-2]; // LR (with LR,PC last in saved_regs)
- frame dump_frame((intptr_t *)entry_sp, dump_fp, dump_pc);
-
- dump_frame.describe(values, 1);
- // mark Rsender_sp if seems valid
- if (has_mh) {
- if ((saved_sp >= entry_sp - UNREASONABLE_STACK_MOVE) && (saved_sp < dump_fp)) {
- values.describe(-1, saved_sp, "*Rsender_sp");
+ {
+ // dump last frame (from JavaThread::print_frame_layout)
+
+      // Note: code is robust but the dumped information may not be
+ // 100% correct, particularly with respect to the dumped
+ // "unextended_sp". Getting it right for all trace_method_handle
+ // call paths is not worth the complexity/risk. The correct slot
+ // will be identified by *Rsender_sp anyway in the dump.
+ JavaThread* p = JavaThread::active();
+
+ PRESERVE_EXCEPTION_MARK;
+ FrameValues values;
+
+ intptr_t* dump_fp = (intptr_t *) saved_bp;
+ address dump_pc = (address) saved_regs[trace_mh_nregs-2]; // LR (with LR,PC last in saved_regs)
+ frame dump_frame((intptr_t *)entry_sp, dump_fp, dump_pc);
+
+ dump_frame.describe(values, 1);
+ // mark Rsender_sp if seems valid
+ if (has_mh) {
+ if ((saved_sp >= entry_sp - UNREASONABLE_STACK_MOVE) && (saved_sp < dump_fp)) {
+ values.describe(-1, saved_sp, "*Rsender_sp");
+ }
}
+
+ // Note: the unextended_sp may not be correct
+ tty->print_cr(" stack layout:");
+ values.print(p);
}
- // Note: the unextended_sp may not be correct
- tty->print_cr(" stack layout:");
- values.print(p);
- }
- if (Verbose) {
if (has_mh && oopDesc::is_oop(mh)) {
mh->print();
if (java_lang_invoke_MethodHandle::is_instance(mh)) {
- if (java_lang_invoke_MethodHandle::form_offset_in_bytes() != 0)
+ if (java_lang_invoke_MethodHandle::form_offset_in_bytes() != 0) {
java_lang_invoke_MethodHandle::form(mh)->print();
+ }
}
}
}
diff --git a/src/hotspot/cpu/ppc/assembler_ppc.hpp b/src/hotspot/cpu/ppc/assembler_ppc.hpp
index 58780fcb09c..18e517c039c 100644
--- a/src/hotspot/cpu/ppc/assembler_ppc.hpp
+++ b/src/hotspot/cpu/ppc/assembler_ppc.hpp
@@ -1933,6 +1933,7 @@ class Assembler : public AbstractAssembler {
inline void td( int tobits, Register a, Register b); // asserts UseSIGTRAP
inline void tw( int tobits, Register a, Register b); // asserts UseSIGTRAP
+ public:
static bool is_tdi(int x, int tobits, int ra, int si16) {
return (TDI_OPCODE == (x & TDI_OPCODE_MASK))
&& (tobits == inv_to_field(x))
@@ -1940,6 +1941,15 @@ class Assembler : public AbstractAssembler {
&& (si16 == inv_si_field(x));
}
+ static int tdi_get_si16(int x, int tobits, int ra) {
+ if (TDI_OPCODE == (x & TDI_OPCODE_MASK)
+ && (tobits == inv_to_field(x))
+ && (ra == -1/*any reg*/ || ra == inv_ra_field(x))) {
+ return inv_si_field(x);
+ }
+ return -1; // No valid tdi instruction.
+ }
+
static bool is_twi(int x, int tobits, int ra, int si16) {
return (TWI_OPCODE == (x & TWI_OPCODE_MASK))
&& (tobits == inv_to_field(x))
@@ -1967,7 +1977,6 @@ class Assembler : public AbstractAssembler {
&& (rb == -1/*any reg*/ || rb == inv_rb_field(x));
}
- public:
// PPC floating point instructions
// PPC 1, section 4.6.2 Floating-Point Load Instructions
inline void lfs( FloatRegister d, int si16, Register a);
diff --git a/src/hotspot/cpu/ppc/c1_FrameMap_ppc.cpp b/src/hotspot/cpu/ppc/c1_FrameMap_ppc.cpp
index 7de34093cfe..3c93cdde917 100644
--- a/src/hotspot/cpu/ppc/c1_FrameMap_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_FrameMap_ppc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2019 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -42,7 +42,7 @@ LIR_Opr FrameMap::map_to_opr(BasicType type, VMRegPair* reg, bool outgoing) {
// The calling convention does not count the SharedRuntime::out_preserve_stack_slots() value
// so we must add it in here.
int st_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
- opr = LIR_OprFact::address(new LIR_Address(SP_opr, st_off + STACK_BIAS, type));
+ opr = LIR_OprFact::address(new LIR_Address(SP_opr, st_off, type));
} else if (r_1->is_Register()) {
Register reg = r_1->as_Register();
//if (outgoing) {
@@ -362,7 +362,7 @@ void FrameMap::initialize() {
Address FrameMap::make_new_address(ByteSize sp_offset) const {
- return Address(R1_SP, STACK_BIAS + in_bytes(sp_offset));
+ return Address(R1_SP, in_bytes(sp_offset));
}
@@ -394,5 +394,5 @@ bool FrameMap::validate_frame() {
}
java_index += type2size[opr->type()];
}
- return Assembler::is_simm16(max_offset + STACK_BIAS);
+ return Assembler::is_simm16(max_offset);
}
diff --git a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp
index 1da4ce7d5ab..468a81dd7d9 100644
--- a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp
@@ -1197,7 +1197,7 @@ void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
addr = frame_map()->address_for_double_slot(src->double_stack_ix());
}
- bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
+ bool unaligned = addr.disp() % 8 != 0;
load(addr.base(), addr.disp(), dest, dest->type(), true /*wide*/, unaligned);
}
@@ -1209,7 +1209,7 @@ void LIR_Assembler::reg2stack(LIR_Opr from_reg, LIR_Opr dest, BasicType type, bo
} else if (dest->is_double_word()) {
addr = frame_map()->address_for_slot(dest->double_stack_ix());
}
- bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
+ bool unaligned = addr.disp() % 8 != 0;
store(from_reg, addr.base(), addr.disp(), from_reg->type(), true /*wide*/, unaligned);
}
@@ -3086,7 +3086,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
} else {
__ cmpdi(CCR0, obj, 0);
__ bne(CCR0, Lupdate);
- __ stop("unexpect null obj", 0x9652);
+ __ stop("unexpect null obj");
#endif
}
@@ -3103,7 +3103,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
metadata2reg(exact_klass->constant_encoding(), R0);
__ cmpd(CCR0, klass, R0);
__ beq(CCR0, ok);
- __ stop("exact klass and actual klass differ", 0x8564);
+ __ stop("exact klass and actual klass differ");
__ bind(ok);
}
#endif
@@ -3170,7 +3170,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
__ clrrdi_(R0, tmp, exact_log2(-TypeEntries::type_mask));
__ beq(CCR0, ok); // First time here.
- __ stop("unexpected profiling mismatch", 0x7865);
+ __ stop("unexpected profiling mismatch");
__ bind(ok);
}
#endif
diff --git a/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp b/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp
index 9c15fbccfbd..e61bdcb5140 100644
--- a/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2019, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -309,7 +309,7 @@ void LIRGenerator::store_stack_parameter(LIR_Opr item, ByteSize offset_from_sp)
BasicType t = item->type();
LIR_Opr sp_opr = FrameMap::SP_opr;
if ((t == T_LONG || t == T_DOUBLE) &&
- ((in_bytes(offset_from_sp) - STACK_BIAS) % 8 != 0)) {
+ (in_bytes(offset_from_sp) % 8 != 0)) {
__ unaligned_move(item, new LIR_Address(sp_opr, in_bytes(offset_from_sp), t));
} else {
__ move(item, new LIR_Address(sp_opr, in_bytes(offset_from_sp), t));
diff --git a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp
index 4fd6a35642f..fea4b9ca578 100644
--- a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -294,7 +294,7 @@ void C1_MacroAssembler::initialize_object(
} else {
cmpwi(CCR0, t1, con_size_in_bytes);
}
- asm_assert_eq("bad size in initialize_object", 0x753);
+ asm_assert_eq("bad size in initialize_object");
}
#endif
@@ -390,7 +390,7 @@ void C1_MacroAssembler::allocate_array(
#ifndef PRODUCT
void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
- verify_oop_addr((RegisterOrConstant)(stack_offset + STACK_BIAS), R1_SP, "broken oop in stack slot");
+ verify_oop_addr((RegisterOrConstant)stack_offset, R1_SP, "broken oop in stack slot");
}
void C1_MacroAssembler::verify_not_null_oop(Register r) {
diff --git a/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp b/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp
index 37f4566b34a..94b53b010aa 100644
--- a/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -199,7 +199,7 @@ static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers
Register r = as_Register(i);
if (FrameMap::reg_needs_save(r)) {
int sp_offset = cpu_reg_save_offsets[i];
- __ std(r, sp_offset + STACK_BIAS, R1_SP);
+ __ std(r, sp_offset, R1_SP);
}
}
@@ -207,7 +207,7 @@ static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers
for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
FloatRegister r = as_FloatRegister(i);
int sp_offset = fpu_reg_save_offsets[i];
- __ stfd(r, sp_offset + STACK_BIAS, R1_SP);
+ __ stfd(r, sp_offset, R1_SP);
}
}
@@ -220,7 +220,7 @@ static void restore_live_registers(StubAssembler* sasm, Register result1, Regist
Register r = as_Register(i);
if (FrameMap::reg_needs_save(r) && r != result1 && r != result2) {
int sp_offset = cpu_reg_save_offsets[i];
- __ ld(r, sp_offset + STACK_BIAS, R1_SP);
+ __ ld(r, sp_offset, R1_SP);
}
}
@@ -228,7 +228,7 @@ static void restore_live_registers(StubAssembler* sasm, Register result1, Regist
for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
FloatRegister r = as_FloatRegister(i);
int sp_offset = fpu_reg_save_offsets[i];
- __ lfd(r, sp_offset + STACK_BIAS, R1_SP);
+ __ lfd(r, sp_offset, R1_SP);
}
}
@@ -796,10 +796,10 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
// empty before writing to them.
__ ld(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
__ cmpdi(CCR0, R0, 0);
- __ asm_assert_eq("exception oop already set", 0x963);
+ __ asm_assert_eq("exception oop already set");
__ ld(R0, in_bytes(JavaThread::exception_pc_offset() ), R16_thread);
__ cmpdi(CCR0, R0, 0);
- __ asm_assert_eq("exception pc already set", 0x962);
+ __ asm_assert_eq("exception pc already set");
#endif
// Save the exception and issuing pc in the thread.
diff --git a/src/hotspot/cpu/ppc/disassembler_ppc.cpp b/src/hotspot/cpu/ppc/disassembler_ppc.cpp
index eeae7eb19a5..9c6ea38acb8 100644
--- a/src/hotspot/cpu/ppc/disassembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/disassembler_ppc.cpp
@@ -164,6 +164,8 @@ void Disassembler::annotate(address here, outputStream* st) {
const uint pos = st->position();
const uint aligned_pos = ((pos+tabspacing-1)/tabspacing)*tabspacing;
+ int stop_type = -1;
+
if (MacroAssembler::is_bcxx(instruction)) {
st->print(",bo=0b");
print_instruction_bits(st, instruction, 6, 10);
@@ -180,9 +182,6 @@ void Disassembler::annotate(address here, outputStream* st) {
print_decoded_bh_bits(st, instruction, 20,
!(MacroAssembler::is_bctr(instruction) ||
MacroAssembler::is_bctrl(instruction)));
- } else if (MacroAssembler::is_trap_should_not_reach_here(instruction)) {
- st->fill_to(aligned_pos + tabspacing);
- st->print(";trap: should not reach here");
} else if (MacroAssembler::is_trap_null_check(instruction)) {
st->fill_to(aligned_pos + tabspacing);
st->print(";trap: null check");
@@ -192,8 +191,8 @@ void Disassembler::annotate(address here, outputStream* st) {
} else if (MacroAssembler::is_trap_ic_miss_check(instruction)) {
st->fill_to(aligned_pos + tabspacing);
st->print(";trap: ic miss check");
- } else if (MacroAssembler::is_trap_zombie_not_entrant(instruction)) {
+ } else if ((stop_type = MacroAssembler::tdi_get_si16(instruction, Assembler::traptoUnconditional, 0)) != -1) {
st->fill_to(aligned_pos + tabspacing);
- st->print(";trap: zombie");
+ st->print(";trap: stop type %d", stop_type);
}
}
diff --git a/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp b/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp
index 2b1994930c2..0cfde28890c 100644
--- a/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp
@@ -152,7 +152,7 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm, Decorator
if (preloaded && not_null) {
#ifdef ASSERT
__ cmpdi(CCR0, pre_val, 0);
- __ asm_assert_ne("null oop not allowed (G1 pre)", 0x321); // Checked by caller.
+ __ asm_assert_ne("null oop not allowed (G1 pre)"); // Checked by caller.
#endif
} else {
__ cmpdi(CCR0, pre_val, 0);
@@ -223,7 +223,7 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, Decorato
if (not_null) {
#ifdef ASSERT
__ cmpdi(CCR0, new_val, 0);
- __ asm_assert_ne("null oop not allowed (G1 post)", 0x322); // Checked by caller.
+ __ asm_assert_ne("null oop not allowed (G1 post)"); // Checked by caller.
#endif
} else {
__ cmpdi(CCR0, new_val, 0);
diff --git a/src/hotspot/cpu/ppc/globalDefinitions_ppc.hpp b/src/hotspot/cpu/ppc/globalDefinitions_ppc.hpp
index 7db26a318e9..6fa63a20acd 100644
--- a/src/hotspot/cpu/ppc/globalDefinitions_ppc.hpp
+++ b/src/hotspot/cpu/ppc/globalDefinitions_ppc.hpp
@@ -58,10 +58,6 @@ const bool CCallingConventionRequiresIntsAsLongs = true;
#define INCLUDE_RTM_OPT 0
#endif
-#if defined(AIX)
-#define PREFERRED_METASPACE_ALIGNMENT
-#endif
-
#define SUPPORT_RESERVED_STACK_AREA
// If UseSIGTRAP is active, we only use the poll bit and no polling page.
diff --git a/src/hotspot/cpu/ppc/globals_ppc.hpp b/src/hotspot/cpu/ppc/globals_ppc.hpp
index e9752737b10..66b5dacba66 100644
--- a/src/hotspot/cpu/ppc/globals_ppc.hpp
+++ b/src/hotspot/cpu/ppc/globals_ppc.hpp
@@ -147,9 +147,6 @@ define_pd_global(intx, InitArrayShortSize, 9*BytesPerLong);
"switch off all optimizations requiring SIGTRAP.") \
product(bool, TrapBasedICMissChecks, true, \
"Raise and handle SIGTRAP if inline cache miss detected.") \
- product(bool, TrapBasedNotEntrantChecks, true, \
- "Raise and handle SIGTRAP if calling not entrant or zombie" \
- " method.") \
product(bool, TraceTraps, false, "Trace all traps the signal handler" \
"handles.") \
\
diff --git a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp
index 8a3e5ad981d..522565e00d9 100644
--- a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp
+++ b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp
@@ -490,7 +490,7 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result
sldi(R0, R0, LogBytesPerHeapOop);
cmpd(CCR0, tmp, R0);
blt(CCR0, index_ok);
- stop("resolved reference index out of bounds", 0x09256);
+ stop("resolved reference index out of bounds");
bind(index_ok);
#endif
// Add in the index.
@@ -1143,7 +1143,7 @@ void InterpreterMacroAssembler::call_from_interpreter(Register Rtarget_method, R
#ifdef ASSERT
ld(Rscratch1, _ijava_state_neg(top_frame_sp), Rscratch2); // Rscratch2 contains fp
cmpd(CCR0, R21_sender_SP, Rscratch1);
- asm_assert_eq("top_frame_sp incorrect", 0x951);
+ asm_assert_eq("top_frame_sp incorrect");
#endif
bctr();
@@ -2251,7 +2251,7 @@ void InterpreterMacroAssembler::restore_interpreter_state(Register scratch, bool
subf(R0, R1_SP, scratch);
cmpdi(CCR0, R0, frame::abi_reg_args_size + frame::ijava_state_size);
bge(CCR0, Lok);
- stop("frame too small (restore istate)", 0x5432);
+ stop("frame too small (restore istate)");
bind(Lok);
}
#endif
diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
index 3a0d1e453ae..c63117e303e 100644
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
@@ -902,7 +902,7 @@ void MacroAssembler::resize_frame(Register offset, Register tmp) {
#ifdef ASSERT
assert_different_registers(offset, tmp, R1_SP);
andi_(tmp, offset, frame::alignment_in_bytes-1);
- asm_assert_eq("resize_frame: unaligned", 0x204);
+ asm_assert_eq("resize_frame: unaligned");
#endif
// tmp <- *(SP)
@@ -941,7 +941,7 @@ void MacroAssembler::push_frame(Register bytes, Register tmp) {
#ifdef ASSERT
assert(bytes != R0, "r0 not allowed here");
andi_(R0, bytes, frame::alignment_in_bytes-1);
- asm_assert_eq("push_frame(Reg, Reg): unaligned", 0x203);
+ asm_assert_eq("push_frame(Reg, Reg): unaligned");
#endif
neg(tmp, bytes);
stdux(R1_SP, R1_SP, tmp);
@@ -2313,7 +2313,7 @@ void MacroAssembler::tlab_allocate(
Label L;
andi_(R0, new_top, MinObjAlignmentInBytesMask);
beq(CCR0, L);
- stop("updated TLAB free is not properly aligned", 0x934);
+ stop("updated TLAB free is not properly aligned");
bind(L);
}
#endif // ASSERT
@@ -2792,7 +2792,7 @@ void MacroAssembler::rtm_inflated_locking(ConditionRegister flag,
ld(mark_word, oopDesc::mark_offset_in_bytes(), obj);
#ifdef ASSERT
andi_(R0, mark_word, markWord::monitor_value);
- asm_assert_ne("must be inflated", 0xa754); // Deflating only allowed at safepoint.
+ asm_assert_ne("must be inflated"); // Deflating only allowed at safepoint.
#endif
addi(owner_addr_Reg, mark_word, owner_offset);
}
@@ -2929,7 +2929,7 @@ void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register
// Invariant 1: _recursions should be 0.
//assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size");
asm_assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), temp,
- "monitor->_recursions should be 0", -1);
+ "monitor->_recursions should be 0");
# endif
#if INCLUDE_RTM_OPT
@@ -3058,7 +3058,7 @@ void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Ja
// Verify that last_Java_pc was zeroed on return to Java
asm_assert_mem8_is_zero(in_bytes(JavaThread::last_Java_pc_offset()), R16_thread,
- "last_Java_pc not zeroed before leaving Java", 0x200);
+ "last_Java_pc not zeroed before leaving Java");
// When returning from calling out from Java mode the frame anchor's
// last_Java_pc will always be set to NULL. It is set here so that
@@ -3074,7 +3074,7 @@ void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Ja
void MacroAssembler::reset_last_Java_frame(void) {
asm_assert_mem8_isnot_zero(in_bytes(JavaThread::last_Java_sp_offset()),
- R16_thread, "SP was not set, still zero", 0x202);
+ R16_thread, "SP was not set, still zero");
BLOCK_COMMENT("reset_last_Java_frame {");
li(R0, 0);
@@ -4327,7 +4327,7 @@ void MacroAssembler::multiply_to_len(Register x, Register xlen,
bind(L_done);
} // multiply_to_len
-void MacroAssembler::asm_assert(bool check_equal, const char *msg, int id) {
+void MacroAssembler::asm_assert(bool check_equal, const char *msg) {
#ifdef ASSERT
Label ok;
if (check_equal) {
@@ -4335,13 +4335,13 @@ void MacroAssembler::asm_assert(bool check_equal, const char *msg, int id) {
} else {
bne(CCR0, ok);
}
- stop(msg, id);
+ stop(msg);
bind(ok);
#endif
}
void MacroAssembler::asm_assert_mems_zero(bool check_equal, int size, int mem_offset,
- Register mem_base, const char* msg, int id) {
+ Register mem_base, const char* msg) {
#ifdef ASSERT
switch (size) {
case 4:
@@ -4355,7 +4355,7 @@ void MacroAssembler::asm_assert_mems_zero(bool check_equal, int size, int mem_of
default:
ShouldNotReachHere();
}
- asm_assert(check_equal, msg, id);
+ asm_assert(check_equal, msg);
#endif // ASSERT
}
@@ -4430,32 +4430,20 @@ void MacroAssembler::verify_oop_addr(RegisterOrConstant offs, Register base, con
restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
}
-const char* stop_types[] = {
- "stop",
- "untested",
- "unimplemented",
- "shouldnotreachhere"
-};
-
-static void stop_on_request(int tp, const char* msg) {
- tty->print("PPC assembly code requires stop: (%s) %s\n", stop_types[tp%/*stop_end*/4], msg);
- guarantee(false, "PPC assembly code requires stop: %s", msg);
-}
-
// Call a C-function that prints output.
-void MacroAssembler::stop(int type, const char* msg, int id) {
+void MacroAssembler::stop(int type, const char* msg) {
#ifndef PRODUCT
- block_comment(err_msg("stop: %s %s {", stop_types[type%stop_end], msg));
+ block_comment(err_msg("stop(type %d): %s {", type, msg));
#else
block_comment("stop {");
#endif
- // setup arguments
- load_const_optimized(R3_ARG1, type);
- load_const_optimized(R4_ARG2, (void *)msg, /*tmp=*/R0);
- call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), R3_ARG1, R4_ARG2);
- illtrap();
- emit_int32(id);
+ if (type != stop_shouldnotreachhere) {
+ // Use R0 to pass msg. "shouldnotreachhere" preserves R0.
+ load_const_optimized(R0, (void*)msg);
+ }
+ tdi_unchecked(traptoUnconditional, 0/*reg 0*/, type);
+
block_comment("} stop;");
}
diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp
index af598ed4c14..6ae44b22aa6 100644
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp
@@ -673,12 +673,6 @@ class MacroAssembler: public Assembler {
is_tdi(x, traptoGreaterThanUnsigned, -1/*any reg*/, 0);
}
- inline void trap_zombie_not_entrant();
- static bool is_trap_zombie_not_entrant(int x) { return is_tdi(x, traptoUnconditional, 0/*reg 0*/, 1); }
-
- inline void trap_should_not_reach_here();
- static bool is_trap_should_not_reach_here(int x) { return is_tdi(x, traptoUnconditional, 0/*reg 0*/, 2); }
-
inline void trap_ic_miss_check(Register a, Register b);
static bool is_trap_ic_miss_check(int x) {
return is_td(x, traptoGreaterThanUnsigned | traptoLessThanUnsigned, -1/*any reg*/, -1/*any reg*/);
@@ -863,21 +857,21 @@ class MacroAssembler: public Assembler {
//
// assert on cr0
- void asm_assert(bool check_equal, const char* msg, int id);
- void asm_assert_eq(const char* msg, int id) { asm_assert(true, msg, id); }
- void asm_assert_ne(const char* msg, int id) { asm_assert(false, msg, id); }
+ void asm_assert(bool check_equal, const char* msg);
+ void asm_assert_eq(const char* msg) { asm_assert(true, msg); }
+ void asm_assert_ne(const char* msg) { asm_assert(false, msg); }
private:
void asm_assert_mems_zero(bool check_equal, int size, int mem_offset, Register mem_base,
- const char* msg, int id);
+ const char* msg);
public:
- void asm_assert_mem8_is_zero(int mem_offset, Register mem_base, const char* msg, int id) {
- asm_assert_mems_zero(true, 8, mem_offset, mem_base, msg, id);
+ void asm_assert_mem8_is_zero(int mem_offset, Register mem_base, const char* msg) {
+ asm_assert_mems_zero(true, 8, mem_offset, mem_base, msg);
}
- void asm_assert_mem8_isnot_zero(int mem_offset, Register mem_base, const char* msg, int id) {
- asm_assert_mems_zero(false, 8, mem_offset, mem_base, msg, id);
+ void asm_assert_mem8_isnot_zero(int mem_offset, Register mem_base, const char* msg) {
+ asm_assert_mems_zero(false, 8, mem_offset, mem_base, msg);
}
// Verify R16_thread contents.
@@ -903,22 +897,21 @@ class MacroAssembler: public Assembler {
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
private:
+ void stop(int type, const char* msg);
+ public:
enum {
stop_stop = 0,
stop_untested = 1,
stop_unimplemented = 2,
- stop_shouldnotreachhere = 3,
- stop_end = 4
+ stop_shouldnotreachhere = 3
};
- void stop(int type, const char* msg, int id);
- public:
// Prints msg, dumps registers and stops execution.
- void stop (const char* msg = "", int id = 0) { stop(stop_stop, msg, id); }
- void untested (const char* msg = "", int id = 0) { stop(stop_untested, msg, id); }
- void unimplemented(const char* msg = "", int id = 0) { stop(stop_unimplemented, msg, id); }
- void should_not_reach_here() { stop(stop_shouldnotreachhere, "", -1); }
+ void stop (const char* msg = NULL) { stop(stop_stop, msg ); }
+ void untested (const char* msg = NULL) { stop(stop_untested, msg ); }
+ void unimplemented(const char* msg = NULL) { stop(stop_unimplemented, msg ); }
+ void should_not_reach_here() { stop(stop_shouldnotreachhere, NULL); }
void zap_from_to(Register low, int before, Register high, int after, Register val, Register addr) PRODUCT_RETURN;
};
diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.inline.hpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.inline.hpp
index 5bd502c617e..9a0f4afbcd5 100644
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.inline.hpp
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.inline.hpp
@@ -281,12 +281,6 @@ inline void MacroAssembler::trap_null_check(Register a, trap_to_bits cmp) {
assert(TrapBasedNullChecks, "sanity");
tdi(cmp, a/*reg a*/, 0);
}
-inline void MacroAssembler::trap_zombie_not_entrant() {
- tdi(traptoUnconditional, 0/*reg 0*/, 1);
-}
-inline void MacroAssembler::trap_should_not_reach_here() {
- tdi_unchecked(traptoUnconditional, 0/*reg 0*/, 2);
-}
inline void MacroAssembler::trap_ic_miss_check(Register a, Register b) {
td(traptoGreaterThanUnsigned | traptoLessThanUnsigned, a, b);
diff --git a/src/hotspot/cpu/ppc/methodHandles_ppc.cpp b/src/hotspot/cpu/ppc/methodHandles_ppc.cpp
index b0fb1b73625..2cc37c53441 100644
--- a/src/hotspot/cpu/ppc/methodHandles_ppc.cpp
+++ b/src/hotspot/cpu/ppc/methodHandles_ppc.cpp
@@ -483,6 +483,7 @@ void trace_method_handle_stub(const char* adaptername,
adaptername, mh_reg_name, p2i(mh), p2i(entry_sp));
if (Verbose) {
+ ResourceMark rm;
tty->print_cr("Registers:");
const int abi_offset = frame::abi_reg_args_size / 8;
for (int i = R3->encoding(); i <= R12->encoding(); i++) {
@@ -503,7 +504,6 @@ void trace_method_handle_stub(const char* adaptername,
JavaThread* p = JavaThread::active();
- ResourceMark rm;
PRESERVE_EXCEPTION_MARK; // may not be needed by safer and unexpensive here
FrameValues values;
@@ -538,8 +538,9 @@ void trace_method_handle_stub(const char* adaptername,
if (has_mh && oopDesc::is_oop(mh)) {
mh->print();
if (java_lang_invoke_MethodHandle::is_instance(mh)) {
- if (java_lang_invoke_MethodHandle::form_offset_in_bytes() != 0)
+ if (java_lang_invoke_MethodHandle::form_offset_in_bytes() != 0) {
java_lang_invoke_MethodHandle::form(mh)->print();
+ }
}
}
}
diff --git a/src/hotspot/cpu/ppc/nativeInst_ppc.cpp b/src/hotspot/cpu/ppc/nativeInst_ppc.cpp
index 18efcc85d80..a34f0a8d79a 100644
--- a/src/hotspot/cpu/ppc/nativeInst_ppc.cpp
+++ b/src/hotspot/cpu/ppc/nativeInst_ppc.cpp
@@ -39,10 +39,9 @@
#include "c1/c1_Runtime1.hpp"
#endif
-// We use an illtrap for marking a method as not_entrant or zombie iff !UseSIGTRAP
+// We use an illtrap for marking a method as not_entrant or zombie
// Work around a C++ compiler bug which changes 'this'
bool NativeInstruction::is_sigill_zombie_not_entrant_at(address addr) {
- assert(!UseSIGTRAP, "precondition");
if (*(int*)addr != 0 /*illtrap*/) return false;
CodeBlob* cb = CodeCache::find_blob_unsafe(addr);
if (cb == NULL || !cb->is_nmethod()) return false;
@@ -335,13 +334,8 @@ void NativeJump::patch_verified_entry(address entry, address verified_entry, add
a->b(dest);
} else {
// The signal handler will continue at dest=OptoRuntime::handle_wrong_method_stub().
- if (TrapBasedNotEntrantChecks) {
- // We use a special trap for marking a method as not_entrant or zombie.
- a->trap_zombie_not_entrant();
- } else {
- // We use an illtrap for marking a method as not_entrant or zombie.
- a->illtrap();
- }
+ // We use an illtrap for marking a method as not_entrant or zombie.
+ a->illtrap();
}
ICache::ppc64_flush_icache_bytes(verified_entry, code_size);
}
diff --git a/src/hotspot/cpu/ppc/nativeInst_ppc.hpp b/src/hotspot/cpu/ppc/nativeInst_ppc.hpp
index 37dd76780ce..02f7779cc5d 100644
--- a/src/hotspot/cpu/ppc/nativeInst_ppc.hpp
+++ b/src/hotspot/cpu/ppc/nativeInst_ppc.hpp
@@ -60,17 +60,12 @@ class NativeInstruction {
return MacroAssembler::is_trap_null_check(long_at(0));
}
- // We use a special trap for marking a method as not_entrant or zombie
- // iff UseSIGTRAP.
- bool is_sigtrap_zombie_not_entrant() {
- assert(UseSIGTRAP, "precondition");
- return MacroAssembler::is_trap_zombie_not_entrant(long_at(0));
+ int get_stop_type() {
+ return MacroAssembler::tdi_get_si16(long_at(0), Assembler::traptoUnconditional, 0);
}
- // We use an illtrap for marking a method as not_entrant or zombie
- // iff !UseSIGTRAP.
+ // We use an illtrap for marking a method as not_entrant or zombie.
bool is_sigill_zombie_not_entrant() {
- assert(!UseSIGTRAP, "precondition");
// Work around a C++ compiler bug which changes 'this'.
return NativeInstruction::is_sigill_zombie_not_entrant_at(addr_at(0));
}
@@ -84,11 +79,6 @@ class NativeInstruction {
}
#endif
- // 'should not reach here'.
- bool is_sigtrap_should_not_reach_here() {
- return MacroAssembler::is_trap_should_not_reach_here(long_at(0));
- }
-
bool is_safepoint_poll() {
// Is the current instruction a POTENTIAL read access to the polling page?
// The current arguments of the instruction are not checked!
diff --git a/src/hotspot/cpu/ppc/ppc.ad b/src/hotspot/cpu/ppc/ppc.ad
index ff85a36f39d..c5034a48c84 100644
--- a/src/hotspot/cpu/ppc/ppc.ad
+++ b/src/hotspot/cpu/ppc/ppc.ad
@@ -15196,7 +15196,7 @@ instruct ShouldNotReachHere() %{
ins_encode %{
if (is_reachable()) {
// TODO: PPC port $archOpcode(ppc64Opcode_tdi);
- __ trap_should_not_reach_here();
+ __ should_not_reach_here();
}
%}
ins_pipe(pipe_class_default);
diff --git a/src/hotspot/cpu/ppc/runtime_ppc.cpp b/src/hotspot/cpu/ppc/runtime_ppc.cpp
index dc3891ebbfa..088acbed75e 100644
--- a/src/hotspot/cpu/ppc/runtime_ppc.cpp
+++ b/src/hotspot/cpu/ppc/runtime_ppc.cpp
@@ -108,7 +108,7 @@ void OptoRuntime::generate_exception_blob() {
address calls_return_pc = __ last_calls_return_pc();
# ifdef ASSERT
__ cmpdi(CCR0, R3_RET, 0);
- __ asm_assert_ne("handle_exception_C must not return NULL", 0x601);
+ __ asm_assert_ne("handle_exception_C must not return NULL");
# endif
// Set an oopmap for the call site. This oopmap will only be used if we
diff --git a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
index 7424e622f21..0713d8326a2 100644
--- a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
+++ b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
@@ -2345,7 +2345,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
RegisterSaver::restore_argument_registers_and_pop_frame(masm, frame_size, total_c_args, out_regs, out_regs2);
__ asm_assert_mem8_is_zero(thread_(pending_exception),
- "no pending exception allowed on exit from SharedRuntime::complete_monitor_locking_C", 0);
+ "no pending exception allowed on exit from SharedRuntime::complete_monitor_locking_C");
__ bind(locked);
}
@@ -2558,7 +2558,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), r_oop, r_box, R16_thread);
__ asm_assert_mem8_is_zero(thread_(pending_exception),
- "no pending exception allowed on exit from SharedRuntime::complete_monitor_unlocking_C", 0);
+ "no pending exception allowed on exit from SharedRuntime::complete_monitor_unlocking_C");
restore_native_result(masm, ret_type, workspace_slot_offset);
@@ -2772,7 +2772,7 @@ static void push_skeleton_frames(MacroAssembler* masm, bool deopt,
#ifdef ASSERT
// Make sure that there is at least one entry in the array.
__ cmpdi(CCR0, number_of_frames_reg, 0);
- __ asm_assert_ne("array_size must be > 0", 0x205);
+ __ asm_assert_ne("array_size must be > 0");
#endif
// Now push the new interpreter frames.
@@ -3084,7 +3084,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
#ifdef ASSERT
__ lwz(R22_tmp2, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes(), unroll_block_reg);
__ cmpdi(CCR0, R22_tmp2, (unsigned)Deoptimization::Unpack_uncommon_trap);
- __ asm_assert_eq("SharedRuntime::generate_deopt_blob: expected Unpack_uncommon_trap", 0);
+ __ asm_assert_eq("SharedRuntime::generate_deopt_blob: expected Unpack_uncommon_trap");
#endif
// Allocate new interpreter frame(s) and possibly a c2i adapter
diff --git a/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp b/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp
index b9bbd509434..5c94698dc57 100644
--- a/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp
+++ b/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp
@@ -547,7 +547,7 @@ class StubGenerator: public StubCodeGenerator {
address frame_complete_pc = __ pc();
if (restore_saved_exception_pc) {
- __ unimplemented("StubGenerator::throw_exception with restore_saved_exception_pc", 74);
+ __ unimplemented("StubGenerator::throw_exception with restore_saved_exception_pc");
}
// Note that we always have a runtime stub frame on the top of
@@ -921,7 +921,7 @@ class StubGenerator: public StubCodeGenerator {
inline void assert_positive_int(Register count) {
#ifdef ASSERT
__ srdi_(R0, count, 31);
- __ asm_assert_eq("missing zero extend", 0xAFFE);
+ __ asm_assert_eq("missing zero extend");
#endif
}
@@ -2181,7 +2181,7 @@ class StubGenerator: public StubCodeGenerator {
// Overlaps if Src before dst and distance smaller than size.
// Branch to forward copy routine otherwise.
__ blt(CCR0, no_overlap);
- __ stop("overlap in checkcast_copy", 0x9543);
+ __ stop("overlap in checkcast_copy");
__ bind(no_overlap);
}
#endif
diff --git a/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp b/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp
index 22bd173b582..dee03f6c6ef 100644
--- a/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp
+++ b/src/hotspot/cpu/ppc/templateInterpreterGenerator_ppc.cpp
@@ -216,7 +216,7 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
__ bind(do_dontreachhere);
- __ unimplemented("ShouldNotReachHere in slow_signature_handler", 120);
+ __ unimplemented("ShouldNotReachHere in slow_signature_handler");
__ bind(do_array);
@@ -834,12 +834,12 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rmem_f
Label frame_not_shrunk;
__ cmpld(CCR0, R1_SP, R21_sender_SP);
__ ble(CCR0, frame_not_shrunk);
- __ stop("frame shrunk", 0x546);
+ __ stop("frame shrunk");
__ bind(frame_not_shrunk);
__ ld(Rscratch1, 0, R1_SP);
__ ld(R0, 0, R21_sender_SP);
__ cmpd(CCR0, R0, Rscratch1);
- __ asm_assert_eq("backlink", 0x547);
+ __ asm_assert_eq("backlink");
#endif // ASSERT
__ mr(R1_SP, R21_sender_SP);
__ bctr();
@@ -1726,7 +1726,7 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
Label Lok;
__ lwz(R0, in_bytes(Method::access_flags_offset()), R19_method);
__ andi_(R0, R0, JVM_ACC_SYNCHRONIZED);
- __ asm_assert_eq("method needs synchronization", 0x8521);
+ __ asm_assert_eq("method needs synchronization");
__ bind(Lok);
}
#endif // ASSERT
diff --git a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp
index bb99cc0ce49..089034e4d38 100644
--- a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp
+++ b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp
@@ -2518,7 +2518,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
#ifdef ASSERT
__ bind(LFlagInvalid);
- __ stop("got invalid flag", 0x654);
+ __ stop("got invalid flag");
#endif
if (!is_static && rc == may_not_rewrite) {
@@ -2533,7 +2533,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
assert(__ pc() - pc_before_fence == (ptrdiff_t)BytesPerInstWord, "must be single instruction");
assert(branch_table[vtos] == 0, "can't compute twice");
branch_table[vtos] = __ pc(); // non-volatile_entry point
- __ stop("vtos unexpected", 0x655);
+ __ stop("vtos unexpected");
#endif
__ align(32, 28, 28); // Align load.
@@ -2847,7 +2847,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
#ifdef ASSERT
__ bind(LFlagInvalid);
- __ stop("got invalid flag", 0x656);
+ __ stop("got invalid flag");
// __ bind(Lvtos);
address pc_before_release = __ pc();
@@ -2855,7 +2855,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
assert(__ pc() - pc_before_release == (ptrdiff_t)BytesPerInstWord, "must be single instruction");
assert(branch_table[vtos] == 0, "can't compute twice");
branch_table[vtos] = __ pc(); // non-volatile_entry point
- __ stop("vtos unexpected", 0x657);
+ __ stop("vtos unexpected");
#endif
__ align(32, 28, 28); // Align pop.
diff --git a/src/hotspot/cpu/ppc/vm_version_ppc.cpp b/src/hotspot/cpu/ppc/vm_version_ppc.cpp
index 7d21a99e921..a62255eb72f 100644
--- a/src/hotspot/cpu/ppc/vm_version_ppc.cpp
+++ b/src/hotspot/cpu/ppc/vm_version_ppc.cpp
@@ -102,9 +102,7 @@ void VM_Version::initialize() {
if (!UseSIGTRAP) {
MSG(TrapBasedICMissChecks);
- MSG(TrapBasedNotEntrantChecks);
MSG(TrapBasedNullChecks);
- FLAG_SET_ERGO(TrapBasedNotEntrantChecks, false);
FLAG_SET_ERGO(TrapBasedNullChecks, false);
FLAG_SET_ERGO(TrapBasedICMissChecks, false);
}
diff --git a/src/hotspot/cpu/ppc/vtableStubs_ppc_64.cpp b/src/hotspot/cpu/ppc/vtableStubs_ppc_64.cpp
index aa9fda17024..796a8b1bf78 100644
--- a/src/hotspot/cpu/ppc/vtableStubs_ppc_64.cpp
+++ b/src/hotspot/cpu/ppc/vtableStubs_ppc_64.cpp
@@ -113,7 +113,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
Label L;
__ cmpdi(CCR0, R19_method, 0);
__ bne(CCR0, L);
- __ stop("Vtable entry is ZERO", 102);
+ __ stop("Vtable entry is ZERO");
__ bind(L);
}
#endif
@@ -199,7 +199,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
Label ok;
__ cmpd(CCR0, R19_method, 0);
__ bne(CCR0, ok);
- __ stop("method is null", 103);
+ __ stop("method is null");
__ bind(ok);
}
#endif
diff --git a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp
index d5a99a73ff0..02d1d6a7973 100644
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp
@@ -977,7 +977,7 @@ address ShenandoahBarrierSetAssembler::generate_shenandoah_lrb(StubCodeGenerator
StubCodeMark mark(cgen, "StubRoutines", "shenandoah_lrb");
address start = __ pc();
- Label resolve_oop, slow_path;
+ Label slow_path;
// We use RDI, which also serves as argument register for slow call.
// RAX always holds the src object ptr, except after the slow call,
@@ -995,25 +995,7 @@ address ShenandoahBarrierSetAssembler::generate_shenandoah_lrb(StubCodeGenerator
__ movptr(tmp2, (intptr_t) ShenandoahHeap::in_cset_fast_test_addr());
__ movbool(tmp2, Address(tmp2, tmp1, Address::times_1));
__ testbool(tmp2);
- __ jccb(Assembler::notZero, resolve_oop);
- __ pop(tmp2);
- __ pop(tmp1);
- __ ret(0);
-
- // Test if object is already resolved.
- __ bind(resolve_oop);
- __ movptr(tmp2, Address(rax, oopDesc::mark_offset_in_bytes()));
- // Test if both lowest bits are set. We trick it by negating the bits
- // then test for both bits clear.
- __ notptr(tmp2);
- __ testb(tmp2, markWord::marked_value);
__ jccb(Assembler::notZero, slow_path);
- // Clear both lower bits. It's still inverted, so set them, and then invert back.
- __ orptr(tmp2, markWord::marked_value);
- __ notptr(tmp2);
- // At this point, tmp2 contains the decoded forwarding pointer.
- __ mov(rax, tmp2);
-
__ pop(tmp2);
__ pop(tmp1);
__ ret(0);
diff --git a/src/hotspot/cpu/x86/methodHandles_x86.cpp b/src/hotspot/cpu/x86/methodHandles_x86.cpp
index 912e8411716..3a46b7091bd 100644
--- a/src/hotspot/cpu/x86/methodHandles_x86.cpp
+++ b/src/hotspot/cpu/x86/methodHandles_x86.cpp
@@ -502,6 +502,7 @@ void trace_method_handle_stub(const char* adaptername,
p2i(mh), p2i(entry_sp));
if (Verbose) {
+ ResourceMark rm;
tty->print_cr("Registers:");
const int saved_regs_count = RegisterImpl::number_of_registers;
for (int i = 0; i < saved_regs_count; i++) {
@@ -527,12 +528,11 @@ void trace_method_handle_stub(const char* adaptername,
tty->cr();
{
- // dumping last frame with frame::describe
+ // dumping last frame with frame::describe
JavaThread* p = JavaThread::active();
- ResourceMark rm;
- PRESERVE_EXCEPTION_MARK; // may not be needed by safer and unexpensive here
+ PRESERVE_EXCEPTION_MARK; // may not be needed but safer and inexpensive here
FrameValues values;
// Note: We want to allow trace_method_handle from any call site.
@@ -581,8 +581,9 @@ void trace_method_handle_stub(const char* adaptername,
if (has_mh && oopDesc::is_oop(mh)) {
mh->print();
if (java_lang_invoke_MethodHandle::is_instance(mh)) {
- if (java_lang_invoke_MethodHandle::form_offset_in_bytes() != 0)
+ if (java_lang_invoke_MethodHandle::form_offset_in_bytes() != 0) {
java_lang_invoke_MethodHandle::form(mh)->print();
+ }
}
}
}
diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp
index 17b6be9ca9f..51c115057a1 100644
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp
@@ -1033,8 +1033,8 @@ static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
if (src.first()->is_stack()) {
if (dst.first()->is_stack()) {
// stack to stack
- // __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
- // __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
+ // __ ld(FP, reg2offset(src.first()), L5);
+ // __ st(L5, SP, reg2offset(dst.first()));
__ movl2ptr(rax, Address(rbp, reg2offset_in(src.first())));
__ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
} else {
diff --git a/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp b/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp
index 43fca09b210..05ee42ef061 100644
--- a/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp
+++ b/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp
@@ -365,11 +365,11 @@ JVM_handle_aix_signal(int sig, siginfo_t* info, void* ucVoid, int abort_if_unrec
goto report_and_die;
}
+ int stop_type = -1;
// Handle signal from NativeJump::patch_verified_entry().
- if (( TrapBasedNotEntrantChecks && sig == SIGTRAP && nativeInstruction_at(pc)->is_sigtrap_zombie_not_entrant()) ||
- (!TrapBasedNotEntrantChecks && sig == SIGILL && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant())) {
+ if (sig == SIGILL && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) {
if (TraceTraps) {
- tty->print_cr("trap: zombie_not_entrant (%s)", (sig == SIGTRAP) ? "SIGTRAP" : "SIGILL");
+ tty->print_cr("trap: zombie_not_entrant");
}
stub = SharedRuntime::get_handle_wrong_method_stub();
goto run_stub;
@@ -436,6 +436,30 @@ JVM_handle_aix_signal(int sig, siginfo_t* info, void* ucVoid, int abort_if_unrec
goto run_stub;
}
+ // stop on request
+ else if (sig == SIGTRAP && (stop_type = nativeInstruction_at(pc)->get_stop_type()) != -1) {
+ const char *msg = NULL,
+ *detail_msg = (const char*)(uc->uc_mcontext.jmp_context.gpr[0]);
+ switch (stop_type) {
+ case MacroAssembler::stop_stop : msg = "stop"; break;
+ case MacroAssembler::stop_untested : msg = "untested"; break;
+ case MacroAssembler::stop_unimplemented : msg = "unimplemented"; break;
+ case MacroAssembler::stop_shouldnotreachhere: msg = "shouldnotreachhere"; detail_msg = NULL; break;
+ default: msg = "unknown"; break;
+ }
+ if (detail_msg == NULL) {
+ detail_msg = "no details provided";
+ }
+
+ if (TraceTraps) {
+ tty->print_cr("trap: %s: %s (SIGTRAP, stop type %d)", msg, detail_msg, stop_type);
+ }
+
+ va_list detail_args;
+ VMError::report_and_die(t, ucVoid, NULL, 0, msg, detail_msg, detail_args);
+ va_end(detail_args);
+ }
+
else if (sig == SIGBUS) {
// BugId 4454115: A read from a MappedByteBuffer can fault here if the
// underlying file has been truncated. Do not crash the VM in such a case.
diff --git a/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp b/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp
index 7e4e56f6473..b84411ae942 100644
--- a/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp
+++ b/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp
@@ -401,11 +401,11 @@ JVM_handle_linux_signal(int sig,
}
CodeBlob *cb = NULL;
+ int stop_type = -1;
// Handle signal from NativeJump::patch_verified_entry().
- if (( TrapBasedNotEntrantChecks && sig == SIGTRAP && nativeInstruction_at(pc)->is_sigtrap_zombie_not_entrant()) ||
- (!TrapBasedNotEntrantChecks && sig == SIGILL && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant())) {
+ if (sig == SIGILL && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) {
if (TraceTraps) {
- tty->print_cr("trap: zombie_not_entrant (%s)", (sig == SIGTRAP) ? "SIGTRAP" : "SIGILL");
+ tty->print_cr("trap: zombie_not_entrant");
}
stub = SharedRuntime::get_handle_wrong_method_stub();
}
@@ -465,6 +465,31 @@ JVM_handle_linux_signal(int sig,
stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
}
#endif
+
+ // stop on request
+ else if (sig == SIGTRAP && (stop_type = nativeInstruction_at(pc)->get_stop_type()) != -1) {
+ const char *msg = NULL,
+ *detail_msg = (const char*)(uc->uc_mcontext.regs->gpr[0]);
+ switch (stop_type) {
+ case MacroAssembler::stop_stop : msg = "stop"; break;
+ case MacroAssembler::stop_untested : msg = "untested"; break;
+ case MacroAssembler::stop_unimplemented : msg = "unimplemented"; break;
+ case MacroAssembler::stop_shouldnotreachhere: msg = "shouldnotreachhere"; detail_msg = NULL; break;
+ default: msg = "unknown"; break;
+ }
+ if (detail_msg == NULL) {
+ detail_msg = "no details provided";
+ }
+
+ if (TraceTraps) {
+ tty->print_cr("trap: %s: %s (SIGTRAP, stop type %d)", msg, detail_msg, stop_type);
+ }
+
+ va_list detail_args;
+ VMError::report_and_die(t, ucVoid, NULL, 0, msg, detail_msg, detail_args);
+ va_end(detail_args);
+ }
+
else if (sig == SIGBUS) {
// BugId 4454115: A read from a MappedByteBuffer can fault here if the
// underlying file has been truncated. Do not crash the VM in such a case.
diff --git a/src/hotspot/share/c1/c1_FrameMap.cpp b/src/hotspot/share/c1/c1_FrameMap.cpp
index 40bd7dbf3b6..1a517c9f549 100644
--- a/src/hotspot/share/c1/c1_FrameMap.cpp
+++ b/src/hotspot/share/c1/c1_FrameMap.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -82,7 +82,7 @@ CallingConvention* FrameMap::java_calling_convention(const BasicTypeArray* signa
if (opr->is_address()) {
LIR_Address* addr = opr->as_address_ptr();
assert(addr->disp() == (int)addr->disp(), "out of range value");
- out_preserve = MAX2(out_preserve, (intptr_t)(addr->disp() - STACK_BIAS) / 4);
+ out_preserve = MAX2(out_preserve, (intptr_t)addr->disp() / 4);
}
i += type2size[t];
}
@@ -133,7 +133,7 @@ CallingConvention* FrameMap::c_calling_convention(const BasicTypeArray* signatur
args->append(opr);
if (opr->is_address()) {
LIR_Address* addr = opr->as_address_ptr();
- out_preserve = MAX2(out_preserve, (intptr_t)(addr->disp() - STACK_BIAS) / 4);
+ out_preserve = MAX2(out_preserve, (intptr_t)addr->disp() / 4);
}
i += type2size[t];
}
@@ -174,7 +174,7 @@ FrameMap::FrameMap(ciMethod* method, int monitors, int reserved_argument_area_si
LIR_Opr opr = _incoming_arguments->at(i);
if (opr->is_address()) {
LIR_Address* address = opr->as_address_ptr();
- _argument_locations->at_put(java_index, address->disp() - STACK_BIAS);
+ _argument_locations->at_put(java_index, address->disp());
_incoming_arguments->args()->at_put(i, LIR_OprFact::stack(java_index, as_BasicType(as_ValueType(address->type()))));
}
java_index += type2size[opr->type()];
diff --git a/src/hotspot/share/classfile/javaClasses.cpp b/src/hotspot/share/classfile/javaClasses.cpp
index ecc21ba03bf..ad7127dda0c 100644
--- a/src/hotspot/share/classfile/javaClasses.cpp
+++ b/src/hotspot/share/classfile/javaClasses.cpp
@@ -286,8 +286,7 @@ Handle java_lang_String::create_from_unicode(const jchar* unicode, int length, T
char* expected = UNICODE::as_utf8(unicode, length);
char* actual = as_utf8_string(h_obj());
if (strcmp(expected, actual) != 0) {
- tty->print_cr("Unicode conversion failure: %s --> %s", expected, actual);
- ShouldNotReachHere();
+ fatal("Unicode conversion failure: %s --> %s", expected, actual);
}
}
#endif
@@ -324,19 +323,16 @@ Handle java_lang_String::create_from_str(const char* utf8_str, TRAPS) {
}
#ifdef ASSERT
- // This check is too strict because the input string is not necessarily valid UTF8.
+ // This check is too strict when the input string is not a valid UTF8.
// For example, it may be created with arbitrary content via jni_NewStringUTF.
- /*
- {
+ if (UTF8::is_legal_utf8((const unsigned char*)utf8_str, (int)strlen(utf8_str), false)) {
ResourceMark rm;
const char* expected = utf8_str;
char* actual = as_utf8_string(h_obj());
if (strcmp(expected, actual) != 0) {
- tty->print_cr("String conversion failure: %s --> %s", expected, actual);
- ShouldNotReachHere();
+ fatal("String conversion failure: %s --> %s", expected, actual);
}
}
- */
#endif
return h_obj;
@@ -376,8 +372,7 @@ Handle java_lang_String::create_from_symbol(Symbol* symbol, TRAPS) {
const char* expected = symbol->as_utf8();
char* actual = as_utf8_string(h_obj());
if (strncmp(expected, actual, utf8_len) != 0) {
- tty->print_cr("Symbol conversion failure: %s --> %s", expected, actual);
- ShouldNotReachHere();
+ fatal("Symbol conversion failure: %s --> %s", expected, actual);
}
}
#endif
diff --git a/src/hotspot/share/classfile/systemDictionary.cpp b/src/hotspot/share/classfile/systemDictionary.cpp
index 6934593a6e1..6f17652e3f5 100644
--- a/src/hotspot/share/classfile/systemDictionary.cpp
+++ b/src/hotspot/share/classfile/systemDictionary.cpp
@@ -84,6 +84,7 @@
#include "services/diagnosticCommand.hpp"
#include "services/threadService.hpp"
#include "utilities/macros.hpp"
+#include "utilities/utf8.hpp"
#if INCLUDE_CDS
#include "classfile/systemDictionaryShared.hpp"
#endif
@@ -232,6 +233,27 @@ bool SystemDictionary::is_platform_class_loader(oop class_loader) {
// ----------------------------------------------------------------------------
// Resolving of classes
+Symbol* SystemDictionary::class_name_symbol(const char* name, Symbol* exception, TRAPS) {
+ if (name == NULL) {
+ THROW_MSG_0(exception, "No class name given");
+ }
+ if ((int)strlen(name) > Symbol::max_length()) {
+ // It's impossible to create this class; the name cannot fit
+ // into the constant pool.
+ Exceptions::fthrow(THREAD_AND_LOCATION, exception,
+ "Class name exceeds maximum length of %d: %s",
+ Symbol::max_length(),
+ name);
+ return NULL;
+ }
+ // Callers should ensure that the name is never an illegal UTF8 string.
+ assert(UTF8::is_legal_utf8((const unsigned char*)name, (int)strlen(name), false),
+ "Class name is not a valid utf8 string.");
+
+ // Make a new symbol for the class name.
+ return SymbolTable::new_symbol(name);
+}
+
// Forwards to resolve_or_null
Klass* SystemDictionary::resolve_or_fail(Symbol* class_name, Handle class_loader, Handle protection_domain, bool throw_error, TRAPS) {
diff --git a/src/hotspot/share/classfile/systemDictionary.hpp b/src/hotspot/share/classfile/systemDictionary.hpp
index 531434078b4..6abfc52e63d 100644
--- a/src/hotspot/share/classfile/systemDictionary.hpp
+++ b/src/hotspot/share/classfile/systemDictionary.hpp
@@ -668,6 +668,9 @@ class SystemDictionary : AllStatic {
return !m->is_public() && m->method_holder() == SystemDictionary::Object_klass();
}
 + // Return Symbol or throw exception if the name given can not be a valid Symbol.
+ static Symbol* class_name_symbol(const char* name, Symbol* exception, TRAPS);
+
protected:
// Setup link to hierarchy
static void add_to_hierarchy(InstanceKlass* k, TRAPS);
diff --git a/src/hotspot/share/compiler/compilerDefinitions.cpp b/src/hotspot/share/compiler/compilerDefinitions.cpp
index 33442dd9b12..011494af95b 100644
--- a/src/hotspot/share/compiler/compilerDefinitions.cpp
+++ b/src/hotspot/share/compiler/compilerDefinitions.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -517,3 +517,42 @@ void CompilerConfig::ergo_initialize() {
}
#endif // COMPILER2
}
+
+static CompLevel highest_compile_level() {
+ return TieredCompilation ? MIN2((CompLevel) TieredStopAtLevel, CompLevel_highest_tier) : CompLevel_highest_tier;
+}
+
+bool is_c1_or_interpreter_only() {
+ if (Arguments::is_interpreter_only()) {
+ return true;
+ }
+
+#if INCLUDE_AOT
+ if (UseAOT) {
+ return false;
+ }
+#endif
+
+ if (highest_compile_level() < CompLevel_full_optimization) {
+#if INCLUDE_JVMCI
+ if (TieredCompilation) {
+ return true;
+ }
+ // This happens on jvm variant with C2 disabled and JVMCI
+ // enabled.
+ return !UseJVMCICompiler;
+#else
+ return true;
+#endif
+ }
+
+#ifdef TIERED
+ // The quick-only compilation mode is c1 only. However,
+ // CompilationModeFlag only takes effect with TieredCompilation
+ // enabled.
+ if (TieredCompilation && CompilationModeFlag::quick_only()) {
+ return true;
+ }
+#endif
+ return false;
+}
diff --git a/src/hotspot/share/compiler/compilerDefinitions.hpp b/src/hotspot/share/compiler/compilerDefinitions.hpp
index d347d0e3c9c..d8cc1c1f21a 100644
--- a/src/hotspot/share/compiler/compilerDefinitions.hpp
+++ b/src/hotspot/share/compiler/compilerDefinitions.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -116,6 +116,8 @@ inline bool is_compile(int comp_level) {
return is_c1_compile(comp_level) || is_c2_compile(comp_level);
}
+bool is_c1_or_interpreter_only();
+
// States of Restricted Transactional Memory usage.
enum RTMState {
NoRTM = 0x2, // Don't use RTM
diff --git a/src/hotspot/share/gc/g1/g1Analytics.cpp b/src/hotspot/share/gc/g1/g1Analytics.cpp
index d57c70a9d52..0f1de8e1a29 100644
--- a/src/hotspot/share/gc/g1/g1Analytics.cpp
+++ b/src/hotspot/share/gc/g1/g1Analytics.cpp
@@ -94,8 +94,8 @@ G1Analytics::G1Analytics(const G1Predictions* predictor) :
_rs_length_seq(new TruncatedSeq(TruncatedSeqLength)),
_cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
_recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)),
- _recent_avg_pause_time_ratio(0.0),
- _last_pause_time_ratio(0.0) {
+ _long_term_pause_time_ratio(0.0),
+ _short_term_pause_time_ratio(0.0) {
// Seed sequences with initial values.
_recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
@@ -150,17 +150,16 @@ void G1Analytics::report_alloc_rate_ms(double alloc_rate) {
}
void G1Analytics::compute_pause_time_ratio(double interval_ms, double pause_time_ms) {
- _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum() / interval_ms;
-
- // Clamp the result to [0.0 ... 1.0] to filter out nonsensical results due to bad input.
- _recent_avg_pause_time_ratio = clamp(_recent_avg_pause_time_ratio, 0.0, 1.0);
+ _long_term_pause_time_ratio = _recent_gc_times_ms->sum() / interval_ms;
+ // Filter out nonsensical results due to bad input.
+ _long_term_pause_time_ratio = clamp(_long_term_pause_time_ratio, 0.0, 1.0);
// Compute the ratio of just this last pause time to the entire time range stored
// in the vectors. Comparing this pause to the entire range, rather than only the
// most recent interval, has the effect of smoothing over a possible transient 'burst'
// of more frequent pauses that don't really reflect a change in heap occupancy.
// This reduces the likelihood of a needless heap expansion being triggered.
- _last_pause_time_ratio =
+ _short_term_pause_time_ratio =
(pause_time_ms * _recent_prev_end_times_for_all_gcs_sec->num()) / interval_ms;
}
diff --git a/src/hotspot/share/gc/g1/g1Analytics.hpp b/src/hotspot/share/gc/g1/g1Analytics.hpp
index 2acba9f9c5b..d2fe2ec1599 100644
--- a/src/hotspot/share/gc/g1/g1Analytics.hpp
+++ b/src/hotspot/share/gc/g1/g1Analytics.hpp
@@ -75,10 +75,10 @@ class G1Analytics: public CHeapObj<mtGC> {
// Statistics kept per GC stoppage, pause or full.
TruncatedSeq* _recent_prev_end_times_for_all_gcs_sec;
- // The ratio of gc time to elapsed time, computed over recent pauses,
- // and the ratio for just the last pause.
- double _recent_avg_pause_time_ratio;
- double _last_pause_time_ratio;
+ // Cached values for long and short term pause time ratios. See
 + // compute_pause_time_ratio() for how they are computed.
+ double _long_term_pause_time_ratio;
+ double _short_term_pause_time_ratio;
// Returns whether the sequence have enough samples to get a "good" prediction.
// The constant used is random but "small".
@@ -95,12 +95,12 @@ class G1Analytics: public CHeapObj {
return _prev_collection_pause_end_ms;
}
- double recent_avg_pause_time_ratio() const {
- return _recent_avg_pause_time_ratio;
+ double long_term_pause_time_ratio() const {
+ return _long_term_pause_time_ratio;
}
- double last_pause_time_ratio() const {
- return _last_pause_time_ratio;
+ double short_term_pause_time_ratio() const {
+ return _short_term_pause_time_ratio;
}
uint number_of_recorded_pause_times() const {
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
index 6dca5d875c9..7030b0c41af 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
@@ -47,6 +47,7 @@
#include "gc/g1/g1HeapTransition.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1HotCardCache.hpp"
+#include "gc/g1/g1InitLogger.hpp"
#include "gc/g1/g1MemoryPool.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1ParallelCleaning.hpp"
@@ -1824,6 +1825,8 @@ jint G1CollectedHeap::initialize() {
_collection_set.initialize(max_regions());
+ G1InitLogger::print();
+
return JNI_OK;
}
diff --git a/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp b/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp
index cba298ddbb9..b7b0240e76e 100644
--- a/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp
+++ b/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp
@@ -50,44 +50,70 @@ void G1HeapSizingPolicy::clear_ratio_check_data() {
_pauses_since_start = 0;
}
+double G1HeapSizingPolicy::scale_with_heap(double pause_time_threshold) {
+ double threshold = pause_time_threshold;
+ // If the heap is at less than half its maximum size, scale the threshold down,
+ // to a limit of 1%. Thus the smaller the heap is, the more likely it is to expand,
+ // though the scaling code will likely keep the increase small.
+ if (_g1h->capacity() <= _g1h->max_capacity() / 2) {
+ threshold *= (double)_g1h->capacity() / (double)(_g1h->max_capacity() / 2);
+ threshold = MAX2(threshold, 0.01);
+ }
+
+ return threshold;
+}
+
+static void log_expansion(double short_term_pause_time_ratio,
+ double long_term_pause_time_ratio,
+ double threshold,
+ double pause_time_ratio,
+ bool fully_expanded,
+ size_t resize_bytes) {
+
+ log_debug(gc, ergo, heap)("Heap expansion: "
+ "short term pause time ratio %1.2f%% long term pause time ratio %1.2f%% "
+ "threshold %1.2f%% pause time ratio %1.2f%% fully expanded %s "
+ "resize by " SIZE_FORMAT "B",
+ short_term_pause_time_ratio * 100.0,
+ long_term_pause_time_ratio * 100.0,
+ threshold * 100.0,
+ pause_time_ratio * 100.0,
+ BOOL_TO_STR(fully_expanded),
+ resize_bytes);
+}
+
size_t G1HeapSizingPolicy::expansion_amount() {
- assert(GCTimeRatio > 0,
- "we should have set it to a default value set_g1_gc_flags() "
- "if a user set it to 0");
+ assert(GCTimeRatio > 0, "must be");
+
+ double long_term_pause_time_ratio = _analytics->long_term_pause_time_ratio();
+ double short_term_pause_time_ratio = _analytics->short_term_pause_time_ratio();
+ const double pause_time_threshold = 1.0 / (1.0 + GCTimeRatio);
+ double threshold = scale_with_heap(pause_time_threshold);
- double recent_gc_overhead = _analytics->recent_avg_pause_time_ratio() * 100.0;
- double last_gc_overhead = _analytics->last_pause_time_ratio() * 100.0;
size_t expand_bytes = 0;
if (_g1h->capacity() == _g1h->max_capacity()) {
- log_trace(gc, ergo, heap)("Can not expand (heap already fully expanded) "
- "recent GC overhead: %1.2f %% committed: " SIZE_FORMAT "B",
- recent_gc_overhead, _g1h->capacity());
-
+ log_expansion(short_term_pause_time_ratio, long_term_pause_time_ratio,
+ threshold, pause_time_threshold, true, 0);
clear_ratio_check_data();
return expand_bytes;
}
- const double gc_overhead_percent = 100.0 * (1.0 / (1.0 + GCTimeRatio));
-
- double threshold = gc_overhead_percent;
-
- // If the heap is at less than half its maximum size, scale the threshold down,
- // to a limit of 1. Thus the smaller the heap is, the more likely it is to expand,
- // though the scaling code will likely keep the increase small.
- if (_g1h->capacity() <= _g1h->max_capacity() / 2) {
- threshold *= (double)_g1h->capacity() / (double)(_g1h->max_capacity() / 2);
- threshold = MAX2(threshold, 1.0);
- }
-
// If the last GC time ratio is over the threshold, increment the count of
// times it has been exceeded, and add this ratio to the sum of exceeded
// ratios.
- if (last_gc_overhead > threshold) {
+ if (short_term_pause_time_ratio > threshold) {
_ratio_over_threshold_count++;
- _ratio_over_threshold_sum += last_gc_overhead;
+ _ratio_over_threshold_sum += short_term_pause_time_ratio;
}
+ log_trace(gc, ergo, heap)("Heap expansion triggers: pauses since start: %u "
+ "num prev pauses for heuristics: %u "
+ "ratio over threshold count: %u",
+ _pauses_since_start,
+ _num_prev_pauses_for_heuristics,
+ _ratio_over_threshold_count);
+
// Check if we've had enough GC time ratio checks that were over the
// threshold to trigger an expansion. We'll also expand if we've
// reached the end of the history buffer and the average of all entries
@@ -95,7 +121,7 @@ size_t G1HeapSizingPolicy::expansion_amount() {
// long enough to make the average exceed the threshold.
bool filled_history_buffer = _pauses_since_start == _num_prev_pauses_for_heuristics;
if ((_ratio_over_threshold_count == MinOverThresholdForGrowth) ||
- (filled_history_buffer && (recent_gc_overhead > threshold))) {
+ (filled_history_buffer && (long_term_pause_time_ratio > threshold))) {
size_t min_expand_bytes = HeapRegion::GrainBytes;
size_t reserved_bytes = _g1h->max_capacity();
size_t committed_bytes = _g1h->capacity();
@@ -123,15 +149,15 @@ size_t G1HeapSizingPolicy::expansion_amount() {
} else {
double const MinScaleDownFactor = 0.2;
double const MaxScaleUpFactor = 2;
- double const StartScaleDownAt = gc_overhead_percent;
- double const StartScaleUpAt = gc_overhead_percent * 1.5;
- double const ScaleUpRange = gc_overhead_percent * 2.0;
+ double const StartScaleDownAt = pause_time_threshold;
+ double const StartScaleUpAt = pause_time_threshold * 1.5;
+ double const ScaleUpRange = pause_time_threshold * 2.0;
double ratio_delta;
if (filled_history_buffer) {
- ratio_delta = recent_gc_overhead - threshold;
+ ratio_delta = long_term_pause_time_ratio - threshold;
} else {
- ratio_delta = (_ratio_over_threshold_sum/_ratio_over_threshold_count) - threshold;
+ ratio_delta = (_ratio_over_threshold_sum / _ratio_over_threshold_count) - threshold;
}
expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
@@ -144,16 +170,11 @@ size_t G1HeapSizingPolicy::expansion_amount() {
}
}
- log_debug(gc, ergo, heap)("Attempt heap expansion (recent GC overhead higher than threshold after GC) "
- "recent GC overhead: %1.2f %% threshold: %1.2f %% uncommitted: " SIZE_FORMAT "B base expansion amount and scale: " SIZE_FORMAT "B (%1.2f%%)",
- recent_gc_overhead, threshold, uncommitted_bytes, expand_bytes, scale_factor * 100);
-
expand_bytes = static_cast(expand_bytes * scale_factor);
// Ensure the expansion size is at least the minimum growth amount
// and at most the remaining uncommitted byte size.
- expand_bytes = MAX2(expand_bytes, min_expand_bytes);
- expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
+ expand_bytes = clamp(expand_bytes, min_expand_bytes, uncommitted_bytes);
clear_ratio_check_data();
} else {
@@ -169,5 +190,8 @@ size_t G1HeapSizingPolicy::expansion_amount() {
}
}
+ log_expansion(short_term_pause_time_ratio, long_term_pause_time_ratio,
+ threshold, pause_time_threshold, false, expand_bytes);
+
return expand_bytes;
}
diff --git a/src/hotspot/share/gc/g1/g1HeapSizingPolicy.hpp b/src/hotspot/share/gc/g1/g1HeapSizingPolicy.hpp
index 5ef237707d6..99c740ca0ef 100644
--- a/src/hotspot/share/gc/g1/g1HeapSizingPolicy.hpp
+++ b/src/hotspot/share/gc/g1/g1HeapSizingPolicy.hpp
@@ -45,14 +45,16 @@ class G1HeapSizingPolicy: public CHeapObj {
double _ratio_over_threshold_sum;
uint _pauses_since_start;
+ // Scale "full" gc pause time threshold with heap size as we want to resize more
+ // eagerly at small heap sizes.
+ double scale_with_heap(double pause_time_threshold);
-protected:
G1HeapSizingPolicy(const G1CollectedHeap* g1h, const G1Analytics* analytics);
public:
// If an expansion would be appropriate, because recent GC overhead had
// exceeded the desired limit, return an amount to expand by.
- virtual size_t expansion_amount();
+ size_t expansion_amount();
// Clear ratio tracking data used by expansion_amount().
void clear_ratio_check_data();
diff --git a/src/hotspot/share/gc/g1/g1InitLogger.cpp b/src/hotspot/share/gc/g1/g1InitLogger.cpp
new file mode 100644
index 00000000000..d143c586247
--- /dev/null
+++ b/src/hotspot/share/gc/g1/g1InitLogger.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1InitLogger.hpp"
+#include "logging/log.hpp"
+#include "runtime/globals.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+void G1InitLogger::print_heap() {
+ log_info(gc, init)("Heap Region Size: " SIZE_FORMAT "M", G1HeapRegionSize / M);
+ GCInitLogger::print_heap();
+}
+
+void G1InitLogger::print_workers() {
+ GCInitLogger::print_workers();
+ if (G1ConcRefinementThreads > 0) {
+ log_info(gc, init)("Concurrent Refinement Workers: %u", G1ConcRefinementThreads);
+ }
+}
+
+void G1InitLogger::print_gc_specific() {
+ // Print a message about periodic GC configuration.
+ if (G1PeriodicGCInterval != 0) {
+ log_info(gc, init)("Periodic GC: Enabled");
+ log_info(gc, init)("Periodic GC Interval: " UINTX_FORMAT "ms", G1PeriodicGCInterval);
+ } else {
+ log_info(gc, init)("Periodic GC: Disabled");
+ }
+}
+
+void G1InitLogger::print() {
+ G1InitLogger init_log;
+ init_log.print_all();
+}
diff --git a/src/hotspot/share/gc/g1/g1InitLogger.hpp b/src/hotspot/share/gc/g1/g1InitLogger.hpp
new file mode 100644
index 00000000000..9c4057988d6
--- /dev/null
+++ b/src/hotspot/share/gc/g1/g1InitLogger.hpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_G1_G1INITLOGGER_HPP
+#define SHARE_GC_G1_G1INITLOGGER_HPP
+
+#include "gc/shared/gcInitLogger.hpp"
+
+class G1InitLogger : public GCInitLogger {
+ protected:
+ virtual void print_heap();
+ virtual void print_workers();
+ virtual void print_gc_specific();
+ public:
+ static void print();
+};
+
+#endif //SHARE_GC_G1_G1INITLOGGER_HPP
diff --git a/src/hotspot/share/gc/g1/g1YoungRemSetSamplingThread.cpp b/src/hotspot/share/gc/g1/g1YoungRemSetSamplingThread.cpp
index 929680151fb..c57f3d7ce07 100644
--- a/src/hotspot/share/gc/g1/g1YoungRemSetSamplingThread.cpp
+++ b/src/hotspot/share/gc/g1/g1YoungRemSetSamplingThread.cpp
@@ -101,13 +101,6 @@ void G1YoungRemSetSamplingThread::check_for_periodic_gc(){
void G1YoungRemSetSamplingThread::run_service() {
double vtime_start = os::elapsedVTime();
- // Print a message about periodic GC configuration.
- if (G1PeriodicGCInterval != 0) {
- log_info(gc)("Periodic GC enabled with interval " UINTX_FORMAT "ms", G1PeriodicGCInterval);
- } else {
- log_info(gc)("Periodic GC disabled");
- }
-
while (!should_terminate()) {
sample_young_list_rs_length();
diff --git a/src/hotspot/share/gc/g1/heapRegion.cpp b/src/hotspot/share/gc/g1/heapRegion.cpp
index 336dbb31c1b..9fcc6b77dbf 100644
--- a/src/hotspot/share/gc/g1/heapRegion.cpp
+++ b/src/hotspot/share/gc/g1/heapRegion.cpp
@@ -89,7 +89,6 @@ void HeapRegion::setup_heap_region_size(size_t max_heap_size) {
// The cast to int is safe, given that we've bounded region_size by
// MIN_REGION_SIZE and MAX_REGION_SIZE.
GrainBytes = region_size;
- log_info(gc, heap)("Heap region size: " SIZE_FORMAT "M", GrainBytes / M);
guarantee(GrainWords == 0, "we should only set it once");
GrainWords = GrainBytes >> LogHeapWordSize;
diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
index 0ea793acb44..ac6901dfa7b 100644
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
@@ -37,6 +37,7 @@
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/genArguments.hpp"
+#include "gc/shared/gcInitLogger.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "logging/log.hpp"
@@ -134,6 +135,8 @@ jint ParallelScavengeHeap::initialize() {
// Set up WorkGang
_workers.initialize_workers();
+ GCInitLogger::print();
+
return JNI_OK;
}
diff --git a/src/hotspot/share/gc/shared/gcInitLogger.cpp b/src/hotspot/share/gc/shared/gcInitLogger.cpp
new file mode 100644
index 00000000000..75e088ab86d
--- /dev/null
+++ b/src/hotspot/share/gc/shared/gcInitLogger.cpp
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/gcInitLogger.hpp"
+#include "logging/log.hpp"
+#include "oops/compressedOops.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/vm_version.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+void GCInitLogger::print_all() {
+ print_version();
+ print_cpu();
+ print_memory();
+ print_large_pages();
+ print_numa();
+ print_compressed_oops();
+ print_heap();
+ print_workers();
+ print_gc_specific();
+}
+
+void GCInitLogger::print() {
+ GCInitLogger init_log;
+ init_log.print_all();
+}
+
+void GCInitLogger::print_version() {
+ log_info(gc, init)("Version: %s (%s)",
+ VM_Version::vm_release(),
+ VM_Version::jdk_debug_level());
+}
+
+void GCInitLogger::print_cpu() {
+ log_info(gc, init)("CPUs: %u total, %u available",
+ os::processor_count(),
+ os::initial_active_processor_count());
+}
+
+void GCInitLogger::print_memory() {
+ julong memory = os::physical_memory();
+ log_info(gc, init)("Memory: " JULONG_FORMAT "%s",
+ byte_size_in_proper_unit(memory), proper_unit_for_byte_size(memory));
+}
+
+void GCInitLogger::print_large_pages() {
+ log_info(gc, init)("Large Page Support: %s", large_pages_support());
+}
+
+void GCInitLogger::print_numa() {
+ if (UseNUMA) {
+ log_info(gc, init)("NUMA Support: Enabled");
+ log_info(gc, init)("NUMA Nodes: " SIZE_FORMAT, os::numa_get_groups_num());
+ } else {
+ log_info(gc, init)("NUMA Support: Disabled");
+ }
+}
+
+void GCInitLogger::print_compressed_oops() {
+ if (UseCompressedOops) {
+ log_info(gc, init)("Compressed Oops: Enabled (%s)",
+ CompressedOops::mode_to_string(CompressedOops::mode()));
+ } else {
+ log_info(gc, init)("Compressed Oops: Disabled");
+ }
+}
+
+void GCInitLogger::print_heap() {
+ log_info(gc, init)("Heap Min Capacity: " SIZE_FORMAT "%s",
+ byte_size_in_exact_unit(MinHeapSize), exact_unit_for_byte_size(MinHeapSize));
+ log_info(gc, init)("Heap Initial Capacity: " SIZE_FORMAT "%s",
+ byte_size_in_exact_unit(InitialHeapSize), exact_unit_for_byte_size(InitialHeapSize));
+ log_info(gc, init)("Heap Max Capacity: " SIZE_FORMAT "%s",
+ byte_size_in_exact_unit(MaxHeapSize), exact_unit_for_byte_size(MaxHeapSize));
+
+ log_info(gc, init)("Pre-touch: %s", AlwaysPreTouch ? "Enabled" : "Disabled");
+}
+
+void GCInitLogger::print_workers() {
+ if (ParallelGCThreads > 0) {
+ log_info(gc, init)("Parallel Workers: %u", ParallelGCThreads);
+ }
+ if (ConcGCThreads > 0) {
+ log_info(gc, init)("Concurrent Workers: %u", ConcGCThreads);
+ }
+}
+
+void GCInitLogger::print_gc_specific() {
+ // To allow additional gc specific logging.
+}
+
+const char* GCInitLogger::large_pages_support() {
+ if (UseLargePages) {
+#ifdef LINUX
+ if (UseTransparentHugePages) {
+ return "Enabled (Transparent)";
+ } else {
+ return "Enabled (Explicit)";
+ }
+#else
+ return "Enabled";
+#endif
+ } else {
+ return "Disabled";
+ }
+}
diff --git a/src/hotspot/share/gc/shared/gcInitLogger.hpp b/src/hotspot/share/gc/shared/gcInitLogger.hpp
new file mode 100644
index 00000000000..801887b9423
--- /dev/null
+++ b/src/hotspot/share/gc/shared/gcInitLogger.hpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHARED_GCINITLOGGER_HPP
+#define SHARE_GC_SHARED_GCINITLOGGER_HPP
+
+#include "memory/allocation.hpp"
+
+class GCInitLogger : public StackObj {
+ protected:
+ const char* large_pages_support();
+ virtual void print_version();
+ virtual void print_cpu();
+ virtual void print_memory();
+ virtual void print_large_pages();
+ virtual void print_numa();
+ virtual void print_compressed_oops();
+ virtual void print_heap();
+ virtual void print_workers();
+ virtual void print_gc_specific();
+ public:
+ void print_all();
+ static void print();
+};
+
+#endif //SHARE_GC_SHARED_GCINITLOGGER_HPP
diff --git a/src/hotspot/share/gc/shared/genCollectedHeap.cpp b/src/hotspot/share/gc/shared/genCollectedHeap.cpp
index ed3ac6286ae..d5def394e42 100644
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp
@@ -47,6 +47,7 @@
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/generationSpec.hpp"
+#include "gc/shared/gcInitLogger.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/oopStorageParState.inline.hpp"
#include "gc/shared/scavengableNMethods.hpp"
@@ -130,6 +131,8 @@ jint GenCollectedHeap::initialize() {
_old_gen = _old_gen_spec->init(old_rs, rem_set());
clear_incremental_collection_failed();
+ GCInitLogger::print();
+
return JNI_OK;
}
diff --git a/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp b/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp
index edc78a11999..23d2b4e3db4 100644
--- a/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp
+++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp
@@ -924,9 +924,16 @@ void ShenandoahBarrierC2Support::test_in_cset(Node*& ctrl, Node*& not_cset_ctrl,
Node* raw_val = new CastP2XNode(old_ctrl, val);
Node* cset_idx = new URShiftXNode(raw_val, igvn.intcon(ShenandoahHeapRegion::region_size_bytes_shift_jint()));
- Node* cset_addr = igvn.makecon(TypeRawPtr::make(ShenandoahHeap::in_cset_fast_test_addr()));
- Node* cset_load_addr = new AddPNode(phase->C->top(), cset_addr, cset_idx);
- Node* cset_load = new LoadBNode(old_ctrl, raw_mem, cset_load_addr,
+
+ // Figure out the target cset address with raw pointer math.
+ // This avoids matching AddP+LoadB that would emit inefficient code.
+ // See JDK-8245465.
+ Node* cset_addr_ptr = igvn.makecon(TypeRawPtr::make(ShenandoahHeap::in_cset_fast_test_addr()));
+ Node* cset_addr = new CastP2XNode(old_ctrl, cset_addr_ptr);
+ Node* cset_load_addr = new AddXNode(cset_addr, cset_idx);
+ Node* cset_load_ptr = new CastX2PNode(cset_load_addr);
+
+ Node* cset_load = new LoadBNode(old_ctrl, raw_mem, cset_load_ptr,
DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(NULL),
TypeInt::BYTE, MemNode::unordered);
Node* cset_cmp = new CmpINode(cset_load, igvn.zerocon(T_INT));
@@ -941,11 +948,13 @@ void ShenandoahBarrierC2Support::test_in_cset(Node*& ctrl, Node*& not_cset_ctrl,
phase->register_control(ctrl, loop, cset_iff);
phase->register_control(not_cset_ctrl, loop, cset_iff);
- phase->set_ctrl(cset_addr, phase->C->root());
+ phase->set_ctrl(cset_addr_ptr, phase->C->root());
phase->register_new_node(raw_val, old_ctrl);
phase->register_new_node(cset_idx, old_ctrl);
+ phase->register_new_node(cset_addr, old_ctrl);
phase->register_new_node(cset_load_addr, old_ctrl);
+ phase->register_new_node(cset_load_ptr, old_ctrl);
phase->register_new_node(cset_load, old_ctrl);
phase->register_new_node(cset_cmp, old_ctrl);
phase->register_new_node(cset_bool, old_ctrl);
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp
index 59531b23e9b..87e5b53a163 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp
@@ -161,15 +161,3 @@ bool ShenandoahAdaptiveHeuristics::should_start_gc() const {
return ShenandoahHeuristics::should_start_gc();
}
-
-const char* ShenandoahAdaptiveHeuristics::name() {
- return "adaptive";
-}
-
-bool ShenandoahAdaptiveHeuristics::is_diagnostic() {
- return false;
-}
-
-bool ShenandoahAdaptiveHeuristics::is_experimental() {
- return false;
-}
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp
index 1a05275c966..7f832559424 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp
@@ -43,11 +43,9 @@ class ShenandoahAdaptiveHeuristics : public ShenandoahHeuristics {
virtual bool should_start_gc() const;
- virtual const char* name();
-
- virtual bool is_diagnostic();
-
- virtual bool is_experimental();
+ virtual const char* name() { return "Adaptive"; }
+ virtual bool is_diagnostic() { return false; }
+ virtual bool is_experimental() { return false; }
};
#endif // SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHADAPTIVEHEURISTICS_HPP
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.cpp
index ba44b86734d..72eba58baa3 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.cpp
@@ -73,15 +73,3 @@ bool ShenandoahAggressiveHeuristics::should_unload_classes() {
// Randomly unload classes with 50% chance.
return (os::random() & 1) == 1;
}
-
-const char* ShenandoahAggressiveHeuristics::name() {
- return "aggressive";
-}
-
-bool ShenandoahAggressiveHeuristics::is_diagnostic() {
- return true;
-}
-
-bool ShenandoahAggressiveHeuristics::is_experimental() {
- return false;
-}
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp
index 6239aa1c3f3..071b3be8ff6 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp
@@ -41,11 +41,9 @@ class ShenandoahAggressiveHeuristics : public ShenandoahHeuristics {
virtual bool should_unload_classes();
- virtual const char* name();
-
- virtual bool is_diagnostic();
-
- virtual bool is_experimental();
+ virtual const char* name() { return "Aggressive"; }
+ virtual bool is_diagnostic() { return true; }
+ virtual bool is_experimental() { return false; }
};
#endif // SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHAGGRESSIVEHEURISTICS_HPP
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp
index a2292cf78df..d5f49becfbe 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp
@@ -93,15 +93,3 @@ void ShenandoahCompactHeuristics::choose_collection_set_from_regiondata(Shenando
}
}
}
-
-const char* ShenandoahCompactHeuristics::name() {
- return "compact";
-}
-
-bool ShenandoahCompactHeuristics::is_diagnostic() {
- return false;
-}
-
-bool ShenandoahCompactHeuristics::is_experimental() {
- return false;
-}
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp
index a32638ff11a..43c35f115d1 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp
@@ -37,11 +37,9 @@ class ShenandoahCompactHeuristics : public ShenandoahHeuristics {
RegionData* data, size_t size,
size_t actual_free);
- virtual const char* name();
-
- virtual bool is_diagnostic();
-
- virtual bool is_experimental();
+ virtual const char* name() { return "Compact"; }
+ virtual bool is_diagnostic() { return false; }
+ virtual bool is_experimental() { return false; }
};
#endif // SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHCOMPACTHEURISTICS_HPP
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.cpp
index 8cd21b08b3e..e23614b2088 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.cpp
@@ -78,15 +78,3 @@ void ShenandoahPassiveHeuristics::choose_collection_set_from_regiondata(Shenando
}
}
}
-
-const char* ShenandoahPassiveHeuristics::name() {
- return "passive";
-}
-
-bool ShenandoahPassiveHeuristics::is_diagnostic() {
- return true;
-}
-
-bool ShenandoahPassiveHeuristics::is_experimental() {
- return false;
-}
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp
index 09512a7be43..752b0be4dcd 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp
@@ -41,11 +41,9 @@ class ShenandoahPassiveHeuristics : public ShenandoahHeuristics {
RegionData* data, size_t data_size,
size_t free);
- virtual const char* name();
-
- virtual bool is_diagnostic();
-
- virtual bool is_experimental();
+ virtual const char* name() { return "Passive"; }
+ virtual bool is_diagnostic() { return true; }
+ virtual bool is_experimental() { return false; }
};
#endif // SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHPASSIVEHEURISTICS_HPP
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp
index d35d7df8009..a386362d861 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp
@@ -67,15 +67,3 @@ void ShenandoahStaticHeuristics::choose_collection_set_from_regiondata(Shenandoa
}
}
}
-
-const char* ShenandoahStaticHeuristics::name() {
- return "static";
-}
-
-bool ShenandoahStaticHeuristics::is_diagnostic() {
- return false;
-}
-
-bool ShenandoahStaticHeuristics::is_experimental() {
- return false;
-}
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp
index 1e53d33b76e..949b2b769c8 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp
@@ -39,11 +39,9 @@ class ShenandoahStaticHeuristics : public ShenandoahHeuristics {
RegionData* data, size_t size,
size_t free);
- virtual const char* name();
-
- virtual bool is_diagnostic();
-
- virtual bool is_experimental();
+ virtual const char* name() { return "Static"; }
+ virtual bool is_diagnostic() { return false; }
+ virtual bool is_experimental() { return false; }
};
#endif // SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHSTATICHEURISTICS_HPP
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp b/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp
index 9f31cb59957..889af8d1308 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp
@@ -143,26 +143,11 @@ void ShenandoahArguments::initialize() {
#endif // ASSERT
#endif // COMPILER2
- if (AlwaysPreTouch) {
- // Shenandoah handles pre-touch on its own. It does not let the
- // generic storage code to do the pre-touch before Shenandoah has
- // a chance to do it on its own.
- FLAG_SET_DEFAULT(AlwaysPreTouch, false);
- FLAG_SET_DEFAULT(ShenandoahAlwaysPreTouch, true);
- }
-
// Record more information about previous cycles for improved debugging pleasure
if (FLAG_IS_DEFAULT(LogEventsBufferEntries)) {
FLAG_SET_DEFAULT(LogEventsBufferEntries, 250);
}
- if (ShenandoahAlwaysPreTouch) {
- if (!FLAG_IS_DEFAULT(ShenandoahUncommit)) {
- warning("AlwaysPreTouch is enabled, disabling ShenandoahUncommit");
- }
- FLAG_SET_DEFAULT(ShenandoahUncommit, false);
- }
-
if ((InitialHeapSize == MaxHeapSize) && ShenandoahUncommit) {
log_info(gc)("Min heap equals to max heap, disabling ShenandoahUncommit");
FLAG_SET_DEFAULT(ShenandoahUncommit, false);
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp
index 37fe9be79d5..385408d7fd5 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp
@@ -33,10 +33,10 @@
#include "services/memTracker.hpp"
#include "utilities/copy.hpp"
-ShenandoahCollectionSet::ShenandoahCollectionSet(ShenandoahHeap* heap, char* heap_base, size_t size) :
+ShenandoahCollectionSet::ShenandoahCollectionSet(ShenandoahHeap* heap, ReservedSpace space, char* heap_base) :
_map_size(heap->num_regions()),
_region_size_bytes_shift(ShenandoahHeapRegion::region_size_bytes_shift()),
- _map_space(align_up(((uintx)heap_base + size) >> _region_size_bytes_shift, os::vm_allocation_granularity())),
+ _map_space(space),
_cset_map(_map_space.base() + ((uintx)heap_base >> _region_size_bytes_shift)),
_biased_cset_map(_map_space.base()),
_heap(heap),
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp
index bfb924671f1..56e96522094 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp
@@ -51,7 +51,7 @@ class ShenandoahCollectionSet : public CHeapObj {
shenandoah_padding(1);
public:
- ShenandoahCollectionSet(ShenandoahHeap* heap, char* heap_base, size_t size);
+ ShenandoahCollectionSet(ShenandoahHeap* heap, ReservedSpace space, char* heap_base);
// Add region to collection set
void add_region(ShenandoahHeapRegion* r);
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
index 7c8b86f5a5c..9812cd6d6f8 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
@@ -46,6 +46,7 @@
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
+#include "gc/shenandoah/shenandoahInitLogger.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMemoryPool.hpp"
@@ -110,7 +111,9 @@ class ShenandoahPretouchHeapTask : public AbstractGangTask {
virtual void work(uint worker_id) {
ShenandoahHeapRegion* r = _regions.next();
while (r != NULL) {
- os::pretouch_memory(r->bottom(), r->end(), _page_size);
+ if (r->is_committed()) {
+ os::pretouch_memory(r->bottom(), r->end(), _page_size);
+ }
r = _regions.next();
}
}
@@ -136,7 +139,9 @@ class ShenandoahPretouchBitmapTask : public AbstractGangTask {
size_t end = (r->index() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " < " SIZE_FORMAT, end, _bitmap_size);
- os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
+ if (r->is_committed()) {
+ os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
+ }
r = _regions.next();
}
@@ -155,11 +160,6 @@ jint ShenandoahHeap::initialize() {
size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
- if (ShenandoahAlwaysPreTouch) {
- // Enabled pre-touch means the entire heap is committed right away.
- init_byte_size = max_byte_size;
- }
-
Universe::check_alignment(max_byte_size, reg_size_bytes, "Shenandoah heap");
Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
@@ -292,9 +292,35 @@ jint ShenandoahHeap::initialize() {
"Cannot commit region memory");
}
+ // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
+ // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
+ // If not successful, bite a bullet and allocate at whatever address.
+ {
+ size_t cset_align = MAX2(os::vm_page_size(), os::vm_allocation_granularity());
+ size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);
+
+ uintptr_t min = round_up_power_of_2(cset_align);
+ uintptr_t max = (1u << 30u);
+
+ for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
+ char* req_addr = (char*)addr;
+ assert(is_aligned(req_addr, cset_align), "Should be aligned");
+ ReservedSpace cset_rs(cset_size, cset_align, false, req_addr);
+ if (cset_rs.is_reserved()) {
+ assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
+ _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
+ break;
+ }
+ }
+
+ if (_collection_set == NULL) {
+ ReservedSpace cset_rs(cset_size, cset_align, false);
+ _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
+ }
+ }
+
_regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
_free_set = new ShenandoahFreeSet(this, _num_regions);
- _collection_set = new ShenandoahCollectionSet(this, sh_rs.base(), sh_rs.size());
{
ShenandoahHeapLocker locker(lock());
@@ -318,38 +344,32 @@ jint ShenandoahHeap::initialize() {
_free_set->rebuild();
}
- if (ShenandoahAlwaysPreTouch) {
- assert(!AlwaysPreTouch, "Should have been overridden");
-
+ if (AlwaysPreTouch) {
// For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
// before initialize() below zeroes it with initializing thread. For any given region,
// we touch the region and the corresponding bitmaps from the same thread.
ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
- size_t pretouch_heap_page_size = heap_page_size;
- size_t pretouch_bitmap_page_size = bitmap_page_size;
+ _pretouch_heap_page_size = heap_page_size;
+ _pretouch_bitmap_page_size = bitmap_page_size;
#ifdef LINUX
// UseTransparentHugePages would madvise that backing memory can be coalesced into huge
// pages. But, the kernel needs to know that every small page is used, in order to coalesce
// them into huge one. Therefore, we need to pretouch with smaller pages.
if (UseTransparentHugePages) {
- pretouch_heap_page_size = (size_t)os::vm_page_size();
- pretouch_bitmap_page_size = (size_t)os::vm_page_size();
+ _pretouch_heap_page_size = (size_t)os::vm_page_size();
+ _pretouch_bitmap_page_size = (size_t)os::vm_page_size();
}
#endif
// OS memory managers may want to coalesce back-to-back pages. Make their jobs
// simpler by pre-touching continuous spaces (heap and bitmap) separately.
- log_info(gc, init)("Pretouch bitmap: " SIZE_FORMAT " regions, " SIZE_FORMAT " bytes page",
- _num_regions, pretouch_bitmap_page_size);
- ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, pretouch_bitmap_page_size);
+ ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
_workers->run_task(&bcl);
- log_info(gc, init)("Pretouch heap: " SIZE_FORMAT " regions, " SIZE_FORMAT " bytes page",
- _num_regions, pretouch_heap_page_size);
- ShenandoahPretouchHeapTask hcl(pretouch_heap_page_size);
+ ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
_workers->run_task(&hcl);
}
@@ -385,13 +405,10 @@ jint ShenandoahHeap::initialize() {
_control_thread = new ShenandoahControlThread();
- log_info(gc, init)("Initialize Shenandoah heap: " SIZE_FORMAT "%s initial, " SIZE_FORMAT "%s min, " SIZE_FORMAT "%s max",
- byte_size_in_proper_unit(_initial_size), proper_unit_for_byte_size(_initial_size),
- byte_size_in_proper_unit(_minimum_size), proper_unit_for_byte_size(_minimum_size),
- byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity())
- );
+ _ref_proc_mt_processing = ParallelRefProcEnabled && (ParallelGCThreads > 1);
+ _ref_proc_mt_discovery = _max_workers > 1;
- log_info(gc, init)("Safepointing mechanism: thread-local poll");
+ ShenandoahInitLogger::print();
return JNI_OK;
}
@@ -421,8 +438,6 @@ void ShenandoahHeap::initialize_heuristics() {
err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
_gc_mode->name()));
}
- log_info(gc, init)("Shenandoah GC mode: %s",
- _gc_mode->name());
_heuristics = _gc_mode->initialize_heuristics();
@@ -436,8 +451,6 @@ void ShenandoahHeap::initialize_heuristics() {
err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
_heuristics->name()));
}
- log_info(gc, init)("Shenandoah heuristics: %s",
- _heuristics->name());
}
#ifdef _MSC_VER
@@ -486,8 +499,6 @@ ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
{
_heap = this;
- log_info(gc, init)("GC threads: " UINT32_FORMAT " parallel, " UINT32_FORMAT " concurrent", ParallelGCThreads, ConcGCThreads);
-
BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));
_max_workers = MAX2(_max_workers, 1U);
@@ -1830,8 +1841,9 @@ class ShenandoahConcurrentWeakRootsEvacUpdateTask : public AbstractGangTask {
void ShenandoahHeap::op_weak_roots() {
if (is_concurrent_weak_root_in_progress()) {
+ // Concurrent weak root processing
{
- // Concurrent weak root processing
+ ShenandoahTimingsTracker t(ShenandoahPhaseTimings::conc_weak_roots_work);
ShenandoahGCWorkerPhase worker_phase(ShenandoahPhaseTimings::conc_weak_roots_work);
ShenandoahConcurrentWeakRootsEvacUpdateTask task(ShenandoahPhaseTimings::conc_weak_roots_work);
workers()->run_task(&task);
@@ -2132,23 +2144,16 @@ void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool in_progress) {
void ShenandoahHeap::ref_processing_init() {
assert(_max_workers > 0, "Sanity");
- bool mt_processing = ParallelRefProcEnabled && (ParallelGCThreads > 1);
- bool mt_discovery = _max_workers > 1;
-
_ref_processor =
new ReferenceProcessor(&_subject_to_discovery, // is_subject_to_discovery
- mt_processing, // MT processing
+ _ref_proc_mt_processing, // MT processing
_max_workers, // Degree of MT processing
- mt_discovery, // MT discovery
+ _ref_proc_mt_discovery, // MT discovery
_max_workers, // Degree of MT discovery
false, // Reference discovery is not atomic
NULL, // No closure, should be installed before use
true); // Scale worker threads
- log_info(gc, init)("Reference processing: %s discovery, %s processing",
- mt_discovery ? "parallel" : "serial",
- mt_processing ? "parallel" : "serial");
-
shenandoah_assert_rp_isalive_not_installed();
}
@@ -2660,9 +2665,16 @@ bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
size_t slice = r->index() / _bitmap_regions_per_slice;
size_t off = _bitmap_bytes_per_slice * slice;
size_t len = _bitmap_bytes_per_slice;
- if (!os::commit_memory((char*)_bitmap_region.start() + off, len, false)) {
+ char* start = (char*) _bitmap_region.start() + off;
+
+ if (!os::commit_memory(start, len, false)) {
return false;
}
+
+ if (AlwaysPreTouch) {
+ os::pretouch_memory(start, start + len, _pretouch_bitmap_page_size);
+ }
+
return true;
}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
index 723b7c817be..f1c4a911fd2 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
@@ -457,6 +457,7 @@ class ShenandoahHeap : public CollectedHeap {
public:
ShenandoahCollectorPolicy* shenandoah_policy() const { return _shenandoah_policy; }
+ ShenandoahMode* mode() const { return _gc_mode; }
ShenandoahHeuristics* heuristics() const { return _heuristics; }
ShenandoahFreeSet* free_set() const { return _free_set; }
ShenandoahConcurrentMark* concurrent_mark() { return _scm; }
@@ -496,11 +497,15 @@ class ShenandoahHeap : public CollectedHeap {
AlwaysTrueClosure _subject_to_discovery;
ReferenceProcessor* _ref_processor;
ShenandoahSharedFlag _process_references;
+ bool _ref_proc_mt_discovery;
+ bool _ref_proc_mt_processing;
void ref_processing_init();
public:
ReferenceProcessor* ref_processor() { return _ref_processor; }
+ bool ref_processor_mt_discovery() { return _ref_proc_mt_discovery; }
+ bool ref_processor_mt_processing() { return _ref_proc_mt_processing; }
void set_process_references(bool pr);
bool process_references() const;
@@ -629,6 +634,9 @@ class ShenandoahHeap : public CollectedHeap {
size_t _bitmap_regions_per_slice;
size_t _bitmap_bytes_per_slice;
+ size_t _pretouch_heap_page_size;
+ size_t _pretouch_bitmap_page_size;
+
bool _bitmap_region_special;
bool _aux_bitmap_region_special;
@@ -665,6 +673,8 @@ class ShenandoahHeap : public CollectedHeap {
ShenandoahLiveData* get_liveness_cache(uint worker_id);
void flush_liveness_cache(uint worker_id);
+ size_t pretouch_heap_page_size() { return _pretouch_heap_page_size; }
+
// ---------- Evacuation support
//
private:
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp
index 820e9923da8..d50353eb455 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp
@@ -609,13 +609,6 @@ void ShenandoahHeapRegion::setup_sizes(size_t max_heap_size) {
guarantee(MaxTLABSizeBytes == 0, "we should only set it once");
MaxTLABSizeBytes = MaxTLABSizeWords * HeapWordSize;
assert (MaxTLABSizeBytes > MinTLABSize, "should be larger");
-
- log_info(gc, init)("Regions: " SIZE_FORMAT " x " SIZE_FORMAT "%s",
- RegionCount, byte_size_in_proper_unit(RegionSizeBytes), proper_unit_for_byte_size(RegionSizeBytes));
- log_info(gc, init)("Humongous object threshold: " SIZE_FORMAT "%s",
- byte_size_in_proper_unit(HumongousThresholdBytes), proper_unit_for_byte_size(HumongousThresholdBytes));
- log_info(gc, init)("Max TLAB size: " SIZE_FORMAT "%s",
- byte_size_in_proper_unit(MaxTLABSizeBytes), proper_unit_for_byte_size(MaxTLABSizeBytes));
}
void ShenandoahHeapRegion::do_commit() {
@@ -626,6 +619,9 @@ void ShenandoahHeapRegion::do_commit() {
if (!heap->commit_bitmap_slice(this)) {
report_java_out_of_memory("Unable to commit bitmaps for region");
}
+ if (AlwaysPreTouch) {
+ os::pretouch_memory(bottom(), end(), heap->pretouch_heap_page_size());
+ }
heap->increase_committed(ShenandoahHeapRegion::region_size_bytes());
}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahInitLogger.cpp b/src/hotspot/share/gc/shenandoah/shenandoahInitLogger.cpp
new file mode 100644
index 00000000000..0811600b211
--- /dev/null
+++ b/src/hotspot/share/gc/shenandoah/shenandoahInitLogger.cpp
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2020, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shenandoah/shenandoahHeap.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.hpp"
+#include "gc/shenandoah/shenandoahInitLogger.hpp"
+#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
+#include "gc/shenandoah/mode/shenandoahMode.hpp"
+#include "logging/log.hpp"
+#include "runtime/globals.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+void ShenandoahInitLogger::print_heap() {
+ GCInitLogger::print_heap();
+
+ ShenandoahHeap* heap = ShenandoahHeap::heap();
+
+ log_info(gc, init)("Mode: %s",
+ heap->mode()->name());
+
+ log_info(gc, init)("Heuristics: %s",
+ heap->heuristics()->name());
+
+ log_info(gc, init)("Heap Region Count: " SIZE_FORMAT,
+ ShenandoahHeapRegion::region_count());
+
+ log_info(gc, init)("Heap Region Size: " SIZE_FORMAT "%s",
+ byte_size_in_exact_unit(ShenandoahHeapRegion::region_size_bytes()),
+ exact_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));
+
+ log_info(gc, init)("TLAB Size Max: " SIZE_FORMAT "%s",
+ byte_size_in_exact_unit(ShenandoahHeapRegion::max_tlab_size_bytes()),
+ exact_unit_for_byte_size(ShenandoahHeapRegion::max_tlab_size_bytes()));
+
+ log_info(gc, init)("Humongous Object Threshold: " SIZE_FORMAT "%s",
+ byte_size_in_exact_unit(ShenandoahHeapRegion::humongous_threshold_bytes()),
+ exact_unit_for_byte_size(ShenandoahHeapRegion::humongous_threshold_bytes()));
+
+ log_info(gc, init)("Reference Processing: %s discovery, %s processing",
+ heap->ref_processor_mt_discovery() ? "Parallel" : "Serial",
+ heap->ref_processor_mt_processing() ? "Parallel" : "Serial");
+}
+
+void ShenandoahInitLogger::print() {
+ ShenandoahInitLogger init_log;
+ init_log.print_all();
+}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahInitLogger.hpp b/src/hotspot/share/gc/shenandoah/shenandoahInitLogger.hpp
new file mode 100644
index 00000000000..98c918c58f7
--- /dev/null
+++ b/src/hotspot/share/gc/shenandoah/shenandoahInitLogger.hpp
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2020, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHINITLOGGER_HPP
+#define SHARE_GC_SHENANDOAH_SHENANDOAHINITLOGGER_HPP
+
+#include "gc/shared/gcInitLogger.hpp"
+
+class ShenandoahInitLogger : public GCInitLogger {
+protected:
+ virtual void print_heap();
+
+public:
+ static void print();
+};
+
+#endif // SHARE_GC_SHENANDOAH_SHENANDOAHINITLOGGER_HPP
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.cpp b/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.cpp
index 5f99568e764..cb6c6261e95 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.cpp
@@ -37,6 +37,7 @@
#define SHENANDOAH_S_TIME_FORMAT "%8.3lf"
#define SHENANDOAH_US_TIME_FORMAT "%8.0lf"
#define SHENANDOAH_US_WORKER_TIME_FORMAT "%3.0lf"
+#define SHENANDOAH_PARALLELISM_FORMAT "%4.2lf"
#define SHENANDOAH_PHASE_DECLARE_NAME(type, title) \
title,
@@ -229,6 +230,14 @@ void ShenandoahPhaseTimings::print_cycle_on(outputStream* out) const {
double v = _cycle_data[i] * 1000000.0;
if (v > 0) {
out->print(SHENANDOAH_PHASE_NAME_FORMAT " " SHENANDOAH_US_TIME_FORMAT " us", _phase_names[i], v);
+
+ if (is_worker_phase(Phase(i))) {
+ double total = _cycle_data[i + 1] * 1000000.0;
+ if (total > 0) {
+ out->print(", parallelism: " SHENANDOAH_PARALLELISM_FORMAT "x", total / v);
+ }
+ }
+
if (_worker_data[i] != NULL) {
out->print(", workers (us): ");
for (uint c = 0; c < _max_workers; c++) {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp
index ff80988b7ab..bb5d8ff9708 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp
@@ -41,17 +41,7 @@ class ShenandoahVerifierTask {
public:
ShenandoahVerifierTask(oop o = NULL, int idx = 0): _obj(o) { }
ShenandoahVerifierTask(oop o, size_t idx): _obj(o) { }
- ShenandoahVerifierTask(const ShenandoahVerifierTask& t): _obj(t._obj) { }
-
- ShenandoahVerifierTask& operator =(const ShenandoahVerifierTask& t) {
- _obj = t._obj;
- return *this;
- }
- volatile ShenandoahVerifierTask&
- operator =(const volatile ShenandoahVerifierTask& t) volatile {
- (void)const_cast(_obj = t._obj);
- return *this;
- }
+ // Trivially copyable.
inline oop obj() const { return _obj; }
diff --git a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp
index 1bdc8c9f402..e69193570a9 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp
@@ -300,9 +300,6 @@
diagnostic(bool, ShenandoahAllocFailureALot, false, \
"Testing: make lots of artificial allocation failures.") \
\
- diagnostic(bool, ShenandoahAlwaysPreTouch, false, \
- "Pre-touch heap memory, overrides global AlwaysPreTouch.") \
- \
experimental(intx, ShenandoahMarkScanPrefetch, 32, \
"How many objects to prefetch ahead when traversing mark bitmaps."\
"Set to 0 to disable prefetching.") \
diff --git a/src/hotspot/share/gc/z/zOopClosures.inline.hpp b/src/hotspot/share/gc/z/zOopClosures.inline.hpp
index 28c74d34e54..da8f22ff9d8 100644
--- a/src/hotspot/share/gc/z/zOopClosures.inline.hpp
+++ b/src/hotspot/share/gc/z/zOopClosures.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -87,7 +87,7 @@ inline void ZPhantomKeepAliveOopClosure::do_oop(narrowOop* p) {
inline void ZPhantomCleanOopClosure::do_oop(oop* p) {
// Read the oop once, to make sure the liveness check
// and the later clearing uses the same value.
- const oop obj = *(volatile oop*)p;
+ const oop obj = Atomic::load(p);
if (ZBarrier::is_alive_barrier_on_phantom_oop(obj)) {
ZBarrier::keep_alive_barrier_on_phantom_oop_field(p);
} else {
diff --git a/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.cpp b/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.cpp
index 970a2cdbb2d..8161e172215 100644
--- a/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.cpp
+++ b/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.cpp
@@ -37,8 +37,7 @@
#include "oops/oop.inline.hpp"
#include "utilities/align.hpp"
- // max dfs depth should not exceed size of stack
-static const size_t max_dfs_depth = 4000;
+UnifiedOopRef DFSClosure::_reference_stack[max_dfs_depth];
void DFSClosure::find_leaks_from_edge(EdgeStore* edge_store,
BitSet* mark_bits,
@@ -72,11 +71,6 @@ void DFSClosure::find_leaks_from_root_set(EdgeStore* edge_store,
DFSClosure::DFSClosure(EdgeStore* edge_store, BitSet* mark_bits, const Edge* start_edge)
:_edge_store(edge_store), _mark_bits(mark_bits), _start_edge(start_edge),
_max_depth(max_dfs_depth), _depth(0), _ignore_root_set(false) {
- _reference_stack = NEW_C_HEAP_ARRAY(UnifiedOopRef, max_dfs_depth, mtTracing);
-}
-
-DFSClosure::~DFSClosure() {
- FREE_C_HEAP_ARRAY(UnifiedOopRef, _reference_stack);
}
void DFSClosure::closure_impl(UnifiedOopRef reference, const oop pointee) {
diff --git a/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.hpp b/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.hpp
index 4c2be1095f7..ba88b8dcd78 100644
--- a/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.hpp
+++ b/src/hotspot/share/jfr/leakprofiler/chains/dfsClosure.hpp
@@ -36,16 +36,18 @@ class EdgeQueue;
// Class responsible for iterating the heap depth-first
class DFSClosure : public BasicOopIterateClosure {
private:
+ // max dfs depth should not exceed size of stack
+ static const size_t max_dfs_depth = 4000;
+ static UnifiedOopRef _reference_stack[max_dfs_depth];
+
EdgeStore* _edge_store;
BitSet* _mark_bits;
const Edge*_start_edge;
size_t _max_depth;
size_t _depth;
bool _ignore_root_set;
- UnifiedOopRef* _reference_stack;
DFSClosure(EdgeStore* edge_store, BitSet* mark_bits, const Edge* start_edge);
- ~DFSClosure();
void add_chain();
void closure_impl(UnifiedOopRef reference, const oop pointee);
diff --git a/src/hotspot/share/jvmci/jvmciCompiler.cpp b/src/hotspot/share/jvmci/jvmciCompiler.cpp
index 73fc48bd05a..6d5dc16bd53 100644
--- a/src/hotspot/share/jvmci/jvmciCompiler.cpp
+++ b/src/hotspot/share/jvmci/jvmciCompiler.cpp
@@ -42,6 +42,7 @@ JVMCICompiler::JVMCICompiler() : AbstractCompiler(compiler_jvmci) {
// Initialization
void JVMCICompiler::initialize() {
+ assert(!is_c1_or_interpreter_only(), "JVMCI is launched, it's not c1/interpreter only mode");
if (!UseCompiler || !EnableJVMCI || !UseJVMCICompiler || !should_perform_init()) {
return;
}
diff --git a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp
index f0927e58f75..91fe38c9efa 100644
--- a/src/hotspot/share/jvmci/vmStructs_jvmci.cpp
+++ b/src/hotspot/share/jvmci/vmStructs_jvmci.cpp
@@ -381,7 +381,6 @@
#define VM_INT_CONSTANTS(declare_constant, declare_constant_with_value, declare_preprocessor_constant) \
declare_preprocessor_constant("ASSERT", DEBUG_ONLY(1) NOT_DEBUG(0)) \
declare_preprocessor_constant("FIELDINFO_TAG_SIZE", FIELDINFO_TAG_SIZE) \
- declare_preprocessor_constant("STACK_BIAS", STACK_BIAS) \
\
declare_constant(CompLevel_none) \
declare_constant(CompLevel_simple) \
@@ -726,8 +725,7 @@
declare_constant(VM_Version::CPU_CRC32) \
declare_constant(VM_Version::CPU_LSE) \
declare_constant(VM_Version::CPU_STXR_PREFETCH) \
- declare_constant(VM_Version::CPU_A53MAC) \
- declare_constant(VM_Version::CPU_DMB_ATOMICS)
+ declare_constant(VM_Version::CPU_A53MAC)
#endif
diff --git a/src/hotspot/share/memory/dynamicArchive.cpp b/src/hotspot/share/memory/dynamicArchive.cpp
index ed6143b7d14..c0a591acbcd 100644
--- a/src/hotspot/share/memory/dynamicArchive.cpp
+++ b/src/hotspot/share/memory/dynamicArchive.cpp
@@ -69,7 +69,7 @@ class DynamicArchiveBuilder : ResourceObj {
DumpRegion* _current_dump_space;
static size_t reserve_alignment() {
- return Metaspace::reserve_alignment();
+ return os::vm_allocation_granularity();
}
static const int _total_dump_regions = 3;
@@ -724,7 +724,7 @@ size_t DynamicArchiveBuilder::estimate_archive_size() {
address DynamicArchiveBuilder::reserve_space_and_init_buffer_to_target_delta() {
size_t total = estimate_archive_size();
- ReservedSpace rs = MetaspaceShared::reserve_shared_space(total);
+ ReservedSpace rs(total);
if (!rs.is_reserved()) {
log_error(cds, dynamic)("Failed to reserve %d bytes of output buffer.", (int)total);
vm_direct_exit(0);
diff --git a/src/hotspot/share/memory/filemap.cpp b/src/hotspot/share/memory/filemap.cpp
index f2171e3dae7..9645f7e003a 100644
--- a/src/hotspot/share/memory/filemap.cpp
+++ b/src/hotspot/share/memory/filemap.cpp
@@ -1079,6 +1079,8 @@ bool FileMapInfo::open_for_read() {
os::strerror(errno));
}
return false;
+ } else {
+ log_info(cds)("Opened archive %s.", _full_path);
}
_fd = fd;
@@ -1981,11 +1983,13 @@ void FileMapInfo::unmap_region(int i) {
size_t used = si->used();
size_t size = align_up(used, os::vm_allocation_granularity());
- if (mapped_base != NULL && size > 0 && si->mapped_from_file()) {
- log_info(cds)("Unmapping region #%d at base " INTPTR_FORMAT " (%s)", i, p2i(mapped_base),
- shared_region_name[i]);
- if (!os::unmap_memory(mapped_base, size)) {
- fatal("os::unmap_memory failed");
+ if (mapped_base != NULL) {
+ if (size > 0 && si->mapped_from_file()) {
+ log_info(cds)("Unmapping region #%d at base " INTPTR_FORMAT " (%s)", i, p2i(mapped_base),
+ shared_region_name[i]);
+ if (!os::unmap_memory(mapped_base, size)) {
+ fatal("os::unmap_memory failed");
+ }
}
si->set_mapped_base(NULL);
}
diff --git a/src/hotspot/share/memory/metaspace.cpp b/src/hotspot/share/memory/metaspace.cpp
index dca02183885..583f52d41f4 100644
--- a/src/hotspot/share/memory/metaspace.cpp
+++ b/src/hotspot/share/memory/metaspace.cpp
@@ -973,202 +973,96 @@ bool Metaspace::_initialized = false;
#define VIRTUALSPACEMULTIPLIER 2
#ifdef _LP64
-static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
-
-void Metaspace::set_narrow_klass_base_and_shift(ReservedSpace metaspace_rs, address cds_base) {
- assert(!DumpSharedSpaces, "narrow_klass is set by MetaspaceShared class.");
- // Figure out the narrow_klass_base and the narrow_klass_shift. The
- // narrow_klass_base is the lower of the metaspace base and the cds base
- // (if cds is enabled). The narrow_klass_shift depends on the distance
- // between the lower base and higher address.
- address lower_base = (address)metaspace_rs.base();
- address higher_address = (address)metaspace_rs.end();
- if (cds_base != NULL) {
- assert(UseSharedSpaces, "must be");
- lower_base = MIN2(lower_base, cds_base);
- } else {
- uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
- // If compressed class space fits in lower 32G, we don't need a base.
- if (higher_address <= (address)klass_encoding_max) {
- lower_base = 0; // Effectively lower base is zero.
- }
- }
-
- CompressedKlassPointers::set_base(lower_base);
-
- // CDS uses LogKlassAlignmentInBytes for narrow_klass_shift. See
- // MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() for
- // how dump time narrow_klass_shift is set. Although, CDS can work
- // with zero-shift mode also, to be consistent with AOT it uses
- // LogKlassAlignmentInBytes for klass shift so archived java heap objects
- // can be used at same time as AOT code.
- if (!UseSharedSpaces
- && (uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
- CompressedKlassPointers::set_shift(0);
- } else {
- CompressedKlassPointers::set_shift(LogKlassAlignmentInBytes);
- }
- AOTLoader::set_narrow_klass_shift();
-}
-
-// Try to allocate the metaspace at the requested addr.
-void Metaspace::allocate_metaspace_compressed_klass_ptrs(ReservedSpace metaspace_rs, char* requested_addr, address cds_base) {
- assert(!DumpSharedSpaces, "compress klass space is allocated by MetaspaceShared class.");
- assert(using_class_space(), "called improperly");
- assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
- assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
- "Metaspace size is too big");
- assert_is_aligned(requested_addr, _reserve_alignment);
- assert_is_aligned(cds_base, _reserve_alignment);
- assert_is_aligned(compressed_class_space_size(), _reserve_alignment);
-
- if (metaspace_rs.is_reserved()) {
- // CDS should have already reserved the space.
- assert(requested_addr == NULL, "not used");
- assert(cds_base != NULL, "CDS should have already reserved the memory space");
- } else {
- assert(cds_base == NULL, "must be");
- metaspace_rs = reserve_space(compressed_class_space_size(),
- _reserve_alignment, requested_addr,
- false /* use_requested_addr */);
- }
-
- if (!metaspace_rs.is_reserved()) {
- assert(cds_base == NULL, "CDS should have already reserved the memory space");
- // If no successful allocation then try to allocate the space anywhere. If
- // that fails then OOM doom. At this point we cannot try allocating the
- // metaspace as if UseCompressedClassPointers is off because too much
- // initialization has happened that depends on UseCompressedClassPointers.
- // So, UseCompressedClassPointers cannot be turned off at this point.
- metaspace_rs = reserve_space(compressed_class_space_size(),
- _reserve_alignment, NULL, false);
- if (!metaspace_rs.is_reserved()) {
- vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
- compressed_class_space_size()));
- }
- }
-
- if (cds_base == NULL) {
- // If we got here then the metaspace got allocated.
- MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
- }
-
- set_narrow_klass_base_and_shift(metaspace_rs, cds_base);
-
- initialize_class_space(metaspace_rs);
- LogTarget(Trace, gc, metaspace) lt;
- if (lt.is_enabled()) {
- ResourceMark rm;
- LogStream ls(lt);
- print_compressed_class_space(&ls, requested_addr);
- }
-}
-
-void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
- st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
- p2i(CompressedKlassPointers::base()), CompressedKlassPointers::shift());
+void Metaspace::print_compressed_class_space(outputStream* st) {
if (_class_space_list != NULL) {
address base = (address)_class_space_list->current_virtual_space()->bottom();
- st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
- compressed_class_space_size(), p2i(base));
- if (requested_addr != 0) {
- st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
- }
+ address top = base + compressed_class_space_size();
+ st->print("Compressed class space mapped at: " PTR_FORMAT "-" PTR_FORMAT ", size: " SIZE_FORMAT,
+ p2i(base), p2i(top), top - base);
st->cr();
}
}
-// For UseCompressedClassPointers the class space is reserved above the top of
-// the Java heap. The argument passed in is at the base of the compressed space.
+// Given a prereserved space, use that to set up the compressed class space list.
void Metaspace::initialize_class_space(ReservedSpace rs) {
- // The reserved space size may be bigger because of alignment, esp with UseLargePages
- assert(rs.size() >= CompressedClassSpaceSize,
- SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
assert(using_class_space(), "Must be using class space");
+ assert(_class_space_list == NULL && _chunk_manager_class == NULL, "Only call once");
+
+ assert(rs.size() == CompressedClassSpaceSize, SIZE_FORMAT " != " SIZE_FORMAT,
+ rs.size(), CompressedClassSpaceSize);
+ assert(is_aligned(rs.base(), Metaspace::reserve_alignment()) &&
+ is_aligned(rs.size(), Metaspace::reserve_alignment()),
+ "wrong alignment");
+
_class_space_list = new VirtualSpaceList(rs);
_chunk_manager_class = new ChunkManager(true/*is_class*/);
+  // This currently does not work because rs may be the result of a split
+ // operation and NMT seems not to be able to handle splits.
+ // Will be fixed with JDK-8243535.
+ // MemTracker::record_virtual_memory_type((address)rs.base(), mtClass);
+
if (!_class_space_list->initialization_succeeded()) {
vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
}
-}
-#endif // _LP64
+}
-#ifdef PREFERRED_METASPACE_ALIGNMENT
-ReservedSpace Metaspace::reserve_preferred_space(size_t size, size_t alignment,
- bool large_pages, char *requested_addr,
- bool use_requested_addr) {
- // Our compressed klass pointers may fit nicely into the lower 32 bits.
- if (requested_addr != NULL && (uint64_t)requested_addr + size < 4*G) {
- ReservedSpace rs(size, alignment, large_pages, requested_addr);
- if (rs.is_reserved() || use_requested_addr) {
- return rs;
- }
- }
+// Reserve a range of memory at an address suitable for en/decoding narrow
+// Klass pointers (see: CompressedClassPointers::is_valid_base()).
+// The returned address shall both be suitable as a compressed class pointers
+// base, and aligned to Metaspace::reserve_alignment (which is equal to or a
+// multiple of allocation granularity).
+// On error, returns an unreserved space.
+ReservedSpace Metaspace::reserve_address_space_for_compressed_classes(size_t size) {
- struct SearchParams { uintptr_t limit; size_t increment; };
+#ifdef AARCH64
+ const size_t alignment = Metaspace::reserve_alignment();
// AArch64: Try to align metaspace so that we can decode a compressed
// klass with a single MOVK instruction. We can do this iff the
// compressed class base is a multiple of 4G.
- // Aix: Search for a place where we can find memory. If we need to load
- // the base, 4G alignment is helpful, too.
-
- // Go faster above 32G as it is no longer possible to use a zero base.
- // AArch64: Additionally, ensure the lower LogKlassAlignmentInBytes
- // bits of the upper 32-bits of the address are zero so we can handle
- // a shift when decoding.
-
- static const SearchParams search_params[] = {
- // Limit Increment
- { 32*G, AARCH64_ONLY(4*)G, },
- { 1024*G, (4 AARCH64_ONLY(<< LogKlassAlignmentInBytes))*G },
+ // Additionally, above 32G, ensure the lower LogKlassAlignmentInBytes bits
+ // of the upper 32-bits of the address are zero so we can handle a shift
+ // when decoding.
+
+ static const struct {
+ address from;
+ address to;
+ size_t increment;
+ } search_ranges[] = {
+ { (address)(4*G), (address)(32*G), 4*G, },
+ { (address)(32*G), (address)(1024*G), (4 << LogKlassAlignmentInBytes) * G },
+ { NULL, NULL, 0 }
};
- // Null requested_addr means allocate anywhere so ensure the search
- // begins from a non-null address.
- char *a = MAX2(requested_addr, (char *)search_params[0].increment);
-
- for (const SearchParams *p = search_params;
- p < search_params + ARRAY_SIZE(search_params);
- ++p) {
- a = align_up(a, p->increment);
- if (use_requested_addr && a != requested_addr)
- return ReservedSpace();
-
- for (; a < (char *)p->limit; a += p->increment) {
- ReservedSpace rs(size, alignment, large_pages, a);
- if (rs.is_reserved() || use_requested_addr) {
+ for (int i = 0; search_ranges[i].from != NULL; i ++) {
+ address a = search_ranges[i].from;
+ assert(CompressedKlassPointers::is_valid_base(a), "Sanity");
+ while (a < search_ranges[i].to) {
+ ReservedSpace rs(size, Metaspace::reserve_alignment(),
+ false /*large_pages*/, (char*)a);
+ if (rs.is_reserved()) {
+ assert(a == (address)rs.base(), "Sanity");
return rs;
}
+ a += search_ranges[i].increment;
}
}
+ // Note: on AARCH64, if the code above does not find any good placement, we
+ // have no recourse. We return an empty space and the VM will exit.
return ReservedSpace();
-}
-#endif // PREFERRED_METASPACE_ALIGNMENT
-
-// Try to reserve a region for the metaspace at the requested address. Some
-// platforms have particular alignment requirements to allow efficient decode of
-// compressed class pointers in which case requested_addr is treated as hint for
-// where to start looking unless use_requested_addr is true.
-ReservedSpace Metaspace::reserve_space(size_t size, size_t alignment,
- char* requested_addr, bool use_requested_addr) {
- bool large_pages = false; // Don't use large pages for the class space.
- assert(is_aligned(requested_addr, alignment), "must be");
- assert(requested_addr != NULL || !use_requested_addr,
- "cannot set use_requested_addr with NULL address");
-
-#ifdef PREFERRED_METASPACE_ALIGNMENT
- return reserve_preferred_space(size, alignment, large_pages,
- requested_addr, use_requested_addr);
#else
- return ReservedSpace(size, alignment, large_pages, requested_addr);
-#endif
+ // Default implementation: Just reserve anywhere.
+ return ReservedSpace(size, Metaspace::reserve_alignment(), false, (char*)NULL);
+#endif // AARCH64
}
+#endif // _LP64
+
+
void Metaspace::ergo_initialize() {
if (DumpSharedSpaces) {
// Using large pages when dumping the shared archive is currently not implemented.
@@ -1229,16 +1123,23 @@ void Metaspace::ergo_initialize() {
void Metaspace::global_initialize() {
MetaspaceGC::initialize();
- bool class_space_inited = false;
+ // If UseCompressedClassPointers=1, we have two cases:
+ // a) if CDS is active (either dump time or runtime), it will create the ccs
+ // for us, initialize it and set up CompressedKlassPointers encoding.
+ // Class space will be reserved above the mapped archives.
+ // b) if CDS is not active, we will create the ccs on our own. It will be
+ // placed above the java heap, since we assume it has been placed in low
+ // address regions. We may rethink this (see JDK-8244943). Failing that,
+ // it will be placed anywhere.
+
#if INCLUDE_CDS
+ // case (a)
if (DumpSharedSpaces) {
MetaspaceShared::initialize_dumptime_shared_and_meta_spaces();
- class_space_inited = true;
} else if (UseSharedSpaces) {
// If any of the archived space fails to map, UseSharedSpaces
// is reset to false.
MetaspaceShared::initialize_runtime_shared_and_meta_spaces();
- class_space_inited = UseSharedSpaces;
}
if (DynamicDumpSharedSpaces && !UseSharedSpaces) {
@@ -1247,16 +1148,49 @@ void Metaspace::global_initialize() {
#endif // INCLUDE_CDS
#ifdef _LP64
- if (using_class_space() && !class_space_inited) {
- char* base;
- if (UseCompressedOops) {
- base = (char*)align_up(CompressedOops::end(), _reserve_alignment);
- } else {
- base = (char*)HeapBaseMinAddress;
+
+ if (using_class_space() && !class_space_is_initialized()) {
+ assert(!UseSharedSpaces && !DumpSharedSpaces, "CDS should be off at this point");
+
+ // case (b)
+ ReservedSpace rs;
+
+ // If UseCompressedOops=1, java heap may have been placed in coops-friendly
+ // territory already (lower address regions), so we attempt to place ccs
+ // right above the java heap.
+ // If UseCompressedOops=0, the heap has been placed anywhere - probably in
+ // high memory regions. In that case, try to place ccs at the lowest allowed
+ // mapping address.
+ address base = UseCompressedOops ? CompressedOops::end() : (address)HeapBaseMinAddress;
+ base = align_up(base, Metaspace::reserve_alignment());
+
+ const size_t size = align_up(CompressedClassSpaceSize, Metaspace::reserve_alignment());
+ if (base != NULL) {
+ if (CompressedKlassPointers::is_valid_base(base)) {
+ rs = ReservedSpace(size, Metaspace::reserve_alignment(),
+ false /* large */, (char*)base);
+ }
}
- ReservedSpace dummy;
- allocate_metaspace_compressed_klass_ptrs(dummy, base, 0);
+
+ // ...failing that, reserve anywhere, but let platform do optimized placement:
+ if (!rs.is_reserved()) {
+ rs = Metaspace::reserve_address_space_for_compressed_classes(size);
+ }
+
+ // ...failing that, give up.
+ if (!rs.is_reserved()) {
+ vm_exit_during_initialization(
+ err_msg("Could not allocate compressed class space: " SIZE_FORMAT " bytes",
+ compressed_class_space_size()));
+ }
+
+ // Initialize space
+ Metaspace::initialize_class_space(rs);
+
+ // Set up compressed class pointer encoding.
+ CompressedKlassPointers::initialize((address)rs.base(), rs.size());
}
+
#endif
// Initialize these before initializing the VirtualSpaceList
@@ -1285,6 +1219,20 @@ void Metaspace::global_initialize() {
_initialized = true;
+#ifdef _LP64
+ if (UseCompressedClassPointers) {
+ // Note: "cds" would be a better fit but keep this for backward compatibility.
+ LogTarget(Info, gc, metaspace) lt;
+ if (lt.is_enabled()) {
+ ResourceMark rm;
+ LogStream ls(lt);
+ CDS_ONLY(MetaspaceShared::print_on(&ls);)
+ Metaspace::print_compressed_class_space(&ls);
+ CompressedKlassPointers::print_mode(&ls);
+ }
+ }
+#endif
+
}
void Metaspace::post_initialize() {
diff --git a/src/hotspot/share/memory/metaspace.hpp b/src/hotspot/share/memory/metaspace.hpp
index 5861c562acb..32552cc4c44 100644
--- a/src/hotspot/share/memory/metaspace.hpp
+++ b/src/hotspot/share/memory/metaspace.hpp
@@ -171,25 +171,25 @@ class Metaspace : public AllStatic {
static void assert_not_frozen() {
assert(!_frozen, "sanity");
}
-#ifdef _LP64
- static void allocate_metaspace_compressed_klass_ptrs(ReservedSpace metaspace_rs, char* requested_addr, address cds_base);
-#endif
private:
#ifdef _LP64
- static void set_narrow_klass_base_and_shift(ReservedSpace metaspace_rs, address cds_base);
+ // Reserve a range of memory at an address suitable for en/decoding narrow
+  // Klass pointers (see: CompressedKlassPointers::is_valid_base()).
+ // The returned address shall both be suitable as a compressed class pointers
+ // base, and aligned to Metaspace::reserve_alignment (which is equal to or a
+ // multiple of allocation granularity).
+ // On error, returns an unreserved space.
+ static ReservedSpace reserve_address_space_for_compressed_classes(size_t size);
+
+ // Given a prereserved space, use that to set up the compressed class space list.
static void initialize_class_space(ReservedSpace rs);
-#endif
- static ReservedSpace reserve_space(size_t size, size_t alignment,
- char* requested_addr, bool use_requested_addr);
+ // Returns true if class space has been setup (initialize_class_space).
+ static bool class_space_is_initialized() { return _class_space_list != NULL; }
-#ifdef PREFERRED_METASPACE_ALIGNMENT
- static ReservedSpace reserve_preferred_space(size_t size, size_t alignment,
- bool large_pages, char *requested_addr,
- bool use_requested_addr);
#endif
public:
@@ -223,7 +223,7 @@ class Metaspace : public AllStatic {
static const char* metadata_type_name(Metaspace::MetadataType mdtype);
- static void print_compressed_class_space(outputStream* st, const char* requested_addr = 0) NOT_LP64({});
+ static void print_compressed_class_space(outputStream* st) NOT_LP64({});
// Return TRUE only if UseCompressedClassPointers is True.
static bool using_class_space() {
diff --git a/src/hotspot/share/memory/metaspace/virtualSpaceNode.hpp b/src/hotspot/share/memory/metaspace/virtualSpaceNode.hpp
index c8fbdaa431d..b80a4f5339a 100644
--- a/src/hotspot/share/memory/metaspace/virtualSpaceNode.hpp
+++ b/src/hotspot/share/memory/metaspace/virtualSpaceNode.hpp
@@ -84,7 +84,7 @@ class VirtualSpaceNode : public CHeapObj {
_next(NULL), _is_class(is_class), _rs(rs), _top(NULL), _container_count(0), _occupancy_map(NULL) {}
~VirtualSpaceNode();
- // Convenience functions for logical bottom and end
+ // Convenience functions for logical bottom and (committed) end
MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
diff --git a/src/hotspot/share/memory/metaspaceShared.cpp b/src/hotspot/share/memory/metaspaceShared.cpp
index 5bc8df59570..816698f3aa2 100644
--- a/src/hotspot/share/memory/metaspaceShared.cpp
+++ b/src/hotspot/share/memory/metaspaceShared.cpp
@@ -69,6 +69,7 @@
#include "runtime/vmOperations.hpp"
#include "utilities/align.hpp"
#include "utilities/bitMap.inline.hpp"
+#include "utilities/ostream.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/hashtable.inline.hpp"
#if INCLUDE_G1GC
@@ -191,7 +192,7 @@ void DumpRegion::init(ReservedSpace* rs, VirtualSpace* vs) {
void DumpRegion::pack(DumpRegion* next) {
assert(!is_packed(), "sanity");
- _end = (char*)align_up(_top, Metaspace::reserve_alignment());
+ _end = (char*)align_up(_top, MetaspaceShared::reserved_space_alignment());
_is_packed = true;
if (next != NULL) {
next->_rs = _rs;
@@ -237,52 +238,53 @@ char* MetaspaceShared::read_only_space_alloc(size_t num_bytes) {
return _ro_region.allocate(num_bytes);
}
-// When reserving an address range using ReservedSpace, we need an alignment that satisfies both:
-// os::vm_allocation_granularity() -- so that we can sub-divide this range into multiple mmap regions,
-// while keeping the first range at offset 0 of this range.
-// Metaspace::reserve_alignment() -- so we can pass the region to
-// Metaspace::allocate_metaspace_compressed_klass_ptrs.
-size_t MetaspaceShared::reserved_space_alignment() {
- size_t os_align = os::vm_allocation_granularity();
- size_t ms_align = Metaspace::reserve_alignment();
- if (os_align >= ms_align) {
- assert(os_align % ms_align == 0, "must be a multiple");
- return os_align;
- } else {
- assert(ms_align % os_align == 0, "must be a multiple");
- return ms_align;
- }
-}
+size_t MetaspaceShared::reserved_space_alignment() { return os::vm_allocation_granularity(); }
-ReservedSpace MetaspaceShared::reserve_shared_space(size_t size, char* requested_address) {
- return Metaspace::reserve_space(size, reserved_space_alignment(),
- requested_address, requested_address != NULL);
+#ifdef _LP64
+// Check SharedBaseAddress for validity. At this point, os::init() must
+// have been run.
+static void check_SharedBaseAddress() {
+ SharedBaseAddress = align_up(SharedBaseAddress,
+ MetaspaceShared::reserved_space_alignment());
+ if (!CompressedKlassPointers::is_valid_base((address)SharedBaseAddress)) {
+ log_warning(cds)("SharedBaseAddress=" PTR_FORMAT " is invalid for this "
+ "platform, option will be ignored.",
+ p2i((address)SharedBaseAddress));
+ SharedBaseAddress = Arguments::default_SharedBaseAddress();
+ }
}
+#endif
void MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() {
assert(DumpSharedSpaces, "should be called for dump time only");
- const size_t reserve_alignment = reserved_space_alignment();
+
+#ifdef _LP64
+ check_SharedBaseAddress();
+#endif
+
+ const size_t reserve_alignment = MetaspaceShared::reserved_space_alignment();
char* shared_base = (char*)align_up((char*)SharedBaseAddress, reserve_alignment);
#ifdef _LP64
- // On 64-bit VM, the heap and class space layout will be the same as if
- // you're running in -Xshare:on mode:
- //
- // +-- SharedBaseAddress (default = 0x800000000)
- // v
- // +-..---------+---------+ ... +----+----+----+--------------------+
- // | Heap | Archive | | MC | RW | RO | class space |
- // +-..---------+---------+ ... +----+----+----+--------------------+
- // |<-- MaxHeapSize -->| |<-- UnscaledClassSpaceMax = 4GB -->|
- //
+ assert(CompressedKlassPointers::is_valid_base((address)shared_base), "Sanity");
+ // On 64-bit VM we reserve a 4G range and, if UseCompressedClassPointers=1,
+ // will use that to house both the archives and the ccs. See below for
+ // details.
const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
const size_t cds_total = align_down(UnscaledClassSpaceMax, reserve_alignment);
#else
- // We don't support archives larger than 256MB on 32-bit due to limited virtual address space.
+ // We don't support archives larger than 256MB on 32-bit due to limited
+ // virtual address space.
size_t cds_total = align_down(256*M, reserve_alignment);
#endif
+ // Whether to use SharedBaseAddress as attach address.
bool use_requested_base = true;
+
+ if (shared_base == NULL) {
+ use_requested_base = false;
+ }
+
if (ArchiveRelocationMode == 1) {
log_info(cds)("ArchiveRelocationMode == 1: always allocate class space at an alternative address");
use_requested_base = false;
@@ -291,47 +293,104 @@ void MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() {
// First try to reserve the space at the specified SharedBaseAddress.
assert(!_shared_rs.is_reserved(), "must be");
if (use_requested_base) {
- _shared_rs = reserve_shared_space(cds_total, shared_base);
+ _shared_rs = ReservedSpace(cds_total, reserve_alignment,
+ false /* large */, (char*)shared_base);
+ if (_shared_rs.is_reserved()) {
+ assert(_shared_rs.base() == shared_base, "should match");
+ } else {
+ log_info(cds)("dumptime space reservation: failed to map at "
+ "SharedBaseAddress " PTR_FORMAT, p2i(shared_base));
+ }
}
- if (_shared_rs.is_reserved()) {
- assert(shared_base == 0 || _shared_rs.base() == shared_base, "should match");
- } else {
- // Get a mmap region anywhere if the SharedBaseAddress fails.
- _shared_rs = reserve_shared_space(cds_total);
+ if (!_shared_rs.is_reserved()) {
+ // Get a reserved space anywhere if attaching at the SharedBaseAddress
+ // fails:
+ if (UseCompressedClassPointers) {
+ // If we need to reserve class space as well, let the platform handle
+ // the reservation.
+ LP64_ONLY(_shared_rs =
+ Metaspace::reserve_address_space_for_compressed_classes(cds_total);)
+ NOT_LP64(ShouldNotReachHere();)
+ } else {
+ // anywhere is fine.
+ _shared_rs = ReservedSpace(cds_total, reserve_alignment,
+ false /* large */, (char*)NULL);
+ }
}
+
if (!_shared_rs.is_reserved()) {
vm_exit_during_initialization("Unable to reserve memory for shared space",
err_msg(SIZE_FORMAT " bytes.", cds_total));
}
#ifdef _LP64
- // During dump time, we allocate 4GB (UnscaledClassSpaceMax) of space and split it up:
- // + The upper 1 GB is used as the "temporary compressed class space" -- preload_classes()
- // will store Klasses into this space.
- // + The lower 3 GB is used for the archive -- when preload_classes() is done,
- // ArchiveCompactor will copy the class metadata into this space, first the RW parts,
- // then the RO parts.
-
- size_t max_archive_size = align_down(cds_total * 3 / 4, reserve_alignment);
- ReservedSpace tmp_class_space = _shared_rs.last_part(max_archive_size);
- CompressedClassSpaceSize = align_down(tmp_class_space.size(), reserve_alignment);
- _shared_rs = _shared_rs.first_part(max_archive_size);
if (UseCompressedClassPointers) {
- // Set up compress class pointers.
- CompressedKlassPointers::set_base((address)_shared_rs.base());
- // Set narrow_klass_shift to be LogKlassAlignmentInBytes. This is consistent
- // with AOT.
- CompressedKlassPointers::set_shift(LogKlassAlignmentInBytes);
- // Set the range of klass addresses to 4GB.
- CompressedKlassPointers::set_range(cds_total);
+
+ assert(CompressedKlassPointers::is_valid_base((address)_shared_rs.base()), "Sanity");
+
+ // On 64-bit VM, if UseCompressedClassPointers=1, the compressed class space
+ // must be allocated near the cds such as that the compressed Klass pointer
+ // encoding can be used to en/decode pointers from both cds and ccs. Since
+ // Metaspace cannot do this (it knows nothing about cds), we do it for
+ // Metaspace here and pass it the space to use for ccs.
+ //
+ // We do this by reserving space for the ccs behind the archives. Note
+ // however that ccs follows a different alignment
+ // (Metaspace::reserve_alignment), so there may be a gap between ccs and
+ // cds.
+ // We use a similar layout at runtime, see reserve_address_space_for_archives().
+ //
+ // +-- SharedBaseAddress (default = 0x800000000)
+ // v
+ // +-..---------+---------+ ... +----+----+----+--------+-----------------+
+ // | Heap | Archive | | MC | RW | RO | [gap] | class space |
+ // +-..---------+---------+ ... +----+----+----+--------+-----------------+
+ // |<-- MaxHeapSize -->| |<-- UnscaledClassSpaceMax = 4GB -->|
+ //
+ // Note: ccs must follow the archives, and the archives must start at the
+ // encoding base. However, the exact placement of ccs does not matter as
+    // long as it resides in the encoding range of CompressedKlassPointers
+ // and comes after the archive.
+ //
+ // We do this by splitting up the allocated 4G into 3G of archive space,
+ // followed by 1G for the ccs:
+ // + The upper 1 GB is used as the "temporary compressed class space"
+ // -- preload_classes() will store Klasses into this space.
+ // + The lower 3 GB is used for the archive -- when preload_classes()
+ // is done, ArchiveCompactor will copy the class metadata into this
+ // space, first the RW parts, then the RO parts.
+
+ // Starting address of ccs must be aligned to Metaspace::reserve_alignment()...
+ size_t class_space_size = align_down(_shared_rs.size() / 4, Metaspace::reserve_alignment());
+ address class_space_start = (address)align_down(_shared_rs.end() - class_space_size, Metaspace::reserve_alignment());
+ size_t archive_size = class_space_start - (address)_shared_rs.base();
+
+ ReservedSpace tmp_class_space = _shared_rs.last_part(archive_size);
+ _shared_rs = _shared_rs.first_part(archive_size);
+
+ // ... as does the size of ccs.
+ tmp_class_space = tmp_class_space.first_part(class_space_size);
+ CompressedClassSpaceSize = class_space_size;
+
+ // Let Metaspace initialize ccs
Metaspace::initialize_class_space(tmp_class_space);
+
+ // and set up CompressedKlassPointers encoding.
+ CompressedKlassPointers::initialize((address)_shared_rs.base(), cds_total);
+
+ log_info(cds)("narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
+ p2i(CompressedKlassPointers::base()), CompressedKlassPointers::shift());
+
+ log_info(cds)("Allocated temporary class space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
+ CompressedClassSpaceSize, p2i(tmp_class_space.base()));
+
+ assert(_shared_rs.end() == tmp_class_space.base() &&
+ is_aligned(_shared_rs.base(), MetaspaceShared::reserved_space_alignment()) &&
+ is_aligned(tmp_class_space.base(), Metaspace::reserve_alignment()) &&
+ is_aligned(tmp_class_space.size(), Metaspace::reserve_alignment()), "Sanity");
}
- log_info(cds)("narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
- p2i(CompressedKlassPointers::base()), CompressedKlassPointers::shift());
- log_info(cds)("Allocated temporary class space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
- CompressedClassSpaceSize, p2i(tmp_class_space.base()));
#endif
init_shared_dump_space(&_mc_region);
@@ -2073,6 +2132,7 @@ bool MetaspaceShared::is_shared_dynamic(void* p) {
void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() {
assert(UseSharedSpaces, "Must be called when UseSharedSpaces is enabled");
MapArchiveResult result = MAP_ARCHIVE_OTHER_FAILURE;
+
FileMapInfo* static_mapinfo = open_static_archive();
FileMapInfo* dynamic_mapinfo = NULL;
@@ -2149,7 +2209,8 @@ MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, File
bool use_requested_addr) {
PRODUCT_ONLY(if (ArchiveRelocationMode == 1 && use_requested_addr) {
// For product build only -- this is for benchmarking the cost of doing relocation.
- // For debug builds, the check is done in FileMapInfo::map_regions for better test coverage.
+ // For debug builds, the check is done below, after reserving the space, for better test coverage
+ // (see comment below).
log_info(cds)("ArchiveRelocationMode == 1: always map archive(s) at an alternative address");
return MAP_ARCHIVE_MMAP_FAILURE;
});
@@ -2165,26 +2226,71 @@ MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, File
assert(static_mapinfo->mapping_end_offset() == dynamic_mapinfo->mapping_base_offset(), "no gap");
}
- ReservedSpace main_rs, archive_space_rs, class_space_rs;
+ ReservedSpace archive_space_rs, class_space_rs;
MapArchiveResult result = MAP_ARCHIVE_OTHER_FAILURE;
char* mapped_base_address = reserve_address_space_for_archives(static_mapinfo, dynamic_mapinfo,
- use_requested_addr, main_rs, archive_space_rs,
+ use_requested_addr, archive_space_rs,
class_space_rs);
if (mapped_base_address == NULL) {
result = MAP_ARCHIVE_MMAP_FAILURE;
} else {
+
+#ifdef ASSERT
+ // Some sanity checks after reserving address spaces for archives
+ // and class space.
+ assert(archive_space_rs.is_reserved(), "Sanity");
+ if (Metaspace::using_class_space()) {
+ // Class space must closely follow the archive space. Both spaces
+ // must be aligned correctly.
+ assert(class_space_rs.is_reserved(),
+ "A class space should have been reserved");
+ assert(class_space_rs.base() >= archive_space_rs.end(),
+ "class space should follow the cds archive space");
+ assert(is_aligned(archive_space_rs.base(),
+ MetaspaceShared::reserved_space_alignment()),
+ "Archive space misaligned");
+ assert(is_aligned(class_space_rs.base(),
+ Metaspace::reserve_alignment()),
+ "class space misaligned");
+ }
+#endif // ASSERT
+
log_debug(cds)("Reserved archive_space_rs [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (" SIZE_FORMAT ") bytes",
p2i(archive_space_rs.base()), p2i(archive_space_rs.end()), archive_space_rs.size());
log_debug(cds)("Reserved class_space_rs [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (" SIZE_FORMAT ") bytes",
p2i(class_space_rs.base()), p2i(class_space_rs.end()), class_space_rs.size());
+
+ if (MetaspaceShared::use_windows_memory_mapping()) {
+ // We have now reserved address space for the archives, and will map in
+ // the archive files into this space.
+ //
+ // Special handling for Windows: on Windows we cannot map a file view
+ // into an existing memory mapping. So, we unmap the address range we
+ // just reserved again, which will make it available for mapping the
+ // archives.
+ // Reserving this range has not been for naught however since it makes
+ // us reasonably sure the address range is available.
+ //
+ // But still it may fail, since between unmapping the range and mapping
+ // in the archive someone else may grab the address space. Therefore
+ // there is a fallback in FileMap::map_region() where we just read in
+ // the archive files sequentially instead of mapping it in. We couple
+ // this with use_requested_addr, since we're going to patch all the
+ // pointers anyway so there's no benefit to mmap.
+ if (use_requested_addr) {
+ log_info(cds)("Windows mmap workaround: releasing archive space.");
+ archive_space_rs.release();
+ }
+ }
MapArchiveResult static_result = map_archive(static_mapinfo, mapped_base_address, archive_space_rs);
MapArchiveResult dynamic_result = (static_result == MAP_ARCHIVE_SUCCESS) ?
map_archive(dynamic_mapinfo, mapped_base_address, archive_space_rs) : MAP_ARCHIVE_OTHER_FAILURE;
DEBUG_ONLY(if (ArchiveRelocationMode == 1 && use_requested_addr) {
- // This is for simulating mmap failures at the requested address. In debug builds, we do it
- // here (after all archives have possibly been mapped), so we can thoroughly test the code for
- // failure handling (releasing all allocated resource, etc).
+ // This is for simulating mmap failures at the requested address. In
+ // debug builds, we do it here (after all archives have possibly been
+ // mapped), so we can thoroughly test the code for failure handling
+ // (releasing all allocated resource, etc).
log_info(cds)("ArchiveRelocationMode == 1: always map archive(s) at an alternative address");
if (static_result == MAP_ARCHIVE_SUCCESS) {
static_result = MAP_ARCHIVE_MMAP_FAILURE;
@@ -2217,121 +2323,203 @@ MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, File
}
if (result == MAP_ARCHIVE_SUCCESS) {
- if (!main_rs.is_reserved() && class_space_rs.is_reserved()) {
- MemTracker::record_virtual_memory_type((address)class_space_rs.base(), mtClass);
- }
SharedBaseAddress = (size_t)mapped_base_address;
LP64_ONLY({
if (Metaspace::using_class_space()) {
- assert(class_space_rs.is_reserved(), "must be");
- char* cds_base = static_mapinfo->mapped_base();
- Metaspace::allocate_metaspace_compressed_klass_ptrs(class_space_rs, NULL, (address)cds_base);
+ // Set up ccs in metaspace.
+ Metaspace::initialize_class_space(class_space_rs);
+
+ // Set up compressed Klass pointer encoding: the encoding range must
+ // cover both archive and class space.
+ address cds_base = (address)static_mapinfo->mapped_base();
+ address ccs_end = (address)class_space_rs.end();
+ CompressedKlassPointers::initialize(cds_base, ccs_end - cds_base);
+
// map_heap_regions() compares the current narrow oop and klass encodings
// with the archived ones, so it must be done after all encodings are determined.
static_mapinfo->map_heap_regions();
- CompressedKlassPointers::set_range(CompressedClassSpaceSize);
}
});
} else {
unmap_archive(static_mapinfo);
unmap_archive(dynamic_mapinfo);
- release_reserved_spaces(main_rs, archive_space_rs, class_space_rs);
+ release_reserved_spaces(archive_space_rs, class_space_rs);
}
return result;
}
+
+// This will reserve two address spaces suitable to house Klass structures, one
+// for the cds archives (static archive and optionally dynamic archive) and
+// optionally one more for ccs.
+//
+// Since both spaces must fall within the compressed class pointer encoding
+// range, they are allocated close to each other.
+//
+// Space for archives will be reserved first, followed by a potential gap,
+// followed by the space for ccs:
+//
+// +-- Base address A B End
+// | | | |
+// v v v v
+// +-------------+--------------+ +----------------------+
+// | static arc | [dyn. arch] | [gap] | compr. class space |
+// +-------------+--------------+ +----------------------+
+//
+// (The gap may result from different alignment requirements between metaspace
+// and CDS)
+//
+// If UseCompressedClassPointers is disabled, only one address space will be
+// reserved:
+//
+// +-- Base address End
+// | |
+// v v
+// +-------------+--------------+
+// | static arc | [dyn. arch] |
+// +-------------+--------------+
+//
+// Base address: If use_archive_base_addr is true, the Base address is
+// determined by the address stored in the static archive. If
+// use_archive_base_addr is false, this base address is determined
+// by the platform.
+//
+// If UseCompressedClassPointers=1, the range encompassing both spaces will be
+// suitable to en/decode narrow Klass pointers: the base will be valid for
+// encoding, the range [Base, End) must not surpass KlassEncodingMetaspaceMax.
+//
+// Return:
+//
+// - On success:
+// - archive_space_rs will be reserved and large enough to host static and
+// if needed dynamic archive: [Base, A).
+// archive_space_rs.base and size will be aligned to CDS reserve
+// granularity.
+// - class_space_rs: If UseCompressedClassPointers=1, class_space_rs will
+// be reserved. Its start address will be aligned to metaspace reserve
+// alignment, which may differ from CDS alignment. It will follow the cds
+// archive space, close enough such that narrow class pointer encoding
+// covers both spaces.
+// If UseCompressedClassPointers=0, class_space_rs remains unreserved.
+// - On error: NULL is returned and the spaces remain unreserved.
char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_mapinfo,
FileMapInfo* dynamic_mapinfo,
- bool use_requested_addr,
- ReservedSpace& main_rs,
+ bool use_archive_base_addr,
ReservedSpace& archive_space_rs,
ReservedSpace& class_space_rs) {
- const bool use_klass_space = NOT_LP64(false) LP64_ONLY(Metaspace::using_class_space());
- const size_t class_space_size = NOT_LP64(0) LP64_ONLY(Metaspace::compressed_class_space_size());
- if (use_klass_space) {
- assert(class_space_size > 0, "CompressedClassSpaceSize must have been validated");
- }
- if (use_requested_addr && !is_aligned(static_mapinfo->requested_base_address(), reserved_space_alignment())) {
- return NULL;
- }
+ address const base_address = (address) (use_archive_base_addr ? static_mapinfo->requested_base_address() : NULL);
+ const size_t archive_space_alignment = MetaspaceShared::reserved_space_alignment();
// Size and requested location of the archive_space_rs (for both static and dynamic archives)
- size_t base_offset = static_mapinfo->mapping_base_offset();
- size_t end_offset = (dynamic_mapinfo == NULL) ? static_mapinfo->mapping_end_offset() : dynamic_mapinfo->mapping_end_offset();
- assert(base_offset == 0, "must be");
- assert(is_aligned(end_offset, os::vm_allocation_granularity()), "must be");
- assert(is_aligned(base_offset, os::vm_allocation_granularity()), "must be");
-
- // In case reserved_space_alignment() != os::vm_allocation_granularity()
- assert((size_t)os::vm_allocation_granularity() <= reserved_space_alignment(), "must be");
- end_offset = align_up(end_offset, reserved_space_alignment());
-
- size_t archive_space_size = end_offset - base_offset;
-
- // Special handling for Windows because it cannot mmap into a reserved space:
- // use_requested_addr: We just map each region individually, and give up if any one of them fails.
- // !use_requested_addr: We reserve the space first, and then os::read in all the regions (instead of mmap).
- // We're going to patch all the pointers anyway so there's no benefit for mmap.
-
- if (use_requested_addr) {
- char* archive_space_base = static_mapinfo->requested_base_address() + base_offset;
- char* archive_space_end = archive_space_base + archive_space_size;
- if (!MetaspaceShared::use_windows_memory_mapping()) {
- archive_space_rs = reserve_shared_space(archive_space_size, archive_space_base);
- if (!archive_space_rs.is_reserved()) {
- return NULL;
- }
- }
- if (use_klass_space) {
- // Make sure we can map the klass space immediately following the archive_space space
- // Don't call reserve_shared_space here as that may try to enforce platform-specific
- // alignment rules which only apply to the archive base address
- char* class_space_base = archive_space_end;
- class_space_rs = ReservedSpace(class_space_size, reserved_space_alignment(),
- false /* large_pages */, class_space_base);
- if (!class_space_rs.is_reserved()) {
- return NULL;
- }
- }
- return static_mapinfo->requested_base_address();
- } else {
- if (use_klass_space) {
- main_rs = reserve_shared_space(archive_space_size + class_space_size);
- if (main_rs.is_reserved()) {
- archive_space_rs = main_rs.first_part(archive_space_size, reserved_space_alignment(), /*split=*/true);
- class_space_rs = main_rs.last_part(archive_space_size);
- }
- } else {
- main_rs = reserve_shared_space(archive_space_size);
- archive_space_rs = main_rs;
+ assert(static_mapinfo->mapping_base_offset() == 0, "Must be");
+ size_t archive_end_offset = (dynamic_mapinfo == NULL) ? static_mapinfo->mapping_end_offset() : dynamic_mapinfo->mapping_end_offset();
+ size_t archive_space_size = align_up(archive_end_offset, archive_space_alignment);
+
+ // If a base address is given, it must have valid alignment and be suitable as encoding base.
+ if (base_address != NULL) {
+ assert(is_aligned(base_address, archive_space_alignment),
+ "Archive base address invalid: " PTR_FORMAT ".", p2i(base_address));
+ if (Metaspace::using_class_space()) {
+ assert(CompressedKlassPointers::is_valid_base(base_address),
+ "Archive base address invalid: " PTR_FORMAT ".", p2i(base_address));
}
+ }
+
+ if (!Metaspace::using_class_space()) {
+ // Get the simple case out of the way first:
+ // no compressed class space, simple allocation.
+ archive_space_rs = ReservedSpace(archive_space_size, archive_space_alignment,
+ false /* bool large */, (char*)base_address);
if (archive_space_rs.is_reserved()) {
+ assert(base_address == NULL ||
+ (address)archive_space_rs.base() == base_address, "Sanity");
return archive_space_rs.base();
- } else {
- return NULL;
}
+ return NULL;
+ }
+
+#ifdef _LP64
+
+ // Complex case: two spaces adjacent to each other, both to be addressable
+ // with narrow class pointers.
+ // We reserve the whole range spanning both spaces, then split that range up.
+
+ const size_t class_space_alignment = Metaspace::reserve_alignment();
+
+ // To simplify matters, lets assume that metaspace alignment will always be
+ // equal or a multiple of archive alignment.
+ assert(is_power_of_2(class_space_alignment) &&
+ is_power_of_2(archive_space_alignment) &&
+ class_space_alignment >= archive_space_alignment,
+ "Sanity");
+
+ const size_t class_space_size = CompressedClassSpaceSize;
+ assert(CompressedClassSpaceSize > 0 &&
+ is_aligned(CompressedClassSpaceSize, class_space_alignment),
+ "CompressedClassSpaceSize malformed: "
+ SIZE_FORMAT, CompressedClassSpaceSize);
+
+ const size_t ccs_begin_offset = align_up(archive_space_size,
+ class_space_alignment);
+ const size_t gap_size = ccs_begin_offset - archive_space_size;
+
+ const size_t total_range_size =
+ align_up(archive_space_size + gap_size + class_space_size,
+ os::vm_allocation_granularity());
+
+ ReservedSpace total_rs;
+ if (base_address != NULL) {
+ // Reserve at the given archive base address, or not at all.
+ total_rs = ReservedSpace(total_range_size, archive_space_alignment,
+ false /* bool large */, (char*) base_address);
+ } else {
+ // Reserve at any address, but leave it up to the platform to choose a good one.
+ total_rs = Metaspace::reserve_address_space_for_compressed_classes(total_range_size);
}
+
+ if (!total_rs.is_reserved()) {
+ return NULL;
+ }
+
+ // Paranoid checks:
+ assert(base_address == NULL || (address)total_rs.base() == base_address,
+ "Sanity (" PTR_FORMAT " vs " PTR_FORMAT ")", p2i(base_address), p2i(total_rs.base()));
+ assert(is_aligned(total_rs.base(), archive_space_alignment), "Sanity");
+ assert(total_rs.size() == total_range_size, "Sanity");
+ assert(CompressedKlassPointers::is_valid_base((address)total_rs.base()), "Sanity");
+
+ // Now split up the space into ccs and cds archive. For simplicity, just leave
+ // the gap reserved at the end of the archive space.
+ archive_space_rs = total_rs.first_part(ccs_begin_offset,
+ (size_t)os::vm_allocation_granularity(),
+ /*split=*/true);
+ class_space_rs = total_rs.last_part(ccs_begin_offset);
+
+ assert(is_aligned(archive_space_rs.base(), archive_space_alignment), "Sanity");
+ assert(is_aligned(archive_space_rs.size(), archive_space_alignment), "Sanity");
+ assert(is_aligned(class_space_rs.base(), class_space_alignment), "Sanity");
+ assert(is_aligned(class_space_rs.size(), class_space_alignment), "Sanity");
+
+ return archive_space_rs.base();
+
+#else
+ ShouldNotReachHere();
+ return NULL;
+#endif
+
}
-void MetaspaceShared::release_reserved_spaces(ReservedSpace& main_rs,
- ReservedSpace& archive_space_rs,
+void MetaspaceShared::release_reserved_spaces(ReservedSpace& archive_space_rs,
ReservedSpace& class_space_rs) {
- if (main_rs.is_reserved()) {
- assert(main_rs.contains(archive_space_rs.base()), "must be");
- assert(main_rs.contains(class_space_rs.base()), "must be");
- log_debug(cds)("Released shared space (archive+classes) " INTPTR_FORMAT, p2i(main_rs.base()));
- main_rs.release();
- } else {
- if (archive_space_rs.is_reserved()) {
- log_debug(cds)("Released shared space (archive) " INTPTR_FORMAT, p2i(archive_space_rs.base()));
- archive_space_rs.release();
- }
- if (class_space_rs.is_reserved()) {
- log_debug(cds)("Released shared space (classes) " INTPTR_FORMAT, p2i(class_space_rs.base()));
- class_space_rs.release();
- }
+ if (archive_space_rs.is_reserved()) {
+ log_debug(cds)("Released shared space (archive) " INTPTR_FORMAT, p2i(archive_space_rs.base()));
+ archive_space_rs.release();
+ }
+ if (class_space_rs.is_reserved()) {
+ log_debug(cds)("Released shared space (classes) " INTPTR_FORMAT, p2i(class_space_rs.base()));
+ class_space_rs.release();
}
}
@@ -2476,3 +2664,31 @@ intx MetaspaceShared::final_delta() {
return intx(Arguments::default_SharedBaseAddress()) // We want the archive to be mapped to here at runtime
- intx(SharedBaseAddress); // .. but the archive is mapped at here at dump time
}
+
+void MetaspaceShared::print_on(outputStream* st) {
+ if (UseSharedSpaces || DumpSharedSpaces) {
+ st->print("CDS archive(s) mapped at: ");
+ address base;
+ address top;
+ if (UseSharedSpaces) { // Runtime
+ base = (address)MetaspaceObj::shared_metaspace_base();
+ address static_top = (address)_shared_metaspace_static_top;
+ top = (address)MetaspaceObj::shared_metaspace_top();
+ st->print("[" PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "), ", p2i(base), p2i(static_top), p2i(top));
+ } else if (DumpSharedSpaces) { // Dump Time
+ base = (address)_shared_rs.base();
+ top = (address)_shared_rs.end();
+ st->print("[" PTR_FORMAT "-" PTR_FORMAT "), ", p2i(base), p2i(top));
+ }
+ st->print("size " SIZE_FORMAT ", ", top - base);
+ st->print("SharedBaseAddress: " PTR_FORMAT ", ArchiveRelocationMode: %d.", SharedBaseAddress, (int)ArchiveRelocationMode);
+ } else {
+ st->print("CDS disabled.");
+ }
+ st->cr();
+}
+
+
+
+
+
diff --git a/src/hotspot/share/memory/metaspaceShared.hpp b/src/hotspot/share/memory/metaspaceShared.hpp
index 4a4ee709f89..ad3ba657140 100644
--- a/src/hotspot/share/memory/metaspaceShared.hpp
+++ b/src/hotspot/share/memory/metaspaceShared.hpp
@@ -36,6 +36,7 @@
#define MAX_SHARED_DELTA (0x7FFFFFFF)
+class outputStream;
class FileMapInfo;
class CHeapBitMap;
struct ArchiveHeapOopmapInfo;
@@ -166,6 +167,8 @@ class ReadClosure : public SerializeClosure {
class MetaspaceShared : AllStatic {
// CDS support
+
+ // Note: _shared_rs and _symbol_rs are only used at dump time.
static ReservedSpace _shared_rs;
static VirtualSpace _shared_vs;
static ReservedSpace _symbol_rs;
@@ -227,6 +230,8 @@ class MetaspaceShared : AllStatic {
static void initialize_runtime_shared_and_meta_spaces() NOT_CDS_RETURN;
static void post_initialize(TRAPS) NOT_CDS_RETURN;
+ static void print_on(outputStream* st);
+
// Delta of this object from SharedBaseAddress
static uintx object_delta_uintx(void* obj);
@@ -296,7 +301,6 @@ class MetaspaceShared : AllStatic {
static void link_and_cleanup_shared_classes(TRAPS) NOT_CDS_RETURN;
#if INCLUDE_CDS
- static ReservedSpace reserve_shared_space(size_t size, char* requested_address = NULL);
static size_t reserved_space_alignment();
static void init_shared_dump_space(DumpRegion* first_space);
static DumpRegion* misc_code_dump_space();
@@ -369,16 +373,15 @@ class MetaspaceShared : AllStatic {
static void read_extra_data(const char* filename, TRAPS) NOT_CDS_RETURN;
static FileMapInfo* open_static_archive();
static FileMapInfo* open_dynamic_archive();
+ // use_requested_addr: If true (default), attempt to map at the address the archives were dumped at.
static MapArchiveResult map_archives(FileMapInfo* static_mapinfo, FileMapInfo* dynamic_mapinfo,
bool use_requested_addr);
static char* reserve_address_space_for_archives(FileMapInfo* static_mapinfo,
FileMapInfo* dynamic_mapinfo,
- bool use_requested_addr,
- ReservedSpace& main_rs,
+ bool use_archive_base_addr,
ReservedSpace& archive_space_rs,
ReservedSpace& class_space_rs);
- static void release_reserved_spaces(ReservedSpace& main_rs,
- ReservedSpace& archive_space_rs,
+ static void release_reserved_spaces(ReservedSpace& archive_space_rs,
ReservedSpace& class_space_rs);
static MapArchiveResult map_archive(FileMapInfo* mapinfo, char* mapped_base_address, ReservedSpace rs);
static void unmap_archive(FileMapInfo* mapinfo);
diff --git a/src/hotspot/share/memory/universe.cpp b/src/hotspot/share/memory/universe.cpp
index 593d58c969b..1f1a0bcf864 100644
--- a/src/hotspot/share/memory/universe.cpp
+++ b/src/hotspot/share/memory/universe.cpp
@@ -719,13 +719,9 @@ jint universe_init() {
jint Universe::initialize_heap() {
assert(_collectedHeap == NULL, "Heap already created");
_collectedHeap = GCConfig::arguments()->create_heap();
- jint status = _collectedHeap->initialize();
- if (status == JNI_OK) {
- log_info(gc)("Using %s", _collectedHeap->name());
- }
-
- return status;
+ log_info(gc)("Using %s", _collectedHeap->name());
+ return _collectedHeap->initialize();
}
void Universe::initialize_tlab() {
diff --git a/src/hotspot/share/oops/compressedOops.cpp b/src/hotspot/share/oops/compressedOops.cpp
index 22057e03674..069e7e3417e 100644
--- a/src/hotspot/share/oops/compressedOops.cpp
+++ b/src/hotspot/share/oops/compressedOops.cpp
@@ -66,7 +66,7 @@ void CompressedOops::initialize(const ReservedHeapSpace& heap_space) {
_heap_address_range = heap_space.region();
- LogTarget(Info, gc, heap, coops) lt;
+ LogTarget(Debug, gc, heap, coops) lt;
if (lt.is_enabled()) {
ResourceMark rm;
LogStream ls(lt);
@@ -182,7 +182,103 @@ void CompressedOops::print_mode(outputStream* st) {
NarrowPtrStruct CompressedKlassPointers::_narrow_klass = { NULL, 0, true };
// CompressedClassSpaceSize set to 1GB, but appear 3GB away from _narrow_ptrs_base during CDS dump.
-uint64_t CompressedKlassPointers::_narrow_klass_range = (uint64_t(max_juint)+1);;
+// (TODO: we should #ifdef out CompressedKlassPointers completely for 32-bit and guard all
+// call sites which are compiled for 32-bit with LP64_ONLY).
+size_t CompressedKlassPointers::_range = 0;
+
+
+// Given an address range [addr, addr+len) which the encoding is supposed to
+// cover, choose base, shift and range.
+// The address range is the expected range of uncompressed Klass pointers we
+// will encounter (and the implicit promise that there will be no Klass
+// structures outside this range).
+void CompressedKlassPointers::initialize(address addr, size_t len) {
+#ifdef _LP64
+ assert(is_valid_base(addr), "Address must be a valid encoding base");
+ address const end = addr + len;
+
+ address base;
+ int shift;
+ size_t range;
+
+ if (UseSharedSpaces || DumpSharedSpaces) {
+
+ // Special requirements if CDS is active:
+ // Encoding base and shift must be the same between dump and run time.
+ // CDS takes care that the SharedBaseAddress and CompressedClassSpaceSize
+ // are the same. Archive size will probably be different at runtime, but
+ // it can only be smaller than at dump time, never larger, since archives get
+ // shrunk at the end of the dump process.
+ // From that it follows that the range [addr, len) we are handed in at
+ // runtime will start at the same address as at dump time, and its len
+ // may be smaller at runtime than it was at dump time.
+ //
+ // To be very careful here, we avoid any optimizations and just keep using
+ // the same address and shift value. Specifically we avoid using zero-based
+ // encoding. We also set the expected value range to 4G (encoding range
+ // cannot be larger than that).
+
+ base = addr;
+ shift = LogKlassAlignmentInBytes;
+
+ // This must be true since at dumptime cds+ccs is 4G, at runtime it can
+ // only be smaller, see comment above.
+ assert(len <= 4 * G, "Encoding range cannot be larger than 4G");
+ range = 4 * G;
+
+ } else {
+
+ // Otherwise we attempt to use a zero base if the range fits in lower 32G.
+ if (end <= (address)KlassEncodingMetaspaceMax) {
+ base = 0;
+ } else {
+ base = addr;
+ }
+
+ // Highest offset a Klass* can ever have in relation to base.
+ range = end - base;
+
+ // We may not even need a shift if the range fits into 32bit:
+ const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
+ if (range < UnscaledClassSpaceMax) {
+ shift = 0;
+ } else {
+ shift = LogKlassAlignmentInBytes;
+ }
+
+ }
+
+ set_base(base);
+ set_shift(shift);
+ set_range(range);
+
+ // Note: this may modify our shift.
+ AOTLoader::set_narrow_klass_shift();
+#else
+ fatal("64bit only.");
+#endif
+}
+
+// Given an address p, return true if p can be used as an encoding base.
+// (Some platforms have restrictions of what constitutes a valid base address).
+bool CompressedKlassPointers::is_valid_base(address p) {
+#ifdef AARCH64
+ // Below 32G, base must be aligned to 4G.
+ // Above that point, base must be aligned to 32G
+ if (p < (address)(32 * G)) {
+ return is_aligned(p, 4 * G);
+ }
+ return is_aligned(p, (4 << LogKlassAlignmentInBytes) * G);
+#else
+ return true;
+#endif
+}
+
+void CompressedKlassPointers::print_mode(outputStream* st) {
+ st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d, "
+ "Narrow klass range: " SIZE_FORMAT_HEX, p2i(base()), shift(),
+ range());
+}
void CompressedKlassPointers::set_base(address base) {
assert(UseCompressedClassPointers, "no compressed klass ptrs?");
@@ -194,7 +290,7 @@ void CompressedKlassPointers::set_shift(int shift) {
_narrow_klass._shift = shift;
}
-void CompressedKlassPointers::set_range(uint64_t range) {
+void CompressedKlassPointers::set_range(size_t range) {
assert(UseCompressedClassPointers, "no compressed klass ptrs?");
- _narrow_klass_range = range;
+ _range = range;
}
diff --git a/src/hotspot/share/oops/compressedOops.hpp b/src/hotspot/share/oops/compressedOops.hpp
index 8985afe236f..3522606b457 100644
--- a/src/hotspot/share/oops/compressedOops.hpp
+++ b/src/hotspot/share/oops/compressedOops.hpp
@@ -133,16 +133,37 @@ class CompressedKlassPointers : public AllStatic {
static NarrowPtrStruct _narrow_klass;
- // CompressedClassSpaceSize set to 1GB, but appear 3GB away from _narrow_ptrs_base during CDS dump.
- static uint64_t _narrow_klass_range;
+ // Together with base, this defines the address range within which Klass
+ // structures will be located: [base, base+range). While the maximal
+ // possible encoding range is 4|32G for shift 0|3, if we know beforehand
+ // the expected range of Klass* pointers will be smaller, a platform
+ // could use this info to optimize encoding.
+ static size_t _range;
-public:
static void set_base(address base);
+ static void set_range(size_t range);
+
+public:
+
static void set_shift(int shift);
- static void set_range(uint64_t range);
+
+
+ // Given an address p, return true if p can be used as an encoding base.
+ // (Some platforms have restrictions of what constitutes a valid base
+ // address).
+ static bool is_valid_base(address p);
+
+ // Given an address range [addr, addr+len) which the encoding is supposed to
+ // cover, choose base, shift and range.
+ // The address range is the expected range of uncompressed Klass pointers we
+ // will encounter (and the implicit promise that there will be no Klass
+ // structures outside this range).
+ static void initialize(address addr, size_t len);
+
+ static void print_mode(outputStream* st);
static address base() { return _narrow_klass._base; }
- static uint64_t range() { return _narrow_klass_range; }
+ static size_t range() { return _range; }
static int shift() { return _narrow_klass._shift; }
static bool is_null(Klass* v) { return v == NULL; }
@@ -153,6 +174,7 @@ class CompressedKlassPointers : public AllStatic {
static inline Klass* decode(narrowKlass v);
static inline narrowKlass encode_not_null(Klass* v);
static inline narrowKlass encode(Klass* v);
+
};
#endif // SHARE_OOPS_COMPRESSEDOOPS_HPP
diff --git a/src/hotspot/share/oops/instanceKlass.cpp b/src/hotspot/share/oops/instanceKlass.cpp
index a6b0b87d61a..406758332fa 100644
--- a/src/hotspot/share/oops/instanceKlass.cpp
+++ b/src/hotspot/share/oops/instanceKlass.cpp
@@ -3087,9 +3087,9 @@ void InstanceKlass::add_osr_nmethod(nmethod* n) {
assert_lock_strong(CompiledMethod_lock);
#ifndef PRODUCT
if (TieredCompilation) {
- nmethod * prev = lookup_osr_nmethod(n->method(), n->osr_entry_bci(), n->comp_level(), true);
- assert(prev == NULL || !prev->is_in_use(),
- "redundunt OSR recompilation detected. memory leak in CodeCache!");
+ nmethod* prev = lookup_osr_nmethod(n->method(), n->osr_entry_bci(), n->comp_level(), true);
+ assert(prev == NULL || !prev->is_in_use() || StressRecompilation,
+ "redundant OSR recompilation detected. memory leak in CodeCache!");
}
#endif
// only one compilation can be active
diff --git a/src/hotspot/share/oops/klass.cpp b/src/hotspot/share/oops/klass.cpp
index 8d093a2d76d..d5e4d76ad48 100644
--- a/src/hotspot/share/oops/klass.cpp
+++ b/src/hotspot/share/oops/klass.cpp
@@ -774,6 +774,7 @@ void Klass::print_on(outputStream* st) const {
#define BULLET " - "
+// Caller needs ResourceMark
void Klass::oop_print_on(oop obj, outputStream* st) {
// print title
st->print_cr("%s ", internal_name());
diff --git a/src/hotspot/share/oops/oopsHierarchy.hpp b/src/hotspot/share/oops/oopsHierarchy.hpp
index a359d13946c..ac5b91bc94b 100644
--- a/src/hotspot/share/oops/oopsHierarchy.hpp
+++ b/src/hotspot/share/oops/oopsHierarchy.hpp
@@ -80,36 +80,28 @@ class oop {
void register_oop();
void unregister_oop();
-public:
- void set_obj(const void* p) {
- _o = (oopDesc*)p;
+ void register_if_checking() {
if (CheckUnhandledOops) register_oop();
}
- oop() { set_obj(NULL); }
- oop(const oop& o) { set_obj(o.obj()); }
- oop(const volatile oop& o) { set_obj(o.obj()); }
- oop(const void* p) { set_obj(p); }
- ~oop() {
+public:
+ oop() : _o(NULL) { register_if_checking(); }
+ oop(const oop& o) : _o(o._o) { register_if_checking(); }
+ oop(const void* p) : _o((oopDesc*)p) { register_if_checking(); }
+ ~oop() {
if (CheckUnhandledOops) unregister_oop();
}
- oopDesc* obj() const volatile { return _o; }
+ oopDesc* obj() const { return _o; }
+ oopDesc* operator->() const { return _o; }
+ operator oopDesc* () const { return _o; }
- // General access
- oopDesc* operator->() const { return obj(); }
- bool operator==(const oop o) const { return obj() == o.obj(); }
- bool operator==(void *p) const { return obj() == p; }
- bool operator!=(const volatile oop o) const { return obj() != o.obj(); }
- bool operator!=(void *p) const { return obj() != p; }
+ bool operator==(const oop& o) const { return _o == o._o; }
+ bool operator==(void *p) const { return _o == p; }
+ bool operator!=(const oop& o) const { return _o != o._o; }
+ bool operator!=(void *p) const { return _o != p; }
- // Assignment
- oop& operator=(const oop& o) { _o = o.obj(); return *this; }
- volatile oop& operator=(const oop& o) volatile { _o = o.obj(); return *this; }
- volatile oop& operator=(const volatile oop& o) volatile { _o = o.obj(); return *this; }
-
- // Explict user conversions
- operator oopDesc* () const volatile { return obj(); }
+ oop& operator=(const oop& o) { _o = o._o; return *this; }
};
template<>
@@ -128,7 +120,6 @@ struct PrimitiveConversions::Translate : public TrueType {
type##Oop() : oop() {} \
type##Oop(const type##Oop& o) : oop(o) {} \
type##Oop(const oop& o) : oop(o) {} \
- type##Oop(const volatile oop& o) : oop(o) {} \
type##Oop(const void* p) : oop(p) {} \
operator type##OopDesc* () const { return (type##OopDesc*)obj(); } \
type##OopDesc* operator->() const { \
@@ -138,14 +129,6 @@ struct PrimitiveConversions::Translate : public TrueType {
oop::operator=(o); \
return *this; \
} \
- volatile type##Oop& operator=(const type##Oop& o) volatile { \
- (void)const_cast(oop::operator=(o)); \
- return *this; \
- } \
- volatile type##Oop& operator=(const volatile type##Oop& o) volatile {\
- (void)const_cast(oop::operator=(o)); \
- return *this; \
- } \
}; \
\
template<> \
diff --git a/src/hotspot/share/opto/c2compiler.cpp b/src/hotspot/share/opto/c2compiler.cpp
index 9495ac2fb43..6f554dea07b 100644
--- a/src/hotspot/share/opto/c2compiler.cpp
+++ b/src/hotspot/share/opto/c2compiler.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -84,6 +84,7 @@ bool C2Compiler::init_c2_runtime() {
}
void C2Compiler::initialize() {
+ assert(!is_c1_or_interpreter_only(), "C2 compiler is launched, it's not c1/interpreter only mode");
// The first compiler thread that gets here will initialize the
// small amount of global state (and runtime stubs) that C2 needs.
diff --git a/src/hotspot/share/opto/generateOptoStub.cpp b/src/hotspot/share/opto/generateOptoStub.cpp
index 9c19b6a4b26..7dc8431e977 100644
--- a/src/hotspot/share/opto/generateOptoStub.cpp
+++ b/src/hotspot/share/opto/generateOptoStub.cpp
@@ -99,7 +99,7 @@ void GraphKit::gen_stub(address C_function,
// users will look at the other fields.
//
Node *adr_sp = basic_plus_adr(top(), thread, in_bytes(JavaThread::last_Java_sp_offset()));
- Node *last_sp = basic_plus_adr(top(), frameptr(), (intptr_t) STACK_BIAS);
+ Node *last_sp = frameptr();
store_to_memory(NULL, adr_sp, last_sp, T_ADDRESS, NoAlias, MemNode::unordered);
// Set _thread_in_native
diff --git a/src/hotspot/share/prims/jni.cpp b/src/hotspot/share/prims/jni.cpp
index a18d4801aa6..3c91e8e2ead 100644
--- a/src/hotspot/share/prims/jni.cpp
+++ b/src/hotspot/share/prims/jni.cpp
@@ -316,23 +316,11 @@ JNI_ENTRY(jclass, jni_DefineClass(JNIEnv *env, const char *name, jobject loaderR
jclass cls = NULL;
DT_RETURN_MARK(DefineClass, jclass, (const jclass&)cls);
- TempNewSymbol class_name = NULL;
- // Since exceptions can be thrown, class initialization can take place
- // if name is NULL no check for class name in .class stream has to be made.
- if (name != NULL) {
- const int str_len = (int)strlen(name);
- if (str_len > Symbol::max_length()) {
- // It's impossible to create this class; the name cannot fit
- // into the constant pool.
- Exceptions::fthrow(THREAD_AND_LOCATION,
- vmSymbols::java_lang_NoClassDefFoundError(),
- "Class name exceeds maximum length of %d: %s",
- Symbol::max_length(),
- name);
- return 0;
- }
- class_name = SymbolTable::new_symbol(name);
- }
+ // Class resolution will get the class name from the .class stream if the name is null.
+ TempNewSymbol class_name = name == NULL ? NULL :
+ SystemDictionary::class_name_symbol(name, vmSymbols::java_lang_NoClassDefFoundError(),
+ CHECK_NULL);
+
ResourceMark rm(THREAD);
ClassFileStream st((u1*)buf, bufLen, NULL, ClassFileStream::verify);
Handle class_loader (THREAD, JNIHandles::resolve(loaderRef));
@@ -374,19 +362,10 @@ JNI_ENTRY(jclass, jni_FindClass(JNIEnv *env, const char *name))
jclass result = NULL;
DT_RETURN_MARK(FindClass, jclass, (const jclass&)result);
- // Sanity check the name: it cannot be null or larger than the maximum size
- // name we can fit in the constant pool.
- if (name == NULL) {
- THROW_MSG_0(vmSymbols::java_lang_NoClassDefFoundError(), "No class name given");
- }
- if ((int)strlen(name) > Symbol::max_length()) {
- Exceptions::fthrow(THREAD_AND_LOCATION,
- vmSymbols::java_lang_NoClassDefFoundError(),
- "Class name exceeds maximum length of %d: %s",
- Symbol::max_length(),
- name);
- return 0;
- }
+ // Note: NoClassDefFoundError is thrown here for compatibility; arguably ClassNotFoundException would be more appropriate.
+ TempNewSymbol class_name =
+ SystemDictionary::class_name_symbol(name, vmSymbols::java_lang_NoClassDefFoundError(),
+ CHECK_NULL);
//%note jni_3
Handle protection_domain;
@@ -418,8 +397,7 @@ JNI_ENTRY(jclass, jni_FindClass(JNIEnv *env, const char *name))
}
}
- TempNewSymbol sym = SymbolTable::new_symbol(name);
- result = find_class_from_class_loader(env, sym, true, loader,
+ result = find_class_from_class_loader(env, class_name, true, loader,
protection_domain, true, thread);
if (log_is_enabled(Debug, class, resolve) && result != NULL) {
diff --git a/src/hotspot/share/prims/jniCheck.cpp b/src/hotspot/share/prims/jniCheck.cpp
index 2370af285c2..47da2257a83 100644
--- a/src/hotspot/share/prims/jniCheck.cpp
+++ b/src/hotspot/share/prims/jniCheck.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -43,6 +43,7 @@
#include "runtime/jfieldIDWorkaround.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/thread.inline.hpp"
+#include "utilities/utf8.hpp"
// Complain every extra number of unplanned local refs
#define CHECK_JNI_LOCAL_REF_CAP_WARN_THRESHOLD 32
@@ -134,6 +135,8 @@ static const char * fatal_wrong_field = "Wrong field ID passed to JNI";
static const char * fatal_instance_field_not_found = "Instance field not found in JNI get/set field operations";
static const char * fatal_instance_field_mismatch = "Field type (instance) mismatch in JNI get/set field operations";
static const char * fatal_non_string = "JNI string operation received a non-string";
+static const char * fatal_non_utf8_class_name1 = "JNI class name is not a valid UTF8 string \"";
+static const char * fatal_non_utf8_class_name2 = "\"";
// When in VM state:
@@ -489,6 +492,13 @@ void jniCheck::validate_class_descriptor(JavaThread* thr, const char* name) {
warn_bad_class_descriptor1, name, warn_bad_class_descriptor2);
ReportJNIWarning(thr, msg);
}
+
+ // Verify that the class name given is a valid utf8 string
+ if (!UTF8::is_legal_utf8((const unsigned char*)name, (int)strlen(name), false)) {
+ char msg[JVM_MAXPATHLEN];
+ jio_snprintf(msg, JVM_MAXPATHLEN, "%s%s%s", fatal_non_utf8_class_name1, name, fatal_non_utf8_class_name2);
+ ReportJNIFatalError(thr, msg);
+ }
}
Klass* jniCheck::validate_class(JavaThread* thr, jclass clazz, bool allow_primitive) {
diff --git a/src/hotspot/share/prims/jvm.cpp b/src/hotspot/share/prims/jvm.cpp
index 600f58ef967..ad6346cbdec 100644
--- a/src/hotspot/share/prims/jvm.cpp
+++ b/src/hotspot/share/prims/jvm.cpp
@@ -810,12 +810,13 @@ JVM_ENTRY(jclass, JVM_FindClassFromBootLoader(JNIEnv* env,
const char* name))
JVMWrapper("JVM_FindClassFromBootLoader");
- // Java libraries should ensure that name is never null...
+ // Java libraries should ensure that name is never null or illegal.
if (name == NULL || (int)strlen(name) > Symbol::max_length()) {
// It's impossible to create this class; the name cannot fit
// into the constant pool.
return NULL;
}
+ assert(UTF8::is_legal_utf8((const unsigned char*)name, (int)strlen(name), false), "illegal UTF name");
TempNewSymbol h_name = SymbolTable::new_symbol(name);
Klass* k = SystemDictionary::resolve_or_null(h_name, CHECK_NULL);
@@ -834,14 +835,10 @@ JVM_ENTRY(jclass, JVM_FindClassFromCaller(JNIEnv* env, const char* name,
jboolean init, jobject loader,
jclass caller))
JVMWrapper("JVM_FindClassFromCaller throws ClassNotFoundException");
- // Java libraries should ensure that name is never null...
- if (name == NULL || (int)strlen(name) > Symbol::max_length()) {
- // It's impossible to create this class; the name cannot fit
- // into the constant pool.
- THROW_MSG_0(vmSymbols::java_lang_ClassNotFoundException(), name);
- }
- TempNewSymbol h_name = SymbolTable::new_symbol(name);
+ TempNewSymbol h_name =
+ SystemDictionary::class_name_symbol(name, vmSymbols::java_lang_ClassNotFoundException(),
+ CHECK_NULL);
oop loader_oop = JNIHandles::resolve(loader);
oop from_class = JNIHandles::resolve(caller);
@@ -870,20 +867,9 @@ JVM_END
JVM_ENTRY(jclass, JVM_FindClassFromClass(JNIEnv *env, const char *name,
jboolean init, jclass from))
JVMWrapper("JVM_FindClassFromClass");
- if (name == NULL) {
- THROW_MSG_0(vmSymbols::java_lang_NoClassDefFoundError(), "No class name given");
- }
- if ((int)strlen(name) > Symbol::max_length()) {
- // It's impossible to create this class; the name cannot fit
- // into the constant pool.
- Exceptions::fthrow(THREAD_AND_LOCATION,
- vmSymbols::java_lang_NoClassDefFoundError(),
- "Class name exceeds maximum length of %d: %s",
- Symbol::max_length(),
- name);
- return 0;
- }
- TempNewSymbol h_name = SymbolTable::new_symbol(name);
+ TempNewSymbol h_name =
+ SystemDictionary::class_name_symbol(name, vmSymbols::java_lang_ClassNotFoundException(),
+ CHECK_NULL);
oop from_class_oop = JNIHandles::resolve(from);
Klass* from_class = (from_class_oop == NULL)
? (Klass*)NULL
@@ -949,23 +935,10 @@ static jclass jvm_define_class_common(JNIEnv *env, const char *name,
ClassLoader::perf_app_classfile_bytes_read()->inc(len);
}
- // Since exceptions can be thrown, class initialization can take place
- // if name is NULL no check for class name in .class stream has to be made.
- TempNewSymbol class_name = NULL;
- if (name != NULL) {
- const int str_len = (int)strlen(name);
- if (str_len > Symbol::max_length()) {
- // It's impossible to create this class; the name cannot fit
- // into the constant pool.
- Exceptions::fthrow(THREAD_AND_LOCATION,
- vmSymbols::java_lang_NoClassDefFoundError(),
- "Class name exceeds maximum length of %d: %s",
- Symbol::max_length(),
- name);
- return 0;
- }
- class_name = SymbolTable::new_symbol(name, str_len);
- }
+ // Class resolution will get the class name from the .class stream if the name is null.
+ TempNewSymbol class_name = name == NULL ? NULL :
+ SystemDictionary::class_name_symbol(name, vmSymbols::java_lang_NoClassDefFoundError(),
+ CHECK_NULL);
ResourceMark rm(THREAD);
ClassFileStream st((u1*)buf, len, source, ClassFileStream::verify);
@@ -1054,24 +1027,10 @@ static jclass jvm_lookup_define_class(JNIEnv *env, jclass lookup, const char *na
}
}
-
- // Since exceptions can be thrown, class initialization can take place
- // if name is NULL no check for class name in .class stream has to be made.
- TempNewSymbol class_name = NULL;
- if (name != NULL) {
- const int str_len = (int)strlen(name);
- if (str_len > Symbol::max_length()) {
- // It's impossible to create this class; the name cannot fit
- // into the constant pool.
- Exceptions::fthrow(THREAD_AND_LOCATION,
- vmSymbols::java_lang_NoClassDefFoundError(),
- "Class name exceeds maximum length of %d: %s",
- Symbol::max_length(),
- name);
- return 0;
- }
- class_name = SymbolTable::new_symbol(name, str_len);
- }
+ // Class resolution will get the class name from the .class stream if the name is null.
+ TempNewSymbol class_name = name == NULL ? NULL :
+ SystemDictionary::class_name_symbol(name, vmSymbols::java_lang_NoClassDefFoundError(),
+ CHECK_NULL);
Handle protection_domain (THREAD, JNIHandles::resolve(pd));
const char* source = is_nestmate ? host_class->external_name() : "__JVM_LookupDefineClass__";
diff --git a/src/hotspot/share/runtime/arguments.cpp b/src/hotspot/share/runtime/arguments.cpp
index bbb845b1fc7..833d7169eff 100644
--- a/src/hotspot/share/runtime/arguments.cpp
+++ b/src/hotspot/share/runtime/arguments.cpp
@@ -560,6 +560,9 @@ static SpecialFlag const special_jvm_flags[] = {
#endif // !X86
{ "UseAdaptiveGCBoundary", JDK_Version::undefined(), JDK_Version::jdk(15), JDK_Version::jdk(16) },
{ "MonitorBound", JDK_Version::jdk(14), JDK_Version::jdk(15), JDK_Version::jdk(16) },
+#ifdef AARCH64
+ { "UseBarriersForVolatile", JDK_Version::undefined(), JDK_Version::jdk(15), JDK_Version::jdk(16) },
+#endif
#ifdef TEST_VERIFY_SPECIAL_JVM_FLAGS
// These entries will generate build errors. Their purpose is to test the macros.
@@ -599,7 +602,6 @@ static AliasedFlag const aliased_jvm_flags[] = {
// NOTE: A compatibility request will be necessary for each alias to be removed.
static AliasedLoggingFlag const aliased_logging_flags[] = {
- { "PrintCompressedOopsMode", LogLevel::Info, true, LOG_TAGS(gc, heap, coops) },
{ "PrintSharedSpaces", LogLevel::Info, true, LOG_TAGS(cds) },
{ "TraceBiasedLocking", LogLevel::Info, true, LOG_TAGS(biasedlocking) },
{ "TraceClassLoading", LogLevel::Info, true, LOG_TAGS(class, load) },
diff --git a/src/hotspot/share/runtime/thread.cpp b/src/hotspot/share/runtime/thread.cpp
index 4c3f916733c..d87602960e6 100644
--- a/src/hotspot/share/runtime/thread.cpp
+++ b/src/hotspot/share/runtime/thread.cpp
@@ -1665,7 +1665,7 @@ void JavaThread::initialize() {
}
#endif // INCLUDE_JVMCI
_reserved_stack_activation = NULL; // stack base not known yet
- (void)const_cast(_exception_oop = oop(NULL));
+ set_exception_oop(oop());
_exception_pc = 0;
_exception_handler_pc = 0;
_is_method_handle_return = 0;
@@ -2252,6 +2252,13 @@ bool JavaThread::is_lock_owned(address adr) const {
return false;
}
+oop JavaThread::exception_oop() const {
+ return Atomic::load(&_exception_oop);
+}
+
+void JavaThread::set_exception_oop(oop o) {
+ Atomic::store(&_exception_oop, o);
+}
void JavaThread::add_monitor_chunk(MonitorChunk* chunk) {
chunk->set_next(monitor_chunks());
diff --git a/src/hotspot/share/runtime/thread.hpp b/src/hotspot/share/runtime/thread.hpp
index dd01a61c57c..b33021d63f8 100644
--- a/src/hotspot/share/runtime/thread.hpp
+++ b/src/hotspot/share/runtime/thread.hpp
@@ -1574,12 +1574,12 @@ class JavaThread: public Thread {
#endif // INCLUDE_JVMCI
// Exception handling for compiled methods
- oop exception_oop() const { return _exception_oop; }
+ oop exception_oop() const;
address exception_pc() const { return _exception_pc; }
address exception_handler_pc() const { return _exception_handler_pc; }
bool is_method_handle_return() const { return _is_method_handle_return == 1; }
- void set_exception_oop(oop o) { (void)const_cast(_exception_oop = o); }
+ void set_exception_oop(oop o);
void set_exception_pc(address a) { _exception_pc = a; }
void set_exception_handler_pc(address a) { _exception_handler_pc = a; }
void set_is_method_handle_return(bool value) { _is_method_handle_return = value ? 1 : 0; }
diff --git a/src/hotspot/share/runtime/vmStructs.cpp b/src/hotspot/share/runtime/vmStructs.cpp
index 0361b79c34d..1509b77f609 100644
--- a/src/hotspot/share/runtime/vmStructs.cpp
+++ b/src/hotspot/share/runtime/vmStructs.cpp
@@ -2036,12 +2036,6 @@ typedef HashtableEntry KlassHashtableEntry;
\
declare_preprocessor_constant("ASSERT", DEBUG_ONLY(1) NOT_DEBUG(0)) \
\
- /**************/ \
- /* Stack bias */ \
- /**************/ \
- \
- declare_preprocessor_constant("STACK_BIAS", STACK_BIAS) \
- \
/****************/ \
/* Object sizes */ \
/****************/ \
diff --git a/src/hotspot/share/services/memoryManager.cpp b/src/hotspot/share/services/memoryManager.cpp
index 61041f63509..681ec280c6a 100644
--- a/src/hotspot/share/services/memoryManager.cpp
+++ b/src/hotspot/share/services/memoryManager.cpp
@@ -38,10 +38,8 @@
#include "services/gcNotifier.hpp"
#include "utilities/dtrace.hpp"
-MemoryManager::MemoryManager(const char* name) : _name(name) {
- _num_pools = 0;
- (void)const_cast(_memory_mgr_obj = instanceOop(NULL));
-}
+MemoryManager::MemoryManager(const char* name) :
+ _num_pools(0), _name(name), _memory_mgr_obj() {}
int MemoryManager::add_pool(MemoryPool* pool) {
int index = _num_pools;
@@ -54,6 +52,10 @@ int MemoryManager::add_pool(MemoryPool* pool) {
return index;
}
+bool MemoryManager::is_manager(instanceHandle mh) const {
+ return mh() == Atomic::load(&_memory_mgr_obj);
+}
+
MemoryManager* MemoryManager::get_code_cache_memory_manager() {
return new MemoryManager("CodeCacheManager");
}
diff --git a/src/hotspot/share/services/memoryManager.hpp b/src/hotspot/share/services/memoryManager.hpp
index 5cc4e20d736..dbd3312c960 100644
--- a/src/hotspot/share/services/memoryManager.hpp
+++ b/src/hotspot/share/services/memoryManager.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -70,7 +70,7 @@ class MemoryManager : public CHeapObj {
int add_pool(MemoryPool* pool);
- bool is_manager(instanceHandle mh) { return mh() == _memory_mgr_obj; }
+ bool is_manager(instanceHandle mh) const;
virtual instanceOop get_memory_manager_instance(TRAPS);
virtual bool is_gc_memory_manager() { return false; }
diff --git a/src/hotspot/share/services/memoryPool.cpp b/src/hotspot/share/services/memoryPool.cpp
index 1a4dc69a228..32f3261016d 100644
--- a/src/hotspot/share/services/memoryPool.cpp
+++ b/src/hotspot/share/services/memoryPool.cpp
@@ -42,24 +42,27 @@ MemoryPool::MemoryPool(const char* name,
size_t init_size,
size_t max_size,
bool support_usage_threshold,
- bool support_gc_threshold) {
- _name = name;
- _initial_size = init_size;
- _max_size = max_size;
- (void)const_cast<instanceOop&>(_memory_pool_obj = instanceOop(NULL));
- _available_for_allocation = true;
- _num_managers = 0;
- _type = type;
-
- // initialize the max and init size of collection usage
- _after_gc_usage = MemoryUsage(_initial_size, 0, 0, _max_size);
-
- _usage_sensor = NULL;
- _gc_usage_sensor = NULL;
+ bool support_gc_threshold) :
+ _name(name),
+ _type(type),
+ _initial_size(init_size),
+ _max_size(max_size),
+ _available_for_allocation(true),
+ _managers(),
+ _num_managers(0),
+ _peak_usage(),
+ _after_gc_usage(init_size, 0, 0, max_size),
// usage threshold supports both high and low threshold
- _usage_threshold = new ThresholdSupport(support_usage_threshold, support_usage_threshold);
+ _usage_threshold(new ThresholdSupport(support_usage_threshold, support_usage_threshold)),
// gc usage threshold supports only high threshold
- _gc_usage_threshold = new ThresholdSupport(support_gc_threshold, support_gc_threshold);
+ _gc_usage_threshold(new ThresholdSupport(support_gc_threshold, support_gc_threshold)),
+ _usage_sensor(),
+ _gc_usage_sensor(),
+ _memory_pool_obj()
+{}
+
+bool MemoryPool::is_pool(instanceHandle pool) const {
+ return pool() == Atomic::load(&_memory_pool_obj);
}
void MemoryPool::add_manager(MemoryManager* mgr) {
diff --git a/src/hotspot/share/services/memoryPool.hpp b/src/hotspot/share/services/memoryPool.hpp
index 9b46badf44b..70cf6c73e17 100644
--- a/src/hotspot/share/services/memoryPool.hpp
+++ b/src/hotspot/share/services/memoryPool.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -95,7 +95,7 @@ class MemoryPool : public CHeapObj {
// max size could be changed
virtual size_t max_size() const { return _max_size; }
- bool is_pool(instanceHandle pool) { return pool() == _memory_pool_obj; }
+ bool is_pool(instanceHandle pool) const;
bool available_for_allocation() { return _available_for_allocation; }
bool set_available_for_allocation(bool value) {
diff --git a/src/hotspot/share/services/virtualMemoryTracker.cpp b/src/hotspot/share/services/virtualMemoryTracker.cpp
index 5dc134e00cd..0a8364022ac 100644
--- a/src/hotspot/share/services/virtualMemoryTracker.cpp
+++ b/src/hotspot/share/services/virtualMemoryTracker.cpp
@@ -403,7 +403,8 @@ void VirtualMemoryTracker::set_reserved_region_type(address addr, MEMFLAGS flag)
if (reserved_rgn != NULL) {
assert(reserved_rgn->contain_address(addr), "Containment");
if (reserved_rgn->flag() != flag) {
- assert(reserved_rgn->flag() == mtNone, "Overwrite memory type");
+ assert(reserved_rgn->flag() == mtNone, "Overwrite memory type (should be mtNone, is: \"%s\")",
+ NMTUtil::flag_to_name(reserved_rgn->flag()));
reserved_rgn->set_flag(flag);
}
}
diff --git a/src/hotspot/share/utilities/bitMap.cpp b/src/hotspot/share/utilities/bitMap.cpp
index 0040d9ad544..8dcc1da6ddc 100644
--- a/src/hotspot/share/utilities/bitMap.cpp
+++ b/src/hotspot/share/utilities/bitMap.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -87,7 +87,13 @@ BitMap::bm_word_t* BitMap::reallocate(const Allocator& allocator, bm_word_t* old
MIN2(old_size_in_words, new_size_in_words));
}
- if (clear && new_size_in_words > old_size_in_words) {
+ if (clear && (new_size_in_bits > old_size_in_bits)) {
+ // If old_size_in_bits is not word-aligned, then the preceding
+ // copy can include some trailing bits in the final copied word
+ // that also need to be cleared. See clear_range_within_word.
+ bm_word_t mask = bit_mask(old_size_in_bits) - 1;
+ map[raw_to_words_align_down(old_size_in_bits)] &= mask;
+ // Clear the remaining full words.
clear_range_of_words(map, old_size_in_words, new_size_in_words);
}
}
@@ -668,12 +674,11 @@ BitMap::idx_t BitMap::count_one_bits_within_word(idx_t beg, idx_t end) const {
}
BitMap::idx_t BitMap::count_one_bits() const {
- return count_one_bits_in_range_of_words(0, size_in_words());
+ return count_one_bits(0, size());
}
// Returns the number of bits set within [beg, end).
BitMap::idx_t BitMap::count_one_bits(idx_t beg, idx_t end) const {
-
verify_range(beg, end);
idx_t beg_full_word = to_words_align_up(beg);
diff --git a/src/hotspot/share/utilities/globalDefinitions.hpp b/src/hotspot/share/utilities/globalDefinitions.hpp
index 7ed1d8b300a..279b0e292a5 100644
--- a/src/hotspot/share/utilities/globalDefinitions.hpp
+++ b/src/hotspot/share/utilities/globalDefinitions.hpp
@@ -790,9 +790,6 @@ class JavaValue {
};
-#define STACK_BIAS 0
-
-
// TosState describes the top-of-stack state before and after the execution of
// a bytecode or method. The top-of-stack value may be cached in one or more CPU
// registers. The TosState corresponds to the 'machine representation' of this cached
diff --git a/src/hotspot/share/utilities/vmError.cpp b/src/hotspot/share/utilities/vmError.cpp
index db066efe846..a1323157749 100644
--- a/src/hotspot/share/utilities/vmError.cpp
+++ b/src/hotspot/share/utilities/vmError.cpp
@@ -29,6 +29,8 @@
#include "compiler/disassembler.hpp"
#include "gc/shared/gcConfig.hpp"
#include "logging/logConfiguration.hpp"
+#include "memory/metaspace.hpp"
+#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.hpp"
@@ -907,15 +909,19 @@ void VMError::report(outputStream* st, bool _verbose) {
st->cr();
}
+#ifdef _LP64
STEP("printing compressed oops mode")
if (_verbose && UseCompressedOops) {
CompressedOops::print_mode(st);
if (UseCompressedClassPointers) {
+ CDS_ONLY(MetaspaceShared::print_on(st);)
Metaspace::print_compressed_class_space(st);
+ CompressedKlassPointers::print_mode(st);
}
st->cr();
}
+#endif
STEP("printing heap information")
@@ -1108,15 +1114,18 @@ void VMError::print_vm_info(outputStream* st) {
st->cr();
}
+#ifdef _LP64
// STEP("printing compressed oops mode")
-
if (UseCompressedOops) {
CompressedOops::print_mode(st);
if (UseCompressedClassPointers) {
+ CDS_ONLY(MetaspaceShared::print_on(st);)
Metaspace::print_compressed_class_space(st);
+ CompressedKlassPointers::print_mode(st);
}
st->cr();
}
+#endif
// STEP("printing heap information")
diff --git a/src/java.base/macosx/native/libjli/java_md_macosx.m b/src/java.base/macosx/native/libjli/java_md_macosx.m
index 38911ccaabb..cf55e05f35d 100644
--- a/src/java.base/macosx/native/libjli/java_md_macosx.m
+++ b/src/java.base/macosx/native/libjli/java_md_macosx.m
@@ -633,18 +633,6 @@ static void MacOSXStartup(int argc, char *argv[]) {
return exec_path;
}
-/*
- * BSD's implementation of CounterGet()
- */
-int64_t
-CounterGet()
-{
- struct timeval tv;
- gettimeofday(&tv, NULL);
- return (tv.tv_sec * 1000000) + tv.tv_usec;
-}
-
-
/* --- Splash Screen shared library support --- */
static JavaVM* SetJavaVMValue()
diff --git a/src/java.base/share/classes/java/lang/StringConcatHelper.java b/src/java.base/share/classes/java/lang/StringConcatHelper.java
index 41fa6cca15d..6c5ce33fecc 100644
--- a/src/java.base/share/classes/java/lang/StringConcatHelper.java
+++ b/src/java.base/share/classes/java/lang/StringConcatHelper.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -188,12 +188,12 @@ private static long prepend(long indexCoder, byte[] buf, boolean value) {
* @param indexCoder final char index in the buffer, along with coder packed
* into higher bits.
* @param buf buffer to append to
- * @param prefix a constant to prepend before value
* @param value boolean value to encode
+ * @param prefix a constant to prepend before value
* @param suffix a constant to prepend after value
* @return updated index (coder value retained)
*/
- static long prepend(long indexCoder, byte[] buf, String prefix, boolean value, String suffix) {
+ static long prepend(long indexCoder, byte[] buf, boolean value, String prefix, String suffix) {
if (suffix != null) indexCoder = prepend(indexCoder, buf, suffix);
indexCoder = prepend(indexCoder, buf, value);
if (prefix != null) indexCoder = prepend(indexCoder, buf, prefix);
@@ -207,12 +207,12 @@ static long prepend(long indexCoder, byte[] buf, String prefix, boolean value, S
* @param indexCoder final char index in the buffer, along with coder packed
* into higher bits.
* @param buf buffer to append to
- * @param prefix a constant to prepend before value
* @param value boolean value to encode
+ * @param prefix a constant to prepend before value
* @param suffix a constant to prepend after value
* @return updated index (coder value retained)
*/
- static long prepend(long indexCoder, byte[] buf, String prefix, byte value, String suffix) {
+ static long prepend(long indexCoder, byte[] buf, byte value, String prefix, String suffix) {
if (suffix != null) indexCoder = prepend(indexCoder, buf, suffix);
indexCoder = prepend(indexCoder, buf, (int)value);
if (prefix != null) indexCoder = prepend(indexCoder, buf, prefix);
@@ -245,12 +245,12 @@ private static long prepend(long indexCoder, byte[] buf, char value) {
* @param indexCoder final char index in the buffer, along with coder packed
* into higher bits.
* @param buf buffer to append to
- * @param prefix a constant to prepend before value
* @param value boolean value to encode
+ * @param prefix a constant to prepend before value
* @param suffix a constant to prepend after value
* @return updated index (coder value retained)
*/
- static long prepend(long indexCoder, byte[] buf, String prefix, char value, String suffix) {
+ static long prepend(long indexCoder, byte[] buf, char value, String prefix, String suffix) {
if (suffix != null) indexCoder = prepend(indexCoder, buf, suffix);
indexCoder = prepend(indexCoder, buf, value);
if (prefix != null) indexCoder = prepend(indexCoder, buf, prefix);
@@ -264,12 +264,12 @@ static long prepend(long indexCoder, byte[] buf, String prefix, char value, Stri
* @param indexCoder final char index in the buffer, along with coder packed
* into higher bits.
* @param buf buffer to append to
- * @param prefix a constant to prepend before value
* @param value boolean value to encode
+ * @param prefix a constant to prepend before value
* @param suffix a constant to prepend after value
* @return updated index (coder value retained)
*/
- static long prepend(long indexCoder, byte[] buf, String prefix, short value, String suffix) {
+ static long prepend(long indexCoder, byte[] buf, short value, String prefix, String suffix) {
if (suffix != null) indexCoder = prepend(indexCoder, buf, suffix);
indexCoder = prepend(indexCoder, buf, (int)value);
if (prefix != null) indexCoder = prepend(indexCoder, buf, prefix);
@@ -301,12 +301,12 @@ private static long prepend(long indexCoder, byte[] buf, int value) {
* @param indexCoder final char index in the buffer, along with coder packed
* into higher bits.
* @param buf buffer to append to
- * @param prefix a constant to prepend before value
* @param value boolean value to encode
+ * @param prefix a constant to prepend before value
* @param suffix a constant to prepend after value
* @return updated index (coder value retained)
*/
- static long prepend(long indexCoder, byte[] buf, String prefix, int value, String suffix) {
+ static long prepend(long indexCoder, byte[] buf, int value, String prefix, String suffix) {
if (suffix != null) indexCoder = prepend(indexCoder, buf, suffix);
indexCoder = prepend(indexCoder, buf, value);
if (prefix != null) indexCoder = prepend(indexCoder, buf, prefix);
@@ -338,12 +338,12 @@ private static long prepend(long indexCoder, byte[] buf, long value) {
* @param indexCoder final char index in the buffer, along with coder packed
* into higher bits.
* @param buf buffer to append to
- * @param prefix a constant to prepend before value
* @param value boolean value to encode
+ * @param prefix a constant to prepend before value
* @param suffix a constant to prepend after value
* @return updated index (coder value retained)
*/
- static long prepend(long indexCoder, byte[] buf, String prefix, long value, String suffix) {
+ static long prepend(long indexCoder, byte[] buf, long value, String prefix, String suffix) {
if (suffix != null) indexCoder = prepend(indexCoder, buf, suffix);
indexCoder = prepend(indexCoder, buf, value);
if (prefix != null) indexCoder = prepend(indexCoder, buf, prefix);
@@ -377,12 +377,12 @@ private static long prepend(long indexCoder, byte[] buf, String value) {
* @param indexCoder final char index in the buffer, along with coder packed
* into higher bits.
* @param buf buffer to append to
- * @param prefix a constant to prepend before value
* @param value boolean value to encode
+ * @param prefix a constant to prepend before value
* @param suffix a constant to prepend after value
* @return updated index (coder value retained)
*/
- static long prepend(long indexCoder, byte[] buf, String prefix, String value, String suffix) {
+ static long prepend(long indexCoder, byte[] buf, String value, String prefix, String suffix) {
if (suffix != null) indexCoder = prepend(indexCoder, buf, suffix);
indexCoder = prepend(indexCoder, buf, value);
if (prefix != null) indexCoder = prepend(indexCoder, buf, prefix);
diff --git a/src/java.base/share/classes/java/lang/System.java b/src/java.base/share/classes/java/lang/System.java
index 6ac9d5535e4..4bc6f30d473 100644
--- a/src/java.base/share/classes/java/lang/System.java
+++ b/src/java.base/share/classes/java/lang/System.java
@@ -2287,6 +2287,10 @@ public long stringConcatInitialCoder() {
return StringConcatHelper.initialCoder();
}
+ public long stringConcatMix(long lengthCoder, String constant) {
+ return StringConcatHelper.mix(lengthCoder, constant);
+ }
+
public Object classData(Class<?> c) {
return c.getClassData();
}
diff --git a/src/java.base/share/classes/java/lang/invoke/Invokers.java b/src/java.base/share/classes/java/lang/invoke/Invokers.java
index 7416abb4493..3cc96dc49aa 100644
--- a/src/java.base/share/classes/java/lang/invoke/Invokers.java
+++ b/src/java.base/share/classes/java/lang/invoke/Invokers.java
@@ -365,7 +365,7 @@ private static LambdaForm varHandleMethodGenericLinkerHandleForm(VarHandle.Acces
final int ARG_LIMIT = ARG_BASE + mtype.parameterCount();
int nameCursor = ARG_LIMIT;
final int VAD_ARG = nameCursor++;
- final int UNBOUND_VH = nameCursor++;
+ final int UNBOUND_VH = nameCursor++;
final int CHECK_TYPE = nameCursor++;
final int CHECK_CUSTOM = (CUSTOMIZE_THRESHOLD >= 0) ? nameCursor++ : -1;
final int LINKER_CALL = nameCursor++;
diff --git a/src/java.base/share/classes/java/lang/invoke/MemoryAccessVarHandleGenerator.java b/src/java.base/share/classes/java/lang/invoke/MemoryAccessVarHandleGenerator.java
index c33da7bb57c..6e66fc1ab42 100644
--- a/src/java.base/share/classes/java/lang/invoke/MemoryAccessVarHandleGenerator.java
+++ b/src/java.base/share/classes/java/lang/invoke/MemoryAccessVarHandleGenerator.java
@@ -26,9 +26,10 @@
package java.lang.invoke;
import jdk.internal.access.foreign.MemoryAddressProxy;
-import jdk.internal.misc.Unsafe;
import jdk.internal.org.objectweb.asm.ClassReader;
import jdk.internal.org.objectweb.asm.ClassWriter;
+import jdk.internal.org.objectweb.asm.ConstantDynamic;
+import jdk.internal.org.objectweb.asm.Handle;
import jdk.internal.org.objectweb.asm.MethodVisitor;
import jdk.internal.org.objectweb.asm.Opcodes;
import jdk.internal.org.objectweb.asm.Type;
@@ -42,10 +43,10 @@
import java.io.IOException;
import java.io.PrintWriter;
import java.io.StringWriter;
-import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
+import static jdk.internal.org.objectweb.asm.Opcodes.AALOAD;
import static jdk.internal.org.objectweb.asm.Opcodes.ACC_FINAL;
import static jdk.internal.org.objectweb.asm.Opcodes.ACC_PRIVATE;
import static jdk.internal.org.objectweb.asm.Opcodes.ACC_PUBLIC;
@@ -53,10 +54,16 @@
import static jdk.internal.org.objectweb.asm.Opcodes.ACC_SUPER;
import static jdk.internal.org.objectweb.asm.Opcodes.ALOAD;
import static jdk.internal.org.objectweb.asm.Opcodes.ARETURN;
+import static jdk.internal.org.objectweb.asm.Opcodes.ASTORE;
import static jdk.internal.org.objectweb.asm.Opcodes.BIPUSH;
import static jdk.internal.org.objectweb.asm.Opcodes.CHECKCAST;
import static jdk.internal.org.objectweb.asm.Opcodes.GETFIELD;
+import static jdk.internal.org.objectweb.asm.Opcodes.GETSTATIC;
+import static jdk.internal.org.objectweb.asm.Opcodes.H_INVOKESTATIC;
import static jdk.internal.org.objectweb.asm.Opcodes.ICONST_0;
+import static jdk.internal.org.objectweb.asm.Opcodes.ICONST_1;
+import static jdk.internal.org.objectweb.asm.Opcodes.ICONST_2;
+import static jdk.internal.org.objectweb.asm.Opcodes.ICONST_3;
import static jdk.internal.org.objectweb.asm.Opcodes.ILOAD;
import static jdk.internal.org.objectweb.asm.Opcodes.INVOKESPECIAL;
import static jdk.internal.org.objectweb.asm.Opcodes.INVOKESTATIC;
@@ -66,10 +73,12 @@
import static jdk.internal.org.objectweb.asm.Opcodes.LLOAD;
import static jdk.internal.org.objectweb.asm.Opcodes.NEWARRAY;
import static jdk.internal.org.objectweb.asm.Opcodes.PUTFIELD;
+import static jdk.internal.org.objectweb.asm.Opcodes.PUTSTATIC;
import static jdk.internal.org.objectweb.asm.Opcodes.RETURN;
import static jdk.internal.org.objectweb.asm.Opcodes.DUP;
import static jdk.internal.org.objectweb.asm.Opcodes.SIPUSH;
import static jdk.internal.org.objectweb.asm.Opcodes.T_LONG;
+import static jdk.internal.org.objectweb.asm.Opcodes.V14;
class MemoryAccessVarHandleGenerator {
private static final String DEBUG_DUMP_CLASSES_DIR_PROPERTY = "jdk.internal.foreign.ClassGenerator.DEBUG_DUMP_CLASSES_DIR";
@@ -117,13 +126,12 @@ class MemoryAccessVarHandleGenerator {
}
}
- private static final Unsafe U = Unsafe.getUnsafe();
-
private final String implClassName;
private final int dimensions;
private final Class> carrier;
private final Class> helperClass;
private final VarForm form;
+ private final Object[] classData;
MemoryAccessVarHandleGenerator(Class> carrier, int dims) {
this.dimensions = dims;
@@ -133,6 +141,10 @@ class MemoryAccessVarHandleGenerator {
this.form = new VarForm(BASE_CLASS, MemoryAddressProxy.class, carrier, components);
this.helperClass = helperClassCache.get(carrier);
this.implClassName = helperClass.getName().replace('.', '/') + dimensions;
+ // live constants
+ Class<?>[] intermediate = new Class<?>[dimensions];
+ Arrays.fill(intermediate, long.class);
+ this.classData = new Object[] { carrier, intermediate, ADD_OFFSETS_HANDLE, MUL_OFFSETS_HANDLE };
}
/*
@@ -140,18 +152,24 @@ class MemoryAccessVarHandleGenerator {
* The factory has type (ZJJ[J)VarHandle.
*/
MethodHandle generateHandleFactory() {
- Class> implCls = generateClass();
+ byte[] classBytes = generateClassBytes();
+ if (DEBUG_DUMP_CLASSES_DIR != null) {
+ debugWriteClassToFile(classBytes);
+ }
try {
+ MethodHandles.Lookup lookup = MethodHandles.lookup().defineHiddenClassWithClassData(classBytes, classData);
+ Class> implCls = lookup.lookupClass();
Class<?>[] components = new Class<?>[dimensions];
Arrays.fill(components, long.class);
VarForm form = new VarForm(implCls, MemoryAddressProxy.class, carrier, components);
MethodType constrType = MethodType.methodType(void.class, VarForm.class, boolean.class, long.class, long.class, long.class, long[].class);
- MethodHandle constr = MethodHandles.Lookup.IMPL_LOOKUP.findConstructor(implCls, constrType);
+ MethodHandle constr = lookup.findConstructor(implCls, constrType);
constr = MethodHandles.insertArguments(constr, 0, form);
return constr;
} catch (Throwable ex) {
+ debugPrintClass(classBytes);
throw new AssertionError(ex);
}
}
@@ -160,20 +178,22 @@ MethodHandle generateHandleFactory() {
* Generate a specialized VarHandle class for given carrier
* and access coordinates.
*/
- Class<?> generateClass() {
- BinderClassWriter cw = new BinderClassWriter();
+ byte[] generateClassBytes() {
+ ClassWriter cw = new ClassWriter(ClassWriter.COMPUTE_FRAMES | ClassWriter.COMPUTE_MAXS);
if (DEBUG) {
System.out.println("Generating header implementation class");
}
- cw.visit(52, ACC_PUBLIC | ACC_SUPER, implClassName, null, Type.getInternalName(BASE_CLASS), null);
+ cw.visit(V14, ACC_PUBLIC | ACC_SUPER, implClassName, null, Type.getInternalName(BASE_CLASS), null);
//add dimension fields
for (int i = 0; i < dimensions; i++) {
cw.visitField(ACC_PRIVATE | ACC_FINAL, "dim" + i, "J", null, null);
}
+ addStaticInitializer(cw);
+
addConstructor(cw);
addAccessModeTypeMethod(cw);
@@ -186,13 +206,53 @@ Class> generateClass() {
addAccessModeMethodIfNeeded(mode, cw);
}
-
cw.visitEnd();
- byte[] classBytes = cw.toByteArray();
- return defineClass(cw, classBytes);
+ return cw.toByteArray();
}
- void addConstructor(BinderClassWriter cw) {
+ void addStaticInitializer(ClassWriter cw) {
+ // carrier and intermediate
+ cw.visitField(ACC_PRIVATE | ACC_STATIC | ACC_FINAL, "carrier", Class.class.descriptorString(), null, null);
+ cw.visitField(ACC_PRIVATE | ACC_STATIC | ACC_FINAL, "intermediate", Class[].class.descriptorString(), null, null);
+ cw.visitField(ACC_PRIVATE | ACC_STATIC | ACC_FINAL, "addHandle", MethodHandle.class.descriptorString(), null, null);
+ cw.visitField(ACC_PRIVATE | ACC_STATIC | ACC_FINAL, "mulHandle", MethodHandle.class.descriptorString(), null, null);
+
+ MethodVisitor mv = cw.visitMethod(Opcodes.ACC_STATIC, "<clinit>", "()V", null, null);
+ mv.visitCode();
+ // extract class data in static final fields
+ MethodType mtype = MethodType.methodType(Object.class, MethodHandles.Lookup.class, String.class, Class.class);
+ Handle bsm = new Handle(H_INVOKESTATIC, Type.getInternalName(MethodHandles.class), "classData",
+ mtype.descriptorString(), false);
+ ConstantDynamic dynamic = new ConstantDynamic("classData", Object[].class.descriptorString(), bsm);
+ mv.visitLdcInsn(dynamic);
+ mv.visitTypeInsn(CHECKCAST, Type.getInternalName(Object[].class));
+ mv.visitVarInsn(ASTORE, 0);
+ mv.visitVarInsn(ALOAD, 0);
+ mv.visitInsn(ICONST_0);
+ mv.visitInsn(AALOAD);
+ mv.visitTypeInsn(CHECKCAST, Type.getInternalName(Class.class));
+ mv.visitFieldInsn(PUTSTATIC, implClassName, "carrier", Class.class.descriptorString());
+ mv.visitVarInsn(ALOAD, 0);
+ mv.visitInsn(ICONST_1);
+ mv.visitInsn(AALOAD);
+ mv.visitTypeInsn(CHECKCAST, Type.getInternalName(Class[].class));
+ mv.visitFieldInsn(PUTSTATIC, implClassName, "intermediate", Class[].class.descriptorString());
+ mv.visitVarInsn(ALOAD, 0);
+ mv.visitInsn(ICONST_2);
+ mv.visitInsn(AALOAD);
+ mv.visitTypeInsn(CHECKCAST, Type.getInternalName(MethodHandle.class));
+ mv.visitFieldInsn(PUTSTATIC, implClassName, "addHandle", MethodHandle.class.descriptorString());
+ mv.visitVarInsn(ALOAD, 0);
+ mv.visitInsn(ICONST_3);
+ mv.visitInsn(AALOAD);
+ mv.visitTypeInsn(CHECKCAST, Type.getInternalName(MethodHandle.class));
+ mv.visitFieldInsn(PUTSTATIC, implClassName, "mulHandle", MethodHandle.class.descriptorString());
+ mv.visitInsn(Opcodes.RETURN);
+ mv.visitMaxs(0, 0);
+ mv.visitEnd();
+ }
+
+ void addConstructor(ClassWriter cw) {
MethodType constrType = MethodType.methodType(void.class, VarForm.class, boolean.class, long.class, long.class, long.class, long[].class);
MethodVisitor mv = cw.visitMethod(0, "<init>", constrType.toMethodDescriptorString(), null, null);
mv.visitCode();
@@ -219,21 +279,16 @@ void addConstructor(BinderClassWriter cw) {
mv.visitEnd();
}
- void addAccessModeTypeMethod(BinderClassWriter cw) {
+ void addAccessModeTypeMethod(ClassWriter cw) {
MethodType modeMethType = MethodType.methodType(MethodType.class, VarHandle.AccessMode.class);
MethodVisitor mv = cw.visitMethod(ACC_FINAL, "accessModeTypeUncached", modeMethType.toMethodDescriptorString(), null, null);
mv.visitCode();
mv.visitVarInsn(ALOAD, 1);
- mv.visitFieldInsn(GETFIELD, Type.getInternalName(VarHandle.AccessMode.class), "at", Type.getDescriptor(VarHandle.AccessType.class));
- mv.visitLdcInsn(cw.makeConstantPoolPatch(MemoryAddressProxy.class));
- mv.visitTypeInsn(CHECKCAST, Type.getInternalName(Class.class));
- mv.visitLdcInsn(cw.makeConstantPoolPatch(carrier));
+ mv.visitFieldInsn(GETFIELD, Type.getInternalName(VarHandle.AccessMode.class), "at", VarHandle.AccessType.class.descriptorString());
+ mv.visitLdcInsn(Type.getType(MemoryAddressProxy.class));
mv.visitTypeInsn(CHECKCAST, Type.getInternalName(Class.class));
-
- Class<?>[] dims = new Class<?>[dimensions];
- Arrays.fill(dims, long.class);
- mv.visitLdcInsn(cw.makeConstantPoolPatch(dims));
- mv.visitTypeInsn(CHECKCAST, Type.getInternalName(Class[].class));
+ mv.visitFieldInsn(GETSTATIC, implClassName, "carrier", Class.class.descriptorString());
+ mv.visitFieldInsn(GETSTATIC, implClassName, "intermediate", Class[].class.descriptorString());
mv.visitMethodInsn(INVOKEVIRTUAL, Type.getInternalName(VarHandle.AccessType.class),
"accessModeType", MethodType.methodType(MethodType.class, Class.class, Class.class, Class[].class).toMethodDescriptorString(), false);
@@ -244,7 +299,7 @@ void addAccessModeTypeMethod(BinderClassWriter cw) {
mv.visitEnd();
}
- void addAccessModeMethodIfNeeded(VarHandle.AccessMode mode, BinderClassWriter cw) {
+ void addAccessModeMethodIfNeeded(VarHandle.AccessMode mode, ClassWriter cw) {
String methName = mode.methodName();
MethodType methType = form.getMethodType(mode.at.ordinal())
.insertParameterTypes(0, VarHandle.class);
@@ -276,15 +331,14 @@ void addAccessModeMethodIfNeeded(VarHandle.AccessMode mode, BinderClassWriter cw
mv.visitFieldInsn(GETFIELD, Type.getInternalName(BASE_CLASS), "offset", "J");
for (int i = 0 ; i < dimensions ; i++) {
// load ADD MH
- mv.visitLdcInsn(cw.makeConstantPoolPatch(ADD_OFFSETS_HANDLE));
- mv.visitTypeInsn(CHECKCAST, Type.getInternalName(MethodHandle.class));
+ mv.visitFieldInsn(GETSTATIC, implClassName, "addHandle", MethodHandle.class.descriptorString());
//fixup stack so that ADD MH ends up bottom
mv.visitInsn(Opcodes.DUP_X2);
mv.visitInsn(Opcodes.POP);
// load MUL MH
- mv.visitLdcInsn(cw.makeConstantPoolPatch(MUL_OFFSETS_HANDLE));
+ mv.visitFieldInsn(GETSTATIC, implClassName, "mulHandle", MethodHandle.class.descriptorString());
mv.visitTypeInsn(CHECKCAST, Type.getInternalName(MethodHandle.class));
mv.visitVarInsn(ALOAD, 0); // load recv
@@ -327,7 +381,7 @@ void addAccessModeMethodIfNeeded(VarHandle.AccessMode mode, BinderClassWriter cw
}
}
- void addStridesAccessor(BinderClassWriter cw) {
+ void addStridesAccessor(ClassWriter cw) {
MethodVisitor mv = cw.visitMethod(ACC_FINAL, "strides", "()[J", null, null);
mv.visitCode();
iConstInsn(mv, dimensions);
@@ -346,31 +400,15 @@ void addStridesAccessor(BinderClassWriter cw) {
mv.visitEnd();
}
- void addCarrierAccessor(BinderClassWriter cw) {
+ void addCarrierAccessor(ClassWriter cw) {
MethodVisitor mv = cw.visitMethod(ACC_FINAL, "carrier", "()Ljava/lang/Class;", null, null);
mv.visitCode();
- mv.visitLdcInsn(cw.makeConstantPoolPatch(carrier));
- mv.visitTypeInsn(CHECKCAST, Type.getInternalName(Class.class));
+ mv.visitFieldInsn(GETSTATIC, implClassName, "carrier", Class.class.descriptorString());
mv.visitInsn(ARETURN);
mv.visitMaxs(0, 0);
mv.visitEnd();
}
- //where
- private Class> defineClass(BinderClassWriter cw, byte[] classBytes) {
- try {
- if (DEBUG_DUMP_CLASSES_DIR != null) {
- debugWriteClassToFile(classBytes);
- }
- Object[] patches = cw.resolvePatches(classBytes);
- Class> c = U.defineAnonymousClass(BASE_CLASS, classBytes, patches);
- return c;
- } catch (Throwable e) {
- debugPrintClass(classBytes);
- throw e;
- }
- }
-
// shared code generation helpers
private static int getSlotsForType(Class> c) {
@@ -469,57 +507,4 @@ private void debugWriteDataToFile(byte[] data, File file) {
throw new RuntimeException("Failed to write class " + implClassName + " to file " + file);
}
}
-
- static class BinderClassWriter extends ClassWriter {
-
- private final ArrayList<ConstantPoolPatch> cpPatches = new ArrayList<>();
- private int curUniquePatchIndex = 0;
-
- BinderClassWriter() {
- super(ClassWriter.COMPUTE_FRAMES | ClassWriter.COMPUTE_MAXS);
- }
-
- public String makeConstantPoolPatch(Object o) {
- int myUniqueIndex = curUniquePatchIndex++;
- String cpPlaceholder = "CONSTANT_PLACEHOLDER_" + myUniqueIndex;
- int index = newConst(cpPlaceholder);
- cpPatches.add(new ConstantPoolPatch(index, cpPlaceholder, o));
- return cpPlaceholder;
- }
-
- public Object[] resolvePatches(byte[] classFile) {
- if (cpPatches.isEmpty()) {
- return null;
- }
-
- int size = ((classFile[8] & 0xFF) << 8) | (classFile[9] & 0xFF);
-
- Object[] patches = new Object[size];
- for (ConstantPoolPatch p : cpPatches) {
- if (p.index >= size) {
- throw new InternalError("Failed to resolve constant pool patch entries");
- }
- patches[p.index] = p.value;
- }
-
- return patches;
- }
-
- static class ConstantPoolPatch {
- final int index;
- final String placeholder;
- final Object value;
-
- ConstantPoolPatch(int index, String placeholder, Object value) {
- this.index = index;
- this.placeholder = placeholder;
- this.value = value;
- }
-
- @Override
- public String toString() {
- return "CpPatch/index="+index+",placeholder="+placeholder+",value="+value;
- }
- }
- }
}
diff --git a/src/java.base/share/classes/java/lang/invoke/StringConcatFactory.java b/src/java.base/share/classes/java/lang/invoke/StringConcatFactory.java
index dabfd7eba93..67151eb0d0f 100644
--- a/src/java.base/share/classes/java/lang/invoke/StringConcatFactory.java
+++ b/src/java.base/share/classes/java/lang/invoke/StringConcatFactory.java
@@ -27,12 +27,6 @@
import jdk.internal.access.JavaLangAccess;
import jdk.internal.access.SharedSecrets;
-import jdk.internal.misc.Unsafe;
-import jdk.internal.misc.VM;
-import jdk.internal.org.objectweb.asm.ClassWriter;
-import jdk.internal.org.objectweb.asm.Label;
-import jdk.internal.org.objectweb.asm.MethodVisitor;
-import jdk.internal.org.objectweb.asm.Opcodes;
import jdk.internal.vm.annotation.Stable;
import sun.invoke.util.Wrapper;
@@ -45,10 +39,7 @@
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;
-import static java.lang.invoke.MethodHandles.lookup;
import static java.lang.invoke.MethodType.methodType;
-import static java.lang.invoke.MethodHandles.Lookup.ClassOption.*;
-import static jdk.internal.org.objectweb.asm.Opcodes.*;
/**
* Methods to facilitate the creation of String concatenation methods, that
@@ -128,112 +119,8 @@ public final class StringConcatFactory {
*/
private static final int MAX_INDY_CONCAT_ARG_SLOTS = 200;
- /**
- * Concatenation strategy to use. See {@link Strategy} for possible options.
- * This option is controllable with -Djava.lang.invoke.stringConcat JDK option.
- *
- * Defaults to MH_INLINE_SIZED_EXACT if not set.
- */
- private static final Strategy STRATEGY;
-
private static final JavaLangAccess JLA = SharedSecrets.getJavaLangAccess();
- private enum Strategy {
- /**
- * Bytecode generator, calling into {@link java.lang.StringBuilder}.
- */
- BC_SB,
-
- /**
- * Bytecode generator, calling into {@link java.lang.StringBuilder};
- * but trying to estimate the required storage.
- */
- BC_SB_SIZED,
-
- /**
- * Bytecode generator, calling into {@link java.lang.StringBuilder};
- * but computing the required storage exactly.
- */
- BC_SB_SIZED_EXACT,
-
- /**
- * MethodHandle-based generator, that in the end calls into {@link java.lang.StringBuilder}.
- * This strategy also tries to estimate the required storage.
- */
- MH_SB_SIZED,
-
- /**
- * MethodHandle-based generator, that in the end calls into {@link java.lang.StringBuilder}.
- * This strategy also estimate the required storage exactly.
- */
- MH_SB_SIZED_EXACT,
-
- /**
- * MethodHandle-based generator, that constructs its own byte[] array from
- * the arguments. It computes the required storage exactly.
- */
- MH_INLINE_SIZED_EXACT
- }
-
- /**
- * Enables debugging: this may print debugging messages, perform additional (non-neutral for performance)
- * checks, etc.
- */
- private static final boolean DEBUG;
-
- static {
- final String strategy =
- VM.getSavedProperty("java.lang.invoke.stringConcat");
- STRATEGY = (strategy == null) ? null : Strategy.valueOf(strategy);
-
- if (STRATEGY == null || STRATEGY == Strategy.MH_INLINE_SIZED_EXACT) {
- // Force initialization of default strategy:
- Unsafe.getUnsafe().ensureClassInitialized(MethodHandleInlineCopyStrategy.class);
- }
-
- DEBUG = Boolean.parseBoolean(
- VM.getSavedProperty("java.lang.invoke.stringConcat.debug"));
- }
-
- /**
- * Cache key is a composite of:
- * - class name, that lets to disambiguate stubs, to avoid excess sharing
- * - method type, describing the dynamic arguments for concatenation
- * - concat recipe, describing the constants and concat shape
- */
- private static final class Key {
- final String className;
- final MethodType mt;
- final Recipe recipe;
-
- public Key(String className, MethodType mt, Recipe recipe) {
- this.className = className;
- this.mt = mt;
- this.recipe = recipe;
- }
-
- @Override
- public boolean equals(Object o) {
- if (this == o) return true;
- if (o == null || getClass() != o.getClass()) return false;
-
- Key key = (Key) o;
-
- if (!className.equals(key.className)) return false;
- if (!mt.equals(key.mt)) return false;
- if (!recipe.equals(key.recipe)) return false;
- return true;
- }
-
- @Override
- public int hashCode() {
- int result = className.hashCode();
- result = 31 * result + mt.hashCode();
- result = 31 * result + recipe.hashCode();
- return result;
- }
- }
-
/**
* Parses the recipe string, and produces a traversable collection of
* {@link java.lang.invoke.StringConcatFactory.RecipeElement}-s for generator
@@ -445,10 +332,6 @@ public int hashCode() {
public static CallSite makeConcat(MethodHandles.Lookup lookup,
String name,
MethodType concatType) throws StringConcatException {
- if (DEBUG) {
- System.out.println("StringConcatFactory " + STRATEGY + " is here for " + concatType);
- }
-
return doStringConcat(lookup, name, concatType, true, null);
}
@@ -568,10 +451,6 @@ public static CallSite makeConcatWithConstants(MethodHandles.Lookup lookup,
MethodType concatType,
String recipe,
Object... constants) throws StringConcatException {
- if (DEBUG) {
- System.out.println("StringConcatFactory " + STRATEGY + " is here for " + concatType + ", {" + recipe + "}, " + Arrays.toString(constants));
- }
-
return doStringConcat(lookup, name, concatType, false, recipe, constants);
}
@@ -643,61 +522,14 @@ private static CallSite doStringConcat(MethodHandles.Lookup lookup,
MAX_INDY_CONCAT_ARG_SLOTS);
}
- MethodType mt = adaptType(concatType);
Recipe rec = new Recipe(recipe, constants);
- MethodHandle mh = generate(lookup, mt, rec);
+ MethodHandle mh = generate(lookup, concatType, rec);
return new ConstantCallSite(mh.asType(concatType));
}
- /**
- * Adapt method type to an API we are going to use.
- *
- * This strips the concrete classes from the signatures, thus preventing
- * class leakage when we cache the concatenation stubs.
- *
- * @param args actual argument types
- * @return argument types the strategy is going to use
- */
- private static MethodType adaptType(MethodType args) {
- Class>[] ptypes = null;
- for (int i = 0; i < args.parameterCount(); i++) {
- Class> ptype = args.parameterType(i);
- if (!ptype.isPrimitive() &&
- ptype != String.class &&
- ptype != Object.class) { // truncate to Object
- if (ptypes == null) {
- ptypes = args.parameterArray();
- }
- ptypes[i] = Object.class;
- }
- // else other primitives or String or Object (unchanged)
- }
- return (ptypes != null)
- ? MethodType.methodType(args.returnType(), ptypes)
- : args;
- }
-
private static MethodHandle generate(Lookup lookup, MethodType mt, Recipe recipe) throws StringConcatException {
try {
- if (STRATEGY == null) {
- return MethodHandleInlineCopyStrategy.generate(mt, recipe);
- }
- switch (STRATEGY) {
- case BC_SB:
- return BytecodeStringBuilderStrategy.generate(lookup, mt, recipe, Mode.DEFAULT);
- case BC_SB_SIZED:
- return BytecodeStringBuilderStrategy.generate(lookup, mt, recipe, Mode.SIZED);
- case BC_SB_SIZED_EXACT:
- return BytecodeStringBuilderStrategy.generate(lookup, mt, recipe, Mode.SIZED_EXACT);
- case MH_SB_SIZED:
- return MethodHandleStringBuilderStrategy.generate(mt, recipe, Mode.SIZED);
- case MH_SB_SIZED_EXACT:
- return MethodHandleStringBuilderStrategy.generate(mt, recipe, Mode.SIZED_EXACT);
- case MH_INLINE_SIZED_EXACT:
- return MethodHandleInlineCopyStrategy.generate(mt, recipe);
- default:
- throw new StringConcatException("Concatenation strategy " + STRATEGY + " is not implemented");
- }
+ return generateMHInlineCopy(mt, recipe);
} catch (Error | StringConcatException e) {
// Pass through any error or existing StringConcatException
throw e;
@@ -706,1198 +538,352 @@ private static MethodHandle generate(Lookup lookup, MethodType mt, Recipe recipe
}
}
- private enum Mode {
- DEFAULT(false, false),
- SIZED(true, false),
- SIZED_EXACT(true, true);
-
- private final boolean sized;
- private final boolean exact;
-
- Mode(boolean sized, boolean exact) {
- this.sized = sized;
- this.exact = exact;
- }
-
- boolean isSized() {
- return sized;
- }
-
- boolean isExact() {
- return exact;
- }
- }
/**
- * Bytecode StringBuilder strategy.
- *
- *
This strategy operates in three modes, gated by {@link Mode}.
- *
- *
{@link Strategy#BC_SB}: "bytecode StringBuilder".
- *
- *
This strategy spins up the bytecode that has the same StringBuilder
- * chain javac would otherwise emit. This strategy uses only the public API,
- * and comes as the baseline for the current JDK behavior. On other words,
- * this strategy moves the javac generated bytecode to runtime. The
- * generated bytecode is loaded via Lookup::defineClass, but with
- * the caller class coming from the BSM -- in other words, the protection
- * guarantees are inherited from the method where invokedynamic was
- * originally called. This means, among other things, that the bytecode is
- * verified for all non-JDK uses.
- *
- *
{@link Strategy#BC_SB_SIZED}: "bytecode StringBuilder, but
- * sized".
- *
- *
This strategy acts similarly to {@link Strategy#BC_SB}, but it also
- * tries to guess the capacity required for StringBuilder to accept all
- * arguments without resizing. This strategy only makes an educated guess:
- * it only guesses the space required for known types (e.g. primitives and
- * Strings), but does not otherwise convert arguments. Therefore, the
- * capacity estimate may be wrong, and StringBuilder may have to
- * transparently resize or trim when doing the actual concatenation. While
- * this does not constitute a correctness issue (in the end, that what BC_SB
- * has to do anyway), this does pose a potential performance problem.
- *
- *
{@link Strategy#BC_SB_SIZED_EXACT}: "bytecode StringBuilder, but
- * sized exactly".
- *
- *
This strategy improves on @link Strategy#BC_SB_SIZED}, by first
- * converting all arguments to String in order to get the exact capacity
- * StringBuilder should have. The conversion is done via the public
- * String.valueOf and/or Object.toString methods, and does not touch any
- * private String API.
+ *
This strategy replicates what StringBuilders are doing: it builds the
+ * byte[] array on its own and passes that byte[] array to String
+ * constructor. This strategy requires access to some private APIs in JDK,
+ * most notably, the private String constructor that accepts byte[] arrays
+ * without copying.
*/
- private static final class BytecodeStringBuilderStrategy {
- static final int CLASSFILE_VERSION = 52;
- static final String METHOD_NAME = "concat";
-
- private static final ConcurrentMap CACHE;
-
- /**
- * Enables caching of strategy stubs. This may improve the linkage time by reusing the generated
- * code, at the expense of contaminating the profiles.
- */
- private static final boolean CACHE_ENABLE;
-
- /**
- * Dump generated classes to disk, for debugging purposes.
- */
- private static final ProxyClassesDumper DUMPER;
-
- static {
- CACHE_ENABLE = Boolean.parseBoolean(
- VM.getSavedProperty("java.lang.invoke.stringConcat.cache"));
- CACHE = CACHE_ENABLE ? new ConcurrentHashMap<>() : null;
-
- final String dumpPath =
- VM.getSavedProperty("java.lang.invoke.stringConcat.dumpClasses");
-
- DUMPER = (dumpPath == null) ? null : ProxyClassesDumper.getInstance(dumpPath);
- }
-
- private BytecodeStringBuilderStrategy() {
- // no instantiation
- }
-
- private static MethodHandle generate(Lookup lookup, MethodType args, Recipe recipe, Mode mode) throws Exception {
- String className = getClassName(lookup.lookupClass());
- Key key = null;
- if (CACHE_ENABLE) {
- key = new Key(className, args, recipe);
- MethodHandle mh = CACHE.get(key);
- if (mh != null) {
- return mh;
- }
- }
-
- ClassWriter cw = new ClassWriter(ClassWriter.COMPUTE_MAXS + ClassWriter.COMPUTE_FRAMES);
-
- cw.visit(CLASSFILE_VERSION,
- ACC_SUPER + ACC_PUBLIC + ACC_FINAL + ACC_SYNTHETIC,
- className,
- null,
- "java/lang/Object",
- null
- );
-
- MethodVisitor mv = cw.visitMethod(
- ACC_PUBLIC + ACC_STATIC + ACC_FINAL,
- METHOD_NAME,
- args.toMethodDescriptorString(),
- null,
- null);
-
- // use of @ForceInline no longer has any effect
- mv.visitAnnotation("Ljdk/internal/vm/annotation/ForceInline;", true);
- mv.visitCode();
-
- Class>[] arr = args.parameterArray();
- boolean[] guaranteedNonNull = new boolean[arr.length];
-
- if (mode.isExact()) {
- /*
- In exact mode, we need to convert all arguments to their String representations,
- as this allows to compute their String sizes exactly. We cannot use private
- methods for primitives in here, therefore we need to convert those as well.
-
- We also record what arguments are guaranteed to be non-null as the result
- of the conversion. String.valueOf does the null checks for us. The only
- corner case to take care of is String.valueOf(Object) returning null itself.
-
- Also, if any conversion happened, then the slot indices in the incoming
- arguments are not equal to the final local maps. The only case this may break
- is when converting 2-slot long/double argument to 1-slot String. Therefore,
- we get away with tracking modified offset, since no conversion can overwrite
- the upcoming the argument.
- */
-
- int off = 0;
- int modOff = 0;
- for (int c = 0; c < arr.length; c++) {
- Class> cl = arr[c];
- if (cl == String.class) {
- if (off != modOff) {
- mv.visitIntInsn(getLoadOpcode(cl), off);
- mv.visitIntInsn(ASTORE, modOff);
- }
- } else {
- mv.visitIntInsn(getLoadOpcode(cl), off);
- mv.visitMethodInsn(
- INVOKESTATIC,
- "java/lang/String",
- "valueOf",
- getStringValueOfDesc(cl),
- false
- );
- mv.visitIntInsn(ASTORE, modOff);
- arr[c] = String.class;
- guaranteedNonNull[c] = cl.isPrimitive();
- }
- off += getParameterSize(cl);
- modOff += getParameterSize(String.class);
- }
- }
-
- if (mode.isSized()) {
- /*
- When operating in sized mode (this includes exact mode), it makes sense to make
- StringBuilder append chains look familiar to OptimizeStringConcat. For that, we
- need to do null-checks early, not make the append chain shape simpler.
- */
-
- int off = 0;
- for (RecipeElement el : recipe.getElements()) {
- switch (el.getTag()) {
- case TAG_CONST:
- // Guaranteed non-null, no null check required.
- break;
- case TAG_ARG:
- // Null-checks are needed only for String arguments, and when a previous stage
- // did not do implicit null-checks. If a String is null, we eagerly replace it
- // with "null" constant. Note, we omit Objects here, because we don't call
- // .length() on them down below.
- int ac = el.getArgPos();
- Class> cl = arr[ac];
- if (cl == String.class && !guaranteedNonNull[ac]) {
- Label l0 = new Label();
- mv.visitIntInsn(ALOAD, off);
- mv.visitJumpInsn(IFNONNULL, l0);
- mv.visitLdcInsn("null");
- mv.visitIntInsn(ASTORE, off);
- mv.visitLabel(l0);
- }
- off += getParameterSize(cl);
- break;
- default:
- throw new StringConcatException("Unhandled tag: " + el.getTag());
- }
- }
- }
-
- // Prepare StringBuilder instance
- mv.visitTypeInsn(NEW, "java/lang/StringBuilder");
- mv.visitInsn(DUP);
-
- if (mode.isSized()) {
- /*
- Sized mode requires us to walk through the arguments, and estimate the final length.
- In exact mode, this will operate on Strings only. This code would accumulate the
- final length on stack.
- */
- int len = 0;
- int off = 0;
-
- mv.visitInsn(ICONST_0);
-
- for (RecipeElement el : recipe.getElements()) {
- switch (el.getTag()) {
- case TAG_CONST:
- len += el.getValue().length();
- break;
- case TAG_ARG:
- /*
- If an argument is String, then we can call .length() on it. Sized/Exact modes have
- converted arguments for us. If an argument is primitive, we can provide a guess
- for its String representation size.
- */
- Class> cl = arr[el.getArgPos()];
- if (cl == String.class) {
- mv.visitIntInsn(ALOAD, off);
- mv.visitMethodInsn(
- INVOKEVIRTUAL,
- "java/lang/String",
- "length",
- "()I",
- false
- );
- mv.visitInsn(IADD);
- } else if (cl.isPrimitive()) {
- len += estimateSize(cl);
- }
- off += getParameterSize(cl);
- break;
- default:
- throw new StringConcatException("Unhandled tag: " + el.getTag());
- }
- }
-
- // Constants have non-zero length, mix in
- if (len > 0) {
- iconst(mv, len);
- mv.visitInsn(IADD);
- }
+ private static MethodHandle generateMHInlineCopy(MethodType mt, Recipe recipe) throws Throwable {
- mv.visitMethodInsn(
- INVOKESPECIAL,
- "java/lang/StringBuilder",
- "",
- "(I)V",
- false
- );
- } else {
- mv.visitMethodInsn(
- INVOKESPECIAL,
- "java/lang/StringBuilder",
- "",
- "()V",
- false
- );
- }
-
- // At this point, we have a blank StringBuilder on stack, fill it in with .append calls.
- {
- int off = 0;
- for (RecipeElement el : recipe.getElements()) {
- String desc;
- switch (el.getTag()) {
- case TAG_CONST:
- mv.visitLdcInsn(el.getValue());
- desc = getSBAppendDesc(String.class);
- break;
- case TAG_ARG:
- Class> cl = arr[el.getArgPos()];
- mv.visitVarInsn(getLoadOpcode(cl), off);
- off += getParameterSize(cl);
- desc = getSBAppendDesc(cl);
- break;
- default:
- throw new StringConcatException("Unhandled tag: " + el.getTag());
- }
-
- mv.visitMethodInsn(
- INVOKEVIRTUAL,
- "java/lang/StringBuilder",
- "append",
- desc,
- false
- );
- }
- }
-
- if (DEBUG && mode.isExact()) {
- /*
- Exactness checks compare the final StringBuilder.capacity() with a resulting
- String.length(). If these values disagree, that means StringBuilder had to perform
- storage trimming, which defeats the purpose of exact strategies.
- */
-
- /*
- The logic for this check is as follows:
-
- Stack before: Op:
- (SB) dup, dup
- (SB, SB, SB) capacity()
- (int, SB, SB) swap
- (SB, int, SB) toString()
- (S, int, SB) length()
- (int, int, SB) if_icmpeq
- (SB)
-
- Note that it leaves the same StringBuilder on exit, like the one on enter.
- */
-
- mv.visitInsn(DUP);
- mv.visitInsn(DUP);
-
- mv.visitMethodInsn(
- INVOKEVIRTUAL,
- "java/lang/StringBuilder",
- "capacity",
- "()I",
- false
- );
-
- mv.visitInsn(SWAP);
-
- mv.visitMethodInsn(
- INVOKEVIRTUAL,
- "java/lang/StringBuilder",
- "toString",
- "()Ljava/lang/String;",
- false
- );
-
- mv.visitMethodInsn(
- INVOKEVIRTUAL,
- "java/lang/String",
- "length",
- "()I",
- false
- );
-
- Label l0 = new Label();
- mv.visitJumpInsn(IF_ICMPEQ, l0);
-
- mv.visitTypeInsn(NEW, "java/lang/AssertionError");
- mv.visitInsn(DUP);
- mv.visitLdcInsn("Failed exactness check");
- mv.visitMethodInsn(INVOKESPECIAL,
- "java/lang/AssertionError",
- "",
- "(Ljava/lang/Object;)V",
- false);
- mv.visitInsn(ATHROW);
-
- mv.visitLabel(l0);
- }
-
- mv.visitMethodInsn(
- INVOKEVIRTUAL,
- "java/lang/StringBuilder",
- "toString",
- "()Ljava/lang/String;",
- false
- );
-
- mv.visitInsn(ARETURN);
-
- mv.visitMaxs(-1, -1);
- mv.visitEnd();
- cw.visitEnd();
-
- byte[] classBytes = cw.toByteArray();
- try {
- Class> innerClass = lookup.defineHiddenClass(classBytes, true, STRONG).lookupClass();
- dumpIfEnabled(className, classBytes);
- MethodHandle mh = lookup.findStatic(innerClass, METHOD_NAME, args);
- if (CACHE_ENABLE) {
- CACHE.put(key, mh);
- }
- return mh;
- } catch (Exception e) {
- dumpIfEnabled(className + "$$FAILED", classBytes);
- throw new StringConcatException("Exception while spinning the class", e);
- }
- }
-
- /**
- * The generated class is in the same package as the host class as
- * it's the implementation of the string concatenation for the host
- * class.
- *
- * When cache is enabled, we want to cache as much as we can.
- */
- private static String getClassName(Class> hostClass) {
- if (CACHE_ENABLE) {
- String pkgName = hostClass.getPackageName();
- return (!pkgName.isEmpty() ? pkgName.replace('.', '/') + "/" : "") + "Stubs$$StringConcat";
- } else {
- String name = hostClass.isHidden() ? hostClass.getName().replace('/', '_')
- : hostClass.getName();
- return name.replace('.', '/') + "$$StringConcat";
- }
- }
-
- private static void dumpIfEnabled(String name, byte[] bytes) {
- if (DUMPER != null) {
- DUMPER.dumpClass(name, bytes);
- }
- }
-
- private static String getSBAppendDesc(Class> cl) {
- if (cl.isPrimitive()) {
- if (cl == Integer.TYPE || cl == Byte.TYPE || cl == Short.TYPE) {
- return "(I)Ljava/lang/StringBuilder;";
- } else if (cl == Boolean.TYPE) {
- return "(Z)Ljava/lang/StringBuilder;";
- } else if (cl == Character.TYPE) {
- return "(C)Ljava/lang/StringBuilder;";
- } else if (cl == Double.TYPE) {
- return "(D)Ljava/lang/StringBuilder;";
- } else if (cl == Float.TYPE) {
- return "(F)Ljava/lang/StringBuilder;";
- } else if (cl == Long.TYPE) {
- return "(J)Ljava/lang/StringBuilder;";
- } else {
- throw new IllegalStateException("Unhandled primitive StringBuilder.append: " + cl);
- }
- } else if (cl == String.class) {
- return "(Ljava/lang/String;)Ljava/lang/StringBuilder;";
- } else {
- return "(Ljava/lang/Object;)Ljava/lang/StringBuilder;";
- }
- }
-
- private static String getStringValueOfDesc(Class> cl) {
- if (cl.isPrimitive()) {
- if (cl == Integer.TYPE || cl == Byte.TYPE || cl == Short.TYPE) {
- return "(I)Ljava/lang/String;";
- } else if (cl == Boolean.TYPE) {
- return "(Z)Ljava/lang/String;";
- } else if (cl == Character.TYPE) {
- return "(C)Ljava/lang/String;";
- } else if (cl == Double.TYPE) {
- return "(D)Ljava/lang/String;";
- } else if (cl == Float.TYPE) {
- return "(F)Ljava/lang/String;";
- } else if (cl == Long.TYPE) {
- return "(J)Ljava/lang/String;";
- } else {
- throw new IllegalStateException("Unhandled String.valueOf: " + cl);
- }
- } else if (cl == String.class) {
- return "(Ljava/lang/String;)Ljava/lang/String;";
- } else {
- return "(Ljava/lang/Object;)Ljava/lang/String;";
- }
- }
-
- /**
- * The following method is copied from
- * org.objectweb.asm.commons.InstructionAdapter. Part of ASM: a very small
- * and fast Java bytecode manipulation framework.
- * Copyright (c) 2000-2005 INRIA, France Telecom All rights reserved.
- */
- private static void iconst(MethodVisitor mv, final int cst) {
- if (cst >= -1 && cst <= 5) {
- mv.visitInsn(Opcodes.ICONST_0 + cst);
- } else if (cst >= Byte.MIN_VALUE && cst <= Byte.MAX_VALUE) {
- mv.visitIntInsn(Opcodes.BIPUSH, cst);
- } else if (cst >= Short.MIN_VALUE && cst <= Short.MAX_VALUE) {
- mv.visitIntInsn(Opcodes.SIPUSH, cst);
- } else {
- mv.visitLdcInsn(cst);
- }
- }
-
- private static int getLoadOpcode(Class> c) {
- if (c == Void.TYPE) {
- throw new InternalError("Unexpected void type of load opcode");
- }
- return ILOAD + getOpcodeOffset(c);
- }
+ // Fast-path two-argument Object + Object concatenations
+ if (recipe.getElements().size() == 2) {
+ // Two object arguments
+ if (mt.parameterCount() == 2 &&
+ !mt.parameterType(0).isPrimitive() &&
+ !mt.parameterType(1).isPrimitive() &&
+ recipe.getElements().get(0).getTag() == TAG_ARG &&
+ recipe.getElements().get(1).getTag() == TAG_ARG) {
- private static int getOpcodeOffset(Class> c) {
- if (c.isPrimitive()) {
- if (c == Long.TYPE) {
- return 1;
- } else if (c == Float.TYPE) {
- return 2;
- } else if (c == Double.TYPE) {
- return 3;
- }
- return 0;
- } else {
- return 4;
- }
- }
+ return simpleConcat();
- private static int getParameterSize(Class> c) {
- if (c == Void.TYPE) {
- return 0;
- } else if (c == Long.TYPE || c == Double.TYPE) {
- return 2;
- }
- return 1;
- }
- }
-
- /**
- * MethodHandle StringBuilder strategy.
- *
- * This strategy operates in two modes, gated by {@link Mode}.
- *
- *
{@link Strategy#MH_SB_SIZED}: "MethodHandles StringBuilder,
- * sized".
- *
- *
This strategy avoids spinning up the bytecode by building the
- * computation on MethodHandle combinators. The computation is built with
- * public MethodHandle APIs, resolved from a public Lookup sequence, and
- * ends up calling the public StringBuilder API. Therefore, this strategy
- * does not use any private API at all since everything is handled under
- * cover by java.lang.invoke APIs.
- *
- *
{@link Strategy#MH_SB_SIZED_EXACT}: "MethodHandles StringBuilder,
- * sized exactly".
- *
- *
This strategy improves on @link Strategy#MH_SB_SIZED}, by first
- * converting all arguments to String in order to get the exact capacity
- * StringBuilder should have. The conversion is done via the public
- * String.valueOf and/or Object.toString methods, and does not touch any
- * private String API.
- */
- private static final class MethodHandleStringBuilderStrategy {
- private MethodHandleStringBuilderStrategy() {
- // no instantiation
- }
+ } else if (mt.parameterCount() == 1 &&
+ !mt.parameterType(0).isPrimitive()) {
+ // One Object argument, one constant
+ MethodHandle mh = simpleConcat();
- private static MethodHandle generate(MethodType mt, Recipe recipe, Mode mode) throws Exception {
- int pc = mt.parameterCount();
-
- Class>[] ptypes = mt.parameterArray();
- MethodHandle[] filters = new MethodHandle[ptypes.length];
- for (int i = 0; i < ptypes.length; i++) {
- MethodHandle filter;
- switch (mode) {
- case SIZED:
- // In sized mode, we convert all references and floats/doubles
- // to String: there is no specialization for different
- // classes in StringBuilder API, and it will convert to
- // String internally anyhow.
- filter = Stringifiers.forMost(ptypes[i]);
- break;
- case SIZED_EXACT:
- // In exact mode, we convert everything to String:
- // this helps to compute the storage exactly.
- filter = Stringifiers.forAny(ptypes[i]);
- break;
- default:
- throw new StringConcatException("Not supported");
- }
- if (filter != null) {
- filters[i] = filter;
- ptypes[i] = filter.type().returnType();
- }
- }
+ if (recipe.getElements().get(0).getTag() == TAG_CONST &&
+ recipe.getElements().get(1).getTag() == TAG_ARG) {
+ // First recipe element is a constant
+ return MethodHandles.insertArguments(mh, 0,
+ recipe.getElements().get(0).getValue());
- MethodHandle[] lengthers = new MethodHandle[pc];
-
- // Figure out lengths: constants' lengths can be deduced on the spot.
- // All reference arguments were filtered to String in the combinators below, so we can
- // call the usual String.length(). Primitive values string sizes can be estimated.
- int initial = 0;
- for (RecipeElement el : recipe.getElements()) {
- switch (el.getTag()) {
- case TAG_CONST:
- initial += el.getValue().length();
- break;
- case TAG_ARG:
- final int i = el.getArgPos();
- Class> type = ptypes[i];
- if (type.isPrimitive()) {
- MethodHandle est = MethodHandles.constant(int.class, estimateSize(type));
- est = MethodHandles.dropArguments(est, 0, type);
- lengthers[i] = est;
- } else {
- lengthers[i] = STRING_LENGTH;
- }
- break;
- default:
- throw new StringConcatException("Unhandled tag: " + el.getTag());
- }
- }
+ } else if (recipe.getElements().get(1).getTag() == TAG_CONST &&
+ recipe.getElements().get(0).getTag() == TAG_ARG) {
+ // Second recipe element is a constant
+ return MethodHandles.insertArguments(mh, 1,
+ recipe.getElements().get(1).getValue());
- // Create (StringBuilder, ) shape for appending:
- MethodHandle builder = MethodHandles.dropArguments(MethodHandles.identity(StringBuilder.class), 1, ptypes);
-
- // Compose append calls. This is done in reverse because the application order is
- // reverse as well.
- List elements = recipe.getElements();
- for (int i = elements.size() - 1; i >= 0; i--) {
- RecipeElement el = elements.get(i);
- MethodHandle appender;
- switch (el.getTag()) {
- case TAG_CONST:
- MethodHandle mh = appender(adaptToStringBuilder(String.class));
- appender = MethodHandles.insertArguments(mh, 1, el.getValue());
- break;
- case TAG_ARG:
- int ac = el.getArgPos();
- appender = appender(ptypes[ac]);
-
- // Insert dummy arguments to match the prefix in the signature.
- // The actual appender argument will be the ac-ith argument.
- if (ac != 0) {
- appender = MethodHandles.dropArguments(appender, 1, Arrays.copyOf(ptypes, ac));
- }
- break;
- default:
- throw new StringConcatException("Unhandled tag: " + el.getTag());
}
- builder = MethodHandles.foldArguments(builder, appender);
- }
-
- // Build the sub-tree that adds the sizes and produces a StringBuilder:
-
- // a) Start with the reducer that accepts all arguments, plus one
- // slot for the initial value. Inject the initial value right away.
- // This produces ()int shape:
- MethodHandle sum = getReducerFor(pc + 1);
- MethodHandle adder = MethodHandles.insertArguments(sum, 0, initial);
-
- // b) Apply lengthers to transform arguments to lengths, producing ()int
- adder = MethodHandles.filterArguments(adder, 0, lengthers);
-
- // c) Instantiate StringBuilder ()int -> ()StringBuilder
- MethodHandle newBuilder = MethodHandles.filterReturnValue(adder, NEW_STRING_BUILDER);
-
- // d) Fold in StringBuilder constructor, this produces ()StringBuilder
- MethodHandle mh = MethodHandles.foldArguments(builder, newBuilder);
-
- // Convert non-primitive arguments to Strings
- mh = MethodHandles.filterArguments(mh, 0, filters);
-
- // Convert ()StringBuilder to ()String
- if (DEBUG && mode.isExact()) {
- mh = MethodHandles.filterReturnValue(mh, BUILDER_TO_STRING_CHECKED);
- } else {
- mh = MethodHandles.filterReturnValue(mh, BUILDER_TO_STRING);
}
-
- return mh;
- }
-
- private static MethodHandle getReducerFor(int cnt) {
- return SUMMERS.computeIfAbsent(cnt, SUMMER);
+ // else... fall-through to slow-path
}
- private static MethodHandle appender(Class> appendType) {
- MethodHandle appender = lookupVirtual(MethodHandles.publicLookup(), StringBuilder.class, "append",
- StringBuilder.class, adaptToStringBuilder(appendType));
-
- // appenders should return void, this would not modify the target signature during folding
- MethodType nt = MethodType.methodType(void.class, StringBuilder.class, appendType);
- return appender.asType(nt);
- }
-
- private static String toStringChecked(StringBuilder sb) {
- String s = sb.toString();
- if (s.length() != sb.capacity()) {
- throw new AssertionError("Exactness check failed: result length = " + s.length() + ", buffer capacity = " + sb.capacity());
- }
- return s;
- }
-
- private static int sum(int v1, int v2) {
- return v1 + v2;
- }
-
- private static int sum(int v1, int v2, int v3) {
- return v1 + v2 + v3;
- }
-
- private static int sum(int v1, int v2, int v3, int v4) {
- return v1 + v2 + v3 + v4;
- }
-
- private static int sum(int v1, int v2, int v3, int v4, int v5) {
- return v1 + v2 + v3 + v4 + v5;
- }
-
- private static int sum(int v1, int v2, int v3, int v4, int v5, int v6) {
- return v1 + v2 + v3 + v4 + v5 + v6;
- }
-
- private static int sum(int v1, int v2, int v3, int v4, int v5, int v6, int v7) {
- return v1 + v2 + v3 + v4 + v5 + v6 + v7;
- }
-
- private static int sum(int v1, int v2, int v3, int v4, int v5, int v6, int v7, int v8) {
- return v1 + v2 + v3 + v4 + v5 + v6 + v7 + v8;
- }
-
- private static int sum(int initial, int[] vs) {
- int sum = initial;
- for (int v : vs) {
- sum += v;
- }
- return sum;
- }
-
- private static final Lookup MHSBS_LOOKUP = lookup();
-
- private static final ConcurrentMap SUMMERS;
-
- // This one is deliberately non-lambdified to optimize startup time:
- private static final Function SUMMER = new Function() {
- @Override
- public MethodHandle apply(Integer cnt) {
- if (cnt == 1) {
- return MethodHandles.identity(int.class);
- } else if (cnt <= 8) {
- // Variable-arity collectors are not as efficient as small-count methods,
- // unroll some initial sizes.
- Class>[] cls = new Class>[cnt];
- Arrays.fill(cls, int.class);
- return lookupStatic(MHSBS_LOOKUP, MethodHandleStringBuilderStrategy.class, "sum", int.class, cls);
- } else {
- return lookupStatic(MHSBS_LOOKUP, MethodHandleStringBuilderStrategy.class, "sum", int.class, int.class, int[].class)
- .asCollector(int[].class, cnt - 1);
+ // Create filters and obtain filtered parameter types. Filters would be used in the beginning
+ // to convert the incoming arguments into the arguments we can process (e.g. Objects -> Strings).
+ // The filtered argument type list is used all over in the combinators below.
+ Class>[] ptypes = mt.parameterArray();
+ MethodHandle[] filters = null;
+ for (int i = 0; i < ptypes.length; i++) {
+ MethodHandle filter = stringifierFor(ptypes[i]);
+ if (filter != null) {
+ if (filters == null) {
+ filters = new MethodHandle[ptypes.length];
}
- }
- };
-
- private static final MethodHandle NEW_STRING_BUILDER, STRING_LENGTH, BUILDER_TO_STRING, BUILDER_TO_STRING_CHECKED;
-
- static {
- SUMMERS = new ConcurrentHashMap<>();
- Lookup publicLookup = MethodHandles.publicLookup();
- NEW_STRING_BUILDER = lookupConstructor(publicLookup, StringBuilder.class, int.class);
- STRING_LENGTH = lookupVirtual(publicLookup, String.class, "length", int.class);
- BUILDER_TO_STRING = lookupVirtual(publicLookup, StringBuilder.class, "toString", String.class);
- if (DEBUG) {
- BUILDER_TO_STRING_CHECKED = lookupStatic(MHSBS_LOOKUP, MethodHandleStringBuilderStrategy.class,
- "toStringChecked", String.class, StringBuilder.class);
- } else {
- BUILDER_TO_STRING_CHECKED = null;
+ filters[i] = filter;
+ ptypes[i] = filter.type().returnType();
}
}
- }
-
+ // Start building the combinator tree. The tree "starts" with ()String, and "finishes"
+ // with the (byte[], long)String shape to invoke newString in StringConcatHelper. The combinators are
+ // assembled bottom-up, which makes the code arguably hard to read.
- /**
- * {@link Strategy#MH_INLINE_SIZED_EXACT}: "MethodHandles inline,
- * sized exactly".
- *
- *
This strategy replicates what StringBuilders are doing: it builds the
- * byte[] array on its own and passes that byte[] array to String
- * constructor. This strategy requires access to some private APIs in JDK,
- * most notably, the read-only Integer/Long.stringSize methods that measure
- * the character length of the integers, and the private String constructor
- * that accepts byte[] arrays without copying. While this strategy assumes a
- * particular implementation details for String, this opens the door for
- * building a very optimal concatenation sequence. This is the only strategy
- * that requires porting if there are private JDK changes occur.
- */
- private static final class MethodHandleInlineCopyStrategy {
- private MethodHandleInlineCopyStrategy() {
- // no instantiation
- }
-
- static MethodHandle generate(MethodType mt, Recipe recipe) throws Throwable {
-
- // Fast-path two-argument Object + Object concatenations
- if (recipe.getElements().size() == 2) {
- // Two object arguments
- if (mt.parameterCount() == 2 &&
- !mt.parameterType(0).isPrimitive() &&
- !mt.parameterType(1).isPrimitive() &&
- recipe.getElements().get(0).getTag() == TAG_ARG &&
- recipe.getElements().get(1).getTag() == TAG_ARG) {
+ // Drop all remaining parameter types, leave only helper arguments:
+ MethodHandle mh;
- return simpleConcat();
+ mh = MethodHandles.dropArguments(newString(), 2, ptypes);
- } else if (mt.parameterCount() == 1 &&
- !mt.parameterType(0).isPrimitive()) {
- // One Object argument, one constant
- MethodHandle mh = simpleConcat();
+ long initialLengthCoder = INITIAL_CODER;
- if (recipe.getElements().get(0).getTag() == TAG_CONST &&
- recipe.getElements().get(1).getTag() == TAG_ARG) {
- // First recipe element is a constant
- return MethodHandles.insertArguments(mh, 0,
- recipe.getElements().get(0).getValue());
+ // Mix in prependers. This happens when (byte[], long) = (storage, indexCoder) is already
+ // known from the combinators below. We are assembling the string backwards, so the index coded
+ // into indexCoder is the *ending* index.
- } else if (recipe.getElements().get(1).getTag() == TAG_CONST &&
- recipe.getElements().get(0).getTag() == TAG_ARG) {
- // Second recipe element is a constant
- return MethodHandles.insertArguments(mh, 1,
- recipe.getElements().get(1).getValue());
+ // We need one prepender per argument, but also need to fold in constants. We do so by greedily
+ // create prependers that fold in surrounding constants into the argument prepender. This reduces
+ // the number of unique MH combinator tree shapes we'll create in an application.
+ String prefixConstant = null, suffixConstant = null;
+ int pos = -1;
+ for (RecipeElement el : recipe.getElements()) {
+ // Do the prepend, and put "new" index at index 1
+ switch (el.getTag()) {
+ case TAG_CONST: {
+ String constantValue = el.getValue();
- }
- }
- // else... fall-through to slow-path
- }
+ // Eagerly update the initialLengthCoder value
+ initialLengthCoder = JLA.stringConcatMix(initialLengthCoder, constantValue);
- // Create filters and obtain filtered parameter types. Filters would be used in the beginning
- // to convert the incoming arguments into the arguments we can process (e.g. Objects -> Strings).
- // The filtered argument type list is used all over in the combinators below.
- Class<?>[] ptypes = mt.parameterArray();
- MethodHandle[] filters = null;
- for (int i = 0; i < ptypes.length; i++) {
- MethodHandle filter = Stringifiers.forMost(ptypes[i]);
- if (filter != null) {
- if (filters == null) {
- filters = new MethodHandle[ptypes.length];
+ if (pos < 0) {
+ // Collecting into prefixConstant
+ prefixConstant = prefixConstant == null ? constantValue : prefixConstant + constantValue;
+ } else {
+ // Collecting into suffixConstant
+ suffixConstant = suffixConstant == null ? constantValue : suffixConstant + constantValue;
}
- filters[i] = filter;
- ptypes[i] = filter.type().returnType();
+ break;
}
- }
-
- // Start building the combinator tree. The tree "starts" with ()String, and "finishes"
- // with the (byte[], long)String shape to invoke newString in StringConcatHelper. The combinators are
- // assembled bottom-up, which makes the code arguably hard to read.
-
- // Drop all remaining parameter types, leave only helper arguments:
- MethodHandle mh;
-
- mh = MethodHandles.dropArguments(newString(), 2, ptypes);
-
- long initialLengthCoder = INITIAL_CODER;
-
- // Mix in prependers. This happens when (byte[], long) = (storage, indexCoder) is already
- // known from the combinators below. We are assembling the string backwards, so the index coded
- // into indexCoder is the *ending* index.
-
- // We need one prepender per argument, but also need to fold in constants. We do so by greedily
- // create prependers that fold in surrounding constants into the argument prepender. This reduces
- // the number of unique MH combinator tree shapes we'll create in an application.
- String prefixConstant = null, suffixConstant = null;
- int pos = -1;
- for (RecipeElement el : recipe.getElements()) {
- // Do the prepend, and put "new" index at index 1
- switch (el.getTag()) {
- case TAG_CONST: {
- String constantValue = el.getValue();
-
- // Eagerly update the initialLengthCoder value
- initialLengthCoder = (long)mixer(String.class).invoke(initialLengthCoder, constantValue);
-
- if (pos < 0) {
- // Collecting into prefixConstant
- prefixConstant = prefixConstant == null ? constantValue : prefixConstant + constantValue;
- } else {
- // Collecting into suffixConstant
- suffixConstant = suffixConstant == null ? constantValue : suffixConstant + constantValue;
- }
- break;
- }
- case TAG_ARG: {
+ case TAG_ARG: {
- if (pos >= 0) {
- // Flush the previous non-constant arg with any prefix/suffix constant
- mh = MethodHandles.filterArgumentsWithCombiner(
+ if (pos >= 0) {
+ // Flush the previous non-constant arg with any prefix/suffix constant
+ mh = MethodHandles.filterArgumentsWithCombiner(
mh, 1,
prepender(prefixConstant, ptypes[pos], suffixConstant),
1, 0, // indexCoder, storage
2 + pos // selected argument
- );
- prefixConstant = suffixConstant = null;
- }
- // Mark the pos of next non-constant arg
- pos = el.getArgPos();
- break;
+ );
+ prefixConstant = suffixConstant = null;
}
- default:
- throw new StringConcatException("Unhandled tag: " + el.getTag());
+ // Mark the pos of next non-constant arg
+ pos = el.getArgPos();
+ break;
}
+ default:
+ throw new StringConcatException("Unhandled tag: " + el.getTag());
}
+ }
- // Insert any trailing args, constants
- if (pos >= 0) {
- mh = MethodHandles.filterArgumentsWithCombiner(
+ // Insert any trailing args, constants
+ if (pos >= 0) {
+ mh = MethodHandles.filterArgumentsWithCombiner(
mh, 1,
prepender(prefixConstant, ptypes[pos], suffixConstant),
1, 0, // indexCoder, storage
2 + pos // selected argument
- );
- } else if (prefixConstant != null) {
- assert (suffixConstant == null);
- // Sole prefixConstant can only happen if there were no non-constant arguments
- mh = MethodHandles.filterArgumentsWithCombiner(
+ );
+ } else if (prefixConstant != null) {
+ assert (suffixConstant == null);
+ // Sole prefixConstant can only happen if there were no non-constant arguments
+ mh = MethodHandles.filterArgumentsWithCombiner(
mh, 1,
MethodHandles.insertArguments(prepender(null, String.class, null), 2, prefixConstant),
1, 0 // indexCoder, storage
- );
- }
-
- // Fold in byte[] instantiation at argument 0
- mh = MethodHandles.foldArgumentsWithCombiner(mh, 0, newArray(),
- 1 // index
);
+ }
- // Start combining length and coder mixers.
- //
- // Length is easy: constant lengths can be computed on the spot, and all non-constant
- // shapes have been either converted to Strings, or explicit methods for getting the
- // string length out of primitives are provided.
- //
- // Coders are more interesting. Only Object, String and char arguments (and constants)
- // can have non-Latin1 encoding. It is easier to blindly convert constants to String,
- // and deduce the coder from there. Arguments would be either converted to Strings
- // during the initial filtering, or handled by specializations in MIXERS.
- //
- // The method handle shape before all mixers are combined in is:
- // (long, <args>)String = ("indexCoder", <args>)
- //
- // We will bind the initialLengthCoder value to the last mixer (the one that will be
- // executed first), then fold that in. This leaves the shape after all mixers are
- // combined in as:
- // (<args>)String = (<args>)
-
- int ac = -1;
- MethodHandle mix = null;
- for (RecipeElement el : recipe.getElements()) {
- switch (el.getTag()) {
- case TAG_CONST:
- // Constants already handled in the code above
- break;
- case TAG_ARG:
- if (ac >= 0) {
- // Compute new "index" in-place using old value plus the appropriate argument.
- mh = MethodHandles.filterArgumentsWithCombiner(mh, 0, mix,
- 0, // old-index
- 1 + ac // selected argument
- );
- }
-
- ac = el.getArgPos();
- Class<?> argClass = ptypes[ac];
- mix = mixer(argClass);
-
- break;
- default:
- throw new StringConcatException("Unhandled tag: " + el.getTag());
- }
- }
+ // Fold in byte[] instantiation at argument 0
+ mh = MethodHandles.foldArgumentsWithCombiner(mh, 0, newArray(),
+ 1 // index
+ );
+
+ // Start combining length and coder mixers.
+ //
+ // Length is easy: constant lengths can be computed on the spot, and all non-constant
+ // shapes have been either converted to Strings, or explicit methods for getting the
+ // string length out of primitives are provided.
+ //
+ // Coders are more interesting. Only Object, String and char arguments (and constants)
+ // can have non-Latin1 encoding. It is easier to blindly convert constants to String,
+ // and deduce the coder from there. Arguments would be either converted to Strings
+ // during the initial filtering, or handled by specializations in MIXERS.
+ //
+ // The method handle shape before all mixers are combined in is:
+ // (long, <args>)String = ("indexCoder", <args>)
+ //
+ // We will bind the initialLengthCoder value to the last mixer (the one that will be
+ // executed first), then fold that in. This leaves the shape after all mixers are
+ // combined in as:
+ // (<args>)String = (<args>)
+
+ int ac = -1;
+ MethodHandle mix = null;
+ for (RecipeElement el : recipe.getElements()) {
+ switch (el.getTag()) {
+ case TAG_CONST:
+ // Constants already handled in the code above
+ break;
+ case TAG_ARG:
+ if (ac >= 0) {
+ // Compute new "index" in-place using old value plus the appropriate argument.
+ mh = MethodHandles.filterArgumentsWithCombiner(mh, 0, mix,
+ 0, // old-index
+ 1 + ac // selected argument
+ );
+ }
- // Insert the initialLengthCoder value into the final mixer, then
- // fold that into the base method handle
- if (ac >= 0) {
- mix = MethodHandles.insertArguments(mix, 0, initialLengthCoder);
- mh = MethodHandles.foldArgumentsWithCombiner(mh, 0, mix,
- 1 + ac // selected argument
- );
- } else {
- // No mixer (constants only concat), insert initialLengthCoder directly
- mh = MethodHandles.insertArguments(mh, 0, initialLengthCoder);
+ ac = el.getArgPos();
+ Class<?> argClass = ptypes[ac];
+ mix = mixer(argClass);
+
+ break;
+ default:
+ throw new StringConcatException("Unhandled tag: " + el.getTag());
}
+ }
- // The method handle shape here is (<args>).
+ // Insert the initialLengthCoder value into the final mixer, then
+ // fold that into the base method handle
+ if (ac >= 0) {
+ mix = MethodHandles.insertArguments(mix, 0, initialLengthCoder);
+ mh = MethodHandles.foldArgumentsWithCombiner(mh, 0, mix,
+ 1 + ac // selected argument
+ );
+ } else {
+ // No mixer (constants only concat), insert initialLengthCoder directly
+ mh = MethodHandles.insertArguments(mh, 0, initialLengthCoder);
+ }
- // Apply filters, converting the arguments:
- if (filters != null) {
- mh = MethodHandles.filterArguments(mh, 0, filters);
- }
+ // The method handle shape here is (<args>).
- return mh;
+ // Apply filters, converting the arguments:
+ if (filters != null) {
+ mh = MethodHandles.filterArguments(mh, 0, filters);
}
- private static MethodHandle prepender(String prefix, Class<?> cl, String suffix) {
- return MethodHandles.insertArguments(
- MethodHandles.insertArguments(
- PREPENDERS.computeIfAbsent(cl, PREPEND), 2, prefix), 3, suffix);
+ return mh;
+ }
+
+ private static MethodHandle prepender(String prefix, Class<?> cl, String suffix) {
+ if (prefix == null && suffix == null) {
+ return NULL_PREPENDERS.computeIfAbsent(cl, NULL_PREPEND);
}
+ return MethodHandles.insertArguments(
+ PREPENDERS.computeIfAbsent(cl, PREPEND), 3, prefix, suffix);
+ }
- private static MethodHandle mixer(Class<?> cl) {
- return MIXERS.computeIfAbsent(cl, MIX);
+ private static MethodHandle mixer(Class<?> cl) {
+ return MIXERS.computeIfAbsent(cl, MIX);
+ }
+
+ // This one is deliberately non-lambdified to optimize startup time:
+ private static final Function<Class<?>, MethodHandle> PREPEND = new Function<>() {
+ @Override
+ public MethodHandle apply(Class<?> c) {
+ return JLA.stringConcatHelper("prepend",
+ methodType(long.class, long.class, byte[].class,
+ Wrapper.asPrimitiveType(c), String.class, String.class));
}
+ };
- // This one is deliberately non-lambdified to optimize startup time:
- private static final Function<Class<?>, MethodHandle> PREPEND = new Function<>() {
- @Override
- public MethodHandle apply(Class<?> c) {
- return JLA.stringConcatHelper("prepend",
- methodType(long.class, long.class, byte[].class,
- String.class, Wrapper.asPrimitiveType(c), String.class));
- }
- };
+ private static final Function<Class<?>, MethodHandle> NULL_PREPEND = new Function<>() {
+ @Override
+ public MethodHandle apply(Class<?> c) {
+ return MethodHandles.insertArguments(
+ PREPENDERS.computeIfAbsent(c, PREPEND), 3, (String)null, (String)null);
+ }
+ };
- // This one is deliberately non-lambdified to optimize startup time:
- private static final Function<Class<?>, MethodHandle> MIX = new Function<>() {
- @Override
- public MethodHandle apply(Class<?> c) {
- return JLA.stringConcatHelper("mix", methodType(long.class, long.class, Wrapper.asPrimitiveType(c)));
- }
- };
+ // This one is deliberately non-lambdified to optimize startup time:
+ private static final Function<Class<?>, MethodHandle> MIX = new Function<>() {
+ @Override
+ public MethodHandle apply(Class<?> c) {
+ return JLA.stringConcatHelper("mix", methodType(long.class, long.class, Wrapper.asPrimitiveType(c)));
+ }
+ };
- private @Stable static MethodHandle SIMPLE_CONCAT;
- private static MethodHandle simpleConcat() {
- if (SIMPLE_CONCAT == null) {
- SIMPLE_CONCAT = JLA.stringConcatHelper("simpleConcat", methodType(String.class, Object.class, Object.class));
- }
- return SIMPLE_CONCAT;
+ private @Stable static MethodHandle SIMPLE_CONCAT;
+ private static MethodHandle simpleConcat() {
+ if (SIMPLE_CONCAT == null) {
+ SIMPLE_CONCAT = JLA.stringConcatHelper("simpleConcat", methodType(String.class, Object.class, Object.class));
}
+ return SIMPLE_CONCAT;
+ }
- private @Stable static MethodHandle NEW_STRING;
- private static MethodHandle newString() {
- MethodHandle mh = NEW_STRING;
- if (mh == null) {
- NEW_STRING = mh =
+ private @Stable static MethodHandle NEW_STRING;
+ private static MethodHandle newString() {
+ MethodHandle mh = NEW_STRING;
+ if (mh == null) {
+ NEW_STRING = mh =
JLA.stringConcatHelper("newString", methodType(String.class, byte[].class, long.class));
- }
- return mh;
}
- private @Stable static MethodHandle NEW_ARRAY;
- private static MethodHandle newArray() {
- MethodHandle mh = NEW_ARRAY;
- if (mh == null) {
- NEW_ARRAY = mh =
+ return mh;
+ }
+ private @Stable static MethodHandle NEW_ARRAY;
+ private static MethodHandle newArray() {
+ MethodHandle mh = NEW_ARRAY;
+ if (mh == null) {
+ NEW_ARRAY = mh =
JLA.stringConcatHelper("newArray", methodType(byte[].class, long.class));
- }
- return mh;
- }
-
- private static final ConcurrentMap<Class<?>, MethodHandle> PREPENDERS;
- private static final ConcurrentMap<Class<?>, MethodHandle> MIXERS;
- private static final long INITIAL_CODER;
-
- static {
- INITIAL_CODER = JLA.stringConcatInitialCoder();
- PREPENDERS = new ConcurrentHashMap<>();
- MIXERS = new ConcurrentHashMap<>();
}
+ return mh;
}
/**
- * Public gateways to public "stringify" methods. These methods have the form String apply(T obj), and normally
- * delegate to {@code String.valueOf}, depending on argument's type.
+ * Public gateways to public "stringify" methods. These methods have the
+ * form String apply(T obj), and normally delegate to {@code String.valueOf},
+ * depending on argument's type.
*/
- private static final class Stringifiers {
- private Stringifiers() {
- // no instantiation
- }
-
- private static final MethodHandle OBJECT_INSTANCE =
- JLA.stringConcatHelper("stringOf", methodType(String.class, Object.class));
-
- private static class FloatStringifiers {
- private static final MethodHandle FLOAT_INSTANCE =
+ private @Stable static MethodHandle OBJECT_STRINGIFIER;
+ private static MethodHandle objectStringifier() {
+ MethodHandle mh = OBJECT_STRINGIFIER;
+ if (mh == null) {
+ OBJECT_STRINGIFIER = mh =
+ JLA.stringConcatHelper("stringOf", methodType(String.class, Object.class));
+ }
+ return mh;
+ }
+ private @Stable static MethodHandle FLOAT_STRINGIFIER;
+ private static MethodHandle floatStringifier() {
+ MethodHandle mh = FLOAT_STRINGIFIER;
+ if (mh == null) {
+ FLOAT_STRINGIFIER = mh =
lookupStatic(MethodHandles.publicLookup(), String.class, "valueOf", String.class, float.class);
-
- private static final MethodHandle DOUBLE_INSTANCE =
- lookupStatic(MethodHandles.publicLookup(), String.class, "valueOf", String.class, double.class);
- }
-
- private static class StringifierAny extends ClassValue<MethodHandle> {
-
- private static final ClassValue<MethodHandle> INSTANCE = new StringifierAny();
-
- @Override
- protected MethodHandle computeValue(Class<?> cl) {
- if (cl == byte.class || cl == short.class || cl == int.class) {
- return lookupStatic(MethodHandles.publicLookup(), String.class, "valueOf", String.class, int.class);
- } else if (cl == boolean.class) {
- return lookupStatic(MethodHandles.publicLookup(), String.class, "valueOf", String.class, boolean.class);
- } else if (cl == char.class) {
- return lookupStatic(MethodHandles.publicLookup(), String.class, "valueOf", String.class, char.class);
- } else if (cl == long.class) {
- return lookupStatic(MethodHandles.publicLookup(), String.class, "valueOf", String.class, long.class);
- } else {
- MethodHandle mh = forMost(cl);
- if (mh != null) {
- return mh;
- } else {
- throw new IllegalStateException("Unknown class: " + cl);
- }
- }
- }
- }
-
- /**
- * Returns a stringifier for references and floats/doubles only.
- * Always returns null for other primitives.
- *
- * @param t class to stringify
- * @return stringifier; null, if not available
- */
- static MethodHandle forMost(Class<?> t) {
- if (!t.isPrimitive()) {
- return OBJECT_INSTANCE;
- } else if (t == float.class) {
- return FloatStringifiers.FLOAT_INSTANCE;
- } else if (t == double.class) {
- return FloatStringifiers.DOUBLE_INSTANCE;
- }
- return null;
}
-
- /**
- * Returns a stringifier for any type. Never returns null.
- *
- * @param t class to stringify
- * @return stringifier
- */
- static MethodHandle forAny(Class<?> t) {
- return StringifierAny.INSTANCE.get(t);
+ return mh;
+ }
+ private @Stable static MethodHandle DOUBLE_STRINGIFIER;
+ private static MethodHandle doubleStringifier() {
+ MethodHandle mh = DOUBLE_STRINGIFIER;
+ if (mh == null) {
+ DOUBLE_STRINGIFIER = mh =
+ lookupStatic(MethodHandles.publicLookup(), String.class, "valueOf", String.class, double.class);
}
+ return mh;
}
- /* ------------------------------- Common utilities ------------------------------------ */
+ private static final ConcurrentMap<Class<?>, MethodHandle> PREPENDERS;
+ private static final ConcurrentMap<Class<?>, MethodHandle> NULL_PREPENDERS;
+ private static final ConcurrentMap<Class<?>, MethodHandle> MIXERS;
+ private static final long INITIAL_CODER;
- static MethodHandle lookupStatic(Lookup lookup, Class<?> refc, String name, Class<?> rtype, Class<?>... ptypes) {
- try {
- return lookup.findStatic(refc, name, MethodType.methodType(rtype, ptypes));
- } catch (NoSuchMethodException | IllegalAccessException e) {
- throw new AssertionError(e);
- }
+ static {
+ INITIAL_CODER = JLA.stringConcatInitialCoder();
+ PREPENDERS = new ConcurrentHashMap<>();
+ NULL_PREPENDERS = new ConcurrentHashMap<>();
+ MIXERS = new ConcurrentHashMap<>();
}
- static MethodHandle lookupVirtual(Lookup lookup, Class<?> refc, String name, Class<?> rtype, Class<?>... ptypes) {
- try {
- return lookup.findVirtual(refc, name, MethodType.methodType(rtype, ptypes));
- } catch (NoSuchMethodException | IllegalAccessException e) {
- throw new AssertionError(e);
- }
+ /**
+ * Returns a stringifier for references and floats/doubles only.
+ * Always returns null for other primitives.
+ *
+ * @param t class to stringify
+ * @return stringifier; null, if not available
+ */
+ private static MethodHandle stringifierFor(Class<?> t) {
+ if (!t.isPrimitive()) {
+ return objectStringifier();
+ } else if (t == float.class) {
+ return floatStringifier();
+ } else if (t == double.class) {
+ return doubleStringifier();
+ }
+ return null;
}
- static MethodHandle lookupConstructor(Lookup lookup, Class<?> refc, Class<?> ptypes) {
+ private static MethodHandle lookupStatic(Lookup lookup, Class<?> refc, String name,
+ Class<?> rtype, Class<?>... ptypes) {
try {
- return lookup.findConstructor(refc, MethodType.methodType(void.class, ptypes));
+ return lookup.findStatic(refc, name, MethodType.methodType(rtype, ptypes));
} catch (NoSuchMethodException | IllegalAccessException e) {
throw new AssertionError(e);
}
}
- static int estimateSize(Class<?> cl) {
- if (cl == Integer.TYPE) {
- return 11; // "-2147483648"
- } else if (cl == Boolean.TYPE) {
- return 5; // "false"
- } else if (cl == Byte.TYPE) {
- return 4; // "-128"
- } else if (cl == Character.TYPE) {
- return 1; // duh
- } else if (cl == Short.TYPE) {
- return 6; // "-32768"
- } else if (cl == Double.TYPE) {
- return 26; // apparently, no larger than this, see FloatingDecimal.BinaryToASCIIBuffer.buffer
- } else if (cl == Float.TYPE) {
- return 26; // apparently, no larger than this, see FloatingDecimal.BinaryToASCIIBuffer.buffer
- } else if (cl == Long.TYPE) {
- return 20; // "-9223372036854775808"
- } else {
- throw new IllegalArgumentException("Cannot estimate the size for " + cl);
- }
- }
-
- static Class<?> adaptToStringBuilder(Class<?> c) {
- if (c.isPrimitive()) {
- if (c == Byte.TYPE || c == Short.TYPE) {
- return int.class;
- }
- } else {
- if (c != String.class) {
- return Object.class;
- }
- }
- return c;
- }
-
private StringConcatFactory() {
// no instantiation
}
-
}
diff --git a/src/java.base/share/classes/java/lang/reflect/Executable.java b/src/java.base/share/classes/java/lang/reflect/Executable.java
index 67af84c166d..def7500f2ad 100644
--- a/src/java.base/share/classes/java/lang/reflect/Executable.java
+++ b/src/java.base/share/classes/java/lang/reflect/Executable.java
@@ -307,12 +307,12 @@ Type[] getAllGenericParameterTypes() {
final boolean realParamData = hasRealParameterData();
final Type[] genericParamTypes = getGenericParameterTypes();
final Type[] nonGenericParamTypes = getParameterTypes();
- final Type[] out = new Type[nonGenericParamTypes.length];
- final Parameter[] params = getParameters();
- int fromidx = 0;
// If we have real parameter data, then we use the
// synthetic and mandate flags to our advantage.
if (realParamData) {
+ final Type[] out = new Type[nonGenericParamTypes.length];
+ final Parameter[] params = getParameters();
+ int fromidx = 0;
for (int i = 0; i < out.length; i++) {
final Parameter param = params[i];
if (param.isSynthetic() || param.isImplicit()) {
@@ -325,6 +325,7 @@ Type[] getAllGenericParameterTypes() {
fromidx++;
}
}
+ return out;
} else {
// Otherwise, use the non-generic parameter data.
// Without method parameter reflection data, we have
@@ -334,7 +335,6 @@ Type[] getAllGenericParameterTypes() {
return genericParamTypes.length == nonGenericParamTypes.length ?
genericParamTypes : nonGenericParamTypes;
}
- return out;
}
}
diff --git a/src/java.base/share/classes/java/net/URLConnection.java b/src/java.base/share/classes/java/net/URLConnection.java
index 810885525ed..4f2175522c8 100644
--- a/src/java.base/share/classes/java/net/URLConnection.java
+++ b/src/java.base/share/classes/java/net/URLConnection.java
@@ -667,12 +667,15 @@ public long getHeaderFieldDate(String name, long Default) {
/**
* Returns the key for the {@code n}th header field.
- * It returns {@code null} if there are fewer than {@code n+1} fields.
+ * Some implementations may treat the {@code 0}th
+ * header field as special, in which case, {@link #getHeaderField(int) getHeaderField(0)}
+ * may return some value, but {@code getHeaderFieldKey(0)} returns {@code null}.
+ * For {@code n > 0 } it returns {@code null} if there are fewer than {@code n+1} fields.
*
* @param n an index, where {@code n>=0}
* @return the key for the {@code n}th header field,
* or {@code null} if there are fewer than {@code n+1}
- * fields.
+ * fields when {@code n > 0}.
*/
public String getHeaderFieldKey(int n) {
return null;
diff --git a/src/java.base/share/classes/java/util/HashMap.java b/src/java.base/share/classes/java/util/HashMap.java
index 0a18b651eb9..77743d680cc 100644
--- a/src/java.base/share/classes/java/util/HashMap.java
+++ b/src/java.base/share/classes/java/util/HashMap.java
@@ -555,20 +555,19 @@ public boolean isEmpty() {
*/
public V get(Object key) {
Node<K,V> e;
- return (e = getNode(hash(key), key)) == null ? null : e.value;
+ return (e = getNode(key)) == null ? null : e.value;
}
/**
* Implements Map.get and related methods.
*
- * @param hash hash for key
* @param key the key
* @return the node, or null if none
*/
- final Node<K,V> getNode(int hash, Object key) {
- Node<K,V>[] tab; Node<K,V> first, e; int n; K k;
+ final Node<K,V> getNode(Object key) {
+ Node<K,V>[] tab; Node<K,V> first, e; int n, hash; K k;
if ((tab = table) != null && (n = tab.length) > 0 &&
- (first = tab[(n - 1) & hash]) != null) {
+ (first = tab[(n - 1) & (hash = hash(key))]) != null) {
if (first.hash == hash && // always check first node
((k = first.key) == key || (key != null && key.equals(k))))
return first;
@@ -594,7 +593,7 @@ final Node<K,V> getNode(int hash, Object key) {
* key.
*/
public boolean containsKey(Object key) {
- return getNode(hash(key), key) != null;
+ return getNode(key) != null;
}
/**
@@ -1105,7 +1104,7 @@ public final boolean contains(Object o) {
return false;
Map.Entry<?,?> e = (Map.Entry<?,?>) o;
Object key = e.getKey();
- Node<K,V> candidate = getNode(hash(key), key);
+ Node<K,V> candidate = getNode(key);
return candidate != null && candidate.equals(e);
}
public final boolean remove(Object o) {
@@ -1141,7 +1140,7 @@ public final void forEach(Consumer<? super Map.Entry<K,V>> action) {
@Override
public V getOrDefault(Object key, V defaultValue) {
Node<K,V> e;
- return (e = getNode(hash(key), key)) == null ? defaultValue : e.value;
+ return (e = getNode(key)) == null ? defaultValue : e.value;
}
@Override
@@ -1157,7 +1156,7 @@ public boolean remove(Object key, Object value) {
@Override
public boolean replace(K key, V oldValue, V newValue) {
Node<K,V> e; V v;
- if ((e = getNode(hash(key), key)) != null &&
+ if ((e = getNode(key)) != null &&
((v = e.value) == oldValue || (v != null && v.equals(oldValue)))) {
e.value = newValue;
afterNodeAccess(e);
@@ -1169,7 +1168,7 @@ public boolean replace(K key, V oldValue, V newValue) {
@Override
public V replace(K key, V value) {
Node<K,V> e;
- if ((e = getNode(hash(key), key)) != null) {
+ if ((e = getNode(key)) != null) {
V oldValue = e.value;
e.value = value;
afterNodeAccess(e);
@@ -1260,8 +1259,7 @@ public V computeIfPresent(K key,
if (remappingFunction == null)
throw new NullPointerException();
Node<K,V> e; V oldValue;
- int hash = hash(key);
- if ((e = getNode(hash, key)) != null &&
+ if ((e = getNode(key)) != null &&
(oldValue = e.value) != null) {
int mc = modCount;
V v = remappingFunction.apply(key, oldValue);
@@ -1271,8 +1269,10 @@ public V computeIfPresent(K key,
afterNodeAccess(e);
return v;
}
- else
+ else {
+ int hash = hash(key);
removeNode(hash, key, null, false, true);
+ }
}
return null;
}
diff --git a/src/java.base/share/classes/java/util/LinkedHashMap.java b/src/java.base/share/classes/java/util/LinkedHashMap.java
index 550d99412a3..9c40ea8e05c 100644
--- a/src/java.base/share/classes/java/util/LinkedHashMap.java
+++ b/src/java.base/share/classes/java/util/LinkedHashMap.java
@@ -438,7 +438,7 @@ public boolean containsValue(Object value) {
*/
public V get(Object key) {
Node<K,V> e;
- if ((e = getNode(hash(key), key)) == null)
+ if ((e = getNode(key)) == null)
return null;
if (accessOrder)
afterNodeAccess(e);
@@ -450,7 +450,7 @@ public V get(Object key) {
*/
public V getOrDefault(Object key, V defaultValue) {
Node<K,V> e;
- if ((e = getNode(hash(key), key)) == null)
+ if ((e = getNode(key)) == null)
return defaultValue;
if (accessOrder)
afterNodeAccess(e);
@@ -685,7 +685,7 @@ public final boolean contains(Object o) {
return false;
Map.Entry<?,?> e = (Map.Entry<?,?>) o;
Object key = e.getKey();
- Node<K,V> candidate = getNode(hash(key), key);
+ Node<K,V> candidate = getNode(key);
return candidate != null && candidate.equals(e);
}
public final boolean remove(Object o) {
diff --git a/src/java.base/share/classes/jdk/internal/access/JavaLangAccess.java b/src/java.base/share/classes/jdk/internal/access/JavaLangAccess.java
index 729bd7bc21c..c89275a335c 100644
--- a/src/java.base/share/classes/jdk/internal/access/JavaLangAccess.java
+++ b/src/java.base/share/classes/jdk/internal/access/JavaLangAccess.java
@@ -338,6 +338,11 @@ public interface JavaLangAccess {
*/
long stringConcatInitialCoder();
+ /**
+ * Update lengthCoder for constant
+ */
+ long stringConcatMix(long lengthCoder, String constant);
+
/*
* Get the class data associated with the given class.
* @param c the class
diff --git a/src/java.base/share/classes/sun/net/www/URLConnection.java b/src/java.base/share/classes/sun/net/www/URLConnection.java
index f8901217bb1..fdfe2bfe44a 100644
--- a/src/java.base/share/classes/sun/net/www/URLConnection.java
+++ b/src/java.base/share/classes/sun/net/www/URLConnection.java
@@ -25,6 +25,7 @@
package sun.net.www;
+import java.io.IOException;
import java.net.URL;
import java.util.*;
@@ -109,6 +110,26 @@ public String getHeaderField(String name) {
return properties == null ? null : properties.findValue(name);
}
+
+ Map<String, List<String>> headerFields;
+
+ @Override
+ public Map<String, List<String>> getHeaderFields() {
+ if (headerFields == null) {
+ try {
+ getInputStream();
+ if (properties == null) {
+ headerFields = super.getHeaderFields();
+ } else {
+ headerFields = properties.getHeaders();
+ }
+ } catch (IOException e) {
+ return super.getHeaderFields();
+ }
+ }
+ return headerFields;
+ }
+
/**
* Return the key for the nth header field. Returns null if
* there are fewer than n fields. This can be used to iterate
diff --git a/src/java.base/share/classes/sun/net/www/protocol/file/FileURLConnection.java b/src/java.base/share/classes/sun/net/www/protocol/file/FileURLConnection.java
index 0996aa6fd20..c6878a63699 100644
--- a/src/java.base/share/classes/sun/net/www/protocol/file/FileURLConnection.java
+++ b/src/java.base/share/classes/sun/net/www/protocol/file/FileURLConnection.java
@@ -138,6 +138,11 @@ private void initializeHeaders() {
}
}
+ public Map<String, List<String>> getHeaderFields() {
+ initializeHeaders();
+ return super.getHeaderFields();
+ }
+
public String getHeaderField(String name) {
initializeHeaders();
return super.getHeaderField(name);
diff --git a/src/java.base/share/native/libjli/java.c b/src/java.base/share/native/libjli/java.c
index 637c2092ecd..3485bf4e670 100644
--- a/src/java.base/share/native/libjli/java.c
+++ b/src/java.base/share/native/libjli/java.c
@@ -292,7 +292,7 @@ JLI_Launch(int argc, char ** argv, /* main argc, argv */
ifn.GetDefaultJavaVMInitArgs = 0;
if (JLI_IsTraceLauncher()) {
- start = CounterGet();
+ start = CurrentTimeMicros();
}
if (!LoadJavaVM(jvmpath, &ifn)) {
@@ -300,11 +300,10 @@ JLI_Launch(int argc, char ** argv, /* main argc, argv */
}
if (JLI_IsTraceLauncher()) {
- end = CounterGet();
+ end = CurrentTimeMicros();
}
- JLI_TraceLauncher("%ld micro seconds to LoadJavaVM\n",
- (long)(jint)Counter2Micros(end-start));
+ JLI_TraceLauncher("%ld micro seconds to LoadJavaVM\n", (long)(end-start));
++argv;
--argc;
@@ -413,7 +412,7 @@ JavaMain(void* _args)
RegisterThread();
/* Initialize the virtual machine */
- start = CounterGet();
+ start = CurrentTimeMicros();
if (!InitializeJVM(&vm, &env, &ifn)) {
JLI_ReportErrorMessage(JVM_ERROR1);
exit(1);
@@ -467,9 +466,8 @@ JavaMain(void* _args)
FreeKnownVMs(); /* after last possible PrintUsage */
if (JLI_IsTraceLauncher()) {
- end = CounterGet();
- JLI_TraceLauncher("%ld micro seconds to InitializeJVM\n",
- (long)(jint)Counter2Micros(end-start));
+ end = CurrentTimeMicros();
+ JLI_TraceLauncher("%ld micro seconds to InitializeJVM\n", (long)(end-start));
}
/* At this stage, argc/argv have the application's arguments */
@@ -1622,7 +1620,7 @@ LoadMainClass(JNIEnv *env, int mode, char *name)
jclass cls = GetLauncherHelperClass(env);
NULL_CHECK0(cls);
if (JLI_IsTraceLauncher()) {
- start = CounterGet();
+ start = CurrentTimeMicros();
}
NULL_CHECK0(mid = (*env)->GetStaticMethodID(env, cls,
"checkAndLoadMain",
@@ -1633,9 +1631,8 @@ LoadMainClass(JNIEnv *env, int mode, char *name)
USE_STDERR, mode, str));
if (JLI_IsTraceLauncher()) {
- end = CounterGet();
- printf("%ld micro seconds to load main class\n",
- (long)(jint)Counter2Micros(end-start));
+ end = CurrentTimeMicros();
+ printf("%ld micro seconds to load main class\n", (long)(end-start));
printf("----%s----\n", JLDEBUG_ENV_ENTRY);
}
@@ -2087,7 +2084,7 @@ ReadKnownVMs(const char *jvmCfgName, jboolean speculative)
char *serverClassVMName = NULL;
static char *whiteSpace = " \t";
if (JLI_IsTraceLauncher()) {
- start = CounterGet();
+ start = CurrentTimeMicros();
}
jvmCfg = fopen(jvmCfgName, "r");
@@ -2172,9 +2169,8 @@ ReadKnownVMs(const char *jvmCfgName, jboolean speculative)
knownVMsCount = cnt;
if (JLI_IsTraceLauncher()) {
- end = CounterGet();
- printf("%ld micro seconds to parse jvm.cfg\n",
- (long)(jint)Counter2Micros(end-start));
+ end = CurrentTimeMicros();
+ printf("%ld micro seconds to parse jvm.cfg\n", (long)(end-start));
}
return cnt;
diff --git a/src/java.base/share/native/libjli/java.h b/src/java.base/share/native/libjli/java.h
index d5506b35601..f768b58a001 100644
--- a/src/java.base/share/native/libjli/java.h
+++ b/src/java.base/share/native/libjli/java.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -170,6 +170,7 @@ jint ReadKnownVMs(const char *jvmcfg, jboolean speculative);
char *CheckJvmType(int *argc, char ***argv, jboolean speculative);
void AddOption(char *str, void *info);
jboolean IsWhiteSpaceOption(const char* name);
+jlong CurrentTimeMicros();
// Utility function defined in args.c
int isTerminalOpt(char *arg);
diff --git a/src/java.base/unix/classes/sun/nio/fs/UnixChannelFactory.java b/src/java.base/unix/classes/sun/nio/fs/UnixChannelFactory.java
index 103d157a4d1..043b4a3da64 100644
--- a/src/java.base/unix/classes/sun/nio/fs/UnixChannelFactory.java
+++ b/src/java.base/unix/classes/sun/nio/fs/UnixChannelFactory.java
@@ -103,16 +103,6 @@ static Flags toFlags(Set<? extends OpenOption> options) {
}
}
-
- /**
- * Constructs a file channel from an existing (open) file descriptor
- */
- static FileChannel newFileChannel(int fd, String path, boolean reading, boolean writing) {
- FileDescriptor fdObj = new FileDescriptor();
- fdAccess.set(fdObj, fd);
- return FileChannelImpl.open(fdObj, path, reading, writing, false, null);
- }
-
/**
* Constructs a file channel by opening a file using a dfd/path pair
*/
diff --git a/src/java.base/unix/classes/sun/nio/fs/UnixException.java b/src/java.base/unix/classes/sun/nio/fs/UnixException.java
index 540578d8251..9c90911f9cf 100644
--- a/src/java.base/unix/classes/sun/nio/fs/UnixException.java
+++ b/src/java.base/unix/classes/sun/nio/fs/UnixException.java
@@ -100,11 +100,6 @@ private IOException translateToIOException(String file, String other) {
return new FileSystemException(file, other, errorString());
}
- void rethrowAsIOException(String file) throws IOException {
- IOException x = translateToIOException(file, null);
- throw x;
- }
-
void rethrowAsIOException(UnixPath file, UnixPath other) throws IOException {
String a = (file == null) ? null : file.getPathForExceptionMessage();
String b = (other == null) ? null : other.getPathForExceptionMessage();
diff --git a/src/java.base/unix/classes/sun/nio/fs/UnixFileSystemProvider.java b/src/java.base/unix/classes/sun/nio/fs/UnixFileSystemProvider.java
index e92711f0168..87284a9feb7 100644
--- a/src/java.base/unix/classes/sun/nio/fs/UnixFileSystemProvider.java
+++ b/src/java.base/unix/classes/sun/nio/fs/UnixFileSystemProvider.java
@@ -34,7 +34,6 @@
import java.io.IOException;
import java.io.FilePermission;
import java.util.*;
-import java.security.AccessController;
import sun.nio.ch.ThreadPool;
import sun.security.util.SecurityConstants;
diff --git a/src/java.base/unix/classes/sun/nio/fs/UnixUserPrincipals.java b/src/java.base/unix/classes/sun/nio/fs/UnixUserPrincipals.java
index 603933992ca..b7a9e7692f0 100644
--- a/src/java.base/unix/classes/sun/nio/fs/UnixUserPrincipals.java
+++ b/src/java.base/unix/classes/sun/nio/fs/UnixUserPrincipals.java
@@ -67,10 +67,6 @@ int gid() {
throw new AssertionError();
}
- boolean isSpecial() {
- return id == -1;
- }
-
@Override
public String getName() {
return name;
@@ -113,7 +109,7 @@ static class Group extends User implements GroupPrincipal {
// return UserPrincipal representing given uid
static User fromUid(int uid) {
- String name = null;
+ String name;
try {
name = Util.toString(getpwuid(uid));
} catch (UnixException x) {
@@ -124,7 +120,7 @@ static User fromUid(int uid) {
// return GroupPrincipal representing given gid
static Group fromGid(int gid) {
- String name = null;
+ String name;
try {
name = Util.toString(getgrgid(gid));
} catch (UnixException x) {
@@ -141,7 +137,7 @@ private static int lookupName(String name, boolean isGroup)
if (sm != null) {
sm.checkPermission(new RuntimePermission("lookupUserInformation"));
}
- int id = -1;
+ int id;
try {
id = (isGroup) ? getgrnam(name) : getpwnam(name);
} catch (UnixException x) {
diff --git a/src/java.base/unix/native/libjli/java_md_solinux.c b/src/java.base/unix/native/libjli/java_md.c
similarity index 97%
rename from src/java.base/unix/native/libjli/java_md_solinux.c
rename to src/java.base/unix/native/libjli/java_md.c
index 66148343173..822026fb23c 100644
--- a/src/java.base/unix/native/libjli/java_md_solinux.c
+++ b/src/java.base/unix/native/libjli/java_md.c
@@ -709,20 +709,3 @@ ProcessPlatformOption(const char *arg)
{
return JNI_FALSE;
}
-
-/*
- * Provide a CounterGet() implementation based on gettimeofday() which
- * is universally available, even though it may not be 'high resolution'
- * compared to platforms that provide gethrtime() (like Solaris). It is
- * also subject to time-of-day changes, but alternatives may not be
- * known to be available at either build time or run time.
- */
-uint64_t CounterGet() {
- uint64_t result = 0;
- struct timeval tv;
- if (gettimeofday(&tv, NULL) != -1) {
- result = 1000000LL * (uint64_t)tv.tv_sec;
- result += (uint64_t)tv.tv_usec;
- }
- return result;
-}
diff --git a/src/java.base/unix/native/libjli/java_md.h b/src/java.base/unix/native/libjli/java_md.h
index be3cc0d0e36..e3600f67347 100644
--- a/src/java.base/unix/native/libjli/java_md.h
+++ b/src/java.base/unix/native/libjli/java_md.h
@@ -32,6 +32,8 @@
#include
#include
#include
+#include
+#include
#include "manifest_info.h"
#include "jli_util.h"
@@ -61,9 +63,11 @@ static jboolean GetJREPath(char *path, jint pathsize, jboolean speculative);
#include "java_md_aix.h"
#endif
-#ifdef MACOSX
-#include "java_md_macosx.h"
-#else /* !MACOSX */
-#include "java_md_solinux.h"
-#endif /* MACOSX */
+#if defined(MACOSX)
+#include <crt_externs.h>
+#define environ (*_NSGetEnviron())
+#else
+extern char **environ;
+#endif
+
#endif /* JAVA_MD_H */
diff --git a/src/java.base/unix/native/libjli/java_md_common.c b/src/java.base/unix/native/libjli/java_md_common.c
index d1d654efcf3..9651036a730 100644
--- a/src/java.base/unix/native/libjli/java_md_common.c
+++ b/src/java.base/unix/native/libjli/java_md_common.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,6 +22,7 @@
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
+#include <sys/time.h>
#include "java.h"
/*
@@ -364,3 +365,20 @@ CreateApplicationArgs(JNIEnv *env, char **strv, int argc)
{
return NewPlatformStringArray(env, strv, argc);
}
+
+/*
+ * Provide a CurrentTimeMicros() implementation based on gettimeofday() which
+ * is universally available, even though it may not be 'high resolution'
+ * compared to platforms that provide gethrtime() (like Solaris). It is
+ * also subject to time-of-day changes, but alternatives may not be
+ * known to be available at either build time or run time.
+ */
+jlong CurrentTimeMicros() {
+ jlong result = 0;
+ struct timeval tv;
+ if (gettimeofday(&tv, NULL) != -1) {
+ result = 1000000LL * (jlong)tv.tv_sec;
+ result += (jlong)tv.tv_usec;
+ }
+ return result;
+}
diff --git a/src/java.base/windows/native/libjli/java_md.c b/src/java.base/windows/native/libjli/java_md.c
index 053787d1d11..241bd89ad9f 100644
--- a/src/java.base/windows/native/libjli/java_md.c
+++ b/src/java.base/windows/native/libjli/java_md.c
@@ -459,7 +459,7 @@ static jboolean counterAvailable = JNI_FALSE;
static jboolean counterInitialized = JNI_FALSE;
static LARGE_INTEGER counterFrequency;
-jlong CounterGet()
+jlong CurrentTimeMicros()
{
LARGE_INTEGER count;
@@ -471,16 +471,10 @@ jlong CounterGet()
return 0;
}
QueryPerformanceCounter(&count);
- return (jlong)(count.QuadPart);
-}
-jlong Counter2Micros(jlong counts)
-{
- if (!counterAvailable || !counterInitialized) {
- return 0;
- }
- return (counts * 1000 * 1000)/counterFrequency.QuadPart;
+ return (jlong)(count.QuadPart * 1000 * 1000 / counterFrequency.QuadPart);
}
+
/*
* windows snprintf does not guarantee a null terminator in the buffer,
* if the computed size is equal to or greater than the buffer size,
diff --git a/src/java.base/windows/native/libjli/java_md.h b/src/java.base/windows/native/libjli/java_md.h
index a2392eba8fc..9b551bfe160 100644
--- a/src/java.base/windows/native/libjli/java_md.h
+++ b/src/java.base/windows/native/libjli/java_md.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -41,13 +41,6 @@
#define JLONG_FORMAT_SPECIFIER "%I64d"
-/*
- * Support for doing cheap, accurate interval timing.
- */
-extern jlong CounterGet(void);
-extern jlong Counter2Micros(jlong counts);
-
-
/*
* Function prototypes.
*/
diff --git a/src/java.instrument/share/classes/java/lang/instrument/Instrumentation.java b/src/java.instrument/share/classes/java/lang/instrument/Instrumentation.java
index 8bd4163726c..06a5bc6bc7a 100644
--- a/src/java.instrument/share/classes/java/lang/instrument/Instrumentation.java
+++ b/src/java.instrument/share/classes/java/lang/instrument/Instrumentation.java
@@ -222,13 +222,8 @@ public interface Instrumentation {
* Instances of the retransformed class are not affected.
*
*
- * The retransformation may change method bodies, the constant pool and
- * attributes (unless explicitly prohibited).
- * The retransformation must not add, remove or rename fields or methods, change the
- * signatures of methods, or change inheritance.
- * The retransformation must not change the <code>NestHost</code>,
- * <code>NestMembers</code>, or <code>Record</code> attributes.
- * These restrictions may be lifted in future versions.
+ * The supported class file changes are described in
+ * JVM TI RetransformClasses.
* The class file bytes are not checked, verified and installed
* until after the transformations have been applied, if the resultant bytes are in
* error this method will throw an exception.
@@ -313,13 +308,8 @@ public interface Instrumentation {
* Instances of the redefined class are not affected.
*
*
- * The redefinition may change method bodies, the constant pool and attributes
- * (unless explicitly prohibited).
- * The redefinition must not add, remove or rename fields or methods, change the
- * signatures of methods, or change inheritance.
- * The redefinition must not change the <code>NestHost</code>,
- * <code>NestMembers</code>, or <code>Record</code> attributes.
- * These restrictions may be lifted in future versions.
+ * The supported class file changes are described in
+ * JVM TI RedefineClasses.
* The class file bytes are not checked, verified and installed
* until after the transformations have been applied, if the resultant bytes are in
* error this method will throw an exception.
diff --git a/src/java.logging/share/classes/java/util/logging/Logger.java b/src/java.logging/share/classes/java/util/logging/Logger.java
index d75fd6f27b2..a271dcc902d 100644
--- a/src/java.logging/share/classes/java/util/logging/Logger.java
+++ b/src/java.logging/share/classes/java/util/logging/Logger.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -447,7 +447,7 @@ ConfigurationData merge(Logger systemPeer) {
private boolean anonymous;
// Cache to speed up behavior of findResourceBundle:
- private ResourceBundle catalog; // Cached resource bundle
+ private WeakReference<ResourceBundle> catalogRef; // Cached resource bundle
private String catalogName; // name associated with catalog
private Locale catalogLocale; // locale associated with catalog
@@ -2122,6 +2122,11 @@ public boolean getUseParentHandlers() {
return config.useParentHandlers;
}
+ private ResourceBundle catalog() {
+ WeakReference<ResourceBundle> ref = catalogRef;
+ return ref == null ? null : ref.get();
+ }
+
/**
* Private utility method to map a resource bundle name to an
* actual resource bundle, using a simple one-entry cache.
@@ -2161,13 +2166,14 @@ private synchronized ResourceBundle findResourceBundle(String name,
Locale currentLocale = Locale.getDefault();
final LoggerBundle lb = loggerBundle;
+ ResourceBundle catalog = catalog();
// Normally we should hit on our simple one entry cache.
if (lb.userBundle != null &&
name.equals(lb.resourceBundleName)) {
return lb.userBundle;
} else if (catalog != null && currentLocale.equals(catalogLocale)
- && name.equals(catalogName)) {
+ && name.equals(catalogName)) {
return catalog;
}
@@ -2187,6 +2193,7 @@ private synchronized ResourceBundle findResourceBundle(String name,
try {
Module mod = cl.getUnnamedModule();
catalog = RbAccess.RB_ACCESS.getBundle(name, currentLocale, mod);
+ catalogRef = new WeakReference<>(catalog);
catalogName = name;
catalogLocale = currentLocale;
return catalog;
@@ -2214,6 +2221,7 @@ private synchronized ResourceBundle findResourceBundle(String name,
// with the module's loader this time.
catalog = ResourceBundle.getBundle(name, currentLocale,
moduleCL);
+ catalogRef = new WeakReference<>(catalog);
catalogName = name;
catalogLocale = currentLocale;
return catalog;
@@ -2231,6 +2239,7 @@ private synchronized ResourceBundle findResourceBundle(String name,
try {
// Use the caller's module
catalog = RbAccess.RB_ACCESS.getBundle(name, currentLocale, callerModule);
+ catalogRef = new WeakReference<>(catalog);
catalogName = name;
catalogLocale = currentLocale;
return catalog;
diff --git a/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Modules.java b/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Modules.java
index 164851e96dd..30ed22e9020 100644
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Modules.java
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Modules.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -215,8 +215,6 @@ protected Modules(Context context) {
limitModsOpt = options.get(Option.LIMIT_MODULES);
moduleVersionOpt = options.get(Option.MODULE_VERSION);
}
- //where
- private static final String XMODULES_PREFIX = "-Xmodule:";
int depth = -1;
diff --git a/src/jdk.compiler/share/classes/com/sun/tools/javac/jvm/Profile.java b/src/jdk.compiler/share/classes/com/sun/tools/javac/jvm/Profile.java
index 0fbdfb6b53a..32f314f9852 100644
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/jvm/Profile.java
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/jvm/Profile.java
@@ -40,12 +40,9 @@
* deletion without notice.
*/
public enum Profile {
- COMPACT1("compact1", 1, Target.JDK1_8, Target.JDK1_9, Target.JDK1_10,
- Target.JDK1_11, Target.JDK1_12, Target.JDK1_13, Target.JDK1_14, Target.JDK1_15),
- COMPACT2("compact2", 2, Target.JDK1_8, Target.JDK1_9, Target.JDK1_10,
- Target.JDK1_11, Target.JDK1_12, Target.JDK1_13, Target.JDK1_14, Target.JDK1_15),
- COMPACT3("compact3", 3, Target.JDK1_8, Target.JDK1_9, Target.JDK1_10,
- Target.JDK1_11, Target.JDK1_12, Target.JDK1_13, Target.JDK1_14, Target.JDK1_15),
+ COMPACT1("compact1", 1, Target.JDK1_8),
+ COMPACT2("compact2", 2, Target.JDK1_8),
+ COMPACT3("compact3", 3, Target.JDK1_8),
DEFAULT {
@Override
diff --git a/src/jdk.compiler/share/classes/com/sun/tools/javac/main/Arguments.java b/src/jdk.compiler/share/classes/com/sun/tools/javac/main/Arguments.java
index dbd82e34ab4..00e2d435ddc 100644
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/main/Arguments.java
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/main/Arguments.java
@@ -547,7 +547,8 @@ public boolean validate() {
String profileString = options.get(Option.PROFILE);
if (profileString != null) {
Profile profile = Profile.lookup(profileString);
- if (!profile.isValid(target)) {
+ if (target.compareTo(Target.JDK1_8) <= 0 && !profile.isValid(target)) {
+ // note: -profile not permitted for target >= 9, so error (below) not warning (here)
reportDiag(Warnings.ProfileTargetConflict(profile, target));
}
diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/classfile/ClassLoaderData.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/classfile/ClassLoaderData.java
index af3d7d9cb2a..97fbdb34ffc 100644
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/classfile/ClassLoaderData.java
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/classfile/ClassLoaderData.java
@@ -92,7 +92,11 @@ public ClassLoaderData next() {
public Klass find(String className) {
for (Klass l = getKlasses(); l != null; l = l.getNextLinkKlass()) {
if (l.getName().equals(className)) {
- return l;
+ if (l instanceof InstanceKlass && !((InstanceKlass)l).isLoaded()) {
+ return null; // don't return partially loaded classes
+ } else {
+ return l;
+ }
}
}
return null;
@@ -102,14 +106,17 @@ public Klass find(String className) {
array klasses */
public void classesDo(ClassLoaderDataGraph.ClassVisitor v) {
for (Klass l = getKlasses(); l != null; l = l.getNextLinkKlass()) {
+ // Only visit InstanceKlasses that are at least in the "loaded" init_state. Otherwise
+ // the InstanceKlass won't have some required fields initialized, which can cause problems.
+ if (l instanceof InstanceKlass && !((InstanceKlass)l).isLoaded()) {
+ continue;
+ }
v.visit(l);
}
}
/** Iterate over all klasses in the dictionary, including initiating loader. */
public void allEntriesDo(ClassLoaderDataGraph.ClassAndLoaderVisitor v) {
- for (Klass l = getKlasses(); l != null; l = l.getNextLinkKlass()) {
- dictionary().allEntriesDo(v, getClassLoader());
- }
+ dictionary().allEntriesDo(v, getClassLoader());
}
}
diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/Dictionary.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/Dictionary.java
index 439bea8bf32..e6b8bdb05d9 100644
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/Dictionary.java
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/Dictionary.java
@@ -65,6 +65,11 @@ public void allEntriesDo(ClassLoaderDataGraph.ClassAndLoaderVisitor v, Oop loade
for (DictionaryEntry probe = (DictionaryEntry) bucket(index); probe != null;
probe = (DictionaryEntry) probe.next()) {
Klass k = probe.klass();
+ // Only visit InstanceKlasses that are at least in the "loaded" init_state. Otherwise
+ // the InstanceKlass won't have some required fields initialized, which can cause problems.
+ if (k instanceof InstanceKlass && !((InstanceKlass)k).isLoaded()) {
+ continue;
+ }
v.visit(k, loader);
}
}
diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java
index 84c83dee609..9497e80944f 100644
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java
@@ -145,6 +145,18 @@ private static synchronized void initialize(TypeDataBase db) throws WrongTypeExc
public InstanceKlass(Address addr) {
super(addr);
+
+ // If the class hasn't yet reached the "loaded" init state, then don't go any further
+ // or we'll run into problems trying to look at fields that are not yet setup.
+ // Attempted lookups of this InstanceKlass via ClassLoaderDataGraph, ClassLoaderData,
+ // and Dictionary will all refuse to return it. The main purpose of allowing this
+ // InstanceKlass to initialize is so ClassLoaderData.getKlasses() will succeed, allowing
+ // ClassLoaderData.classesDo() to iterate over all Klasses (skipping those that are
+ // not yet fully loaded).
+ if (!isLoaded()) {
+ return;
+ }
+
if (getJavaFieldsCount() != getAllFieldsCount()) {
// Exercise the injected field logic
for (int i = getJavaFieldsCount(); i < getAllFieldsCount(); i++) {
diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java
index 135c4f45457..eaae166e29d 100644
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java
@@ -76,7 +76,6 @@ public class VM {
private boolean isBigEndian;
/** This is only present if in a debugging system */
private JVMDebugger debugger;
- private long stackBias;
private long logAddressSize;
private Universe universe;
private ObjectHeap heap;
@@ -443,7 +442,6 @@ private VM(TypeDataBase db, JVMDebugger debugger, boolean isBigEndian) {
checkVMVersion(vmRelease);
- stackBias = db.lookupIntConstant("STACK_BIAS").intValue();
invocationEntryBCI = db.lookupIntConstant("InvocationEntryBci").intValue();
// We infer the presence of JVMTI from the presence of the InstanceKlass::_breakpoints field.
@@ -672,11 +670,6 @@ public long getIntSize() {
return db.getJIntType().getSize();
}
- /** NOTE: this offset is in BYTES in this system! */
- public long getStackBias() {
- return stackBias;
- }
-
/** Indicates whether the underlying machine supports the LP64 data
model. This is needed for conditionalizing code in a few places */
public boolean isLP64() {
diff --git a/src/jdk.incubator.foreign/share/classes/jdk/incubator/foreign/AbstractLayout.java b/src/jdk.incubator.foreign/share/classes/jdk/incubator/foreign/AbstractLayout.java
index b38fcff8b9e..b5b0c97ec29 100644
--- a/src/jdk.incubator.foreign/share/classes/jdk/incubator/foreign/AbstractLayout.java
+++ b/src/jdk.incubator.foreign/share/classes/jdk/incubator/foreign/AbstractLayout.java
@@ -48,12 +48,10 @@
import static java.lang.constant.ConstantDescs.CD_long;
abstract class AbstractLayout implements MemoryLayout {
- // memory layout attribute key for layout name
- static final String NAME = "name";
private final OptionalLong size;
final long alignment;
- protected final Map<String, Constable> attributes;
+ final Map<String, Constable> attributes;
public AbstractLayout(OptionalLong size, long alignment, Map<String, Constable> attributes) {
this.size = size;
@@ -63,12 +61,12 @@ public AbstractLayout(OptionalLong size, long alignment, Map
@Override
public AbstractLayout withName(String name) {
- return withAttribute(NAME, name);
+ return withAttribute(LAYOUT_NAME, name);
}
@Override
public final Optional<String> name() {
- return attribute(NAME).map(String.class::cast);
+ return attribute(LAYOUT_NAME).map(String.class::cast);
}
@Override
diff --git a/src/jdk.incubator.foreign/share/classes/jdk/incubator/foreign/MappedMemorySegment.java b/src/jdk.incubator.foreign/share/classes/jdk/incubator/foreign/MappedMemorySegment.java
index 9f3cda6e3cc..d84923d4f2c 100644
--- a/src/jdk.incubator.foreign/share/classes/jdk/incubator/foreign/MappedMemorySegment.java
+++ b/src/jdk.incubator.foreign/share/classes/jdk/incubator/foreign/MappedMemorySegment.java
@@ -32,8 +32,8 @@
* A mapped memory segment, that is, a memory segment backed by memory-mapped file.
*
* Mapped memory segments are created via the {@link MemorySegment#mapFromPath(Path, long, FileChannel.MapMode)}.
- * Mapped memory segments behave like ordinary segments, but also provide additional capabilities that are specific to
- * mapped memory segments, such as {@link #force()} and {@link #load()}.
+ * Mapped memory segments behave like ordinary segments, but provide additional capabilities to manipulate memory-mapped
+ * memory regions, such as {@link #force()} and {@link #load()}.
*
* All implementations of this interface must be value-based;
* use of identity-sensitive operations (including reference equality ({@code ==}), identity hash code, or synchronization) on
diff --git a/src/jdk.incubator.foreign/share/classes/jdk/incubator/foreign/MemoryHandles.java b/src/jdk.incubator.foreign/share/classes/jdk/incubator/foreign/MemoryHandles.java
index 7c345a8923a..55677cbcead 100644
--- a/src/jdk.incubator.foreign/share/classes/jdk/incubator/foreign/MemoryHandles.java
+++ b/src/jdk.incubator.foreign/share/classes/jdk/incubator/foreign/MemoryHandles.java
@@ -286,7 +286,7 @@ public static VarHandle withOffset(VarHandle target, long bytesOffset) {
* immediately following the leading access coordinate of type {@link MemoryAddress}.
*
* @param target the target memory access handle to access after the scale adjustment.
- * @param bytesStride the stride, in bytes, by which to multiply the coordinate value. Must be greater than zero.
+ * @param bytesStride the stride, in bytes, by which to multiply the coordinate value.
* @return the adapted var handle.
* @throws IllegalArgumentException if the first access coordinate type is not of type {@link MemoryAddress}.
*/
@@ -459,7 +459,7 @@ public static VarHandle filterValue(VarHandle target, MethodHandle filterToTarge
* For the coordinate filters to be well formed, their types must be of the form {@code S1 -> T1, S2 -> T1 ... Sn -> Tn},
* where {@code T1, T2 ... Tn} are the coordinate types starting at position {@code pos} of the target var handle.
*
- * The resulting var handle will feature the same access modes (see {@link java.lang.invoke.VarHandle.AccessMode} and
+ * The resulting var handle will feature the same access modes (see {@link java.lang.invoke.VarHandle.AccessMode}) and
* atomic access guarantees as those featured by the target var handle.
*
* @param target the target var handle
@@ -471,9 +471,8 @@ public static VarHandle filterValue(VarHandle target, MethodHandle filterToTarge
* @throws IllegalArgumentException if the handles in {@code filters} are not well-formed, that is, they have types
* other than {@code S1 -> T1, S2 -> T2, ... Sn -> Tn} where {@code T1, T2 ... Tn} are the coordinate types starting
* at position {@code pos} of the target var handle, if {@code pos} is not between 0 and the target var handle coordinate arity, inclusive,
- * or if more filters are provided than the actual number of coordinate types, or if any of the filters throws any
- * checked exceptions.
- * available starting at {@code pos}.
+ * or if more filters are provided than the actual number of coordinate types available starting at {@code pos},
+ * or if any of the filters throws any checked exceptions.
*/
public static VarHandle filterCoordinates(VarHandle target, int pos, MethodHandle... filters) {
return JLI.filterCoordinates(target, pos, filters);
@@ -490,7 +489,7 @@ public static VarHandle filterCoordinates(VarHandle target, int pos, MethodHandl
* For the bound coordinates to be well formed, their types must be {@code T1, T2 ... Tn },
* where {@code T1, T2 ... Tn} are the coordinate types starting at position {@code pos} of the target var handle.
*
- * The resulting var handle will feature the same access modes (see {@link java.lang.invoke.VarHandle.AccessMode} and
+ * The resulting var handle will feature the same access modes (see {@link java.lang.invoke.VarHandle.AccessMode}) and
* atomic access guarantees as those featured by the target var handle.
*
* @param target the var handle to invoke after the bound coordinates are inserted
@@ -533,7 +532,7 @@ public static VarHandle insertCoordinates(VarHandle target, int pos, Object... v
* more than once in the array, and an incoming coordinate will be dropped
* if its index does not appear in the array.
*
- * The resulting var handle will feature the same access modes (see {@link java.lang.invoke.VarHandle.AccessMode} and
+ * The resulting var handle will feature the same access modes (see {@link java.lang.invoke.VarHandle.AccessMode}) and
* atomic access guarantees as those featured by the target var handle.
* @param target the var handle to invoke after the coordinates have been reordered
* @param newCoordinates the new coordinate types
@@ -557,7 +556,7 @@ public static VarHandle permuteCoordinates(VarHandle target, List> newC
* filter function and the target var handle is then called on the modified (usually shortened)
* coordinate list.
*
- * If {code R} is the return type of the filter (which cannot be void), the target var handle must accept a value of
+ * If {@code R} is the return type of the filter (which cannot be void), the target var handle must accept a value of
* type {@code R} as its coordinate in position {@code pos}, preceded and/or followed by
* any coordinate not passed to the filter.
* No coordinates are reordered, and the result returned from the filter
@@ -571,7 +570,7 @@ public static VarHandle permuteCoordinates(VarHandle target, List> newC
* coordinate type of the target var handle at position {@code pos}, and that target var handle
* coordinate is supplied by the return value of the filter.
*
- * The resulting var handle will feature the same access modes (see {@link java.lang.invoke.VarHandle.AccessMode} and
+ * The resulting var handle will feature the same access modes (see {@link java.lang.invoke.VarHandle.AccessMode}) and
* atomic access guarantees as those featured by the target var handle.
*
* @param target the var handle to invoke after the coordinates have been filtered
@@ -599,7 +598,7 @@ public static VarHandle collectCoordinates(VarHandle target, int pos, MethodHand
* target var handle's coordinate types. If {@code pos} is zero, the dummy coordinates will precede
* the target's real arguments; if {@code pos} is N they will come after.
*
- * The resulting var handle will feature the same access modes (see {@link java.lang.invoke.VarHandle.AccessMode} and
+ * The resulting var handle will feature the same access modes (see {@link java.lang.invoke.VarHandle.AccessMode}) and
* atomic access guarantees as those featured by the target var handle.
*
* @param target the var handle to invoke after the dummy coordinates are dropped
diff --git a/src/jdk.incubator.foreign/share/classes/jdk/incubator/foreign/MemoryLayout.java b/src/jdk.incubator.foreign/share/classes/jdk/incubator/foreign/MemoryLayout.java
index c804e043923..4c3efa16668 100644
--- a/src/jdk.incubator.foreign/share/classes/jdk/incubator/foreign/MemoryLayout.java
+++ b/src/jdk.incubator.foreign/share/classes/jdk/incubator/foreign/MemoryLayout.java
@@ -168,6 +168,13 @@
* it follows that the memory access var handle {@code valueHandle} will feature an extra {@code long}
* access coordinate.
*
+ *
Layout attributes
+ *
+ * Layouts can be optionally associated with one or more attributes. A layout attribute forms a name/value
+ * pair, where the name is a {@link String} and the value is a {@link Constable}. The most common form of layout attribute
+ * is the layout name (see {@link #LAYOUT_NAME}), a custom name that can be associated to memory layouts and that can be referred to when
+ * constructing layout paths.
+ *
* @apiNote In the future, if the Java language permits, {@link MemoryLayout}
* may become a {@code sealed} interface, which would prohibit subclassing except by
* explicitly permitted types.
@@ -221,6 +228,11 @@ default long byteSize() {
/**
* Return the name (if any) associated with this layout.
+ *
+ * This is equivalent to the following code:
+ *
{@code
+ attribute(LAYOUT_NAME).map(String.class::cast);
+ * }
*
* @return the layout name (if any).
* @see MemoryLayout#withName(String)
@@ -229,6 +241,11 @@ default long byteSize() {
/**
* Creates a new layout which features the desired layout name.
+ *
+ * This is equivalent to the following code:
+ *
{@code
+ withAttribute(LAYOUT_NAME, name);
+ * }
*
* @param name the layout name.
* @return a new layout which is the same as this layout, except for the name associated to it.
@@ -286,26 +303,28 @@ default long byteAlignment() {
MemoryLayout withBitAlignment(long bitAlignment);
/**
- * Returns the attribute with the given name if it exists, or an empty optional
+ * Returns the attribute with the given name (if it exists).
*
- * @param name the name of the attribute
- * @return the optional attribute
+ * @param name the attribute name
+ * @return the attribute with the given name (if it exists).
*/
Optional attribute(String name);
/**
- * Returns a new MemoryLayout with the given additional attribute
+ * Returns a new memory layout which features the same attributes as this layout, plus the newly specified attribute.
+ * If this layout already contains an attribute with the same name, the existing attribute value is overwritten in the returned
+ * layout.
*
- * @param name the name of the attribute
- * @param value the value of the attribute
- * @return the new MemoryLayout
+ * @param name the attribute name.
+ * @param value the attribute value.
+ * @return a new memory layout which features the same attributes as this layout, plus the newly specified attribute.
*/
MemoryLayout withAttribute(String name, Constable value);
/**
- * Returns a stream of the names of the attributes of this layout
+ * Returns a stream of the attribute names associated with this layout.
*
- * @return the stream of names
+ * @return a stream of the attribute names associated with this layout.
*/
Stream attributes();
@@ -413,7 +432,7 @@ private static Z computePathOp(LayoutPath path, Function fina
}
/**
- * Is this a padding layout (e.g. a layout created from {@link #ofPaddingBits(long)} ?
+ * Is this a padding layout (e.g. a layout created from {@link #ofPaddingBits(long)}) ?
* @return true, if this layout is a padding layout.
*/
boolean isPadding();
@@ -616,4 +635,9 @@ static GroupLayout ofStruct(MemoryLayout... elements) {
static GroupLayout ofUnion(MemoryLayout... elements) {
return new GroupLayout(GroupLayout.Kind.UNION, List.of(elements));
}
+
+ /**
+ * Attribute name used to specify the name property of a memory layout (see {@link #name()} and {@link #withName(String)}).
+ */
+ String LAYOUT_NAME = "layout/name";
}
diff --git a/src/jdk.incubator.foreign/share/classes/jdk/incubator/foreign/MemorySegment.java b/src/jdk.incubator.foreign/share/classes/jdk/incubator/foreign/MemorySegment.java
index 3a1ef0fe811..0c9fab19cce 100644
--- a/src/jdk.incubator.foreign/share/classes/jdk/incubator/foreign/MemorySegment.java
+++ b/src/jdk.incubator.foreign/share/classes/jdk/incubator/foreign/MemorySegment.java
@@ -31,7 +31,6 @@
import jdk.internal.foreign.AbstractMemorySegmentImpl;
import jdk.internal.foreign.HeapMemorySegmentImpl;
import jdk.internal.foreign.MappedMemorySegmentImpl;
-import jdk.internal.foreign.MemoryAddressImpl;
import jdk.internal.foreign.NativeMemorySegmentImpl;
import jdk.internal.foreign.Utils;
@@ -111,21 +110,22 @@
* work in parallel on disjoint segment slices (this assumes that the access mode {@link #ACQUIRE} is set).
* For instance, the following code can be used to sum all int values in a memory segment in parallel:
* {@code
+MemorySegment segment = ...
SequenceLayout SEQUENCE_LAYOUT = MemoryLayout.ofSequence(1024, MemoryLayouts.JAVA_INT);
VarHandle VH_int = SEQUENCE_LAYOUT.elementLayout().varHandle(int.class);
-int sum = StreamSupport.stream(segment.spliterator(SEQUENCE_LAYOUT), true)
- .mapToInt(segment -> (int)VH_int.get(segment.baseAddress))
+int sum = StreamSupport.stream(MemorySegment.spliterator(segment, SEQUENCE_LAYOUT), true)
+ .mapToInt(s -> (int)VH_int.get(s.baseAddress()))
.sum();
* }
*
*
*
* Memory segments supports zero or more access modes. Supported access modes are {@link #READ},
- * {@link #WRITE}, {@link #CLOSE} and {@link #ACQUIRE}. The set of access modes supported by a segment alters the
+ * {@link #WRITE}, {@link #CLOSE}, {@link #ACQUIRE} and {@link #HANDOFF}. The set of access modes supported by a segment alters the
* set of operations that are supported by that segment. For instance, attempting to call {@link #close()} on
* a segment which does not support the {@link #CLOSE} access mode will result in an exception.
*
- * The set of supported access modes can only be made stricter (by supporting less access modes). This means
+ * The set of supported access modes can only be made stricter (by supporting fewer access modes). This means
* that restricting the set of access modes supported by a segment before sharing it with other clients
* is generally a good practice if the creator of the segment wants to retain some control over how the segment
* is going to be accessed.
@@ -150,7 +150,7 @@
* To allow for interoperability with existing code, a byte buffer view can be obtained from a memory segment
* (see {@link #asByteBuffer()}). This can be useful, for instance, for those clients that want to keep using the
* {@link ByteBuffer} API, but need to operate on large memory segments. Byte buffers obtained in such a way support
- * the same spatial and temporal access restrictions associated to the memory address from which they originated.
+ * the same spatial and temporal access restrictions associated to the memory segment from which they originated.
*
* @apiNote In the future, if the Java language permits, {@link MemorySegment}
* may become a {@code sealed} interface, which would prohibit subclassing except by
@@ -177,7 +177,8 @@ public interface MemorySegment extends AutoCloseable {
* The returned spliterator splits the segment according to the specified sequence layout; that is,
* if the supplied layout is a sequence layout whose element count is {@code N}, then calling {@link Spliterator#trySplit()}
* will result in a spliterator serving approximatively {@code N/2} elements (depending on whether N is even or not).
- * As such, splitting is possible as long as {@code N >= 2}.
+ * As such, splitting is possible as long as {@code N >= 2}. The spliterator returns segments that feature the same
+ * access modes as the given segment less the {@link #CLOSE} access mode.
*
* The returned spliterator effectively allows to slice a segment into disjoint sub-segments, which can then
* be processed in parallel by multiple threads (if the access mode {@link #ACQUIRE} is set).
@@ -216,7 +217,7 @@ static Spliterator spliterator(S segment, SequenceL
* owned by {@code newOwner}.
* @throws IllegalStateException if this segment is not alive, or if access occurs from a thread other than the
* thread owning this segment, or if the segment cannot be closed because it is being operated upon by a different
- * thread (see {@link #spliterator(SequenceLayout)}).
+ * thread (see {@link #spliterator(MemorySegment, SequenceLayout)}).
* @throws NullPointerException if {@code newOwner == null}
* @throws IllegalArgumentException if the segment is already a confined segment owner by {@code newOnwer}.
* @throws UnsupportedOperationException if this segment does not support the {@link #HANDOFF} access mode.
@@ -231,13 +232,13 @@ static Spliterator spliterator(S segment, SequenceL
/**
* Obtains a segment view with specific access modes. Supported access modes are {@link #READ}, {@link #WRITE},
- * {@link #CLOSE} and {@link #ACQUIRE}. It is generally not possible to go from a segment with stricter access modes
+ * {@link #CLOSE}, {@link #ACQUIRE} and {@link #HANDOFF}. It is generally not possible to go from a segment with stricter access modes
* to one with less strict access modes. For instance, attempting to add {@link #WRITE} access mode to a read-only segment
* will be met with an exception.
* @param accessModes an ORed mask of zero or more access modes.
* @return a segment view with specific access modes.
- * @throws UnsupportedOperationException when {@code mask} is an access mask which is less strict than the one supported by this
- * segment.
+ * @throws IllegalArgumentException when {@code accessModes} is an access mask which is less strict than the one supported by this
+ * segment, or when {@code accessModes} contains bits not associated with any of the supported access modes.
*/
MemorySegment withAccessModes(int accessModes);
@@ -245,12 +246,13 @@ static Spliterator spliterator(S segment, SequenceL
* Does this segment support a given set of access modes?
* @param accessModes an ORed mask of zero or more access modes.
* @return true, if the access modes in {@code accessModes} are stricter than the ones supported by this segment.
+ * @throws IllegalArgumentException when {@code accessModes} contains bits not associated with any of the supported access modes.
*/
boolean hasAccessModes(int accessModes);
/**
* Returns the access modes associated with this segment; the result is represented as ORed values from
- * {@link #READ}, {@link #WRITE}, {@link #CLOSE} and {@link #ACQUIRE}.
+ * {@link #READ}, {@link #WRITE}, {@link #CLOSE}, {@link #ACQUIRE} and {@link #HANDOFF}.
* @return the access modes associated with this segment.
*/
int accessModes();
@@ -394,6 +396,10 @@ static Spliterator spliterator(S segment, SequenceL
* buffer. The segment starts relative to the buffer's position (inclusive)
* and ends relative to the buffer's limit (exclusive).
*
+ * The segment will feature all access modes, unless the given
+ * buffer is {@linkplain ByteBuffer#isReadOnly() read-only} in which case the segment will
+ * not feature the {@link #WRITE} access mode.
+ *
* The resulting memory segment keeps a reference to the backing buffer, to ensure it remains reachable
* for the life-time of the segment.
*
@@ -408,7 +414,7 @@ static MemorySegment ofByteBuffer(ByteBuffer bb) {
* Creates a new array memory segment that models the memory associated with a given heap-allocated byte array.
*
* The resulting memory segment keeps a reference to the backing array, to ensure it remains reachable
- * for the life-time of the segment.
+ * for the life-time of the segment. The segment will feature all access modes.
*
* @param arr the primitive array backing the array memory segment.
* @return a new array memory segment.
@@ -421,7 +427,7 @@ static MemorySegment ofArray(byte[] arr) {
* Creates a new array memory segment that models the memory associated with a given heap-allocated char array.
*
* The resulting memory segment keeps a reference to the backing array, to ensure it remains reachable
- * for the life-time of the segment.
+ * for the life-time of the segment. The segment will feature all access modes.
*
* @param arr the primitive array backing the array memory segment.
* @return a new array memory segment.
@@ -434,7 +440,7 @@ static MemorySegment ofArray(char[] arr) {
* Creates a new array memory segment that models the memory associated with a given heap-allocated short array.
*
* The resulting memory segment keeps a reference to the backing array, to ensure it remains reachable
- * for the life-time of the segment.
+ * for the life-time of the segment. The segment will feature all access modes.
*
* @param arr the primitive array backing the array memory segment.
* @return a new array memory segment.
@@ -447,7 +453,7 @@ static MemorySegment ofArray(short[] arr) {
* Creates a new array memory segment that models the memory associated with a given heap-allocated int array.
*
* The resulting memory segment keeps a reference to the backing array, to ensure it remains reachable
- * for the life-time of the segment.
+ * for the life-time of the segment. The segment will feature all access modes.
*
* @param arr the primitive array backing the array memory segment.
* @return a new array memory segment.
@@ -460,7 +466,7 @@ static MemorySegment ofArray(int[] arr) {
* Creates a new array memory segment that models the memory associated with a given heap-allocated float array.
*
* The resulting memory segment keeps a reference to the backing array, to ensure it remains reachable
- * for the life-time of the segment.
+ * for the life-time of the segment. The segment will feature all access modes.
*
* @param arr the primitive array backing the array memory segment.
* @return a new array memory segment.
@@ -473,7 +479,7 @@ static MemorySegment ofArray(float[] arr) {
* Creates a new array memory segment that models the memory associated with a given heap-allocated long array.
*
* The resulting memory segment keeps a reference to the backing array, to ensure it remains reachable
- * for the life-time of the segment.
+ * for the life-time of the segment. The segment will feature all access modes.
*
* @param arr the primitive array backing the array memory segment.
* @return a new array memory segment.
@@ -486,7 +492,7 @@ static MemorySegment ofArray(long[] arr) {
* Creates a new array memory segment that models the memory associated with a given heap-allocated double array.
*
* The resulting memory segment keeps a reference to the backing array, to ensure it remains reachable
- * for the life-time of the segment.
+ * for the life-time of the segment. The segment will feature all access modes.
*
* @param arr the primitive array backing the array memory segment.
* @return a new array memory segment.
@@ -537,6 +543,10 @@ static MemorySegment allocateNative(long bytesSize) {
/**
* Creates a new mapped memory segment that models a memory-mapped region of a file from a given path.
+ *
+ * The segment will feature all access modes, unless the given mapping mode
+ * is {@linkplain FileChannel.MapMode#READ_ONLY READ_ONLY}, in which case the segment will not feature
+ * the {@link #WRITE} access mode.
*
* @implNote When obtaining a mapped segment from a newly created file, the initialization state of the contents of the block
* of mapped memory associated with the returned mapped memory segment is unspecified and should not be relied upon.
@@ -556,7 +566,7 @@ static MappedMemorySegment mapFromPath(Path path, long bytesSize, FileChannel.Ma
/**
* Creates a new native memory segment that models a newly allocated block of off-heap memory with given size and
- * alignment constraint (in bytes).
+ * alignment constraint (in bytes). The segment will feature all access modes.
*
* @implNote The block of off-heap memory associated with the returned native memory segment is initialized to zero.
* Moreover, a client is responsible to call the {@link MemorySegment#close()} on a native memory segment,
@@ -586,7 +596,7 @@ static MemorySegment allocateNative(long bytesSize, long alignmentBytes) {
* bounds, and can therefore be closed; closing such a segment can optionally result in calling an user-provided cleanup
* action. This method can be very useful when interacting with custom native memory sources (e.g. custom allocators,
* GPU memory, etc.), where an address to some underlying memory region is typically obtained from native code
- * (often as a plain {@code long} value).
+ * (often as a plain {@code long} value). The segment will feature all access modes.
*
* This method is restricted. Restricted method are unsafe, and, if used incorrectly, their use might crash
* the JVM crash or, worse, silently result in memory corruption. Thus, clients should refrain from depending on
diff --git a/src/jdk.incubator.foreign/share/classes/jdk/incubator/foreign/SequenceLayout.java b/src/jdk.incubator.foreign/share/classes/jdk/incubator/foreign/SequenceLayout.java
index 403a5fc192d..44a5e7bb941 100644
--- a/src/jdk.incubator.foreign/share/classes/jdk/incubator/foreign/SequenceLayout.java
+++ b/src/jdk.incubator.foreign/share/classes/jdk/incubator/foreign/SequenceLayout.java
@@ -114,7 +114,7 @@ public SequenceLayout withElementCount(long elementCount) {
* Returns a new sequence layout where element layouts in the flattened projection of this
* sequence layout (see {@link #flatten()}) are re-arranged into one or more nested sequence layouts
* according to the provided element counts. This transformation preserves the layout size;
- * that is, multiplying the provided element counts should yield the same element count
+ * that is, multiplying the provided element counts must yield the same element count
* as the flattened projection of this sequence layout.
*
* For instance, given a sequence layout of the kind:
diff --git a/src/jdk.incubator.foreign/share/classes/jdk/incubator/foreign/package-info.java b/src/jdk.incubator.foreign/share/classes/jdk/incubator/foreign/package-info.java
index b7af42b6fb6..33990e517b0 100644
--- a/src/jdk.incubator.foreign/share/classes/jdk/incubator/foreign/package-info.java
+++ b/src/jdk.incubator.foreign/share/classes/jdk/incubator/foreign/package-info.java
@@ -42,15 +42,16 @@
* a given memory location. We then create a native memory segment, that is, a memory segment backed by
* off-heap memory; the size of the segment is 40 bytes, enough to store 10 values of the primitive type {@code int}.
* The segment is created inside a try-with-resources construct: this idiom ensures that all the memory resources
- * associated with the segment will be released at the end of the block. Inside the try-with-resources block, we initialize
+ * associated with the segment will be released at the end of the block, according to the semantics described in
+ * Section {@jls 14.20.3} of The Java™ Language Specification. Inside the try-with-resources block, we initialize
* the contents of the memory segment; more specifically, if we view the memory segment as a set of 10 adjacent slots,
* {@code s[i]}, where {@code 0 <= i < 10}, where the size of each slot is exactly 4 bytes, the initialization logic above will set each slot
* so that {@code s[i] = i}, again where {@code 0 <= i < 10}.
*
*
* The key abstractions introduced by this package are {@link jdk.incubator.foreign.MemorySegment} and {@link jdk.incubator.foreign.MemoryAddress}.
- * The first models a contiguous memory region, which can reside either inside or outside the Java heap; the latter models an address - that is,
- * an offset inside a given segment. A memory address represents the main access coordinate of a memory access var handle, which can be obtained
+ * The first models a contiguous memory region, which can reside either inside or outside the Java heap; the latter models an address - which can
+ * sometimes be expressed as an offset into a given segment. A memory address represents the main access coordinate of a memory access var handle, which can be obtained
* using the combinator methods defined in the {@link jdk.incubator.foreign.MemoryHandles} class. Finally, the {@link jdk.incubator.foreign.MemoryLayout} class
* hierarchy enables description of memory layouts and basic operations such as computing the size in bytes of a given
* layout, obtain its alignment requirements, and so on. Memory layouts also provide an alternate, more abstract way, to produce
@@ -74,7 +75,9 @@
*
* This API provides strong safety guarantees when it comes to memory access. First, when dereferencing a memory segment using
* a memory address, such an address is validated (upon access), to make sure that it does not point to a memory location
- * which resides outside the boundaries of the memory segment it refers to. We call this guarantee spatial safety.
+ * which resides outside the boundaries of the memory segment it refers to. We call this guarantee spatial safety;
+ * in other words, access to memory segments is bounds-checked, in the same way as array access is, as described in
+ * Section {@jls 15.10.4} of The Java™ Language Specification.
*
* Since memory segments can be closed (see above), a memory address is also validated (upon access) to make sure that
* the segment it belongs to has not been closed prematurely. We call this guarantee temporal safety. Note that,
diff --git a/src/jdk.incubator.foreign/share/classes/jdk/internal/foreign/AbstractMemorySegmentImpl.java b/src/jdk.incubator.foreign/share/classes/jdk/internal/foreign/AbstractMemorySegmentImpl.java
index d20dced47ff..2f804184494 100644
--- a/src/jdk.incubator.foreign/share/classes/jdk/internal/foreign/AbstractMemorySegmentImpl.java
+++ b/src/jdk.incubator.foreign/share/classes/jdk/internal/foreign/AbstractMemorySegmentImpl.java
@@ -214,7 +214,7 @@ public Thread ownerThread() {
public AbstractMemorySegmentImpl withAccessModes(int accessModes) {
checkAccessModes(accessModes);
if ((~accessModes() & accessModes) != 0) {
- throw new UnsupportedOperationException("Cannot acquire more access modes");
+ throw new IllegalArgumentException("Cannot acquire more access modes");
}
return dup(0, length, (mask & ~ACCESS_MASK) | accessModes, scope);
}
diff --git a/src/jdk.incubator.foreign/share/classes/jdk/internal/foreign/MappedMemorySegmentImpl.java b/src/jdk.incubator.foreign/share/classes/jdk/internal/foreign/MappedMemorySegmentImpl.java
index 71837756233..006ad844e50 100644
--- a/src/jdk.incubator.foreign/share/classes/jdk/internal/foreign/MappedMemorySegmentImpl.java
+++ b/src/jdk.incubator.foreign/share/classes/jdk/internal/foreign/MappedMemorySegmentImpl.java
@@ -104,8 +104,12 @@ public static MappedMemorySegment makeMappedSegment(Path path, long bytesSize, F
try (FileChannelImpl channelImpl = (FileChannelImpl)FileChannel.open(path, openOptions(mapMode))) {
UnmapperProxy unmapperProxy = channelImpl.mapInternal(mapMode, 0L, bytesSize);
MemoryScope scope = MemoryScope.create(null, unmapperProxy::unmap);
+ int modes = defaultAccessModes(bytesSize);
+ if (mapMode == FileChannel.MapMode.READ_ONLY) {
+ modes &= ~WRITE;
+ }
return new MappedMemorySegmentImpl(unmapperProxy.address(), unmapperProxy, bytesSize,
- defaultAccessModes(bytesSize), scope);
+ modes, scope);
}
}
diff --git a/src/jdk.incubator.foreign/share/classes/jdk/internal/foreign/MemoryScope.java b/src/jdk.incubator.foreign/share/classes/jdk/internal/foreign/MemoryScope.java
index 42b4d49c208..43706a97588 100644
--- a/src/jdk.incubator.foreign/share/classes/jdk/internal/foreign/MemoryScope.java
+++ b/src/jdk.incubator.foreign/share/classes/jdk/internal/foreign/MemoryScope.java
@@ -201,7 +201,7 @@ final void checkValidState() {
@ForceInline
private static void checkAliveConfined(MemoryScope scope) {
if (scope.closed) {
- throw new IllegalStateException("This scope is already closed");
+ throw new IllegalStateException("This segment is already closed");
}
}
@@ -309,4 +309,4 @@ void close() {
}
}
}
-}
\ No newline at end of file
+}
diff --git a/src/jdk.incubator.foreign/share/classes/jdk/internal/foreign/NativeMemorySegmentImpl.java b/src/jdk.incubator.foreign/share/classes/jdk/internal/foreign/NativeMemorySegmentImpl.java
index df0301fc227..e650bbd1886 100644
--- a/src/jdk.incubator.foreign/share/classes/jdk/internal/foreign/NativeMemorySegmentImpl.java
+++ b/src/jdk.incubator.foreign/share/classes/jdk/internal/foreign/NativeMemorySegmentImpl.java
@@ -95,8 +95,7 @@ public static MemorySegment makeNativeSegment(long bytesSize, long alignmentByte
long alignedBuf = Utils.alignUp(buf, alignmentBytes);
MemoryScope scope = MemoryScope.create(null, () -> unsafe.freeMemory(buf));
MemorySegment segment = new NativeMemorySegmentImpl(buf, alignedSize,
- defaultAccessModes(alignedSize),
- scope);
+ defaultAccessModes(alignedSize), scope);
if (alignedSize != bytesSize) {
long delta = alignedBuf - buf;
segment = segment.asSlice(delta, bytesSize);
diff --git a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.aarch64/src/jdk/vm/ci/aarch64/AArch64.java b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.aarch64/src/jdk/vm/ci/aarch64/AArch64.java
index b147b5b3aca..8f2bc6122b4 100644
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.aarch64/src/jdk/vm/ci/aarch64/AArch64.java
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.aarch64/src/jdk/vm/ci/aarch64/AArch64.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -173,8 +173,7 @@ public enum CPUFeature {
CRC32,
LSE,
STXR_PREFETCH,
- A53MAC,
- DMB_ATOMICS
+ A53MAC
}
private final EnumSet features;
@@ -183,7 +182,6 @@ public enum CPUFeature {
* Set of flags to control code emission.
*/
public enum Flag {
- UseBarriersForVolatile,
UseCRC32,
UseNeon,
UseSIMDForMemoryOps,
diff --git a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.aarch64/src/jdk/vm/ci/hotspot/aarch64/AArch64HotSpotJVMCIBackendFactory.java b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.aarch64/src/jdk/vm/ci/hotspot/aarch64/AArch64HotSpotJVMCIBackendFactory.java
index 2c96618ee47..650d3f048af 100644
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.aarch64/src/jdk/vm/ci/hotspot/aarch64/AArch64HotSpotJVMCIBackendFactory.java
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.aarch64/src/jdk/vm/ci/hotspot/aarch64/AArch64HotSpotJVMCIBackendFactory.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -80,9 +80,6 @@ private static EnumSet computeFeatures(@SuppressWarnings("un
if ((config.vmVersionFeatures & config.aarch64A53MAC) != 0) {
features.add(AArch64.CPUFeature.A53MAC);
}
- if ((config.vmVersionFeatures & config.aarch64DMB_ATOMICS) != 0) {
- features.add(AArch64.CPUFeature.DMB_ATOMICS);
- }
return features;
}
@@ -90,9 +87,6 @@ private static EnumSet computeFeatures(@SuppressWarnings("un
private static EnumSet computeFlags(@SuppressWarnings("unused") AArch64HotSpotVMConfig config) {
EnumSet flags = EnumSet.noneOf(AArch64.Flag.class);
- if (config.useBarriersForVolatile) {
- flags.add(AArch64.Flag.UseBarriersForVolatile);
- }
if (config.useCRC32) {
flags.add(AArch64.Flag.UseCRC32);
}
diff --git a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.aarch64/src/jdk/vm/ci/hotspot/aarch64/AArch64HotSpotVMConfig.java b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.aarch64/src/jdk/vm/ci/hotspot/aarch64/AArch64HotSpotVMConfig.java
index c7efa031c14..fa28dacc6e5 100644
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.aarch64/src/jdk/vm/ci/hotspot/aarch64/AArch64HotSpotVMConfig.java
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot.aarch64/src/jdk/vm/ci/hotspot/aarch64/AArch64HotSpotVMConfig.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -46,7 +46,6 @@ class AArch64HotSpotVMConfig extends HotSpotVMConfigAccess {
/*
* These flags are set based on the corresponding command line flags.
*/
- final boolean useBarriersForVolatile = getFlag("UseBarriersForVolatile", Boolean.class);
final boolean useCRC32 = getFlag("UseCRC32", Boolean.class);
final boolean useNeon = getFlag("UseNeon", Boolean.class);
final boolean useSIMDForMemoryOps = getFlag("UseSIMDForMemoryOps", Boolean.class);
@@ -71,6 +70,5 @@ class AArch64HotSpotVMConfig extends HotSpotVMConfigAccess {
final long aarch64LSE = getConstant("VM_Version::CPU_LSE", Long.class);
final long aarch64STXR_PREFETCH = getConstant("VM_Version::CPU_STXR_PREFETCH", Long.class);
final long aarch64A53MAC = getConstant("VM_Version::CPU_A53MAC", Long.class);
- final long aarch64DMB_ATOMICS = getConstant("VM_Version::CPU_DMB_ATOMICS", Long.class);
// Checkstyle: resume
}
diff --git a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/GraalHotSpotVMConfig.java b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/GraalHotSpotVMConfig.java
index a1beae55ef8..4f2c3a99cc0 100644
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/GraalHotSpotVMConfig.java
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/GraalHotSpotVMConfig.java
@@ -209,7 +209,7 @@ public final int logMinObjAlignment() {
public final int stackShadowPages = getFlag("StackShadowPages", Integer.class);
public final int stackReservedPages = getFlag("StackReservedPages", Integer.class, 0);
public final boolean useStackBanging = getFlag("UseStackBanging", Boolean.class);
- public final int stackBias = getConstant("STACK_BIAS", Integer.class);
+ public final int stackBias = getConstant("STACK_BIAS", Integer.class, 0);
public final int vmPageSize = getFieldValue("CompilerToVM::Data::vm_page_size", Integer.class, "int");
public final int markOffset = getFieldOffset("oopDesc::_mark", Integer.class, markWord);
diff --git a/src/jdk.jdi/share/classes/com/sun/jdi/VirtualMachine.java b/src/jdk.jdi/share/classes/com/sun/jdi/VirtualMachine.java
index 64281c70f34..21a77ce9a70 100644
--- a/src/jdk.jdi/share/classes/com/sun/jdi/VirtualMachine.java
+++ b/src/jdk.jdi/share/classes/com/sun/jdi/VirtualMachine.java
@@ -214,17 +214,9 @@ default List allModules() {
* attempting to add a method will throw this exception.
* If {@link #canUnrestrictedlyRedefineClasses()
* canUnrestrictedlyRedefineClasses()}
- * is false, attempting any of the following will throw
- * this exception
- *
- * - changing the schema (the fields)
- *
- changing the hierarchy (superclasses, interfaces)
- *
- deleting a method
- *
- changing class modifiers
- *
- changing method modifiers
- *
- changing the {@code NestHost}, {@code NestMembers}, or {@code Record} class attributes
- *
- *
+ * is false attempting any of the unsupported class file changes described
+ * in
+ * JVM TI RedefineClasses will throw this exception.
*
* @throws java.lang.NoClassDefFoundError if the bytes
* don't correspond to the reference type (the names
diff --git a/src/jdk.jfr/share/classes/jdk/jfr/internal/LongMap.java b/src/jdk.jfr/share/classes/jdk/jfr/internal/LongMap.java
index e4d83db107e..29a8d2736b1 100644
--- a/src/jdk.jfr/share/classes/jdk/jfr/internal/LongMap.java
+++ b/src/jdk.jfr/share/classes/jdk/jfr/internal/LongMap.java
@@ -77,6 +77,13 @@ public void clearId(long id, int bitIndex) {
bitSet.set(bitSetIndex, false);
}
+ public void clearId(long id) {
+ int bitSetIndex = bitSetIndex(tableIndexOf(id), 0);
+ for (int i = 0; i < bitCount; i++) {
+ bitSet.set(bitSetIndex + i, false);
+ }
+ }
+
public boolean isSetId(long id, int bitIndex) {
int bitSetIndex = bitSetIndex(tableIndexOf(id), bitIndex);
return bitSet.get(bitSetIndex);
@@ -103,6 +110,9 @@ private int tableIndexOf(long id) {
}
public boolean hasKey(long id) {
+ if (keys == EMPTY_KEYS) {
+ return false;
+ }
int index = index(id);
while (true) {
if (objects[index] == null) {
diff --git a/src/jdk.jfr/share/classes/jdk/jfr/internal/consumer/ConstantMap.java b/src/jdk.jfr/share/classes/jdk/jfr/internal/consumer/ConstantMap.java
index 72b68092818..670df148b54 100644
--- a/src/jdk.jfr/share/classes/jdk/jfr/internal/consumer/ConstantMap.java
+++ b/src/jdk.jfr/share/classes/jdk/jfr/internal/consumer/ConstantMap.java
@@ -156,6 +156,9 @@ public void resolve() {
}
public void put(long key, Object value) {
+ if (objects.hasKey(key)) {
+ objects.clearId(key);
+ }
objects.put(key, value);
}
diff --git a/test/fmw/gtest/googlemock/CHANGES b/test/fmw/gtest/googlemock/CHANGES
deleted file mode 100644
index 4328ece3d3a..00000000000
--- a/test/fmw/gtest/googlemock/CHANGES
+++ /dev/null
@@ -1,126 +0,0 @@
-Changes for 1.7.0:
-
-* All new improvements in Google Test 1.7.0.
-* New feature: matchers DoubleNear(), FloatNear(),
- NanSensitiveDoubleNear(), NanSensitiveFloatNear(),
- UnorderedElementsAre(), UnorderedElementsAreArray(), WhenSorted(),
- WhenSortedBy(), IsEmpty(), and SizeIs().
-* Improvement: Google Mock can now be built as a DLL.
-* Improvement: when compiled by a C++11 compiler, matchers AllOf()
- and AnyOf() can accept an arbitrary number of matchers.
-* Improvement: when compiled by a C++11 compiler, matchers
- ElementsAreArray() can accept an initializer list.
-* Improvement: when exceptions are enabled, a mock method with no
- default action now throws instead crashing the test.
-* Improvement: added class testing::StringMatchResultListener to aid
- definition of composite matchers.
-* Improvement: function return types used in MOCK_METHOD*() macros can
- now contain unprotected commas.
-* Improvement (potentially breaking): EXPECT_THAT() and ASSERT_THAT()
- are now more strict in ensuring that the value type and the matcher
- type are compatible, catching potential bugs in tests.
-* Improvement: Pointee() now works on an optional.
-* Improvement: the ElementsAreArray() matcher can now take a vector or
- iterator range as input, and makes a copy of its input elements
- before the conversion to a Matcher.
-* Improvement: the Google Mock Generator can now generate mocks for
- some class templates.
-* Bug fix: mock object destruction triggerred by another mock object's
- destruction no longer hangs.
-* Improvement: Google Mock Doctor works better with newer Clang and
- GCC now.
-* Compatibility fixes.
-* Bug/warning fixes.
-
-Changes for 1.6.0:
-
-* Compilation is much faster and uses much less memory, especially
- when the constructor and destructor of a mock class are moved out of
- the class body.
-* New matchers: Pointwise(), Each().
-* New actions: ReturnPointee() and ReturnRefOfCopy().
-* CMake support.
-* Project files for Visual Studio 2010.
-* AllOf() and AnyOf() can handle up-to 10 arguments now.
-* Google Mock doctor understands Clang error messages now.
-* SetArgPointee<> now accepts string literals.
-* gmock_gen.py handles storage specifier macros and template return
- types now.
-* Compatibility fixes.
-* Bug fixes and implementation clean-ups.
-* Potentially incompatible changes: disables the harmful 'make install'
- command in autotools.
-
-Potentially breaking changes:
-
-* The description string for MATCHER*() changes from Python-style
- interpolation to an ordinary C++ string expression.
-* SetArgumentPointee is deprecated in favor of SetArgPointee.
-* Some non-essential project files for Visual Studio 2005 are removed.
-
-Changes for 1.5.0:
-
- * New feature: Google Mock can be safely used in multi-threaded tests
- on platforms having pthreads.
- * New feature: function for printing a value of arbitrary type.
- * New feature: function ExplainMatchResult() for easy definition of
- composite matchers.
- * The new matcher API lets user-defined matchers generate custom
- explanations more directly and efficiently.
- * Better failure messages all around.
- * NotNull() and IsNull() now work with smart pointers.
- * Field() and Property() now work when the matcher argument is a pointer
- passed by reference.
- * Regular expression matchers on all platforms.
- * Added GCC 4.0 support for Google Mock Doctor.
- * Added gmock_all_test.cc for compiling most Google Mock tests
- in a single file.
- * Significantly cleaned up compiler warnings.
- * Bug fixes, better test coverage, and implementation clean-ups.
-
- Potentially breaking changes:
-
- * Custom matchers defined using MatcherInterface or MakePolymorphicMatcher()
- need to be updated after upgrading to Google Mock 1.5.0; matchers defined
- using MATCHER or MATCHER_P* aren't affected.
- * Dropped support for 'make install'.
-
-Changes for 1.4.0 (we skipped 1.2.* and 1.3.* to match the version of
-Google Test):
-
- * Works in more environments: Symbian and minGW, Visual C++ 7.1.
- * Lighter weight: comes with our own implementation of TR1 tuple (no
- more dependency on Boost!).
- * New feature: --gmock_catch_leaked_mocks for detecting leaked mocks.
- * New feature: ACTION_TEMPLATE for defining templatized actions.
- * New feature: the .After() clause for specifying expectation order.
- * New feature: the .With() clause for specifying inter-argument
- constraints.
- * New feature: actions ReturnArg(), ReturnNew(...), and
- DeleteArg().
- * New feature: matchers Key(), Pair(), Args<...>(), AllArgs(), IsNull(),
- and Contains().
- * New feature: utility class MockFunction, useful for checkpoints, etc.
- * New feature: functions Value(x, m) and SafeMatcherCast(m).
- * New feature: copying a mock object is rejected at compile time.
- * New feature: a script for fusing all Google Mock and Google Test
- source files for easy deployment.
- * Improved the Google Mock doctor to diagnose more diseases.
- * Improved the Google Mock generator script.
- * Compatibility fixes for Mac OS X and gcc.
- * Bug fixes and implementation clean-ups.
-
-Changes for 1.1.0:
-
- * New feature: ability to use Google Mock with any testing framework.
- * New feature: macros for easily defining new matchers
- * New feature: macros for easily defining new actions.
- * New feature: more container matchers.
- * New feature: actions for accessing function arguments and throwing
- exceptions.
- * Improved the Google Mock doctor script for diagnosing compiler errors.
- * Bug fixes and implementation clean-ups.
-
-Changes for 1.0.0:
-
- * Initial Open Source release of Google Mock
diff --git a/test/fmw/gtest/googlemock/CONTRIBUTORS b/test/fmw/gtest/googlemock/CONTRIBUTORS
deleted file mode 100644
index 6e9ae362b60..00000000000
--- a/test/fmw/gtest/googlemock/CONTRIBUTORS
+++ /dev/null
@@ -1,40 +0,0 @@
-# This file contains a list of people who've made non-trivial
-# contribution to the Google C++ Mocking Framework project. People
-# who commit code to the project are encouraged to add their names
-# here. Please keep the list sorted by first names.
-
-Benoit Sigoure
-Bogdan Piloca
-Chandler Carruth
-Dave MacLachlan
-David Anderson
-Dean Sturtevant
-Gene Volovich
-Hal Burch
-Jeffrey Yasskin
-Jim Keller
-Joe Walnes
-Jon Wray
-Keir Mierle
-Keith Ray
-Kostya Serebryany
-Lev Makhlis
-Manuel Klimek
-Mario Tanev
-Mark Paskin
-Markus Heule
-Matthew Simmons
-Mike Bland
-Neal Norwitz
-Nermin Ozkiranartli
-Owen Carlsen
-Paneendra Ba
-Paul Menage
-Piotr Kaminski
-Russ Rufer
-Sverre Sundsdal
-Takeshi Yoshino
-Vadim Berman
-Vlad Losev
-Wolfgang Klier
-Zhanyong Wan
diff --git a/test/fmw/gtest/googlemock/LICENSE b/test/fmw/gtest/googlemock/LICENSE
deleted file mode 100644
index 1941a11f8ce..00000000000
--- a/test/fmw/gtest/googlemock/LICENSE
+++ /dev/null
@@ -1,28 +0,0 @@
-Copyright 2008, Google Inc.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/test/fmw/gtest/googlemock/README.md b/test/fmw/gtest/googlemock/README.md
deleted file mode 100644
index e5bb2116ac4..00000000000
--- a/test/fmw/gtest/googlemock/README.md
+++ /dev/null
@@ -1,322 +0,0 @@
-## Google Mock ##
-
-The Google C++ mocking framework.
-
-### Overview ###
-
-Google's framework for writing and using C++ mock classes.
-It can help you derive better designs of your system and write better tests.
-
-It is inspired by:
-
- * [jMock](http://www.jmock.org/),
- * [EasyMock](http://www.easymock.org/), and
- * [Hamcrest](http://code.google.com/p/hamcrest/),
-
-and designed with C++'s specifics in mind.
-
-Google mock:
-
- * lets you create mock classes trivially using simple macros.
- * supports a rich set of matchers and actions.
- * handles unordered, partially ordered, or completely ordered expectations.
- * is extensible by users.
-
-We hope you find it useful!
-
-### Features ###
-
- * Provides a declarative syntax for defining mocks.
- * Can easily define partial (hybrid) mocks, which are a cross of real
- and mock objects.
- * Handles functions of arbitrary types and overloaded functions.
- * Comes with a rich set of matchers for validating function arguments.
- * Uses an intuitive syntax for controlling the behavior of a mock.
- * Does automatic verification of expectations (no record-and-replay needed).
- * Allows arbitrary (partial) ordering constraints on
- function calls to be expressed,.
- * Lets an user extend it by defining new matchers and actions.
- * Does not use exceptions.
- * Is easy to learn and use.
-
-Please see the project page above for more information as well as the
-mailing list for questions, discussions, and development. There is
-also an IRC channel on OFTC (irc.oftc.net) #gtest available. Please
-join us!
-
-Please note that code under [scripts/generator](scripts/generator/) is
-from [cppclean](http://code.google.com/p/cppclean/) and released under
-the Apache License, which is different from Google Mock's license.
-
-## Getting Started ##
-
-If you are new to the project, we suggest that you read the user
-documentation in the following order:
-
- * Learn the [basics](../googletest/docs/primer.md) of
- Google Test, if you choose to use Google Mock with it (recommended).
- * Read [Google Mock for Dummies](../googlemock/docs/ForDummies.md).
- * Read the instructions below on how to build Google Mock.
-
-You can also watch Zhanyong's [talk](http://www.youtube.com/watch?v=sYpCyLI47rM) on Google Mock's usage and implementation.
-
-Once you understand the basics, check out the rest of the docs:
-
- * [CheatSheet](../googlemock/docs/CheatSheet.md) - all the commonly used stuff
- at a glance.
- * [CookBook](../googlemock/docs/CookBook.md) - recipes for getting things done,
- including advanced techniques.
-
-If you need help, please check the
-[KnownIssues](docs/KnownIssues.md) and
-[FrequentlyAskedQuestions](docs/FrequentlyAskedQuestions.md) before
-posting a question on the
-[discussion group](http://groups.google.com/group/googlemock).
-
-
-### Using Google Mock Without Google Test ###
-
-Google Mock is not a testing framework itself. Instead, it needs a
-testing framework for writing tests. Google Mock works seamlessly
-with [Google Test](https://github.com/google/googletest), but
-you can also use it with [any C++ testing framework](../googlemock/docs/ForDummies.md#using-google-mock-with-any-testing-framework).
-
-### Requirements for End Users ###
-
-Google Mock is implemented on top of [Google Test](
-http://github.com/google/googletest/), and depends on it.
-You must use the bundled version of Google Test when using Google Mock.
-
-You can also easily configure Google Mock to work with another testing
-framework, although it will still need Google Test. Please read
-["Using_Google_Mock_with_Any_Testing_Framework"](
- ../googlemock/docs/ForDummies.md#using-google-mock-with-any-testing-framework)
-for instructions.
-
-Google Mock depends on advanced C++ features and thus requires a more
-modern compiler. The following are needed to use Google Mock:
-
-#### Linux Requirements ####
-
- * GNU-compatible Make or "gmake"
- * POSIX-standard shell
- * POSIX(-2) Regular Expressions (regex.h)
- * C++98-standard-compliant compiler (e.g. GCC 3.4 or newer)
-
-#### Windows Requirements ####
-
- * Microsoft Visual C++ 8.0 SP1 or newer
-
-#### Mac OS X Requirements ####
-
- * Mac OS X 10.4 Tiger or newer
- * Developer Tools Installed
-
-### Requirements for Contributors ###
-
-We welcome patches. If you plan to contribute a patch, you need to
-build Google Mock and its tests, which has further requirements:
-
- * Automake version 1.9 or newer
- * Autoconf version 2.59 or newer
- * Libtool / Libtoolize
- * Python version 2.3 or newer (for running some of the tests and
- re-generating certain source files from templates)
-
-### Building Google Mock ###
-
-#### Using CMake ####
-
-If you have CMake available, it is recommended that you follow the
-[build instructions][gtest_cmakebuild]
-as described for Google Test.
-
-If are using Google Mock with an
-existing CMake project, the section
-[Incorporating Into An Existing CMake Project][gtest_incorpcmake]
-may be of particular interest.
-To make it work for Google Mock you will need to change
-
- target_link_libraries(example gtest_main)
-
-to
-
- target_link_libraries(example gmock_main)
-
-This works because `gmock_main` library is compiled with Google Test.
-
-#### Preparing to Build (Unix only) ####
-
-If you are using a Unix system and plan to use the GNU Autotools build
-system to build Google Mock (described below), you'll need to
-configure it now.
-
-To prepare the Autotools build system:
-
- cd googlemock
- autoreconf -fvi
-
-To build Google Mock and your tests that use it, you need to tell your
-build system where to find its headers and source files. The exact
-way to do it depends on which build system you use, and is usually
-straightforward.
-
-This section shows how you can integrate Google Mock into your
-existing build system.
-
-Suppose you put Google Mock in directory `${GMOCK_DIR}` and Google Test
-in `${GTEST_DIR}` (the latter is `${GMOCK_DIR}/gtest` by default). To
-build Google Mock, create a library build target (or a project as
-called by Visual Studio and Xcode) to compile
-
- ${GTEST_DIR}/src/gtest-all.cc and ${GMOCK_DIR}/src/gmock-all.cc
-
-with
-
- ${GTEST_DIR}/include and ${GMOCK_DIR}/include
-
-in the system header search path, and
-
- ${GTEST_DIR} and ${GMOCK_DIR}
-
-in the normal header search path. Assuming a Linux-like system and gcc,
-something like the following will do:
-
- g++ -isystem ${GTEST_DIR}/include -I${GTEST_DIR} \
- -isystem ${GMOCK_DIR}/include -I${GMOCK_DIR} \
- -pthread -c ${GTEST_DIR}/src/gtest-all.cc
- g++ -isystem ${GTEST_DIR}/include -I${GTEST_DIR} \
- -isystem ${GMOCK_DIR}/include -I${GMOCK_DIR} \
- -pthread -c ${GMOCK_DIR}/src/gmock-all.cc
- ar -rv libgmock.a gtest-all.o gmock-all.o
-
-(We need -pthread as Google Test and Google Mock use threads.)
-
-Next, you should compile your test source file with
-${GTEST\_DIR}/include and ${GMOCK\_DIR}/include in the header search
-path, and link it with gmock and any other necessary libraries:
-
- g++ -isystem ${GTEST_DIR}/include -isystem ${GMOCK_DIR}/include \
- -pthread path/to/your_test.cc libgmock.a -o your_test
-
-As an example, the make/ directory contains a Makefile that you can
-use to build Google Mock on systems where GNU make is available
-(e.g. Linux, Mac OS X, and Cygwin). It doesn't try to build Google
-Mock's own tests. Instead, it just builds the Google Mock library and
-a sample test. You can use it as a starting point for your own build
-script.
-
-If the default settings are correct for your environment, the
-following commands should succeed:
-
- cd ${GMOCK_DIR}/make
- make
- ./gmock_test
-
-If you see errors, try to tweak the contents of
-[make/Makefile](make/Makefile) to make them go away.
-
-### Windows ###
-
-The msvc/2005 directory contains VC++ 2005 projects and the msvc/2010
-directory contains VC++ 2010 projects for building Google Mock and
-selected tests.
-
-Change to the appropriate directory and run "msbuild gmock.sln" to
-build the library and tests (or open the gmock.sln in the MSVC IDE).
-If you want to create your own project to use with Google Mock, you'll
-have to configure it to use the `gmock_config` propety sheet. For that:
-
- * Open the Property Manager window (View | Other Windows | Property Manager)
- * Right-click on your project and select "Add Existing Property Sheet..."
- * Navigate to `gmock_config.vsprops` or `gmock_config.props` and select it.
- * In Project Properties | Configuration Properties | General | Additional
- Include Directories, type /include.
-
-### Tweaking Google Mock ###
-
-Google Mock can be used in diverse environments. The default
-configuration may not work (or may not work well) out of the box in
-some environments. However, you can easily tweak Google Mock by
-defining control macros on the compiler command line. Generally,
-these macros are named like `GTEST_XYZ` and you define them to either 1
-or 0 to enable or disable a certain feature.
-
-We list the most frequently used macros below. For a complete list,
-see file [${GTEST\_DIR}/include/gtest/internal/gtest-port.h](
-../googletest/include/gtest/internal/gtest-port.h).
-
-### Choosing a TR1 Tuple Library ###
-
-Google Mock uses the C++ Technical Report 1 (TR1) tuple library
-heavily. Unfortunately TR1 tuple is not yet widely available with all
-compilers. The good news is that Google Test 1.4.0+ implements a
-subset of TR1 tuple that's enough for Google Mock's need. Google Mock
-will automatically use that implementation when the compiler doesn't
-provide TR1 tuple.
-
-Usually you don't need to care about which tuple library Google Test
-and Google Mock use. However, if your project already uses TR1 tuple,
-you need to tell Google Test and Google Mock to use the same TR1 tuple
-library the rest of your project uses, or the two tuple
-implementations will clash. To do that, add
-
- -DGTEST_USE_OWN_TR1_TUPLE=0
-
-to the compiler flags while compiling Google Test, Google Mock, and
-your tests. If you want to force Google Test and Google Mock to use
-their own tuple library, just add
-
- -DGTEST_USE_OWN_TR1_TUPLE=1
-
-to the compiler flags instead.
-
-If you want to use Boost's TR1 tuple library with Google Mock, please
-refer to the Boost website (http://www.boost.org/) for how to obtain
-it and set it up.
-
-### As a Shared Library (DLL) ###
-
-Google Mock is compact, so most users can build and link it as a static
-library for the simplicity. Google Mock can be used as a DLL, but the
-same DLL must contain Google Test as well. See
-[Google Test's README][gtest_readme]
-for instructions on how to set up necessary compiler settings.
-
-### Tweaking Google Mock ###
-
-Most of Google Test's control macros apply to Google Mock as well.
-Please see [Google Test's README][gtest_readme] for how to tweak them.
-
-### Upgrading from an Earlier Version ###
-
-We strive to keep Google Mock releases backward compatible.
-Sometimes, though, we have to make some breaking changes for the
-users' long-term benefits. This section describes what you'll need to
-do if you are upgrading from an earlier version of Google Mock.
-
-#### Upgrading from 1.1.0 or Earlier ####
-
-You may need to explicitly enable or disable Google Test's own TR1
-tuple library. See the instructions in section "[Choosing a TR1 Tuple
-Library](#choosing-a-tr1-tuple-library)".
-
-#### Upgrading from 1.4.0 or Earlier ####
-
-On platforms where the pthread library is available, Google Test and
-Google Mock use it in order to be thread-safe. For this to work, you
-may need to tweak your compiler and/or linker flags. Please see the
-"[Multi-threaded Tests](../googletest/README.md#multi-threaded-tests)" section in file Google Test's README for what you may need to do.
-
-If you have custom matchers defined using `MatcherInterface` or
-`MakePolymorphicMatcher()`, you'll need to update their definitions to
-use the new matcher API (
-[monomorphic](./docs/CookBook.md#writing-new-monomorphic-matchers),
-[polymorphic](./docs/CookBook.md#writing-new-polymorphic-matchers)).
-Matchers defined using `MATCHER()` or `MATCHER_P*()` aren't affected.
-
-Happy testing!
-
-[gtest_readme]: ../googletest/README.md "googletest"
-[gtest_cmakebuild]: ../googletest/README.md#using-cmake "Using CMake"
-[gtest_incorpcmake]: ../googletest/README.md#incorporating-into-an-existing-cmake-project "Incorporating Into An Existing CMake Project"
diff --git a/test/fmw/gtest/googlemock/include/gmock/gmock-actions.h b/test/fmw/gtest/googlemock/include/gmock/gmock-actions.h
deleted file mode 100644
index b82313d5b05..00000000000
--- a/test/fmw/gtest/googlemock/include/gmock/gmock-actions.h
+++ /dev/null
@@ -1,1263 +0,0 @@
-// Copyright 2007, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-// Google Mock - a framework for writing C++ mock classes.
-//
-// This file implements some commonly used actions.
-
-// GOOGLETEST_CM0002 DO NOT DELETE
-
-#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_ACTIONS_H_
-#define GMOCK_INCLUDE_GMOCK_GMOCK_ACTIONS_H_
-
-#ifndef _WIN32_WCE
-# include
-#endif
-
-#include
-#include
-
-#include "gmock/internal/gmock-internal-utils.h"
-#include "gmock/internal/gmock-port.h"
-
-#if GTEST_LANG_CXX11 // Defined by gtest-port.h via gmock-port.h.
-#include
-#include
-#endif // GTEST_LANG_CXX11
-
-namespace testing {
-
-// To implement an action Foo, define:
-// 1. a class FooAction that implements the ActionInterface interface, and
-// 2. a factory function that creates an Action object from a
-// const FooAction*.
-//
-// The two-level delegation design follows that of Matcher, providing
-// consistency for extension developers. It also eases ownership
-// management as Action objects can now be copied like plain values.
-
-namespace internal {
-
-template
-class ActionAdaptor;
-
-// BuiltInDefaultValueGetter::Get() returns a
-// default-constructed T value. BuiltInDefaultValueGetter::Get() crashes with an error.
-//
-// This primary template is used when kDefaultConstructible is true.
-template
-struct BuiltInDefaultValueGetter {
- static T Get() { return T(); }
-};
-template
-struct BuiltInDefaultValueGetter {
- static T Get() {
- Assert(false, __FILE__, __LINE__,
- "Default action undefined for the function return type.");
- return internal::Invalid();
- // The above statement will never be reached, but is required in
- // order for this function to compile.
- }
-};
-
-// BuiltInDefaultValue::Get() returns the "built-in" default value
-// for type T, which is NULL when T is a raw pointer type, 0 when T is
-// a numeric type, false when T is bool, or "" when T is string or
-// std::string. In addition, in C++11 and above, it turns a
-// default-constructed T value if T is default constructible. For any
-// other type T, the built-in default T value is undefined, and the
-// function will abort the process.
-template
-class BuiltInDefaultValue {
- public:
-#if GTEST_LANG_CXX11
- // This function returns true iff type T has a built-in default value.
- static bool Exists() {
- return ::std::is_default_constructible::value;
- }
-
- static T Get() {
- return BuiltInDefaultValueGetter<
- T, ::std::is_default_constructible::value>::Get();
- }
-
-#else // GTEST_LANG_CXX11
- // This function returns true iff type T has a built-in default value.
- static bool Exists() {
- return false;
- }
-
- static T Get() {
- return BuiltInDefaultValueGetter::Get();
- }
-
-#endif // GTEST_LANG_CXX11
-};
-
-// This partial specialization says that we use the same built-in
-// default value for T and const T.
-template
-class BuiltInDefaultValue {
- public:
- static bool Exists() { return BuiltInDefaultValue::Exists(); }
- static T Get() { return BuiltInDefaultValue::Get(); }
-};
-
-// This partial specialization defines the default values for pointer
-// types.
-template
-class BuiltInDefaultValue {
- public:
- static bool Exists() { return true; }
- static T* Get() { return NULL; }
-};
-
-// The following specializations define the default values for
-// specific types we care about.
-#define GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(type, value) \
- template <> \
- class BuiltInDefaultValue { \
- public: \
- static bool Exists() { return true; } \
- static type Get() { return value; } \
- }
-
-GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(void, ); // NOLINT
-#if GTEST_HAS_GLOBAL_STRING
-GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(::string, "");
-#endif // GTEST_HAS_GLOBAL_STRING
-GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(::std::string, "");
-GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(bool, false);
-GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(unsigned char, '\0');
-GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(signed char, '\0');
-GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(char, '\0');
-
-// There's no need for a default action for signed wchar_t, as that
-// type is the same as wchar_t for gcc, and invalid for MSVC.
-//
-// There's also no need for a default action for unsigned wchar_t, as
-// that type is the same as unsigned int for gcc, and invalid for
-// MSVC.
-#if GMOCK_WCHAR_T_IS_NATIVE_
-GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(wchar_t, 0U); // NOLINT
-#endif
-
-GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(unsigned short, 0U); // NOLINT
-GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(signed short, 0); // NOLINT
-GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(unsigned int, 0U);
-GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(signed int, 0);
-GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(unsigned long, 0UL); // NOLINT
-GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(signed long, 0L); // NOLINT
-GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(UInt64, 0);
-GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(Int64, 0);
-GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(float, 0);
-GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(double, 0);
-
-#undef GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_
-
-} // namespace internal
-
-// When an unexpected function call is encountered, Google Mock will
-// let it return a default value if the user has specified one for its
-// return type, or if the return type has a built-in default value;
-// otherwise Google Mock won't know what value to return and will have
-// to abort the process.
-//
-// The DefaultValue class allows a user to specify the
-// default value for a type T that is both copyable and publicly
-// destructible (i.e. anything that can be used as a function return
-// type). The usage is:
-//
-// // Sets the default value for type T to be foo.
-// DefaultValue::Set(foo);
-template
-class DefaultValue {
- public:
- // Sets the default value for type T; requires T to be
- // copy-constructable and have a public destructor.
- static void Set(T x) {
- delete producer_;
- producer_ = new FixedValueProducer(x);
- }
-
- // Provides a factory function to be called to generate the default value.
- // This method can be used even if T is only move-constructible, but it is not
- // limited to that case.
- typedef T (*FactoryFunction)();
- static void SetFactory(FactoryFunction factory) {
- delete producer_;
- producer_ = new FactoryValueProducer(factory);
- }
-
- // Unsets the default value for type T.
- static void Clear() {
- delete producer_;
- producer_ = NULL;
- }
-
- // Returns true iff the user has set the default value for type T.
- static bool IsSet() { return producer_ != NULL; }
-
- // Returns true if T has a default return value set by the user or there
- // exists a built-in default value.
- static bool Exists() {
- return IsSet() || internal::BuiltInDefaultValue::Exists();
- }
-
- // Returns the default value for type T if the user has set one;
- // otherwise returns the built-in default value. Requires that Exists()
- // is true, which ensures that the return value is well-defined.
- static T Get() {
- return producer_ == NULL ?
- internal::BuiltInDefaultValue::Get() : producer_->Produce();
- }
-
- private:
- class ValueProducer {
- public:
- virtual ~ValueProducer() {}
- virtual T Produce() = 0;
- };
-
- class FixedValueProducer : public ValueProducer {
- public:
- explicit FixedValueProducer(T value) : value_(value) {}
- virtual T Produce() { return value_; }
-
- private:
- const T value_;
- GTEST_DISALLOW_COPY_AND_ASSIGN_(FixedValueProducer);
- };
-
- class FactoryValueProducer : public ValueProducer {
- public:
- explicit FactoryValueProducer(FactoryFunction factory)
- : factory_(factory) {}
- virtual T Produce() { return factory_(); }
-
- private:
- const FactoryFunction factory_;
- GTEST_DISALLOW_COPY_AND_ASSIGN_(FactoryValueProducer);
- };
-
- static ValueProducer* producer_;
-};
-
-// This partial specialization allows a user to set default values for
-// reference types.
-template
-class DefaultValue {
- public:
- // Sets the default value for type T&.
- static void Set(T& x) { // NOLINT
- address_ = &x;
- }
-
- // Unsets the default value for type T&.
- static void Clear() {
- address_ = NULL;
- }
-
- // Returns true iff the user has set the default value for type T&.
- static bool IsSet() { return address_ != NULL; }
-
- // Returns true if T has a default return value set by the user or there
- // exists a built-in default value.
- static bool Exists() {
- return IsSet() || internal::BuiltInDefaultValue::Exists();
- }
-
- // Returns the default value for type T& if the user has set one;
- // otherwise returns the built-in default value if there is one;
- // otherwise aborts the process.
- static T& Get() {
- return address_ == NULL ?
- internal::BuiltInDefaultValue::Get() : *address_;
- }
-
- private:
- static T* address_;
-};
-
-// This specialization allows DefaultValue::Get() to
-// compile.
-template <>
-class DefaultValue {
- public:
- static bool Exists() { return true; }
- static void Get() {}
-};
-
-// Points to the user-set default value for type T.
-template
-typename DefaultValue::ValueProducer* DefaultValue::producer_ = NULL;
-
-// Points to the user-set default value for type T&.
-template
-T* DefaultValue::address_ = NULL;
-
-// Implement this interface to define an action for function type F.
-template
-class ActionInterface {
- public:
- typedef typename internal::Function::Result Result;
- typedef typename internal::Function::ArgumentTuple ArgumentTuple;
-
- ActionInterface() {}
- virtual ~ActionInterface() {}
-
- // Performs the action. This method is not const, as in general an
- // action can have side effects and be stateful. For example, a
- // get-the-next-element-from-the-collection action will need to
- // remember the current element.
- virtual Result Perform(const ArgumentTuple& args) = 0;
-
- private:
- GTEST_DISALLOW_COPY_AND_ASSIGN_(ActionInterface);
-};
-
-// An Action is a copyable and IMMUTABLE (except by assignment)
-// object that represents an action to be taken when a mock function
-// of type F is called. The implementation of Action is just a
-// linked_ptr to const ActionInterface, so copying is fairly cheap.
-// Don't inherit from Action!
-//
-// You can view an object implementing ActionInterface as a
-// concrete action (including its current state), and an Action
-// object as a handle to it.
-template
-class Action {
- public:
- typedef typename internal::Function::Result Result;
- typedef typename internal::Function::ArgumentTuple ArgumentTuple;
-
- // Constructs a null Action. Needed for storing Action objects in
- // STL containers.
- Action() {}
-
-#if GTEST_LANG_CXX11
- // Construct an Action from a specified callable.
- // This cannot take std::function directly, because then Action would not be
- // directly constructible from lambda (it would require two conversions).
- template , G>::value>::type>
- Action(G&& fun) : fun_(::std::forward(fun)) {} // NOLINT
-#endif
-
- // Constructs an Action from its implementation.
- explicit Action(ActionInterface* impl) : impl_(impl) {}
-
- // This constructor allows us to turn an Action object into an
- // Action, as long as F's arguments can be implicitly converted
- // to Func's and Func's return type can be implicitly converted to
- // F's.
- template
- explicit Action(const Action& action);
-
- // Returns true iff this is the DoDefault() action.
- bool IsDoDefault() const {
-#if GTEST_LANG_CXX11
- return impl_ == nullptr && fun_ == nullptr;
-#else
- return impl_ == NULL;
-#endif
- }
-
- // Performs the action. Note that this method is const even though
- // the corresponding method in ActionInterface is not. The reason
- // is that a const Action means that it cannot be re-bound to
- // another concrete action, not that the concrete action it binds to
- // cannot change state. (Think of the difference between a const
- // pointer and a pointer to const.)
- Result Perform(ArgumentTuple args) const {
- if (IsDoDefault()) {
- internal::IllegalDoDefault(__FILE__, __LINE__);
- }
-#if GTEST_LANG_CXX11
- if (fun_ != nullptr) {
- return internal::Apply(fun_, ::std::move(args));
- }
-#endif
- return impl_->Perform(args);
- }
-
- private:
- template
- friend class internal::ActionAdaptor;
-
- template
- friend class Action;
-
- // In C++11, Action can be implemented either as a generic functor (through
- // std::function), or legacy ActionInterface. In C++98, only ActionInterface
- // is available. The invariants are as follows:
- // * in C++98, impl_ is null iff this is the default action
- // * in C++11, at most one of fun_ & impl_ may be nonnull; both are null iff
- // this is the default action
-#if GTEST_LANG_CXX11
- ::std::function fun_;
-#endif
- internal::linked_ptr > impl_;
-};
-
-// The PolymorphicAction class template makes it easy to implement a
-// polymorphic action (i.e. an action that can be used in mock
-// functions of than one type, e.g. Return()).
-//
-// To define a polymorphic action, a user first provides a COPYABLE
-// implementation class that has a Perform() method template:
-//
-// class FooAction {
-// public:
-// template
-// Result Perform(const ArgumentTuple& args) const {
-// // Processes the arguments and returns a result, using
-// // tr1::get(args) to get the N-th (0-based) argument in the tuple.
-// }
-// ...
-// };
-//
-// Then the user creates the polymorphic action using
-// MakePolymorphicAction(object) where object has type FooAction. See
-// the definition of Return(void) and SetArgumentPointee(value) for
-// complete examples.
-template
-class PolymorphicAction {
- public:
- explicit PolymorphicAction(const Impl& impl) : impl_(impl) {}
-
- template
- operator Action() const {
- return Action(new MonomorphicImpl(impl_));
- }
-
- private:
- template
- class MonomorphicImpl : public ActionInterface {
- public:
- typedef typename internal::Function::Result Result;
- typedef typename internal::Function::ArgumentTuple ArgumentTuple;
-
- explicit MonomorphicImpl(const Impl& impl) : impl_(impl) {}
-
- virtual Result Perform(const ArgumentTuple& args) {
- return impl_.template Perform(args);
- }
-
- private:
- Impl impl_;
-
- GTEST_DISALLOW_ASSIGN_(MonomorphicImpl);
- };
-
- Impl impl_;
-
- GTEST_DISALLOW_ASSIGN_(PolymorphicAction);
-};
-
-// Creates an Action from its implementation and returns it. The
-// created Action object owns the implementation.
-template
-Action MakeAction(ActionInterface* impl) {
- return Action(impl);
-}
-
-// Creates a polymorphic action from its implementation. This is
-// easier to use than the PolymorphicAction constructor as it
-// doesn't require you to explicitly write the template argument, e.g.
-//
-// MakePolymorphicAction(foo);
-// vs
-// PolymorphicAction(foo);
-template
-inline PolymorphicAction MakePolymorphicAction(const Impl& impl) {
- return PolymorphicAction(impl);
-}
-
-namespace internal {
-
-// Allows an Action object to pose as an Action