diff --git a/.hgtags b/.hgtags index 3fb0d5822f7..c667a67a43d 100644 --- a/.hgtags +++ b/.hgtags @@ -658,3 +658,5 @@ a32f58c6b8be81877411767de7ba9c4cf087c1b5 jdk-15+31 4a8fd81d64bafa523cddb45f82805536edace106 jdk-16+6 6b65f4e7a975628df51ef755b02642075390041d jdk-15+33 c3a4a7ea7c304cabdacdc31741eb94c51351668d jdk-16+7 +b0817631d2f4395508cb10e81c3858a94d9ae4de jdk-15+34 +0a73d6f3aab48ff6d7e61e47f0bc2d87a054f217 jdk-16+8 diff --git a/bin/idea.sh b/bin/idea.sh index ad89aedacbc..35162ce943c 100644 --- a/bin/idea.sh +++ b/bin/idea.sh @@ -147,13 +147,7 @@ add_replacement() { add_replacement "###MODULE_NAMES###" "$MODULE_NAMES" add_replacement "###VCS_TYPE###" "$VCS_TYPE" SPEC_DIR=`dirname $SPEC` -if [ "x$CYGPATH" = "x" ]; then - add_replacement "###BUILD_DIR###" "$SPEC_DIR" - add_replacement "###JTREG_HOME###" "$JT_HOME" - add_replacement "###IMAGES_DIR###" "$SPEC_DIR/images/jdk" - add_replacement "###ROOT_DIR###" "$TOPLEVEL_DIR" - add_replacement "###IDEA_DIR###" "$IDEA_OUTPUT" -else +if [ "x$CYGPATH" != "x" ]; then add_replacement "###BUILD_DIR###" "`cygpath -am $SPEC_DIR`" add_replacement "###IMAGES_DIR###" "`cygpath -am $SPEC_DIR`/images/jdk" add_replacement "###ROOT_DIR###" "`cygpath -am $TOPLEVEL_DIR`" @@ -163,6 +157,22 @@ else else add_replacement "###JTREG_HOME###" "`cygpath -am $JT_HOME`" fi +elif [ "x$WSL_DISTRO_NAME" != "x" ]; then + add_replacement "###BUILD_DIR###" "`wslpath -am $SPEC_DIR`" + add_replacement "###IMAGES_DIR###" "`wslpath -am $SPEC_DIR`/images/jdk" + add_replacement "###ROOT_DIR###" "`wslpath -am $TOPLEVEL_DIR`" + add_replacement "###IDEA_DIR###" "`wslpath -am $IDEA_OUTPUT`" + if [ "x$JT_HOME" = "x" ]; then + add_replacement "###JTREG_HOME###" "" + else + add_replacement "###JTREG_HOME###" "`wslpath -am $JT_HOME`" + fi +else + add_replacement "###BUILD_DIR###" "$SPEC_DIR" + add_replacement "###JTREG_HOME###" "$JT_HOME" + add_replacement "###IMAGES_DIR###" "$SPEC_DIR/images/jdk" + add_replacement "###ROOT_DIR###" "$TOPLEVEL_DIR" + add_replacement "###IDEA_DIR###" "$IDEA_OUTPUT" fi SOURCE_PREFIX="" for root in $MODULE_ROOTS; do if [ "x$CYGPATH" != "x" ]; then - root=`cygpath -am $root` + root=`cygpath -am $root` + elif [ "x$WSL_DISTRO_NAME" != "x" ]; then + root=`wslpath -am $root` fi SOURCES=$SOURCES" $SOURCE_PREFIX""$root""$SOURCE_POSTFIX" done @@ -196,16 +208,30 @@ fi CP=$ANT_HOME/lib/ant.jar rm -rf $CLASSES; mkdir $CLASSES -if [ "x$CYGPATH" = "x" ] ; then ## CYGPATH may be set in env.cfg - JAVAC_SOURCE_FILE=$IDEA_OUTPUT/src/idea/IdeaLoggerWrapper.java - JAVAC_SOURCE_PATH=$IDEA_OUTPUT/src - JAVAC_CLASSES=$CLASSES - JAVAC_CP=$CP -else +if [ "x$CYGPATH" != "x" ] ; then ## CYGPATH may be set in env.cfg JAVAC_SOURCE_FILE=`cygpath -am $IDEA_OUTPUT/src/idea/IdeaLoggerWrapper.java` JAVAC_SOURCE_PATH=`cygpath -am $IDEA_OUTPUT/src` JAVAC_CLASSES=`cygpath -am $CLASSES` JAVAC_CP=`cygpath -am $CP` + JAVAC=javac +elif [ "x$WSL_DISTRO_NAME" != "x" ]; then + JAVAC_SOURCE_FILE=`realpath --relative-to=./ $IDEA_OUTPUT/src/idea/IdeaLoggerWrapper.java` + JAVAC_SOURCE_PATH=`realpath --relative-to=./ $IDEA_OUTPUT/src` + JAVAC_CLASSES=`realpath --relative-to=./ $CLASSES` + ANT_TEMP=`mktemp -d -p ./` + cp $ANT_HOME/lib/ant.jar $ANT_TEMP/ant.jar + JAVAC_CP=$ANT_TEMP/ant.jar + JAVAC=javac.exe +else + JAVAC_SOURCE_FILE=$IDEA_OUTPUT/src/idea/IdeaLoggerWrapper.java + JAVAC_SOURCE_PATH=$IDEA_OUTPUT/src + JAVAC_CLASSES=$CLASSES + JAVAC_CP=$CP + JAVAC=javac fi -$BOOT_JDK/bin/javac -d $JAVAC_CLASSES -sourcepath $JAVAC_SOURCE_PATH -cp $JAVAC_CP $JAVAC_SOURCE_FILE +$BOOT_JDK/bin/$JAVAC -d 
$JAVAC_CLASSES -sourcepath $JAVAC_SOURCE_PATH -cp $JAVAC_CP $JAVAC_SOURCE_FILE + +if [ "x$WSL_DISTRO_NAME" != "x" ]; then + rm -rf $ANT_TEMP +fi \ No newline at end of file diff --git a/make/autoconf/basic_tools.m4 b/make/autoconf/basic_tools.m4 index 01289305baf..9f0eea42d84 100644 --- a/make/autoconf/basic_tools.m4 +++ b/make/autoconf/basic_tools.m4 @@ -72,7 +72,6 @@ AC_DEFUN_ONCE([BASIC_SETUP_FUNDAMENTAL_TOOLS], UTIL_REQUIRE_PROGS(UNAME, uname) UTIL_REQUIRE_PROGS(UNIQ, uniq) UTIL_REQUIRE_PROGS(WC, wc) - UTIL_REQUIRE_PROGS(WHICH, which) UTIL_REQUIRE_PROGS(XARGS, xargs) # Then required tools that require some special treatment. diff --git a/make/autoconf/configure b/make/autoconf/configure index 953ebb6f1f3..f635c19b644 100644 --- a/make/autoconf/configure +++ b/make/autoconf/configure @@ -78,11 +78,11 @@ generated_script="$build_support_dir/generated-configure.sh" ### autoconf_missing_help() { - APT_GET="`which apt-get 2> /dev/null | grep -v '^no apt-get in'`" - YUM="`which yum 2> /dev/null | grep -v '^no yum in'`" - BREW="`which brew 2> /dev/null | grep -v '^no brew in'`" - ZYPPER="`which zypper 2> /dev/null | grep -v '^no zypper in'`" - CYGWIN="`which cygpath 2> /dev/null | grep -v '^no cygpath in'`" + APT_GET="`type -p apt-get 2> /dev/null`" + YUM="`type -p yum 2> /dev/null`" + BREW="`type -p brew 2> /dev/null`" + ZYPPER="`type -p zypper 2> /dev/null`" + CYGWIN="`type -p cygpath 2> /dev/null`" if test "x$ZYPPER" != x; then PKGHANDLER_COMMAND="sudo zypper install autoconf" @@ -111,7 +111,7 @@ generate_configure_script() { exit 1 fi else - AUTOCONF="`which autoconf 2> /dev/null | grep -v '^no autoconf in'`" + AUTOCONF="`type -p autoconf 2> /dev/null`" if test "x$AUTOCONF" = x; then echo echo "Autoconf is not found on the PATH, and AUTOCONF is not set." diff --git a/make/autoconf/util.m4 b/make/autoconf/util.m4 index 3ab9bf5f5f3..0addb9c3a16 100644 --- a/make/autoconf/util.m4 +++ b/make/autoconf/util.m4 @@ -601,7 +601,7 @@ AC_DEFUN([UTIL_REQUIRE_BUILTIN_PROGS], UTIL_SETUP_TOOL($1, [AC_PATH_PROGS($1, $2, , $3)]) if test "x[$]$1" = x; then AC_MSG_NOTICE([Required tool $2 not found in PATH, checking built-in]) - if command -v $2 > /dev/null 2>&1; then + if type -p $2 > /dev/null 2>&1; then AC_MSG_NOTICE([Found $2 as shell built-in. Using it]) $1="$2" else diff --git a/make/autoconf/util_windows.m4 b/make/autoconf/util_windows.m4 index 27854978cc1..868c436bcb3 100644 --- a/make/autoconf/util_windows.m4 +++ b/make/autoconf/util_windows.m4 @@ -242,7 +242,7 @@ AC_DEFUN([UTIL_FIXUP_EXECUTABLE_CYGWIN], new_path=`$CYGPATH -u "$path"` # Now try to locate executable using which - new_path=`$WHICH "$new_path" 2> /dev/null` + new_path=`type -p "$new_path" 2> /dev/null` # bat and cmd files are not always considered executable in cygwin causing which # to not find them if test "x$new_path" = x \ @@ -258,7 +258,7 @@ AC_DEFUN([UTIL_FIXUP_EXECUTABLE_CYGWIN], path="$complete" arguments="EOL" new_path=`$CYGPATH -u "$path"` - new_path=`$WHICH "$new_path" 2> /dev/null` + new_path=`type -p "$new_path" 2> /dev/null` # bat and cmd files are not always considered executable in cygwin causing which # to not find them if test "x$new_path" = x \ @@ -324,7 +324,7 @@ AC_DEFUN([UTIL_FIXUP_EXECUTABLE_MSYS], UTIL_REWRITE_AS_UNIX_PATH(new_path) # Now try to locate executable using which - new_path=`$WHICH "$new_path" 2> /dev/null` + new_path=`type -p "$new_path" 2> /dev/null` if test "x$new_path" = x; then # Oops. Which didn't find the executable. 
@@ -336,7 +336,7 @@ AC_DEFUN([UTIL_FIXUP_EXECUTABLE_MSYS], new_path="$path" UTIL_REWRITE_AS_UNIX_PATH(new_path) - new_path=`$WHICH "$new_path" 2> /dev/null` + new_path=`type -p "$new_path" 2> /dev/null` # bat and cmd files are not always considered executable in MSYS causing which # to not find them if test "x$new_path" = x \ @@ -392,7 +392,7 @@ AC_DEFUN([UTIL_FIXUP_EXECUTABLE_WSL], # Now try to locate executable using which new_path_bak="$new_path" - new_path=`$WHICH "$new_path" 2> /dev/null` + new_path=`type -p "$new_path" 2> /dev/null` # bat and cmd files are not considered executable in WSL if test "x$new_path" = x \ && test "x`$ECHO \"$path\" | $GREP -i -e \"\\.bat$\" -e \"\\.cmd$\"`" != x \ @@ -409,7 +409,7 @@ AC_DEFUN([UTIL_FIXUP_EXECUTABLE_WSL], new_path="$path" UTIL_REWRITE_AS_UNIX_PATH([new_path]) new_path_bak="$new_path" - new_path=`$WHICH "$new_path" 2> /dev/null` + new_path=`type -p "$new_path" 2> /dev/null` # bat and cmd files are not considered executable in WSL if test "x$new_path" = x \ && test "x`$ECHO \"$path\" | $GREP -i -e \"\\.bat$\" -e \"\\.cmd$\"`" != x \ diff --git a/make/conf/jib-profiles.js b/make/conf/jib-profiles.js index b81d258b752..2786f2f952d 100644 --- a/make/conf/jib-profiles.js +++ b/make/conf/jib-profiles.js @@ -1062,15 +1062,15 @@ var getJibProfilesDependencies = function (input, common) { jcov: { // Until an official build of JCov is available, use custom - // build to support classfile version 57. - // See CODETOOLS-7902358 for more info. + // build to support classfile version 60. + // See CODETOOLS-7902734 for more info. // server: "jpg", // product: "jcov", // version: "3.0", // build_number: "b07", // file: "bundles/jcov-3_0.zip", organization: common.organization, - revision: "3.0-59-support+1.0", + revision: "3.0-60-support+1.0", ext: "zip", environment_name: "JCOV_HOME", }, diff --git a/make/data/cacerts/entrustrootcag4 b/make/data/cacerts/entrustrootcag4 new file mode 100644 index 00000000000..67a1475837b --- /dev/null +++ b/make/data/cacerts/entrustrootcag4 @@ -0,0 +1,43 @@ +Owner: CN=Entrust Root Certification Authority - G4, OU="(c) 2015 Entrust, Inc. - for authorized use only", OU=See www.entrust.net/legal-terms, O="Entrust, Inc.", C=US +Issuer: CN=Entrust Root Certification Authority - G4, OU="(c) 2015 Entrust, Inc. 
- for authorized use only", OU=See www.entrust.net/legal-terms, O="Entrust, Inc.", C=US +Serial number: d9b5437fafa9390f000000005565ad58 +Valid from: Wed May 27 11:11:16 GMT 2015 until: Sun Dec 27 11:41:16 GMT 2037 +Signature algorithm name: SHA256withRSA +Subject Public Key Algorithm: 4096-bit RSA key +Version: 3 +-----BEGIN CERTIFICATE----- +MIIGSzCCBDOgAwIBAgIRANm1Q3+vqTkPAAAAAFVlrVgwDQYJKoZIhvcNAQELBQAw +gb4xCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQL +Ex9TZWUgd3d3LmVudHJ1c3QubmV0L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykg +MjAxNSBFbnRydXN0LCBJbmMuIC0gZm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMjAw +BgNVBAMTKUVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEc0 +MB4XDTE1MDUyNzExMTExNloXDTM3MTIyNzExNDExNlowgb4xCzAJBgNVBAYTAlVT +MRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1 +c3QubmV0L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxNSBFbnRydXN0LCBJ +bmMuIC0gZm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMjAwBgNVBAMTKUVudHJ1c3Qg +Um9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEc0MIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEAsewsQu7i0TD/pZJH4i3DumSXbcr3DbVZwbPLqGgZ +2K+EbTBwXX7zLtJTmeH+H17ZSK9dE43b/2MzTdMAArzE+NEGCJR5WIoV3imz/f3E +T+iq4qA7ec2/a0My3dl0ELn39GjUu9CH1apLiipvKgS1sqbHoHrmSKvS0VnM1n4j +5pds8ELl3FFLFUHtSUrJ3hCX1nbB76W1NhSXNdh4IjVS70O92yfbYVaCNNzLiGAM +C1rlLAHGVK/XqsEQe9IFWrhAnoanw5CGAlZSCXqc0ieCU0plUmr1POeo8pyvi73T +DtTUXm6Hnmo9RR3RXRv06QqsYJn7ibT/mCzPfB3pAqoEmh643IhuJbNsZvc8kPNX +wbMv9W3y+8qh+CmdRouzavbmZwe+LGcKKh9asj5XxNMhIWNlUpEbsZmOeX7m640A +2Vqq6nPopIICR5b+W45UYaPrL0swsIsjdXJ8ITzI9vF01Bx7owVV7rtNOzK+mndm +nqxpkCIHH2E6lr7lmk/MBTwoWdPBDFSoWWG9yHJM6Nyfh3+9nEg2XpWjDrk4JFX8 +dWbrAuMINClKxuMrLzOg2qOGpRKX/YAr2hRC45K9PvJdXmd0LhyIRyk0X+IyqJwl +N4y6mACXi0mWHv0liqzc2thddG5msP9E36EYxr5ILzeUePiVSj9/E15dWf10hkNj +c0kCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD +VR0OBBYEFJ84xFYjwznooHFs6FRM5Og6sb9nMA0GCSqGSIb3DQEBCwUAA4ICAQAS +5UKme4sPDORGpbZgQIeMJX6tuGguW8ZAdjwD+MlZ9POrYs4QjbRaZIxowLByQzTS +Gwv2LFPSypBLhmb8qoMi9IsabyZIrHZ3CL/FmFz0Jomee8O5ZDIBf9PD3Vht7LGr +hFV0d4QEJ1JrhkzO3bll/9bGXp+aEJlLdWr+aumXIOTkdnrG0CSqkM0gkLpHZPt/ +B7NTeLUKYvJzQ85BK4FqLoUWlFPUa19yIqtRLULVAJyZv967lDtX/Zr1hstWO1uI +AeV8KEsD+UmDfLJ/fOPtjqF/YFOOVZ1QNBIPt5d7bIdKROf1beyAN/BYGW5KaHbw +H5Lk6rWS02FREAutp9lfx1/cH6NcjKF+m7ee01ZvZl4HliDtC3T7Zk6LERXpgUl+ +b7DUUH8i119lAg2m9IUe2K4GS0qn0jFmwvjO5QimpAKWRGhXxNUzzxkvFMSUHHuk +2fCfDrGA4tGeEWSpiBE6doLlYsKA2KSD7ZPvfC+QsDJMlhVoSFLUmQjAJOgc47Ol +IQ6SwJAfzyBfyjs4x7dtOvPmRLgOMWuIjnDrnBdSqEGULoe256YSxXXfW8AKbnuk +5F6G+TaU33fD6Q3AOfF5u0aOq0NZJ7cguyPpVkAh7DE9ZapD8j3fcEThuk0mEDuY +n/PIjhs4ViFqUZPTkcpG2om3PVODLAgfi49T3f+sHw== +-----END CERTIFICATE----- diff --git a/make/data/cacerts/sslrooteccca b/make/data/cacerts/sslrooteccca new file mode 100644 index 00000000000..9943012ed8e --- /dev/null +++ b/make/data/cacerts/sslrooteccca @@ -0,0 +1,23 @@ +Owner: CN=SSL.com Root Certification Authority ECC, O=SSL Corporation, L=Houston, ST=Texas, C=US +Issuer: CN=SSL.com Root Certification Authority ECC, O=SSL Corporation, L=Houston, ST=Texas, C=US +Serial number: 75e6dfcbc1685ba8 +Valid from: Fri Feb 12 18:14:03 GMT 2016 until: Tue Feb 12 18:14:03 GMT 2041 +Signature algorithm name: SHA256withECDSA +Subject Public Key Algorithm: 384-bit EC key +Version: 3 +-----BEGIN CERTIFICATE----- +MIICjTCCAhSgAwIBAgIIdebfy8FoW6gwCgYIKoZIzj0EAwIwfDELMAkGA1UEBhMC +VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T +U0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZpY2F0 +aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNDAzWhcNNDEwMjEyMTgxNDAz 
+WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hvdXN0 +b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNvbSBS +b290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49AgEGBSuB +BAAiA2IABEVuqVDEpiM2nl8ojRfLliJkP9x6jh3MCLOicSS6jkm5BBtHllirLZXI +7Z4INcgn64mMU1jrYor+8FsPazFSY0E7ic3s7LaNGdM0B9y7xgZ/wkWV7Mt/qCPg +CemB+vNH06NjMGEwHQYDVR0OBBYEFILRhXMw5zUE044CkvvlpNHEIejNMA8GA1Ud +EwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUgtGFczDnNQTTjgKS++Wk0cQh6M0wDgYD +VR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2cAMGQCMG/n61kRpGDPYbCWe+0F+S8T +kdzt5fxQaxFGRrMcIQBiu77D5+jNB5n5DQtdcj7EqgIwH7y6C+IwJPt8bYBVCpk+ +gA0z5Wajs6O7pdWLjwkspl1+4vAHCGht0nxpbl/f5Wpl +-----END CERTIFICATE----- diff --git a/make/data/cacerts/sslrootevrsaca b/make/data/cacerts/sslrootevrsaca new file mode 100644 index 00000000000..c009aa081e1 --- /dev/null +++ b/make/data/cacerts/sslrootevrsaca @@ -0,0 +1,41 @@ +Owner: CN=SSL.com EV Root Certification Authority RSA R2, O=SSL Corporation, L=Houston, ST=Texas, C=US +Issuer: CN=SSL.com EV Root Certification Authority RSA R2, O=SSL Corporation, L=Houston, ST=Texas, C=US +Serial number: 56b629cd34bc78f6 +Valid from: Wed May 31 18:14:37 GMT 2017 until: Fri May 30 18:14:37 GMT 2042 +Signature algorithm name: SHA256withRSA +Subject Public Key Algorithm: 4096-bit RSA key +Version: 3 +-----BEGIN CERTIFICATE----- +MIIF6zCCA9OgAwIBAgIIVrYpzTS8ePYwDQYJKoZIhvcNAQELBQAwgYIxCzAJBgNV +BAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4GA1UEBwwHSG91c3RvbjEYMBYGA1UE +CgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQDDC5TU0wuY29tIEVWIFJvb3QgQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIyMB4XDTE3MDUzMTE4MTQzN1oXDTQy +MDUzMDE4MTQzN1owgYIxCzAJBgNVBAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4G +A1UEBwwHSG91c3RvbjEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQD +DC5TU0wuY29tIEVWIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIy +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAjzZlQOHWTcDXtOlG2mvq +M0fNTPl9fb69LT3w23jhhqXZuglXaO1XPqDQCEGD5yhBJB/jchXQARr7XnAjssuf +OePPxU7Gkm0mxnu7s9onnQqG6YE3Bf7wcXHswxzpY6IXFJ3vG2fThVUCAtZJycxa +4bH3bzKfydQ7iEGonL3Lq9ttewkfokxykNorCPzPPFTOZw+oz12WGQvE43LrrdF9 +HSfvkusQv1vrO6/PgN3B0pYEW3p+pKk8OHakYo6gOV7qd89dAFmPZiw+B6KjBSYR +aZfqhbcPlgtLyEDhULouisv3D5oi53+aNxPN8k0TayHRwMwi8qFG9kRpnMphNQcA +b9ZhCBHqurj26bNg5U257J8UZslXWNvNh2n4ioYSA0e/ZhN2rHd9NCSFg83XqpyQ +Gp8hLH94t2S42Oim9HizVcuE0jLEeK6jj2HdzghTreyI/BXkmg3mnxp3zkyPuBQV +PWKchjgGAGYS5Fl2WlPAApiiECtoRHuOec4zSnaqW4EWG7WK2NAAe15itAnWhmMO +pgWVSbooi4iTsjQc2KRVbrcc0N6ZVTsj9CLg+SlmJuwgUHfbSguPvuUCYHBBXtSu +UDkiFCbLsjtzdFVHB3mBOagwE0TlBIqulhMlQg+5U8Sb/M3kHN48+qvWBkofZ6aY +MBzdLNvcGJVXZsb/XItW9XcCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAfBgNV +HSMEGDAWgBT5YLvU49U09rj1BoAlp3PbRmmonjAdBgNVHQ4EFgQU+WC71OPVNPa4 +9QaAJadz20ZpqJ4wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQBW +s47LCp1Jjr+kxJG7ZhcFUZh1++VQLHqe8RT6q9OKPv+RKY9ji9i0qVQBDb6Thi/5 +Sm3HXvVX+cpVHBK+Rw82xd9qt9t1wkclf7nxY/hoLVUE0fKNsKTPvDxeH3jnpaAg +cLAExbf3cqfeIg29MyVGjGSSJuM+LmOW2puMPfgYCdcDzH2GguDKBAdRUNf/ktUM +79qGn5nX67evaOI5JpS6aLe/g9Pqemc9YmeuJeVy6OLk7K4S9ksrPJ/psEDzOFSz +/bdoyNrGj1E8svuR3Bznm53htw1yj+KkxKl4+esUrMZDBcJlOSgYAsOCsp0FvmXt +ll9ldDz7CTUue5wT/RsPXcdtgTpWD8w74a8CLyKsRspGPKAcTNZEtF4uXBVmCeEm +Kf7GUmG6sXP/wwyc5WxqlD8UykAWlYTzWamsX0xhk23RO8yilQwipmdnRC652dKK +QbNmC1r7fSOl8hqw/96bg5Qu0T/fkreRrwU7ZcegbLHNYhLDkBvjJc40vG93drEQ +w/cFGsDWr3RiSBd3kmmQYRzelYB0VI8YHMPzA9C/pEN1hlMYegouCRw2n5H9gooi +S9EOUCXdywMMF8mDAAhONU2Ki+3wApRmLER/y5UnlhetCTCstnEXbosX9hwJ1C07 +mKVx01QT2WDz9UtmT/rx7iASjbSsV7FFY6GsdqnC+w== +-----END CERTIFICATE----- diff --git a/make/data/cacerts/sslrootrsaca b/make/data/cacerts/sslrootrsaca new file mode 100644 index 
00000000000..b7982751e24 --- /dev/null +++ b/make/data/cacerts/sslrootrsaca @@ -0,0 +1,41 @@ +Owner: CN=SSL.com Root Certification Authority RSA, O=SSL Corporation, L=Houston, ST=Texas, C=US +Issuer: CN=SSL.com Root Certification Authority RSA, O=SSL Corporation, L=Houston, ST=Texas, C=US +Serial number: 7b2c9bd316803299 +Valid from: Fri Feb 12 17:39:39 GMT 2016 until: Tue Feb 12 17:39:39 GMT 2041 +Signature algorithm name: SHA256withRSA +Subject Public Key Algorithm: 4096-bit RSA key +Version: 3 +-----BEGIN CERTIFICATE----- +MIIF3TCCA8WgAwIBAgIIeyyb0xaAMpkwDQYJKoZIhvcNAQELBQAwfDELMAkGA1UE +BhMCVVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQK +DA9TU0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eSBSU0EwHhcNMTYwMjEyMTczOTM5WhcNNDEwMjEyMTcz +OTM5WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv +dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNv +bSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFJTQTCCAiIwDQYJKoZIhvcN +AQEBBQADggIPADCCAgoCggIBAPkP3aMrfcvQKv7sZ4Wm5y4bunfh4/WvpOz6Sl2R +xFdHaxh3a3by/ZPkPQ/CFp4LZsNWlJ4Xg4XOVu/yFv0AYvUiCVToZRdOQbngT0aX +qhvIuG5iXmmxX9sqAn78bMrzQdjt0Oj8P2FI7bADFB0QDksZ4LtO7IZl/zbzXmcC +C52GVWH9ejjt/uIZALdvoVBidXQ8oPrIJZK0bnoix/geoeOy3ZExqysdBP+lSgQ3 +6YWkMyv94tZVNHwZpEpox7Ko07fKoZOI68GXvIz5HdkihCR0xwQ9aqkpk8zruFvh +/l8lqjRYyMEjVJ0bmBHDOJx+PYZspQ9AhnwC9FwCTyjLrnGfDzrIM/4RJTXq/LrF +YD3ZfBjVsqnTdXgDciLKOsMf7yzlLqn6niy2UUb9rwPW6mBo6oUWNmuF6R7As93E +JNyAKoFBbZQ+yODJgUEAnl6/f8UImKIYLEJAs/lvOCdLToD0PYFH4Ih86hzOtXVc +US4cK38acijnALXRdMbX5J+tB5O2UzU1/Dfkw/ZdFr4hc96SCvigY2q8lpJqPvi8 +ZVWb3vUNiSYE/CUapiVpy8JtynziWV+XrOvvLsi81xtZPCvM8hnIk2snYxnP/Okm ++Mpxm3+T/jRnhE6Z6/yzeAkzcLpmpnbtG3PrGqUNxCITIJRWCk4sbE6x/c+cCbqi +M+2HAgMBAAGjYzBhMB0GA1UdDgQWBBTdBAkHovV6fVJTEpKV7jiAJQ2mWTAPBgNV +HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFN0ECQei9Xp9UlMSkpXuOIAlDaZZMA4G +A1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAIBgRlCn7Jp0cHh5wYfGV +cpNxJK1ok1iOMq8bs3AD/CUrdIWQPXhq9LmLpZc7tRiRux6n+UBbkflVma8eEdBc +Hadm47GUBwwyOabqG7B52B2ccETjit3E+ZUfijhDPwGFpUenPUayvOUiaPd7nNgs +PgohyC0zrL/FgZkxdMF1ccW+sfAjRfSda/wZY52jvATGGAslu1OJD7OAUN5F7kR/ +q5R4ZJjT9ijdh9hwZXT7DrkT66cPYakylszeu+1jTBi7qUD3oFRuIIhxdRjqerQ0 +cuAjJ3dctpDqhiVAq+8zD8ufgr6iIPv2tS0a5sKFsXQP+8hlAqRSAUfdSSLBv9jr +a6x+3uxjMxW3IwiPxg+NQVrdjsW5j+VFP3jbutIbQLH+cU0/4IGiul607BXgk90I +H37hVZkLId6Tngr75qNJvTYw/ud3sqB1l7UtgYgXZSD32pAAn8lSzDLKNXz1PQ/Y +K9f1JmzJBjSWFupwWRoyeXkLtoh/D1JIPb9s2KJELtFOt3JY04kTlf5Eq/jXixtu +nLwsoFvVagCvXzfh1foQC5ichucmj87w7G6KVwuA406ywKBjYZC6VWg3dGq2ktuf +oYYitmUnDuy2n0Jg5GfCtdpBC8TTi2EbvPofkSvXRAdeuims2cXp71NIWuuA8ShY +Ic2wBlX7Jz9TkHCpBB5XJ7k= +-----END CERTIFICATE----- diff --git a/make/hotspot/symbols/symbols-linux b/make/hotspot/symbols/symbols-linux index 0efd2dba97f..bbb0d35115f 100644 --- a/make/hotspot/symbols/symbols-linux +++ b/make/hotspot/symbols/symbols-linux @@ -22,6 +22,7 @@ # JVM_handle_linux_signal +JVM_IsUseContainerSupport numa_error numa_warn sysThreadAvailableStackWithSlack diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp index 6fc203452d0..a1482bc4d61 100644 --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp @@ -1553,7 +1553,7 @@ void MacroAssembler::movptr(Register r, uintptr_t imm64) { block_comment(buffer); } #endif - assert(imm64 < (1ul << 48), "48-bit overflow in address constant"); + assert(imm64 < (1ull << 48), "48-bit overflow in address constant"); movz(r, imm64 & 0xffff); imm64 >>= 16; movk(r, imm64 & 0xffff, 16); 
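The assert fix just above and the alignment rewrites in the AArch64 stub files that follow are both toolchain-portability idioms: 1ull guarantees a 64-bit operand for the 48-bit shift even on LLP64 compilers such as MSVC, where unsigned long is only 32 bits wide, and the GCC-specific __attribute__ ((aligned(n))) spellings give way to the existing ATTRIBUTE_ALIGNED(n) macro written in front of the declaration, presumably because MSVC's alignment specifier has to precede it. A minimal sketch of both idioms, using a hypothetical MY_ALIGNED stand-in rather than HotSpot's real per-compiler macro:

#include <stdint.h>
#include <stdio.h>

#if defined(_MSC_VER)
  #define MY_ALIGNED(n) __declspec(align(n))          // MSVC form, must precede the declaration
#else
  #define MY_ALIGNED(n) __attribute__ ((aligned(n)))  // GCC/Clang form
#endif

// Attribute-first placement compiles with both toolchains.
MY_ALIGNED(64) static const uint32_t table[4] = { 1, 2, 3, 4 };

int main() {
  // 1ul would be only a 32-bit operand under MSVC, making a 48-bit shift undefined;
  // 1ull is 64 bits wide everywhere.
  uint64_t limit = 1ull << 48;
  printf("aligned: %d  limit: 0x%llx\n",
         (int)(reinterpret_cast<uintptr_t>(table) % 64 == 0),
         (unsigned long long)limit);
  return 0;
}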
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64_log.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64_log.cpp index 32b9aed8958..6787aeffd18 100644 --- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64_log.cpp +++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64_log.cpp @@ -65,7 +65,7 @@ // Table with p(r) polynomial coefficients // and table representation of logarithm values (hi and low parts) -__attribute__ ((aligned(64))) juint _L_tbl[] = +ATTRIBUTE_ALIGNED(64) juint _L_tbl[] = { // coefficients of p(r) polynomial: // _coeff[] diff --git a/src/hotspot/cpu/aarch64/stubRoutines_aarch64.cpp b/src/hotspot/cpu/aarch64/stubRoutines_aarch64.cpp index c2a278d3a7a..b2d0d5dbff8 100644 --- a/src/hotspot/cpu/aarch64/stubRoutines_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/stubRoutines_aarch64.cpp @@ -62,7 +62,7 @@ bool StubRoutines::aarch64::_completed = false; /** * crc_table[] from jdk/src/share/native/java/util/zip/zlib-1.2.5/crc32.h */ -juint StubRoutines::aarch64::_crc_table[] ATTRIBUTE_ALIGNED(4096) = +ATTRIBUTE_ALIGNED(4096) juint StubRoutines::aarch64::_crc_table[] = { // Table 0 0x00000000UL, 0x77073096UL, 0xee0e612cUL, 0x990951baUL, 0x076dc419UL, @@ -289,11 +289,11 @@ juint StubRoutines::aarch64::_crc_table[] ATTRIBUTE_ALIGNED(4096) = }; // Accumulation coefficients for adler32 upper 16 bits -jubyte StubRoutines::aarch64::_adler_table[] __attribute__ ((aligned(64))) = { +ATTRIBUTE_ALIGNED(64) jubyte StubRoutines::aarch64::_adler_table[] = { 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1 }; -juint StubRoutines::aarch64::_npio2_hw[] __attribute__ ((aligned(64))) = { +ATTRIBUTE_ALIGNED(64) juint StubRoutines::aarch64::_npio2_hw[] = { // first, various coefficient values: 0.5, invpio2, pio2_1, pio2_1t, pio2_2, // pio2_2t, pio2_3, pio2_3t // This is a small optimization wich keeping double[8] values in int[] table @@ -325,7 +325,7 @@ juint StubRoutines::aarch64::_npio2_hw[] __attribute__ ((aligned(64))) = { // Coefficients for sin(x) polynomial approximation: S1..S6. // See kernel_sin comments in macroAssembler_aarch64_trig.cpp for details -jdouble StubRoutines::aarch64::_dsin_coef[] __attribute__ ((aligned(64))) = { +ATTRIBUTE_ALIGNED(64) jdouble StubRoutines::aarch64::_dsin_coef[] = { -1.66666666666666324348e-01, // 0xBFC5555555555549 8.33333333332248946124e-03, // 0x3F8111111110F8A6 -1.98412698298579493134e-04, // 0xBF2A01A019C161D5 @@ -336,7 +336,7 @@ jdouble StubRoutines::aarch64::_dsin_coef[] __attribute__ ((aligned(64))) = { // Coefficients for cos(x) polynomial approximation: C1..C6. 
// See kernel_cos comments in macroAssembler_aarch64_trig.cpp for details -jdouble StubRoutines::aarch64::_dcos_coef[] __attribute__ ((aligned(64))) = { +ATTRIBUTE_ALIGNED(64) jdouble StubRoutines::aarch64::_dcos_coef[] = { 4.16666666666666019037e-02, // c0x3FA555555555554C -1.38888888888741095749e-03, // 0xBF56C16C16C15177 2.48015872894767294178e-05, // 0x3EFA01A019CB1590 @@ -351,7 +351,7 @@ jdouble StubRoutines::aarch64::_dcos_coef[] __attribute__ ((aligned(64))) = { // Converted to double to avoid unnecessary conversion in code // NOTE: table looks like original int table: {0xA2F983, 0x6E4E44,...} with // only (double) conversion added -jdouble StubRoutines::aarch64::_two_over_pi[] __attribute__ ((aligned(64))) = { +ATTRIBUTE_ALIGNED(64) jdouble StubRoutines::aarch64::_two_over_pi[] = { (double)0xA2F983, (double)0x6E4E44, (double)0x1529FC, (double)0x2757D1, (double)0xF534DD, (double)0xC0DB62, (double)0x95993C, (double)0x439041, (double)0xFE5163, (double)0xABDEBB, (double)0xC561B7, (double)0x246E3A, (double)0x424DD2, (double)0xE00649, (double)0x2EEA09, (double)0xD1921C, (double)0xFE1DEB, (double)0x1CB129, @@ -366,7 +366,7 @@ jdouble StubRoutines::aarch64::_two_over_pi[] __attribute__ ((aligned(64))) = { }; // Pi over 2 value -jdouble StubRoutines::aarch64::_pio2[] __attribute__ ((aligned(64))) = { +ATTRIBUTE_ALIGNED(64) jdouble StubRoutines::aarch64::_pio2[] = { 1.57079625129699707031e+00, // 0x3FF921FB40000000 7.54978941586159635335e-08, // 0x3E74442D00000000 5.39030252995776476554e-15, // 0x3CF8469880000000 diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp index b04ff9d261b..2d05442a940 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -429,38 +429,6 @@ class StubGenerator: public StubCodeGenerator { return start; } - - //---------------------------------------------------------------------------------------------------- - // Implementation of int32_t atomic_xchg(int32_t exchange_value, volatile int32_t* dest) - // used by Atomic::xchg(volatile int32_t* dest, int32_t exchange_value) - // - // xchg exists as far back as 8086, lock needed for MP only - // Stack layout immediately after call: - // - // 0 [ret addr ] <--- rsp - // 1 [ ex ] - // 2 [ dest ] - // - // Result: *dest <- ex, return (old *dest) - // - // Note: win32 does not currently use this code - - address generate_atomic_xchg() { - StubCodeMark mark(this, "StubRoutines", "atomic_xchg"); - address start = __ pc(); - - __ push(rdx); - Address exchange(rsp, 2 * wordSize); - Address dest_addr(rsp, 3 * wordSize); - __ movl(rax, exchange); - __ movptr(rdx, dest_addr); - __ xchgl(rax, Address(rdx, 0)); - __ pop(rdx); - __ ret(0); - - return start; - } - //---------------------------------------------------------------------------------------------------- // Support for void verify_mxcsr() // @@ -3797,9 +3765,6 @@ class StubGenerator: public StubCodeGenerator { // is referenced by megamorphic call StubRoutines::_catch_exception_entry = generate_catch_exception(); - // These are currently used by Solaris/Intel - StubRoutines::_atomic_xchg_entry = generate_atomic_xchg(); - // platform dependent create_control_words(); diff --git a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp index 19ff2cc9e4e..f42a9c492d4 100644 --- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp +++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp @@ -567,170 +567,6 @@ class StubGenerator: public StubCodeGenerator { return start; } - // Implementation of jint atomic_xchg(jint add_value, volatile jint* dest) - // used by Atomic::xchg(volatile jint* dest, jint exchange_value) - // - // Arguments : - // c_rarg0: exchange_value - // c_rarg0: dest - // - // Result: - // *dest <- ex, return (orig *dest) - address generate_atomic_xchg() { - StubCodeMark mark(this, "StubRoutines", "atomic_xchg"); - address start = __ pc(); - - __ movl(rax, c_rarg0); // Copy to eax we need a return value anyhow - __ xchgl(rax, Address(c_rarg1, 0)); // automatic LOCK - __ ret(0); - - return start; - } - - // Implementation of intptr_t atomic_xchg(jlong add_value, volatile jlong* dest) - // used by Atomic::xchg(volatile jlong* dest, jlong exchange_value) - // - // Arguments : - // c_rarg0: exchange_value - // c_rarg1: dest - // - // Result: - // *dest <- ex, return (orig *dest) - address generate_atomic_xchg_long() { - StubCodeMark mark(this, "StubRoutines", "atomic_xchg_long"); - address start = __ pc(); - - __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow - __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK - __ ret(0); - - return start; - } - - // Support for jint atomic::atomic_cmpxchg(jint exchange_value, volatile jint* dest, - // jint compare_value) - // - // Arguments : - // c_rarg0: exchange_value - // c_rarg1: dest - // c_rarg2: compare_value - // - // Result: - // if ( compare_value == *dest ) { - // *dest = exchange_value - // return compare_value; - // else - // return *dest; - address generate_atomic_cmpxchg() { - StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg"); - address start = __ pc(); - - __ movl(rax, c_rarg2); - __ lock(); - __ cmpxchgl(c_rarg0, 
Address(c_rarg1, 0)); - __ ret(0); - - return start; - } - - // Support for int8_t atomic::atomic_cmpxchg(int8_t exchange_value, volatile int8_t* dest, - // int8_t compare_value) - // - // Arguments : - // c_rarg0: exchange_value - // c_rarg1: dest - // c_rarg2: compare_value - // - // Result: - // if ( compare_value == *dest ) { - // *dest = exchange_value - // return compare_value; - // else - // return *dest; - address generate_atomic_cmpxchg_byte() { - StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_byte"); - address start = __ pc(); - - __ movsbq(rax, c_rarg2); - __ lock(); - __ cmpxchgb(c_rarg0, Address(c_rarg1, 0)); - __ ret(0); - - return start; - } - - // Support for int64_t atomic::atomic_cmpxchg(int64_t exchange_value, - // volatile int64_t* dest, - // int64_t compare_value) - // Arguments : - // c_rarg0: exchange_value - // c_rarg1: dest - // c_rarg2: compare_value - // - // Result: - // if ( compare_value == *dest ) { - // *dest = exchange_value - // return compare_value; - // else - // return *dest; - address generate_atomic_cmpxchg_long() { - StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long"); - address start = __ pc(); - - __ movq(rax, c_rarg2); - __ lock(); - __ cmpxchgq(c_rarg0, Address(c_rarg1, 0)); - __ ret(0); - - return start; - } - - // Implementation of jint atomic_add(jint add_value, volatile jint* dest) - // used by Atomic::add(volatile jint* dest, jint add_value) - // - // Arguments : - // c_rarg0: add_value - // c_rarg1: dest - // - // Result: - // *dest += add_value - // return *dest; - address generate_atomic_add() { - StubCodeMark mark(this, "StubRoutines", "atomic_add"); - address start = __ pc(); - - __ movl(rax, c_rarg0); - __ lock(); - __ xaddl(Address(c_rarg1, 0), c_rarg0); - __ addl(rax, c_rarg0); - __ ret(0); - - return start; - } - - // Implementation of intptr_t atomic_add(intptr_t add_value, volatile intptr_t* dest) - // used by Atomic::add(volatile intptr_t* dest, intptr_t add_value) - // - // Arguments : - // c_rarg0: add_value - // c_rarg1: dest - // - // Result: - // *dest += add_value - // return *dest; - address generate_atomic_add_long() { - StubCodeMark mark(this, "StubRoutines", "atomic_add_long"); - address start = __ pc(); - - __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow - __ lock(); - __ xaddptr(Address(c_rarg1, 0), c_rarg0); - __ addptr(rax, c_rarg0); - __ ret(0); - - return start; - } - // Support for intptr_t OrderAccess::fence() // // Arguments : @@ -6490,13 +6326,6 @@ address generate_avx_ghash_processBlocks() { StubRoutines::_catch_exception_entry = generate_catch_exception(); // atomic calls - StubRoutines::_atomic_xchg_entry = generate_atomic_xchg(); - StubRoutines::_atomic_xchg_long_entry = generate_atomic_xchg_long(); - StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg(); - StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte(); - StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long(); - StubRoutines::_atomic_add_entry = generate_atomic_add(); - StubRoutines::_atomic_add_long_entry = generate_atomic_add_long(); StubRoutines::_fence_entry = generate_orderaccess_fence(); // platform dependent diff --git a/src/hotspot/os/windows/os_windows.cpp b/src/hotspot/os/windows/os_windows.cpp index d4f42c8841a..83e27166ad1 100644 --- a/src/hotspot/os/windows/os_windows.cpp +++ b/src/hotspot/os/windows/os_windows.cpp @@ -2380,7 +2380,8 @@ LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptio 
//----------------------------------------------------------------------------- LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH; - DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; + PEXCEPTION_RECORD exception_record = exceptionInfo->ExceptionRecord; + DWORD exception_code = exception_record->ExceptionCode; #ifdef _M_AMD64 address pc = (address) exceptionInfo->ContextRecord->Rip; #else @@ -2399,9 +2400,8 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { // This is safe to do because we have a new/unique ExceptionInformation // code for this condition. if (exception_code == EXCEPTION_ACCESS_VIOLATION) { - PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; - int exception_subcode = (int) exceptionRecord->ExceptionInformation[0]; - address addr = (address) exceptionRecord->ExceptionInformation[1]; + int exception_subcode = (int) exception_record->ExceptionInformation[0]; + address addr = (address) exception_record->ExceptionInformation[1]; if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) { int page_size = os::vm_page_size(); @@ -2465,7 +2465,7 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { // Last unguard failed or not unguarding tty->print_raw_cr("Execution protection violation"); - report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord, + report_error(t, exception_code, addr, exception_record, exceptionInfo->ContextRecord); return EXCEPTION_CONTINUE_SEARCH; } @@ -2481,14 +2481,14 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { if (t != NULL && t->is_Java_thread()) { JavaThread* thread = (JavaThread*) t; bool in_java = thread->thread_state() == _thread_in_Java; + bool in_native = thread->thread_state() == _thread_in_native; + bool in_vm = thread->thread_state() == _thread_in_vm; // Handle potential stack overflows up front. if (exception_code == EXCEPTION_STACK_OVERFLOW) { if (thread->stack_guards_enabled()) { if (in_java) { frame fr; - PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; - address addr = (address) exceptionRecord->ExceptionInformation[1]; if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) { assert(fr.is_java_frame(), "Must be a Java frame"); SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr); @@ -2497,7 +2497,7 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { // Yellow zone violation. The o/s has unprotected the first yellow // zone page for us. Note: must call disable_stack_yellow_zone to // update the enabled status, even if the zone contains only one page. - assert(thread->thread_state() != _thread_in_vm, "Undersized StackShadowPages"); + assert(!in_vm, "Undersized StackShadowPages"); thread->disable_stack_yellow_reserved_zone(); // If not in java code, return and hope for the best. return in_java @@ -2507,15 +2507,14 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { // Fatal red zone violation. 
thread->disable_stack_red_zone(); tty->print_raw_cr("An unrecoverable stack overflow has occurred."); - report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, + report_error(t, exception_code, pc, exception_record, exceptionInfo->ContextRecord); return EXCEPTION_CONTINUE_SEARCH; } } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) { - // Either stack overflow or null pointer exception. if (in_java) { - PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; - address addr = (address) exceptionRecord->ExceptionInformation[1]; + // Either stack overflow or null pointer exception. + address addr = (address) exception_record->ExceptionInformation[1]; address stack_end = thread->stack_end(); if (addr < stack_end && addr >= stack_end - os::vm_page_size()) { // Stack overflow. @@ -2534,47 +2533,38 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { return Handle_Exception(exceptionInfo, stub); } } - { #ifdef _WIN64 - // If it's a legal stack address map the entire region in - // - PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; - address addr = (address) exceptionRecord->ExceptionInformation[1]; - if (thread->is_in_usable_stack(addr)) { - addr = (address)((uintptr_t)addr & - (~((uintptr_t)os::vm_page_size() - (uintptr_t)1))); - os::commit_memory((char *)addr, thread->stack_base() - addr, - !ExecMem); - return EXCEPTION_CONTINUE_EXECUTION; - } else + // If it's a legal stack address map the entire region in + if (thread->is_in_usable_stack(addr)) { + addr = (address)((uintptr_t)addr & + (~((uintptr_t)os::vm_page_size() - (uintptr_t)1))); + os::commit_memory((char *)addr, thread->stack_base() - addr, + !ExecMem); + return EXCEPTION_CONTINUE_EXECUTION; + } #endif - { - // Null pointer exception. - if (MacroAssembler::uses_implicit_null_check((void*)addr)) { - address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL); - if (stub != NULL) return Handle_Exception(exceptionInfo, stub); - } - report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, - exceptionInfo->ContextRecord); - return EXCEPTION_CONTINUE_SEARCH; - } + // Null pointer exception. + if (MacroAssembler::uses_implicit_null_check((void*)addr)) { + address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL); + if (stub != NULL) return Handle_Exception(exceptionInfo, stub); } + report_error(t, exception_code, pc, exception_record, + exceptionInfo->ContextRecord); + return EXCEPTION_CONTINUE_SEARCH; } #ifdef _WIN64 // Special care for fast JNI field accessors. // jni_fast_GetField can trap at certain pc's if a GC kicks // in and the heap gets shrunk before the field access. - if (exception_code == EXCEPTION_ACCESS_VIOLATION) { - address addr = JNI_FastGetField::find_slowcase_pc(pc); - if (addr != (address)-1) { - return Handle_Exception(exceptionInfo, addr); - } + address slowcase_pc = JNI_FastGetField::find_slowcase_pc(pc); + if (slowcase_pc != (address)-1) { + return Handle_Exception(exceptionInfo, slowcase_pc); } #endif // Stack overflow or null pointer exception in native code. - report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, + report_error(t, exception_code, pc, exception_record, exceptionInfo->ContextRecord); return EXCEPTION_CONTINUE_SEARCH; } // /EXCEPTION_ACCESS_VIOLATION @@ -2588,11 +2578,8 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { nm = (cb != NULL) ? 
cb->as_compiled_method_or_null() : NULL; } - bool is_unsafe_arraycopy = (thread->thread_state() == _thread_in_native || in_java) && UnsafeCopyMemory::contains_pc(pc); - if (((thread->thread_state() == _thread_in_vm || - thread->thread_state() == _thread_in_native || - is_unsafe_arraycopy) && - thread->doing_unsafe_access()) || + bool is_unsafe_arraycopy = (in_native || in_java) && UnsafeCopyMemory::contains_pc(pc); + if (((in_vm || in_native || is_unsafe_arraycopy) && thread->doing_unsafe_access()) || (nm != NULL && nm->has_unsafe_access())) { address next_pc = Assembler::locate_next_instruction(pc); if (is_unsafe_arraycopy) { @@ -2612,16 +2599,14 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { } // switch } - if (((thread->thread_state() == _thread_in_Java) || - (thread->thread_state() == _thread_in_native)) && - exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) { + if ((in_java || in_native) && exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) { LONG result=Handle_FLT_Exception(exceptionInfo); if (result==EXCEPTION_CONTINUE_EXECUTION) return result; } } if (exception_code != EXCEPTION_BREAKPOINT) { - report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, + report_error(t, exception_code, pc, exception_record, exceptionInfo->ContextRecord); } return EXCEPTION_CONTINUE_SEARCH; @@ -5002,7 +4987,7 @@ char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset, hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); - if (hFile == NULL) { + if (hFile == INVALID_HANDLE_VALUE) { log_info(os)("CreateFile() failed: GetLastError->%ld.", GetLastError()); return NULL; } diff --git a/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp b/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp index d428724de41..2f46c0cd89d 100644 --- a/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp +++ b/src/hotspot/os_cpu/windows_x86/atomic_windows_x86.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ #ifndef OS_CPU_WINDOWS_X86_ATOMIC_WINDOWS_X86_HPP #define OS_CPU_WINDOWS_X86_ATOMIC_WINDOWS_X86_HPP +#include #include "runtime/os.hpp" // Note that in MSVC, volatile memory accesses are explicitly @@ -38,21 +39,6 @@ template<> inline void ScopedFence::prefix() { } template<> inline void ScopedFence::prefix() { } template<> inline void ScopedFence::postfix() { OrderAccess::fence(); } -// The following alternative implementations are needed because -// Windows 95 doesn't support (some of) the corresponding Windows NT -// calls. Furthermore, these versions allow inlining in the caller. -// (More precisely: The documentation for InterlockedExchange says -// it is supported for Windows 95. However, when single-stepping -// through the assembly code we cannot step into the routine and -// when looking at the routine address we see only garbage code. -// Better safe then sorry!). Was bug 7/31/98 (gri). -// -// Performance note: On uniprocessors, the 'lock' prefixes are not -// necessary (and expensive). We should generate separate cases if -// this becomes a performance problem. 
- -#pragma warning(disable: 4035) // Disables warnings reporting missing return statement - template struct Atomic::PlatformAdd { template @@ -64,140 +50,70 @@ struct Atomic::PlatformAdd { } }; -#ifdef AMD64 -template<> -template -inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value, - atomic_memory_order order) const { - return add_using_helper(os::atomic_add_func, dest, add_value); -} - -template<> -template -inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value, - atomic_memory_order order) const { - return add_using_helper(os::atomic_add_long_func, dest, add_value); -} - -#define DEFINE_STUB_XCHG(ByteSize, StubType, StubName) \ - template<> \ - template \ - inline T Atomic::PlatformXchg::operator()(T volatile* dest, \ - T exchange_value, \ - atomic_memory_order order) const { \ - STATIC_ASSERT(ByteSize == sizeof(T)); \ - return xchg_using_helper(StubName, dest, exchange_value); \ +// The Interlocked* APIs only take long and will not accept __int32. That is +// acceptable on Windows, since long is a 32-bits integer type. + +#define DEFINE_INTRINSIC_ADD(IntrinsicName, IntrinsicType) \ + template<> \ + template \ + inline D Atomic::PlatformAdd::add_and_fetch(D volatile* dest, \ + I add_value, \ + atomic_memory_order order) const { \ + STATIC_ASSERT(sizeof(IntrinsicType) == sizeof(D)); \ + return PrimitiveConversions::cast( \ + IntrinsicName(reinterpret_cast(dest), \ + PrimitiveConversions::cast(add_value))); \ } -DEFINE_STUB_XCHG(4, int32_t, os::atomic_xchg_func) -DEFINE_STUB_XCHG(8, int64_t, os::atomic_xchg_long_func) - -#undef DEFINE_STUB_XCHG - -#define DEFINE_STUB_CMPXCHG(ByteSize, StubType, StubName) \ - template<> \ - template \ - inline T Atomic::PlatformCmpxchg::operator()(T volatile* dest, \ - T compare_value, \ - T exchange_value, \ - atomic_memory_order order) const { \ - STATIC_ASSERT(ByteSize == sizeof(T)); \ - return cmpxchg_using_helper(StubName, dest, compare_value, exchange_value); \ +DEFINE_INTRINSIC_ADD(InterlockedAdd, long) +DEFINE_INTRINSIC_ADD(InterlockedAdd64, __int64) + +#undef DEFINE_INTRINSIC_ADD + +#define DEFINE_INTRINSIC_XCHG(IntrinsicName, IntrinsicType) \ + template<> \ + template \ + inline T Atomic::PlatformXchg::operator()(T volatile* dest, \ + T exchange_value, \ + atomic_memory_order order) const { \ + STATIC_ASSERT(sizeof(IntrinsicType) == sizeof(T)); \ + return PrimitiveConversions::cast( \ + IntrinsicName(reinterpret_cast(dest), \ + PrimitiveConversions::cast(exchange_value))); \ } -DEFINE_STUB_CMPXCHG(1, int8_t, os::atomic_cmpxchg_byte_func) -DEFINE_STUB_CMPXCHG(4, int32_t, os::atomic_cmpxchg_func) -DEFINE_STUB_CMPXCHG(8, int64_t, os::atomic_cmpxchg_long_func) - -#undef DEFINE_STUB_CMPXCHG - -#else // !AMD64 - -template<> -template -inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value, - atomic_memory_order order) const { - STATIC_ASSERT(4 == sizeof(I)); - STATIC_ASSERT(4 == sizeof(D)); - __asm { - mov edx, dest; - mov eax, add_value; - mov ecx, eax; - lock xadd dword ptr [edx], eax; - add eax, ecx; +DEFINE_INTRINSIC_XCHG(InterlockedExchange, long) +DEFINE_INTRINSIC_XCHG(InterlockedExchange64, __int64) + +#undef DEFINE_INTRINSIC_XCHG + +// Note: the order of the parameters is different between +// Atomic::PlatformCmpxchg<*>::operator() and the +// InterlockedCompareExchange* API. 
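// Illustration of the note above, not part of the patch: HotSpot's operator()
// receives (dest, compare_value, exchange_value) while the Win32 call takes
// (Destination, Exchange, Comparand), so the macro expansion below passes
// exchange_value before compare_value. Roughly, for the 4-byte case:
//
//   long cmpxchg_sketch(long volatile* dest, long compare_value, long exchange_value) {
//     // returns the initial value of *dest, which is what HotSpot expects
//     return InterlockedCompareExchange(dest, exchange_value, compare_value);
//   }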
+ +#define DEFINE_INTRINSIC_CMPXCHG(IntrinsicName, IntrinsicType) \ + template<> \ + template \ + inline T Atomic::PlatformCmpxchg::operator()(T volatile* dest, \ + T compare_value, \ + T exchange_value, \ + atomic_memory_order order) const { \ + STATIC_ASSERT(sizeof(IntrinsicType) == sizeof(T)); \ + return PrimitiveConversions::cast( \ + IntrinsicName(reinterpret_cast(dest), \ + PrimitiveConversions::cast(exchange_value), \ + PrimitiveConversions::cast(compare_value))); \ } -} -template<> -template -inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest, - T exchange_value, - atomic_memory_order order) const { - STATIC_ASSERT(4 == sizeof(T)); - // alternative for InterlockedExchange - __asm { - mov eax, exchange_value; - mov ecx, dest; - xchg eax, dword ptr [ecx]; - } -} +DEFINE_INTRINSIC_CMPXCHG(_InterlockedCompareExchange8, char) // Use the intrinsic as InterlockedCompareExchange8 does not exist +DEFINE_INTRINSIC_CMPXCHG(InterlockedCompareExchange, long) +DEFINE_INTRINSIC_CMPXCHG(InterlockedCompareExchange64, __int64) -template<> -template -inline T Atomic::PlatformCmpxchg<1>::operator()(T volatile* dest, - T compare_value, - T exchange_value, - atomic_memory_order order) const { - STATIC_ASSERT(1 == sizeof(T)); - // alternative for InterlockedCompareExchange - __asm { - mov edx, dest - mov cl, exchange_value - mov al, compare_value - lock cmpxchg byte ptr [edx], cl - } -} +#undef DEFINE_INTRINSIC_CMPXCHG -template<> -template -inline T Atomic::PlatformCmpxchg<4>::operator()(T volatile* dest, - T compare_value, - T exchange_value, - atomic_memory_order order) const { - STATIC_ASSERT(4 == sizeof(T)); - // alternative for InterlockedCompareExchange - __asm { - mov edx, dest - mov ecx, exchange_value - mov eax, compare_value - lock cmpxchg dword ptr [edx], ecx - } -} +#ifndef AMD64 -template<> -template -inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest, - T compare_value, - T exchange_value, - atomic_memory_order order) const { - STATIC_ASSERT(8 == sizeof(T)); - int32_t ex_lo = (int32_t)exchange_value; - int32_t ex_hi = *( ((int32_t*)&exchange_value) + 1 ); - int32_t cmp_lo = (int32_t)compare_value; - int32_t cmp_hi = *( ((int32_t*)&compare_value) + 1 ); - __asm { - push ebx - push edi - mov eax, cmp_lo - mov edx, cmp_hi - mov edi, dest - mov ebx, ex_lo - mov ecx, ex_hi - lock cmpxchg8b qword ptr [edi] - pop edi - pop ebx - } -} +#pragma warning(disable: 4035) // Disables warnings reporting missing return statement template<> template @@ -228,11 +144,8 @@ inline void Atomic::PlatformStore<8>::operator()(T volatile* dest, } } -#endif // AMD64 - #pragma warning(default: 4035) // Enables warnings reporting missing return statement -#ifndef AMD64 template<> struct Atomic::PlatformOrderedStore<1, RELEASE_X_FENCE> { diff --git a/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp b/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp index 576ba18799a..3d7f2401c72 100644 --- a/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp +++ b/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp @@ -211,138 +211,6 @@ bool os::register_code_area(char *low, char *high) { return true; } -// Atomics and Stub Functions - -typedef int32_t xchg_func_t (int32_t, volatile int32_t*); -typedef int64_t xchg_long_func_t (int64_t, volatile int64_t*); -typedef int32_t cmpxchg_func_t (int32_t, volatile int32_t*, int32_t); -typedef int8_t cmpxchg_byte_func_t (int8_t, volatile int8_t*, int8_t); -typedef int64_t cmpxchg_long_func_t (int64_t, volatile int64_t*, int64_t); -typedef int32_t add_func_t (int32_t, 
volatile int32_t*); -typedef int64_t add_long_func_t (int64_t, volatile int64_t*); - -#ifdef AMD64 - -int32_t os::atomic_xchg_bootstrap(int32_t exchange_value, volatile int32_t* dest) { - // try to use the stub: - xchg_func_t* func = CAST_TO_FN_PTR(xchg_func_t*, StubRoutines::atomic_xchg_entry()); - - if (func != NULL) { - os::atomic_xchg_func = func; - return (*func)(exchange_value, dest); - } - assert(Threads::number_of_threads() == 0, "for bootstrap only"); - - int32_t old_value = *dest; - *dest = exchange_value; - return old_value; -} - -int64_t os::atomic_xchg_long_bootstrap(int64_t exchange_value, volatile int64_t* dest) { - // try to use the stub: - xchg_long_func_t* func = CAST_TO_FN_PTR(xchg_long_func_t*, StubRoutines::atomic_xchg_long_entry()); - - if (func != NULL) { - os::atomic_xchg_long_func = func; - return (*func)(exchange_value, dest); - } - assert(Threads::number_of_threads() == 0, "for bootstrap only"); - - int64_t old_value = *dest; - *dest = exchange_value; - return old_value; -} - - -int32_t os::atomic_cmpxchg_bootstrap(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value) { - // try to use the stub: - cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry()); - - if (func != NULL) { - os::atomic_cmpxchg_func = func; - return (*func)(exchange_value, dest, compare_value); - } - assert(Threads::number_of_threads() == 0, "for bootstrap only"); - - int32_t old_value = *dest; - if (old_value == compare_value) - *dest = exchange_value; - return old_value; -} - -int8_t os::atomic_cmpxchg_byte_bootstrap(int8_t exchange_value, volatile int8_t* dest, int8_t compare_value) { - // try to use the stub: - cmpxchg_byte_func_t* func = CAST_TO_FN_PTR(cmpxchg_byte_func_t*, StubRoutines::atomic_cmpxchg_byte_entry()); - - if (func != NULL) { - os::atomic_cmpxchg_byte_func = func; - return (*func)(exchange_value, dest, compare_value); - } - assert(Threads::number_of_threads() == 0, "for bootstrap only"); - - int8_t old_value = *dest; - if (old_value == compare_value) - *dest = exchange_value; - return old_value; -} - -#endif // AMD64 - -int64_t os::atomic_cmpxchg_long_bootstrap(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value) { - // try to use the stub: - cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry()); - - if (func != NULL) { - os::atomic_cmpxchg_long_func = func; - return (*func)(exchange_value, dest, compare_value); - } - assert(Threads::number_of_threads() == 0, "for bootstrap only"); - - int64_t old_value = *dest; - if (old_value == compare_value) - *dest = exchange_value; - return old_value; -} - -#ifdef AMD64 - -int32_t os::atomic_add_bootstrap(int32_t add_value, volatile int32_t* dest) { - // try to use the stub: - add_func_t* func = CAST_TO_FN_PTR(add_func_t*, StubRoutines::atomic_add_entry()); - - if (func != NULL) { - os::atomic_add_func = func; - return (*func)(add_value, dest); - } - assert(Threads::number_of_threads() == 0, "for bootstrap only"); - - return (*dest) += add_value; -} - -int64_t os::atomic_add_long_bootstrap(int64_t add_value, volatile int64_t* dest) { - // try to use the stub: - add_long_func_t* func = CAST_TO_FN_PTR(add_long_func_t*, StubRoutines::atomic_add_long_entry()); - - if (func != NULL) { - os::atomic_add_long_func = func; - return (*func)(add_value, dest); - } - assert(Threads::number_of_threads() == 0, "for bootstrap only"); - - return (*dest) += add_value; -} - -xchg_func_t* os::atomic_xchg_func = 
os::atomic_xchg_bootstrap; -xchg_long_func_t* os::atomic_xchg_long_func = os::atomic_xchg_long_bootstrap; -cmpxchg_func_t* os::atomic_cmpxchg_func = os::atomic_cmpxchg_bootstrap; -cmpxchg_byte_func_t* os::atomic_cmpxchg_byte_func = os::atomic_cmpxchg_byte_bootstrap; -add_func_t* os::atomic_add_func = os::atomic_add_bootstrap; -add_long_func_t* os::atomic_add_long_func = os::atomic_add_long_bootstrap; - -#endif // AMD64 - -cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap; - #ifdef AMD64 /* * Windows/x64 does not use stack frames the way expected by Java: diff --git a/src/hotspot/os_cpu/windows_x86/os_windows_x86.hpp b/src/hotspot/os_cpu/windows_x86/os_windows_x86.hpp index 3ced4dca668..7fdd068219c 100644 --- a/src/hotspot/os_cpu/windows_x86/os_windows_x86.hpp +++ b/src/hotspot/os_cpu/windows_x86/os_windows_x86.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,34 +28,6 @@ // // NOTE: we are back in class os here, not win32 // -#ifdef AMD64 - static int32_t (*atomic_xchg_func) (int32_t, volatile int32_t*); - static int64_t (*atomic_xchg_long_func) (int64_t, volatile int64_t*); - - static int32_t (*atomic_cmpxchg_func) (int32_t, volatile int32_t*, int32_t); - static int8_t (*atomic_cmpxchg_byte_func) (int8_t, volatile int8_t*, int8_t); - static int64_t (*atomic_cmpxchg_long_func) (int64_t, volatile int64_t*, int64_t); - - static int32_t (*atomic_add_func) (int32_t, volatile int32_t*); - static int64_t (*atomic_add_long_func) (int64_t, volatile int64_t*); - - static int32_t atomic_xchg_bootstrap (int32_t, volatile int32_t*); - static int64_t atomic_xchg_long_bootstrap (int64_t, volatile int64_t*); - - static int32_t atomic_cmpxchg_bootstrap (int32_t, volatile int32_t*, int32_t); - static int8_t atomic_cmpxchg_byte_bootstrap(int8_t, volatile int8_t*, int8_t); -#else - - static int64_t (*atomic_cmpxchg_long_func) (int64_t, volatile int64_t*, int64_t); - -#endif // AMD64 - - static int64_t atomic_cmpxchg_long_bootstrap(int64_t, volatile int64_t*, int64_t); - -#ifdef AMD64 - static int32_t atomic_add_bootstrap (int32_t, volatile int32_t*); - static int64_t atomic_add_long_bootstrap (int64_t, volatile int64_t*); -#endif // AMD64 static void setup_fpu(); static bool supports_sse() { return true; } diff --git a/src/hotspot/os_cpu/windows_x86/thread_windows_x86.cpp b/src/hotspot/os_cpu/windows_x86/thread_windows_x86.cpp index 7d9048c57c6..a1f0ff39150 100644 --- a/src/hotspot/os_cpu/windows_x86/thread_windows_x86.cpp +++ b/src/hotspot/os_cpu/windows_x86/thread_windows_x86.cpp @@ -63,33 +63,22 @@ bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava) // we try to glean some information out of the CONTEXT // if we were running Java code when SIGPROF came in. 
if (isInJava) { - CONTEXT* uc = (CONTEXT*)ucontext; - -#ifdef AMD64 - intptr_t* ret_fp = (intptr_t*) uc->Rbp; - intptr_t* ret_sp = (intptr_t*) uc->Rsp; - address addr = (address)uc->Rip; -#else - intptr_t* ret_fp = (intptr_t*) uc->Ebp; - intptr_t* ret_sp = (intptr_t*) uc->Esp; - address addr = (address)uc->Eip; -#endif // AMD64 - if (addr == NULL || ret_sp == NULL ) { + frame ret_frame = os::fetch_frame_from_context(ucontext); + if (ret_frame.pc() == NULL || ret_frame.sp() == NULL ) { // CONTEXT wasn't useful return false; } - if (MetaspaceShared::is_in_trampoline_frame(addr)) { + if (MetaspaceShared::is_in_trampoline_frame(ret_frame.pc())) { // In the middle of a trampoline call. Bail out for safety. // This happens rarely so shouldn't affect profiling. return false; } - frame ret_frame(ret_sp, ret_fp, addr); if (!ret_frame.safe_for_sender(jt)) { #if COMPILER2_OR_JVMCI // C2 and JVMCI use ebp as a general register see if NULL fp helps - frame ret_frame2(ret_sp, NULL, addr); + frame ret_frame2(ret_frame.sp(), NULL, ret_frame.pc()); if (!ret_frame2.safe_for_sender(jt)) { // nothing else to try if the frame isn't good return false; diff --git a/src/hotspot/share/ci/ciReplay.cpp b/src/hotspot/share/ci/ciReplay.cpp index d4e57426918..4a4af56cd25 100644 --- a/src/hotspot/share/ci/ciReplay.cpp +++ b/src/hotspot/share/ci/ciReplay.cpp @@ -1219,8 +1219,8 @@ void* ciReplay::load_inline_data(ciMethod* method, int entry_bci, int comp_level } int ciReplay::replay_impl(TRAPS) { - HandleMark hm; - ResourceMark rm; + HandleMark hm(THREAD); + ResourceMark rm(THREAD); if (ReplaySuppressInitializers > 2) { // ReplaySuppressInitializers > 2 means that we want to allow diff --git a/src/hotspot/share/classfile/classLoaderDataGraph.cpp b/src/hotspot/share/classfile/classLoaderDataGraph.cpp index 9b104d9ba4d..a10a073413b 100644 --- a/src/hotspot/share/classfile/classLoaderDataGraph.cpp +++ b/src/hotspot/share/classfile/classLoaderDataGraph.cpp @@ -306,14 +306,14 @@ LockedClassesDo::~LockedClassesDo() { // unloading can remove entries concurrently soon. class ClassLoaderDataGraphIterator : public StackObj { ClassLoaderData* _next; + Thread* _thread; HandleMark _hm; // clean up handles when this is done. Handle _holder; - Thread* _thread; NoSafepointVerifier _nsv; // No safepoints allowed in this scope // unless verifying at a safepoint. 
public: - ClassLoaderDataGraphIterator() : _next(ClassLoaderDataGraph::_head) { + ClassLoaderDataGraphIterator() : _next(ClassLoaderDataGraph::_head), _thread(Thread::current()), _hm(_thread) { _thread = Thread::current(); assert_locked_or_safepoint(ClassLoaderDataGraph_lock); } diff --git a/src/hotspot/share/classfile/klassFactory.cpp b/src/hotspot/share/classfile/klassFactory.cpp index 8efe2b0ca0c..111aa8a6309 100644 --- a/src/hotspot/share/classfile/klassFactory.cpp +++ b/src/hotspot/share/classfile/klassFactory.cpp @@ -173,8 +173,8 @@ InstanceKlass* KlassFactory::create_from_stream(ClassFileStream* stream, assert(loader_data != NULL, "invariant"); assert(THREAD->is_Java_thread(), "must be a JavaThread"); - ResourceMark rm; - HandleMark hm; + ResourceMark rm(THREAD); + HandleMark hm(THREAD); JvmtiCachedClassFileData* cached_class_file = NULL; diff --git a/src/hotspot/share/classfile/systemDictionaryShared.cpp b/src/hotspot/share/classfile/systemDictionaryShared.cpp index 7bc73067d81..e0d11f8ee23 100644 --- a/src/hotspot/share/classfile/systemDictionaryShared.cpp +++ b/src/hotspot/share/classfile/systemDictionaryShared.cpp @@ -1773,7 +1773,7 @@ bool SystemDictionaryShared::check_linking_constraints(InstanceKlass* klass, TRA RunTimeSharedClassInfo* info = RunTimeSharedClassInfo::get_for(klass); assert(info != NULL, "Sanity"); if (info->_num_loader_constraints > 0) { - HandleMark hm; + HandleMark hm(THREAD); for (int i = 0; i < info->_num_loader_constraints; i++) { RunTimeSharedClassInfo::RTLoaderConstraint* lc = info->loader_constraint_at(i); Symbol* name = lc->constraint_name(); diff --git a/src/hotspot/share/code/nmethod.cpp b/src/hotspot/share/code/nmethod.cpp index 4b29eb48558..37580aec37c 100644 --- a/src/hotspot/share/code/nmethod.cpp +++ b/src/hotspot/share/code/nmethod.cpp @@ -882,7 +882,6 @@ void nmethod::log_identity(xmlStream* log) const { void nmethod::log_new_nmethod() const { if (LogCompilation && xtty != NULL) { ttyLocker ttyl; - HandleMark hm; xtty->begin_elem("nmethod"); log_identity(xtty); xtty->print(" entry='" INTPTR_FORMAT "' size='%d'", p2i(code_begin()), size()); @@ -952,7 +951,6 @@ void nmethod::print_nmethod(bool printmethod) { // Print the header part, then print the requested information. // This is both handled in decode2(). if (printmethod) { - HandleMark hm; ResourceMark m; if (is_compiled_by_c1()) { tty->cr(); @@ -2429,6 +2427,7 @@ void nmethod::verify() { void nmethod::verify_interrupt_point(address call_site) { + // Verify IC only when nmethod installation is finished. if (!is_not_installed()) { if (CompiledICLocker::is_safe(this)) { @@ -2439,6 +2438,8 @@ void nmethod::verify_interrupt_point(address call_site) { } } + HandleMark hm(Thread::current()); + PcDesc* pd = pc_desc_at(nativeCall_at(call_site)->return_address()); assert(pd != NULL, "PcDesc must exist"); for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(), @@ -2578,7 +2579,6 @@ void nmethod::print(outputStream* st) const { } void nmethod::print_code() { - HandleMark hm; ResourceMark m; ttyLocker ttyl; // Call the specialized decode method of this class. @@ -2608,7 +2608,6 @@ void nmethod::print_dependencies() { // Print the oops from the underlying CodeBlob. void nmethod::print_oops(outputStream* st) { - HandleMark hm; ResourceMark m; st->print("Oops:"); if (oops_begin() < oops_end()) { @@ -2634,7 +2633,6 @@ void nmethod::print_oops(outputStream* st) { // Print metadata pool. 
void nmethod::print_metadata(outputStream* st) { - HandleMark hm; ResourceMark m; st->print("Metadata:"); if (metadata_begin() < metadata_end()) { diff --git a/src/hotspot/share/code/scopeDesc.cpp b/src/hotspot/share/code/scopeDesc.cpp index 19b6d040ab1..4d2c5284950 100644 --- a/src/hotspot/share/code/scopeDesc.cpp +++ b/src/hotspot/share/code/scopeDesc.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -256,7 +256,9 @@ void ScopeDesc::print_on(outputStream* st, PcDesc* pd) const { #endif void ScopeDesc::verify() { - ResourceMark rm; + Thread* current_thread = Thread::current(); + ResourceMark rm(current_thread); + HandleMark hm(current_thread); guarantee(method()->is_method(), "type check"); // check if we have any illegal elements on the expression stack diff --git a/src/hotspot/share/code/vtableStubs.cpp b/src/hotspot/share/code/vtableStubs.cpp index fd862e0d779..546bc799d88 100644 --- a/src/hotspot/share/code/vtableStubs.cpp +++ b/src/hotspot/share/code/vtableStubs.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -323,7 +323,6 @@ void VtableStubs::vtable_stub_do(void f(VtableStub*)) { extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int index) { ResourceMark rm; - HandleMark hm; Klass* klass = receiver->klass(); InstanceKlass* ik = InstanceKlass::cast(klass); klassVtable vt = ik->vtable(); diff --git a/src/hotspot/share/compiler/abstractCompiler.hpp b/src/hotspot/share/compiler/abstractCompiler.hpp index 922121ec29e..f00863e9b2a 100644 --- a/src/hotspot/share/compiler/abstractCompiler.hpp +++ b/src/hotspot/share/compiler/abstractCompiler.hpp @@ -31,7 +31,6 @@ typedef void (*initializer)(void); -#if INCLUDE_JVMCI // Per-compiler statistics class CompilerStatistics { friend class VMStructs; @@ -58,17 +57,20 @@ class CompilerStatistics { Data _osr; // stats for OSR compilations int _nmethods_size; // int _nmethods_code_size; - int bytes_per_second() { + + double total_time() { return _standard._time.seconds() + _osr._time.seconds(); } + + double bytes_per_second() { int bytes = _standard._bytes + _osr._bytes; if (bytes == 0) { - return 0; + return 0.0; } - double seconds = _standard._time.seconds() + _osr._time.seconds(); - return seconds == 0.0 ? 0 : (int) (bytes / seconds); + double seconds = total_time(); + return seconds == 0.0 ? 
0.0 : (bytes / seconds); } + CompilerStatistics() : _nmethods_size(0), _nmethods_code_size(0) {} }; -#endif // INCLUDE_JVMCI class AbstractCompiler : public CHeapObj { private: @@ -86,9 +88,7 @@ class AbstractCompiler : public CHeapObj { private: const CompilerType _type; -#if INCLUDE_JVMCI CompilerStatistics _stats; -#endif public: AbstractCompiler(CompilerType type) : _num_compiler_threads(0), _compiler_state(uninitialized), _type(type) {} @@ -176,9 +176,7 @@ class AbstractCompiler : public CHeapObj { ShouldNotReachHere(); } -#if INCLUDE_JVMCI CompilerStatistics* stats() { return &_stats; } -#endif }; #endif // SHARE_COMPILER_ABSTRACTCOMPILER_HPP diff --git a/src/hotspot/share/compiler/compileBroker.cpp b/src/hotspot/share/compiler/compileBroker.cpp index 1546fc1282e..992c37088b0 100644 --- a/src/hotspot/share/compiler/compileBroker.cpp +++ b/src/hotspot/share/compiler/compileBroker.cpp @@ -181,6 +181,8 @@ int CompileBroker::_sum_nmethod_code_size = 0; long CompileBroker::_peak_compilation_time = 0; +CompilerStatistics CompileBroker::_stats_per_level[CompLevel_full_optimization]; + CompileQueue* CompileBroker::_c2_compile_queue = NULL; CompileQueue* CompileBroker::_c1_compile_queue = NULL; @@ -1599,6 +1601,7 @@ static const int JVMCI_COMPILATION_PROGRESS_WAIT_ATTEMPTS = 10; * @return true if this thread needs to free/recycle the task */ bool CompileBroker::wait_for_jvmci_completion(JVMCICompiler* jvmci, CompileTask* task, JavaThread* thread) { + assert(UseJVMCICompiler, "sanity"); MonitorLocker ml(thread, task->lock()); int progress_wait_attempts = 0; int methods_compiled = jvmci->methods_compiled(); @@ -2458,6 +2461,7 @@ void CompileBroker::collect_statistics(CompilerThread* thread, elapsedTimer time methodHandle method (thread, task->method()); uint compile_id = task->compile_id(); bool is_osr = (task->osr_bci() != standard_entry_bci); + const int comp_level = task->comp_level(); nmethod* code = task->code(); CompilerCounters* counters = thread->counters(); @@ -2506,25 +2510,34 @@ void CompileBroker::collect_statistics(CompilerThread* thread, elapsedTimer time _sum_standard_bytes_compiled += method->code_size() + task->num_inlined_bytecodes(); } -#if INCLUDE_JVMCI - AbstractCompiler* comp = compiler(task->comp_level()); + // Collect statistic per compilation level + if (comp_level > CompLevel_none && comp_level <= CompLevel_full_optimization) { + CompilerStatistics* stats = &_stats_per_level[comp_level-1]; + if (is_osr) { + stats->_osr.update(time, bytes_compiled); + } else { + stats->_standard.update(time, bytes_compiled); + } + stats->_nmethods_size += code->total_size(); + stats->_nmethods_code_size += code->insts_size(); + } else { + assert(false, "CompilerStatistics object does not exist for compilation level %d", comp_level); + } + + // Collect statistic per compiler + AbstractCompiler* comp = compiler(comp_level); if (comp) { CompilerStatistics* stats = comp->stats(); - if (stats) { - if (is_osr) { - stats->_osr.update(time, bytes_compiled); - } else { - stats->_standard.update(time, bytes_compiled); - } - stats->_nmethods_size += code->total_size(); - stats->_nmethods_code_size += code->insts_size(); - } else { // if (!stats) - assert(false, "Compiler statistics object must exist"); + if (is_osr) { + stats->_osr.update(time, bytes_compiled); + } else { + stats->_standard.update(time, bytes_compiled); } + stats->_nmethods_size += code->total_size(); + stats->_nmethods_code_size += code->insts_size(); } else { // if (!comp) assert(false, "Compiler object must exist"); } -#endif 
// INCLUDE_JVMCI } if (UsePerfData) { @@ -2543,9 +2556,10 @@ void CompileBroker::collect_statistics(CompilerThread* thread, elapsedTimer time } if (CITimeEach) { - float bytes_per_sec = 1.0 * (method->code_size() + task->num_inlined_bytecodes()) / time.seconds(); - tty->print_cr("%3d seconds: %f bytes/sec : %f (bytes %d + %d inlined)", - compile_id, time.seconds(), bytes_per_sec, method->code_size(), task->num_inlined_bytecodes()); + double compile_time = time.seconds(); + double bytes_per_sec = compile_time == 0.0 ? 0.0 : (double)(method->code_size() + task->num_inlined_bytecodes()) / compile_time; + tty->print_cr("%3d seconds: %6.3f bytes/sec : %f (bytes %d + %d inlined)", + compile_id, compile_time, bytes_per_sec, method->code_size(), task->num_inlined_bytecodes()); } // Collect counts of successful compilations @@ -2580,81 +2594,53 @@ const char* CompileBroker::compiler_name(int comp_level) { } } -#if INCLUDE_JVMCI -void CompileBroker::print_times(AbstractCompiler* comp) { - CompilerStatistics* stats = comp->stats(); - if (stats) { - tty->print_cr(" %s {speed: %d bytes/s; standard: %6.3f s, %d bytes, %d methods; osr: %6.3f s, %d bytes, %d methods; nmethods_size: %d bytes; nmethods_code_size: %d bytes}", - comp->name(), stats->bytes_per_second(), +void CompileBroker::print_times(const char* name, CompilerStatistics* stats) { + tty->print_cr(" %s {speed: %6.3f bytes/s; standard: %6.3f s, %d bytes, %d methods; osr: %6.3f s, %d bytes, %d methods; nmethods_size: %d bytes; nmethods_code_size: %d bytes}", + name, stats->bytes_per_second(), stats->_standard._time.seconds(), stats->_standard._bytes, stats->_standard._count, stats->_osr._time.seconds(), stats->_osr._bytes, stats->_osr._count, stats->_nmethods_size, stats->_nmethods_code_size); - } else { // if (!stats) - assert(false, "Compiler statistics object must exist"); - } - comp->print_timers(); } -#endif // INCLUDE_JVMCI void CompileBroker::print_times(bool per_compiler, bool aggregate) { -#if INCLUDE_JVMCI - elapsedTimer standard_compilation; - elapsedTimer total_compilation; - elapsedTimer osr_compilation; - - int standard_bytes_compiled = 0; - int osr_bytes_compiled = 0; - - int standard_compile_count = 0; - int osr_compile_count = 0; - int total_compile_count = 0; - - int nmethods_size = 0; - int nmethods_code_size = 0; - bool printedHeader = false; - - for (unsigned int i = 0; i < sizeof(_compilers) / sizeof(AbstractCompiler*); i++) { - AbstractCompiler* comp = _compilers[i]; - if (comp != NULL) { - if (per_compiler && aggregate && !printedHeader) { - printedHeader = true; - tty->cr(); - tty->print_cr("Individual compiler times (for compiled methods only)"); - tty->print_cr("------------------------------------------------"); - tty->cr(); - } - CompilerStatistics* stats = comp->stats(); - - if (stats) { - standard_compilation.add(stats->_standard._time); - osr_compilation.add(stats->_osr._time); - - standard_bytes_compiled += stats->_standard._bytes; - osr_bytes_compiled += stats->_osr._bytes; - - standard_compile_count += stats->_standard._count; - osr_compile_count += stats->_osr._count; - - nmethods_size += stats->_nmethods_size; - nmethods_code_size += stats->_nmethods_code_size; - } else { // if (!stats) - assert(false, "Compiler statistics object must exist"); - } - - if (per_compiler) { - print_times(comp); + if (per_compiler) { + if (aggregate) { + tty->cr(); + tty->print_cr("Individual compiler times (for compiled methods only)"); + tty->print_cr("------------------------------------------------"); + tty->cr(); + } + for 
(unsigned int i = 0; i < sizeof(_compilers) / sizeof(AbstractCompiler*); i++) { + AbstractCompiler* comp = _compilers[i]; + if (comp != NULL) { + print_times(comp->name(), comp->stats()); } } + if (aggregate) { + tty->cr(); + tty->print_cr("Individual compilation Tier times (for compiled methods only)"); + tty->print_cr("------------------------------------------------"); + tty->cr(); + } + char tier_name[256]; + for (int tier = CompLevel_simple; tier <= CompLevel_highest_tier; tier++) { + CompilerStatistics* stats = &_stats_per_level[tier-1]; + sprintf(tier_name, "Tier%d", tier); + print_times(tier_name, stats); + } } - total_compile_count = osr_compile_count + standard_compile_count; - total_compilation.add(osr_compilation); - total_compilation.add(standard_compilation); +#if INCLUDE_JVMCI // In hosted mode, print the JVMCI compiler specific counters manually. - if (!UseJVMCICompiler) { + if (EnableJVMCI && !UseJVMCICompiler) { JVMCICompiler::print_compilation_timers(); } -#else // INCLUDE_JVMCI +#endif + + if (!aggregate) { + return; + } + elapsedTimer standard_compilation = CompileBroker::_t_standard_compilation; elapsedTimer osr_compilation = CompileBroker::_t_osr_compilation; elapsedTimer total_compilation = CompileBroker::_t_total_compilation; @@ -2665,14 +2651,12 @@ void CompileBroker::print_times(bool per_compiler, bool aggregate) { int standard_compile_count = CompileBroker::_total_standard_compile_count; int osr_compile_count = CompileBroker::_total_osr_compile_count; int total_compile_count = CompileBroker::_total_compile_count; + int total_bailout_count = CompileBroker::_total_bailout_count; + int total_invalidated_count = CompileBroker::_total_invalidated_count; int nmethods_size = CompileBroker::_sum_nmethod_code_size; int nmethods_code_size = CompileBroker::_sum_nmethod_size; -#endif // INCLUDE_JVMCI - if (!aggregate) { - return; - } tty->cr(); tty->print_cr("Accumulated compiler times"); tty->print_cr("----------------------------------------------------------"); @@ -2681,16 +2665,16 @@ void CompileBroker::print_times(bool per_compiler, bool aggregate) { tty->print_cr(" Total compilation time : %7.3f s", total_compilation.seconds()); tty->print_cr(" Standard compilation : %7.3f s, Average : %2.3f s", standard_compilation.seconds(), - standard_compilation.seconds() / standard_compile_count); + standard_compile_count == 0 ? 0.0 : standard_compilation.seconds() / standard_compile_count); tty->print_cr(" Bailed out compilation : %7.3f s, Average : %2.3f s", CompileBroker::_t_bailedout_compilation.seconds(), - CompileBroker::_t_bailedout_compilation.seconds() / CompileBroker::_total_bailout_count); + total_bailout_count == 0 ? 0.0 : CompileBroker::_t_bailedout_compilation.seconds() / total_bailout_count); tty->print_cr(" On stack replacement : %7.3f s, Average : %2.3f s", osr_compilation.seconds(), - osr_compilation.seconds() / osr_compile_count); + osr_compile_count == 0 ? 0.0 : osr_compilation.seconds() / osr_compile_count); tty->print_cr(" Invalidated : %7.3f s, Average : %2.3f s", CompileBroker::_t_invalidated_compilation.seconds(), - CompileBroker::_t_invalidated_compilation.seconds() / CompileBroker::_total_invalidated_count); + total_invalidated_count == 0 ? 
0.0 : CompileBroker::_t_invalidated_compilation.seconds() / total_invalidated_count); AbstractCompiler *comp = compiler(CompLevel_simple); if (comp != NULL) { diff --git a/src/hotspot/share/compiler/compileBroker.hpp b/src/hotspot/share/compiler/compileBroker.hpp index ca4d5ada926..b3be2ec649b 100644 --- a/src/hotspot/share/compiler/compileBroker.hpp +++ b/src/hotspot/share/compiler/compileBroker.hpp @@ -224,6 +224,8 @@ class CompileBroker: AllStatic { static int _sum_nmethod_code_size; static long _peak_compilation_time; + static CompilerStatistics _stats_per_level[]; + static volatile int _print_compilation_warning; static Handle create_thread_oop(const char* name, TRAPS); @@ -371,10 +373,8 @@ class CompileBroker: AllStatic { // Redefine Classes support static void mark_on_stack(); -#if INCLUDE_JVMCI // Print curent compilation time stats for a given compiler - static void print_times(AbstractCompiler* comp); -#endif + static void print_times(const char* name, CompilerStatistics* stats); // Print a detailed accounting of compilation time static void print_times(bool per_compiler = true, bool aggregate = true); diff --git a/src/hotspot/share/compiler/compileTask.hpp b/src/hotspot/share/compiler/compileTask.hpp index aca640a8e71..fd8c4603329 100644 --- a/src/hotspot/share/compiler/compileTask.hpp +++ b/src/hotspot/share/compiler/compileTask.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -42,15 +42,12 @@ class CompileTask : public CHeapObj { public: // Different reasons for a compilation - // The order is important - Reason_Whitebox and higher can not become - // stale, see CompileTask::can_become_stale() - // Also mapped to reason_names[] + // The order is important - mapped to reason_names[] enum CompileReason { Reason_None, Reason_InvocationCount, // Simple/StackWalk-policy Reason_BackedgeCount, // Simple/StackWalk-policy Reason_Tiered, // Tiered-policy - Reason_CTW, // Compile the world Reason_Replay, // ciReplay Reason_Whitebox, // Whitebox API Reason_MustBeCompiled, // Used for -Xcomp or AlwaysCompileLoopMethods (see CompilationPolicy::must_be_compiled()) @@ -64,7 +61,6 @@ class CompileTask : public CHeapObj { "count", "backedge_count", "tiered", - "CTW", "replay", "whitebox", "must_be_compiled", @@ -137,7 +133,6 @@ class CompileTask : public CHeapObj { bool should_wait_for_compilation() const { // Wait for blocking compilation to finish. switch (_compile_reason) { - case Reason_CTW: case Reason_Replay: case Reason_Whitebox: case Reason_Bootstrap: diff --git a/src/hotspot/share/compiler/compilerDirectives.cpp b/src/hotspot/share/compiler/compilerDirectives.cpp index e74d54b6a28..b940a7e8e36 100644 --- a/src/hotspot/share/compiler/compilerDirectives.cpp +++ b/src/hotspot/share/compiler/compilerDirectives.cpp @@ -274,6 +274,49 @@ DirectiveSet::~DirectiveSet() { } } +// A smart pointer of DirectiveSet. It uses Copy-on-Write strategy to avoid cloning. +// It provides 2 accesses of the underlying raw pointer. +// 1) operator->() returns a pointer to a constant DirectiveSet. It's read-only. +// 2) cloned() returns a pointer that points to the cloned DirectiveSet. +// Users should only use cloned() when they need to update DirectiveSet. +// +// In the end, users need invoke commit() to finalize the pending changes. 
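The compileBroker.cpp timing changes above all follow the same rule: never divide by a possibly-zero compile count or a zero elapsed time when computing averages or bytes per second, and report 0.0 instead. (The new per-tier table uses the related convention that tier N is stored at _stats_per_level[N-1].) A tiny, runnable restatement of the guarded arithmetic:

    #include <cstdio>

    // Average seconds per compilation; 0.0 when nothing was compiled in that bucket.
    static double safe_average(double total_seconds, int count) {
      return count == 0 ? 0.0 : total_seconds / count;
    }

    // Compilation speed; 0.0 when no bytes were compiled or no time was measured.
    static double bytes_per_second(int bytes, double seconds) {
      if (bytes == 0) {
        return 0.0;
      }
      return seconds == 0.0 ? 0.0 : bytes / seconds;
    }

    int main() {
      printf("  OSR average : %2.3f s\n", safe_average(0.0, 0));        // no OSR compiles
      printf("  speed       : %6.3f bytes/s\n", bytes_per_second(12800, 0.25));
      return 0;
    }

Before the patch, a run with zero OSR, bailed-out, or invalidated compilations produced a division by zero in the printed averages.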
+// If cloning happens, the smart pointer will return the new pointer after releasing the original +// one on DirectivesStack. If cloning doesn't happen, it returns the original intact pointer. +class DirectiveSetPtr { + private: + DirectiveSet* _origin; + DirectiveSet* _clone; + NONCOPYABLE(DirectiveSetPtr); + + public: + DirectiveSetPtr(DirectiveSet* origin): _origin(origin), _clone(nullptr) { + assert(origin != nullptr, "DirectiveSetPtr cannot be initialized with a NULL pointer."); + } + + DirectiveSet const* operator->() { + return (_clone == nullptr) ? _origin : _clone; + } + + DirectiveSet* cloned() { + if (_clone == nullptr) { + _clone = DirectiveSet::clone(_origin); + } + return _clone; + } + + DirectiveSet* commit() { + if (_clone != nullptr) { + // We are returning a (parentless) copy. The originals parent don't need to account for this. + DirectivesStack::release(_origin); + _origin = _clone; + _clone = nullptr; + } + + return _origin; + } +}; + // Backward compatibility for CompileCommands // Breaks the abstraction and causes lots of extra complexity // - if some option is changed we need to copy directiveset since it no longer can be shared @@ -285,46 +328,39 @@ DirectiveSet* DirectiveSet::compilecommand_compatibility_init(const methodHandle // Only set a flag if it has not been modified and value changes. // Only copy set if a flag needs to be set if (!CompilerDirectivesIgnoreCompileCommandsOption && CompilerOracle::has_any_option()) { - DirectiveSet* set = DirectiveSet::clone(this); - - bool changed = false; // Track if we actually change anything + DirectiveSetPtr set(this); // All CompileCommands are not equal so this gets a bit verbose // When CompileCommands have been refactored less clutter will remain. if (CompilerOracle::should_break_at(method)) { if (!_modified[BreakAtCompileIndex]) { - set->BreakAtCompileOption = true; - changed = true; + set.cloned()->BreakAtCompileOption = true; } if (!_modified[BreakAtExecuteIndex]) { - set->BreakAtExecuteOption = true; - changed = true; + set.cloned()->BreakAtExecuteOption = true; } } if (!_modified[LogIndex]) { bool log = CompilerOracle::should_log(method); if (log != set->LogOption) { - set->LogOption = log; - changed = true; + set.cloned()->LogOption = log; } } if (CompilerOracle::should_print(method)) { if (!_modified[PrintAssemblyIndex]) { - set->PrintAssemblyOption = true; - changed = true; + set.cloned()->PrintAssemblyOption = true; } } // Exclude as in should not compile == Enabled if (CompilerOracle::should_exclude(method)) { if (!_modified[ExcludeIndex]) { - set->ExcludeOption = true; - changed = true; + set.cloned()->ExcludeOption = true; } } // inline and dontinline (including exclude) are implemented in the directiveset accessors -#define init_default_cc(name, type, dvalue, cc_flag) { type v; if (!_modified[name##Index] && CompilerOracle::has_option_value(method, #cc_flag, v) && v != this->name##Option) { set->name##Option = v; changed = true;} } +#define init_default_cc(name, type, dvalue, cc_flag) { type v; if (!_modified[name##Index] && CompilerOracle::has_option_value(method, #cc_flag, v) && v != this->name##Option) { set.cloned()->name##Option = v; } } compilerdirectives_common_flags(init_default_cc) compilerdirectives_c2_flags(init_default_cc) compilerdirectives_c1_flags(init_default_cc) @@ -338,14 +374,14 @@ DirectiveSet* DirectiveSet::compilecommand_compatibility_init(const methodHandle ControlIntrinsicIter iter(option_value); if (need_reset) { - set->_intrinsic_control_words.fill_in(TriBool()); + 
set.cloned()->_intrinsic_control_words.fill_in(TriBool()); need_reset = false; } while (*iter != NULL) { vmIntrinsics::ID id = vmIntrinsics::find_id(*iter); if (id != vmIntrinsics::_none) { - set->_intrinsic_control_words[id] = iter.is_enabled(); + set.cloned()->_intrinsic_control_words[id] = iter.is_enabled(); } ++iter; @@ -358,29 +394,21 @@ DirectiveSet* DirectiveSet::compilecommand_compatibility_init(const methodHandle ControlIntrinsicIter iter(option_value, true/*disable_all*/); if (need_reset) { - set->_intrinsic_control_words.fill_in(TriBool()); + set.cloned()->_intrinsic_control_words.fill_in(TriBool()); need_reset = false; } while (*iter != NULL) { vmIntrinsics::ID id = vmIntrinsics::find_id(*iter); if (id != vmIntrinsics::_none) { - set->_intrinsic_control_words[id] = false; + set.cloned()->_intrinsic_control_words[id] = false; } ++iter; } } - - if (!changed) { - // We didn't actually update anything, discard. - delete set; - } else { - // We are returning a (parentless) copy. The originals parent don't need to account for this. - DirectivesStack::release(this); - return set; - } + return set.commit(); } // Nothing changed return this; diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp index 4ad484c33c6..87f290fa609 100644 --- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp @@ -3416,7 +3416,6 @@ class G1STWRefProcTaskProxy: public AbstractGangTask { virtual void work(uint worker_id) { // The reference processing task executed by a single worker. ResourceMark rm; - HandleMark hm; G1STWIsAliveClosure is_alive(_g1h); @@ -3789,7 +3788,6 @@ class G1EvacuateRegionsBaseTask : public AbstractGangTask { { ResourceMark rm; - HandleMark hm; G1ParScanThreadState* pss = _per_thread_states->state_for_worker(worker_id); pss->set_ref_discoverer(_g1h->ref_processor_stw()); diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp index da6816acaf4..5d1c7f9a877 100644 --- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp +++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp @@ -1479,7 +1479,6 @@ class G1CMRefProcTaskProxy : public AbstractGangTask { virtual void work(uint worker_id) { ResourceMark rm; - HandleMark hm; G1CMTask* task = _cm->task(worker_id); G1CMIsAliveClosure g1_is_alive(_g1h); G1CMKeepAliveAndDrainClosure g1_par_keep_alive(_cm, task, false /* is_serial */); @@ -1508,7 +1507,6 @@ void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task, uint ergo_workers) void G1ConcurrentMark::weak_refs_work(bool clear_all_soft_refs) { ResourceMark rm; - HandleMark hm; // Is alive closure. 
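The DirectiveSetPtr introduced in the compilerDirectives.cpp hunk above replaces the old clone-up-front-and-track-"changed" pattern with copy-on-write: reads go through operator->(), the first cloned() call makes the copy, and commit() hands back whichever object ends up live. The model below uses stand-in types, not the HotSpot classes, and omits the refcounting detail that the real commit() also releases the original on the directives stack; it is only meant to show the shape of the pattern.

    #include <cassert>

    struct Directives {               // stand-in for DirectiveSet
      bool log_option = false;
    };

    class CowPtr {                    // stand-in for DirectiveSetPtr
      Directives* _origin;
      Directives* _clone;
     public:
      explicit CowPtr(Directives* origin) : _origin(origin), _clone(nullptr) {}
      const Directives* operator->() const { return _clone ? _clone : _origin; }
      Directives* cloned() {          // first mutation triggers the copy
        if (_clone == nullptr) {
          _clone = new Directives(*_origin);
        }
        return _clone;
      }
      Directives* commit() {          // caller receives whichever object is current
        return _clone ? _clone : _origin;
      }
    };

    int main() {
      Directives shared;              // imagine the shared, stack-managed set
      CowPtr set(&shared);
      assert(!set->log_option);       // pure read: no clone is made
      set.cloned()->log_option = true;
      assert(!shared.log_option && set->log_option);   // original left untouched
      Directives* result = set.commit();
      if (result != &shared) {
        delete result;                // we own the parentless copy
      }
      return 0;
    }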
G1CMIsAliveClosure g1_is_alive(_g1h); @@ -1755,7 +1753,6 @@ class G1CMRemarkTask : public AbstractGangTask { task->record_start_time(); { ResourceMark rm; - HandleMark hm; G1RemarkThreadsClosure threads_f(G1CollectedHeap::heap(), task); Threads::threads_do(&threads_f); @@ -1779,7 +1776,6 @@ class G1CMRemarkTask : public AbstractGangTask { void G1ConcurrentMark::finalize_marking() { ResourceMark rm; - HandleMark hm; _g1h->ensure_parsability(false); diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp index 742190434f7..445b4906e50 100644 --- a/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp +++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp @@ -153,7 +153,7 @@ void G1ConcurrentMarkThread::run_service() { GCTraceConcTime(Info, gc) tt("Concurrent Cycle"); { ResourceMark rm; - HandleMark hm; + double cycle_start = os::elapsedVTime(); { diff --git a/src/hotspot/share/gc/g1/g1FullCollector.cpp b/src/hotspot/share/gc/g1/g1FullCollector.cpp index 0242e45eef5..9846ba62593 100644 --- a/src/hotspot/share/gc/g1/g1FullCollector.cpp +++ b/src/hotspot/share/gc/g1/g1FullCollector.cpp @@ -277,7 +277,6 @@ void G1FullCollector::verify_after_marking() { return; } - HandleMark hm; // handle scope #if COMPILER2_OR_JVMCI DerivedPointerTableDeactivate dpt_deact; #endif diff --git a/src/hotspot/share/gc/g1/g1HeapVerifier.cpp b/src/hotspot/share/gc/g1/g1HeapVerifier.cpp index fc3330f2d85..62f0b5d41f5 100644 --- a/src/hotspot/share/gc/g1/g1HeapVerifier.cpp +++ b/src/hotspot/share/gc/g1/g1HeapVerifier.cpp @@ -449,7 +449,6 @@ class G1ParVerifyTask: public AbstractGangTask { } void work(uint worker_id) { - HandleMark hm; VerifyRegionClosure blk(true, _vo); _g1h->heap_region_par_iterate_from_worker_offset(&blk, &_hrclaimer, worker_id); if (blk.failures()) { @@ -619,7 +618,6 @@ double G1HeapVerifier::verify(G1VerifyType type, VerifyOption vo, const char* ms if (should_verify(type) && _g1h->total_collections() >= VerifyGCStartAt) { double verify_start = os::elapsedTime(); - HandleMark hm; // Discard invalid handles created during verification prepare_for_verify(); Universe::verify(vo, msg); verify_time_ms = (os::elapsedTime() - verify_start) * 1000; diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.cpp b/src/hotspot/share/gc/parallel/psParallelCompact.cpp index 7367395edbc..a6ec83af345 100644 --- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp +++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp @@ -1008,7 +1008,6 @@ void PSParallelCompact::pre_compact() heap->ensure_parsability(true); // retire TLABs if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) { - HandleMark hm; // Discard invalid handles created during verification Universe::verify("Before GC"); } @@ -1789,7 +1788,6 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) { { ResourceMark rm; - HandleMark hm; const uint active_workers = WorkerPolicy::calc_active_workers(ParallelScavengeHeap::heap()->workers().total_workers(), @@ -1946,7 +1944,6 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) { #endif // ASSERT if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) { - HandleMark hm; // Discard invalid handles created during verification Universe::verify("After GC"); } diff --git a/src/hotspot/share/gc/parallel/psScavenge.cpp b/src/hotspot/share/gc/parallel/psScavenge.cpp index 633eb0e1d85..e44cf30348a 100644 --- a/src/hotspot/share/gc/parallel/psScavenge.cpp +++ 
b/src/hotspot/share/gc/parallel/psScavenge.cpp @@ -430,13 +430,11 @@ bool PSScavenge::invoke_no_policy() { heap->ensure_parsability(true); // retire TLABs if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) { - HandleMark hm; // Discard invalid handles created during verification Universe::verify("Before GC"); } { ResourceMark rm; - HandleMark hm; GCTraceCPUTime tcpu; GCTraceTime(Info, gc) tm("Pause Young", NULL, gc_cause, true); @@ -714,7 +712,6 @@ bool PSScavenge::invoke_no_policy() { } if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) { - HandleMark hm; // Discard invalid handles created during verification Universe::verify("After GC"); } diff --git a/src/hotspot/share/gc/shared/collectedHeap.cpp b/src/hotspot/share/gc/shared/collectedHeap.cpp index fec5c3b76a4..0b7c47ea859 100644 --- a/src/hotspot/share/gc/shared/collectedHeap.cpp +++ b/src/hotspot/share/gc/shared/collectedHeap.cpp @@ -229,20 +229,21 @@ CollectedHeap::CollectedHeap() : // heap lock is already held and that we are executing in // the context of the vm thread. void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) { - assert(Thread::current()->is_VM_thread(), "Precondition#1"); + Thread* thread = Thread::current(); + assert(thread->is_VM_thread(), "Precondition#1"); assert(Heap_lock->is_locked(), "Precondition#2"); GCCauseSetter gcs(this, cause); switch (cause) { case GCCause::_heap_inspection: case GCCause::_heap_dump: case GCCause::_metadata_GC_threshold : { - HandleMark hm; + HandleMark hm(thread); do_full_collection(false); // don't clear all soft refs break; } case GCCause::_archive_time_gc: case GCCause::_metadata_GC_clear_soft_refs: { - HandleMark hm; + HandleMark hm(thread); do_full_collection(true); // do clear all soft refs break; } @@ -412,14 +413,14 @@ CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap) void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap) { DEBUG_ONLY(fill_args_check(start, words);) - HandleMark hm; // Free handles before leaving. + HandleMark hm(Thread::current()); // Free handles before leaving. fill_with_object_impl(start, words, zap); } void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap) { DEBUG_ONLY(fill_args_check(start, words);) - HandleMark hm; // Free handles before leaving. + HandleMark hm(Thread::current()); // Free handles before leaving. // Multiple objects may be required depending on the filler array maximum size. Fill // the range up to that with objects that are filler_array_max_size sized. The diff --git a/src/hotspot/share/gc/shared/gcVMOperations.cpp b/src/hotspot/share/gc/shared/gcVMOperations.cpp index 13f95089c0d..d046e79347c 100644 --- a/src/hotspot/share/gc/shared/gcVMOperations.cpp +++ b/src/hotspot/share/gc/shared/gcVMOperations.cpp @@ -130,7 +130,6 @@ bool VM_GC_HeapInspection::collect() { } void VM_GC_HeapInspection::doit() { - HandleMark hm; Universe::heap()->ensure_parsability(false); // must happen, even if collection does // not happen (e.g. due to GCLocker) // or _full_gc being false diff --git a/src/hotspot/share/gc/shared/genCollectedHeap.cpp b/src/hotspot/share/gc/shared/genCollectedHeap.cpp index b57d31d75f6..5b30fc4d2fe 100644 --- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp +++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp @@ -317,7 +317,6 @@ HeapWord* GenCollectedHeap::mem_allocate_work(size_t size, // Loop until the allocation is satisfied, or unsatisfied after GC. 
for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) { - HandleMark hm; // Discard any handles allocated in each iteration. // First allocation attempt is lock-free. Generation *young = _young_gen; @@ -477,7 +476,6 @@ void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t siz log_trace(gc)("%s invoke=%d size=" SIZE_FORMAT, heap()->is_young_gen(gen) ? "Young" : "Old", gen->stat_record()->invocations, size * HeapWordSize); if (run_verification && VerifyBeforeGC) { - HandleMark hm; // Discard invalid handles created during verification Universe::verify("Before GC"); } COMPILER2_OR_JVMCI_PRESENT(DerivedPointerTable::clear()); @@ -502,7 +500,6 @@ void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t siz // weak refs more uniform (and indeed remove such concerns // from GCH). XXX - HandleMark hm; // Discard invalid handles created during gc save_marks(); // save marks for all gens // We want to discover references, but not process them yet. // This mode is disabled in process_discovered_references if the @@ -535,7 +532,6 @@ void GenCollectedHeap::collect_generation(Generation* gen, bool full, size_t siz update_gc_stats(gen, full); if (run_verification && VerifyAfterGC) { - HandleMark hm; // Discard invalid handles created during verification Universe::verify("After GC"); } } diff --git a/src/hotspot/share/gc/shared/oopStorageSet.hpp b/src/hotspot/share/gc/shared/oopStorageSet.hpp index b4952b8358e..ff8a43bd65a 100644 --- a/src/hotspot/share/gc/shared/oopStorageSet.hpp +++ b/src/hotspot/share/gc/shared/oopStorageSet.hpp @@ -38,7 +38,7 @@ class OopStorageSet : public AllStatic { public: // Must be updated when new OopStorages are introduced static const uint strong_count = 2; - static const uint weak_count = 4; + static const uint weak_count = 4 JFR_ONLY(+ 1); static const uint all_count = strong_count + weak_count; private: diff --git a/src/hotspot/share/gc/shared/weakProcessorPhases.cpp b/src/hotspot/share/gc/shared/weakProcessorPhases.cpp index 2c2fc41662b..86d82290324 100644 --- a/src/hotspot/share/gc/shared/weakProcessorPhases.cpp +++ b/src/hotspot/share/gc/shared/weakProcessorPhases.cpp @@ -27,19 +27,15 @@ #include "utilities/debug.hpp" #include "utilities/macros.hpp" -#if INCLUDE_JFR -#include "jfr/jfr.hpp" -#endif // INCLUDE_JFR - #if INCLUDE_JVMTI #include "prims/jvmtiExport.hpp" #endif // INCLUDE_JVMTI -// serial_phase_count is 0 if JFR and JVMTI are both not built, +// serial_phase_count is 0 if JVMTI is not built, // requiring some code to be careful to avoid tautological checks // that some compilers warn about. 
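The oopStorageSet.hpp and weakProcessorPhases.hpp hunks above lean on HotSpot's conditional-inclusion macros to fold optional subsystems into compile-time counts, so the JFR weak phase disappears from the serial list while its slot reappears as an extra weak OopStorage. A self-contained sketch of how such a count expands; the macro is modeled locally here rather than pulled from utilities/macros.hpp.

    #include <cstdio>

    #define INCLUDE_JFR 1             // imagine the build system set this
    #if INCLUDE_JFR
    #define JFR_ONLY(code) code
    #else
    #define JFR_ONLY(code)
    #endif

    // With JFR in the build this is "4 + 1"; in a minimal build it stays "4".
    static const unsigned weak_count = 4 JFR_ONLY(+ 1);

    int main() {
      printf("weak_count = %u\n", weak_count);   // prints 5 for a JFR-enabled build
      return 0;
    }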
-#define HAVE_SERIAL_PHASES (INCLUDE_JVMTI || INCLUDE_JFR) +#define HAVE_SERIAL_PHASES INCLUDE_JVMTI WeakProcessorPhases::Phase WeakProcessorPhases::serial_phase(uint value) { #if HAVE_SERIAL_PHASES @@ -109,7 +105,6 @@ void WeakProcessorPhases::Iterator::verify_dereferenceable() const { const char* WeakProcessorPhases::description(Phase phase) { switch (phase) { JVMTI_ONLY(case jvmti: return "JVMTI weak processing";) - JFR_ONLY(case jfr: return "JFR weak processing";) default: ShouldNotReachHere(); return "Invalid serial weak processing phase"; @@ -119,7 +114,6 @@ const char* WeakProcessorPhases::description(Phase phase) { WeakProcessorPhases::Processor WeakProcessorPhases::processor(Phase phase) { switch (phase) { JVMTI_ONLY(case jvmti: return &JvmtiExport::weak_oops_do;) - JFR_ONLY(case jfr: return &Jfr::weak_oops_do;) default: ShouldNotReachHere(); return NULL; diff --git a/src/hotspot/share/gc/shared/weakProcessorPhases.hpp b/src/hotspot/share/gc/shared/weakProcessorPhases.hpp index 3103373001c..8730a8a590f 100644 --- a/src/hotspot/share/gc/shared/weakProcessorPhases.hpp +++ b/src/hotspot/share/gc/shared/weakProcessorPhases.hpp @@ -41,15 +41,14 @@ class WeakProcessorPhases : AllStatic { typedef void (*Processor)(BoolObjectClosure*, OopClosure*); enum Phase { - // Serial phases. - JVMTI_ONLY(jvmti JFR_ONLY(COMMA)) - JFR_ONLY(jfr) + // Serial phase. + JVMTI_ONLY(jvmti) // Additional implicit phase values follow for oopstorages. }; static const uint serial_phase_start = 0; - static const uint serial_phase_count = 0 JVMTI_ONLY(+ 1) JFR_ONLY(+ 1); + static const uint serial_phase_count = 0 JVMTI_ONLY(+ 1); static const uint oopstorage_phase_start = serial_phase_count; static const uint oopstorage_phase_count = OopStorageSet::weak_count; static const uint phase_count = serial_phase_count + oopstorage_phase_count; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp index 81181e731ee..54582ee10de 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentMark.cpp @@ -654,8 +654,9 @@ class ShenandoahRefProcTaskProxy : public AbstractGangTask { } void work(uint worker_id) { - ResourceMark rm; - HandleMark hm; + Thread* current_thread = Thread::current(); + ResourceMark rm(current_thread); + HandleMark hm(current_thread); assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); ShenandoahHeap* heap = ShenandoahHeap::heap(); ShenandoahParallelWorkerSession worker_session(worker_id); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp b/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp index 1e07ed35d12..c354f510c93 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahPhaseTimings.hpp @@ -25,7 +25,6 @@ #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHPHASETIMINGS_HPP #define SHARE_GC_SHENANDOAH_SHENANDOAHPHASETIMINGS_HPP -#include "jfr/jfrEvents.hpp" #include "gc/shenandoah/shenandoahNumberSeq.hpp" #include "gc/shared/workerDataArray.hpp" #include "memory/allocation.hpp" @@ -43,7 +42,6 @@ class outputStream; f(CNT_PREFIX ## ObjectSynchronizerRoots, DESC_PREFIX "Synchronizer Roots") \ f(CNT_PREFIX ## CLDGRoots, DESC_PREFIX "CLDG Roots") \ f(CNT_PREFIX ## JVMTIWeakRoots, DESC_PREFIX "JVMTI Weak Roots") \ - f(CNT_PREFIX ## JFRWeakRoots, DESC_PREFIX "JFR Weak Roots") \ f(CNT_PREFIX ## StringDedupTableRoots, DESC_PREFIX "Dedup Table 
Roots") \ f(CNT_PREFIX ## StringDedupQueueRoots, DESC_PREFIX "Dedup Queue Roots") \ f(CNT_PREFIX ## FinishQueues, DESC_PREFIX "Finish Queues") \ diff --git a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp index 1e71a4863bf..4f8cebab6f9 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.cpp @@ -35,7 +35,6 @@ #include "gc/shenandoah/shenandoahPhaseTimings.hpp" #include "gc/shenandoah/shenandoahStringDedup.hpp" #include "gc/shenandoah/shenandoahVMOperations.hpp" -#include "jfr/jfr.hpp" #include "memory/iterator.hpp" #include "memory/resourceArea.hpp" #include "memory/universe.hpp" @@ -81,15 +80,8 @@ ShenandoahJVMTIWeakRoot::ShenandoahJVMTIWeakRoot(ShenandoahPhaseTimings::Phase p } #endif // INCLUDE_JVMTI -#if INCLUDE_JFR -ShenandoahJFRWeakRoot::ShenandoahJFRWeakRoot(ShenandoahPhaseTimings::Phase phase) : - ShenandoahWeakSerialRoot(&Jfr::weak_oops_do, phase, ShenandoahPhaseTimings::JFRWeakRoots) { -} -#endif // INCLUDE_JFR - void ShenandoahSerialWeakRoots::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive, uint worker_id) { JVMTI_ONLY(_jvmti_weak_roots.weak_oops_do(is_alive, keep_alive, worker_id);) - JFR_ONLY(_jfr_weak_roots.weak_oops_do(is_alive, keep_alive, worker_id);) } void ShenandoahSerialWeakRoots::weak_oops_do(OopClosure* cl, uint worker_id) { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp index bf20e8f9e9d..efafc134ce3 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahRootProcessor.hpp @@ -79,21 +79,12 @@ class ShenandoahJVMTIWeakRoot : public ShenandoahWeakSerialRoot { }; #endif // INCLUDE_JVMTI -#if INCLUDE_JFR -class ShenandoahJFRWeakRoot : public ShenandoahWeakSerialRoot { -public: - ShenandoahJFRWeakRoot(ShenandoahPhaseTimings::Phase phase); -}; -#endif // INCLUDE_JFR - class ShenandoahSerialWeakRoots { private: JVMTI_ONLY(ShenandoahJVMTIWeakRoot _jvmti_weak_roots;) - JFR_ONLY(ShenandoahJFRWeakRoot _jfr_weak_roots;) public: ShenandoahSerialWeakRoots(ShenandoahPhaseTimings::Phase phase) JVMTI_ONLY(: _jvmti_weak_roots(phase)) - JFR_ONLY(NOT_JVMTI(:) JVMTI_ONLY(COMMA) _jfr_weak_roots(phase)) {}; void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* keep_alive, uint worker_id); diff --git a/src/hotspot/share/gc/z/zArguments.cpp b/src/hotspot/share/gc/z/zArguments.cpp index ce786cc30fd..05b295c0c7f 100644 --- a/src/hotspot/share/gc/z/zArguments.cpp +++ b/src/hotspot/share/gc/z/zArguments.cpp @@ -99,18 +99,10 @@ void ZArguments::initialize() { FLAG_SET_DEFAULT(VerifyDuringStartup, false); FLAG_SET_DEFAULT(VerifyBeforeExit, false); - // Verification before heap iteration not (yet) supported, for the - // same reason we need fixup_partial_loads - FLAG_SET_DEFAULT(VerifyBeforeIteration, false); - if (VerifyBeforeGC || VerifyDuringGC || VerifyAfterGC) { FLAG_SET_DEFAULT(ZVerifyRoots, true); FLAG_SET_DEFAULT(ZVerifyObjects, true); } - - // Verification of stacks not (yet) supported, for the same reason - // we need fixup_partial_loads - DEBUG_ONLY(FLAG_SET_DEFAULT(VerifyStack, false)); } size_t ZArguments::conservative_max_heap_alignment() { diff --git a/src/hotspot/share/gc/z/zHeap.hpp b/src/hotspot/share/gc/z/zHeap.hpp index 2f488bfca68..f8505274ebb 100644 --- a/src/hotspot/share/gc/z/zHeap.hpp +++ b/src/hotspot/share/gc/z/zHeap.hpp @@ -64,7 +64,6 @@ class 
ZHeap { void flip_to_remapped(); void out_of_memory(); - void fixup_partial_loads(); public: static ZHeap* heap(); diff --git a/src/hotspot/share/gc/z/zRootsIterator.cpp b/src/hotspot/share/gc/z/zRootsIterator.cpp index a62392dba50..53b76f1f62c 100644 --- a/src/hotspot/share/gc/z/zRootsIterator.cpp +++ b/src/hotspot/share/gc/z/zRootsIterator.cpp @@ -50,9 +50,6 @@ #include "runtime/thread.hpp" #include "runtime/vmThread.hpp" #include "utilities/debug.hpp" -#if INCLUDE_JFR -#include "jfr/jfr.hpp" -#endif static const ZStatSubPhase ZSubPhasePauseRootsSetup("Pause Roots Setup"); static const ZStatSubPhase ZSubPhasePauseRoots("Pause Roots"); @@ -74,7 +71,6 @@ static const ZStatSubPhase ZSubPhasePauseWeakRootsSetup("Pause Weak Roots Setup" static const ZStatSubPhase ZSubPhasePauseWeakRoots("Pause Weak Roots"); static const ZStatSubPhase ZSubPhasePauseWeakRootsTeardown("Pause Weak Roots Teardown"); static const ZStatSubPhase ZSubPhasePauseWeakRootsJVMTIWeakExport("Pause Weak Roots JVMTIWeakExport"); -static const ZStatSubPhase ZSubPhasePauseWeakRootsJFRWeak("Pause Weak Roots JFRWeak"); static const ZStatSubPhase ZSubPhaseConcurrentWeakRoots("Concurrent Weak Roots"); static const ZStatSubPhase ZSubPhaseConcurrentWeakRootsOopStorageSet("Concurrent Weak Roots OopStorageSet"); @@ -295,8 +291,7 @@ void ZConcurrentRootsIterator::oops_do(ZRootsIteratorClosure* cl) { } ZWeakRootsIterator::ZWeakRootsIterator() : - _jvmti_weak_export(this), - _jfr_weak(this) { + _jvmti_weak_export(this) { assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); ZStatTimer timer(ZSubPhasePauseWeakRootsSetup); } @@ -310,17 +305,9 @@ void ZWeakRootsIterator::do_jvmti_weak_export(BoolObjectClosure* is_alive, ZRoot JvmtiExport::weak_oops_do(is_alive, cl); } -void ZWeakRootsIterator::do_jfr_weak(BoolObjectClosure* is_alive, ZRootsIteratorClosure* cl) { -#if INCLUDE_JFR - ZStatTimer timer(ZSubPhasePauseWeakRootsJFRWeak); - Jfr::weak_oops_do(is_alive, cl); -#endif -} - void ZWeakRootsIterator::weak_oops_do(BoolObjectClosure* is_alive, ZRootsIteratorClosure* cl) { ZStatTimer timer(ZSubPhasePauseWeakRoots); _jvmti_weak_export.weak_oops_do(is_alive, cl); - _jfr_weak.weak_oops_do(is_alive, cl); } void ZWeakRootsIterator::oops_do(ZRootsIteratorClosure* cl) { diff --git a/src/hotspot/share/gc/z/zRootsIterator.hpp b/src/hotspot/share/gc/z/zRootsIterator.hpp index 82ca26bfdde..0f2496e0be7 100644 --- a/src/hotspot/share/gc/z/zRootsIterator.hpp +++ b/src/hotspot/share/gc/z/zRootsIterator.hpp @@ -170,10 +170,8 @@ class ZConcurrentRootsIteratorClaimNone : public ZConcurrentRootsIterator { class ZWeakRootsIterator { private: void do_jvmti_weak_export(BoolObjectClosure* is_alive, ZRootsIteratorClosure* cl); - void do_jfr_weak(BoolObjectClosure* is_alive, ZRootsIteratorClosure* cl); ZSerialWeakOopsDo _jvmti_weak_export; - ZSerialWeakOopsDo _jfr_weak; public: ZWeakRootsIterator(); diff --git a/src/hotspot/share/include/jvm.h b/src/hotspot/share/include/jvm.h index 0f9202f5946..a4b97139e58 100644 --- a/src/hotspot/share/include/jvm.h +++ b/src/hotspot/share/include/jvm.h @@ -158,6 +158,9 @@ JVM_MaxMemory(void); JNIEXPORT jint JNICALL JVM_ActiveProcessorCount(void); +JNIEXPORT jboolean JNICALL +JVM_IsUseContainerSupport(void); + JNIEXPORT void * JNICALL JVM_LoadLibrary(const char *name); diff --git a/src/hotspot/share/interpreter/interpreterRuntime.cpp b/src/hotspot/share/interpreter/interpreterRuntime.cpp index 39c48978c91..c2d8c559564 100644 --- a/src/hotspot/share/interpreter/interpreterRuntime.cpp +++ 
b/src/hotspot/share/interpreter/interpreterRuntime.cpp @@ -1349,8 +1349,6 @@ JRT_LEAF(void, InterpreterRuntime::verify_mdp(Method* method, address bcp, addre address mdp2 = mdo->bci_to_dp(bci); if (mdp != mdp2) { ResourceMark rm; - ResetNoHandleMark rnm; // In a LEAF entry. - HandleMark hm; tty->print_cr("FAILED verify : actual mdp %p expected mdp %p @ bci %d", mdp, mdp2, bci); int current_di = mdo->dp_to_di(mdp); int expected_di = mdo->dp_to_di(mdp2); @@ -1371,7 +1369,6 @@ JRT_END JRT_ENTRY(void, InterpreterRuntime::update_mdp_for_ret(JavaThread* thread, int return_bci)) assert(ProfileInterpreter, "must be profiling interpreter"); ResourceMark rm(thread); - HandleMark hm(thread); LastFrameAccessor last_frame(thread); assert(last_frame.is_interpreted_frame(), "must come from interpreter"); MethodData* h_mdo = last_frame.method()->method_data(); @@ -1733,8 +1730,6 @@ JRT_LEAF(void, InterpreterRuntime::popframe_move_outgoing_args(JavaThread* threa if (src_address == dest_address) { return; } - ResetNoHandleMark rnm; // In a LEAF entry. - HandleMark hm; ResourceMark rm; LastFrameAccessor last_frame(thread); assert(last_frame.is_interpreted_frame(), ""); diff --git a/src/hotspot/share/interpreter/oopMapCache.cpp b/src/hotspot/share/interpreter/oopMapCache.cpp index 0dca8d758f5..152d59d643f 100644 --- a/src/hotspot/share/interpreter/oopMapCache.cpp +++ b/src/hotspot/share/interpreter/oopMapCache.cpp @@ -321,7 +321,6 @@ void OopMapCacheEntry::fill_for_native(const methodHandle& mh) { void OopMapCacheEntry::fill(const methodHandle& method, int bci) { - HandleMark hm; // Flush entry to deallocate an existing entry flush(); set_method(method()); diff --git a/src/hotspot/share/jfr/jfr.cpp b/src/hotspot/share/jfr/jfr.cpp index c7388267996..7fb4eb29c85 100644 --- a/src/hotspot/share/jfr/jfr.cpp +++ b/src/hotspot/share/jfr/jfr.cpp @@ -102,12 +102,6 @@ void Jfr::on_vm_error_report(outputStream* st) { } } -void Jfr::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) { - if (LeakProfiler::is_running()) { - LeakProfiler::weak_oops_do(is_alive, f); - } -} - bool Jfr::on_flight_recorder_option(const JavaVMOption** option, char* delimiter) { return JfrOptionSet::parse_flight_recorder_option(option, delimiter); } diff --git a/src/hotspot/share/jfr/jfr.hpp b/src/hotspot/share/jfr/jfr.hpp index 0e9e9035de7..a249089d570 100644 --- a/src/hotspot/share/jfr/jfr.hpp +++ b/src/hotspot/share/jfr/jfr.hpp @@ -28,9 +28,7 @@ #include "jni.h" #include "memory/allocation.hpp" -class BoolObjectClosure; class JavaThread; -class OopClosure; class Thread; extern "C" void JNICALL jfr_register_natives(JNIEnv*, jclass); @@ -53,7 +51,6 @@ class Jfr : AllStatic { static bool on_flight_recorder_option(const JavaVMOption** option, char* delimiter); static bool on_start_flight_recording_option(const JavaVMOption** option, char* delimiter); static void on_vm_error_report(outputStream* st); - static void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f); static void exclude_thread(Thread* thread); static bool is_excluded(Thread* thread); static void include_thread(Thread* thread); diff --git a/src/hotspot/share/jfr/jni/jfrJavaSupport.cpp b/src/hotspot/share/jfr/jni/jfrJavaSupport.cpp index 2530f9ad6ae..423f0a0be1b 100644 --- a/src/hotspot/share/jfr/jni/jfrJavaSupport.cpp +++ b/src/hotspot/share/jfr/jni/jfrJavaSupport.cpp @@ -835,7 +835,7 @@ void JfrJavaSupport::on_thread_start(Thread* t) { return; } DEBUG_ONLY(check_new_unstarted_java_thread(t);) - HandleMark hm; + HandleMark hm(t); if 
(check_exclusion_state_on_thread_start((JavaThread*)t)) { JfrThreadLocal::exclude(t); } diff --git a/src/hotspot/share/jfr/leakprofiler/leakProfiler.cpp b/src/hotspot/share/jfr/leakprofiler/leakProfiler.cpp index e6f064ee0e0..cf611d299dc 100644 --- a/src/hotspot/share/jfr/leakprofiler/leakProfiler.cpp +++ b/src/hotspot/share/jfr/leakprofiler/leakProfiler.cpp @@ -89,14 +89,6 @@ void LeakProfiler::emit_events(int64_t cutoff_ticks, bool emit_all, bool skip_bf ObjectSampler::release(); } -void LeakProfiler::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) { - assert(SafepointSynchronize::is_at_safepoint(), - "Leak Profiler::oops_do(...) may only be called during safepoint"); - if (is_running()) { - ObjectSampler::weak_oops_do(is_alive, f); - } -} - void LeakProfiler::sample(HeapWord* object, size_t size, JavaThread* thread) { assert(is_running(), "invariant"); assert(thread != NULL, "invariant"); diff --git a/src/hotspot/share/jfr/leakprofiler/leakProfiler.hpp b/src/hotspot/share/jfr/leakprofiler/leakProfiler.hpp index 36e07bbd3a3..c541ff1086d 100644 --- a/src/hotspot/share/jfr/leakprofiler/leakProfiler.hpp +++ b/src/hotspot/share/jfr/leakprofiler/leakProfiler.hpp @@ -27,8 +27,6 @@ #include "memory/allocation.hpp" -class BoolObjectClosure; -class OopClosure; class JavaThread; class LeakProfiler : public AllStatic { @@ -39,9 +37,6 @@ class LeakProfiler : public AllStatic { static void emit_events(int64_t cutoff_ticks, bool emit_all, bool skip_bfs); static void sample(HeapWord* object, size_t size, JavaThread* thread); - - // Called by GC - static void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f); }; #endif // SHARE_JFR_LEAKPROFILER_LEAKPROFILER_HPP diff --git a/src/hotspot/share/jfr/leakprofiler/sampling/objectSample.cpp b/src/hotspot/share/jfr/leakprofiler/sampling/objectSample.cpp index c56043c2445..725eb0b672e 100644 --- a/src/hotspot/share/jfr/leakprofiler/sampling/objectSample.cpp +++ b/src/hotspot/share/jfr/leakprofiler/sampling/objectSample.cpp @@ -23,16 +23,36 @@ */ #include "precompiled.hpp" #include "jfr/leakprofiler/sampling/objectSample.hpp" -#include "oops/access.inline.hpp" +#include "jfr/leakprofiler/sampling/objectSampler.hpp" +#include "oops/weakHandle.inline.hpp" +#include "runtime/handles.inline.hpp" + +void ObjectSample::reset() { + release(); + set_stack_trace_id(0); + set_stack_trace_hash(0); + release_references(); +} const oop ObjectSample::object() const { - return NativeAccess::oop_load(&_object); + return _object.resolve(); +} + +bool ObjectSample::is_dead() const { + return _object.peek() == NULL; } -const oop ObjectSample::object_raw() const { - return RawAccess<>::oop_load(&_object); +const oop* ObjectSample::object_addr() const { + return _object.ptr_raw(); } void ObjectSample::set_object(oop object) { - NativeAccess::oop_store(&_object, object); + assert(_object.is_empty(), "should be empty"); + Handle h(Thread::current(), object); + _object = WeakHandle(ObjectSampler::oop_storage(), h); +} + +void ObjectSample::release() { + _object.release(ObjectSampler::oop_storage()); + _object = WeakHandle(); } diff --git a/src/hotspot/share/jfr/leakprofiler/sampling/objectSample.hpp b/src/hotspot/share/jfr/leakprofiler/sampling/objectSample.hpp index 4f77d400f27..9401c77113a 100644 --- a/src/hotspot/share/jfr/leakprofiler/sampling/objectSample.hpp +++ b/src/hotspot/share/jfr/leakprofiler/sampling/objectSample.hpp @@ -31,6 +31,7 @@ #include "jfr/utilities/jfrTypes.hpp" #include "memory/allocation.hpp" #include "oops/oop.hpp" +#include 
"oops/weakHandle.hpp" #include "utilities/ticks.hpp" /* @@ -48,7 +49,7 @@ class ObjectSample : public JfrCHeapObj { JfrBlobHandle _stacktrace; JfrBlobHandle _thread; JfrBlobHandle _type_set; - oop _object; + WeakHandle _object; Ticks _allocation_time; traceid _stack_trace_id; traceid _thread_id; @@ -64,12 +65,7 @@ class ObjectSample : public JfrCHeapObj { _type_set.~JfrBlobHandle(); } - void reset() { - _object = NULL; - set_stack_trace_id(0); - set_stack_trace_hash(0); - release_references(); - } + void reset(); public: ObjectSample() : _next(NULL), @@ -77,7 +73,6 @@ class ObjectSample : public JfrCHeapObj { _stacktrace(), _thread(), _type_set(), - _object(NULL), _allocation_time(), _stack_trace_id(0), _thread_id(0), @@ -103,17 +98,14 @@ class ObjectSample : public JfrCHeapObj { _previous = prev; } - bool is_dead() const { - return object() == NULL; - } + bool is_dead() const; const oop object() const; - const oop object_raw() const; void set_object(oop object); - const oop* object_addr() const { - return &_object; - } + const oop* object_addr() const; + + void release(); int index() const { return _index; diff --git a/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp b/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp index 3782371ce09..9c956edf73d 100644 --- a/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp +++ b/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp @@ -23,6 +23,8 @@ */ #include "precompiled.hpp" +#include "gc/shared/oopStorage.hpp" +#include "gc/shared/oopStorageSet.hpp" #include "jfr/jfrEvents.hpp" #include "jfr/leakprofiler/sampling/objectSample.hpp" #include "jfr/leakprofiler/sampling/objectSampler.hpp" @@ -41,6 +43,40 @@ #include "runtime/safepoint.hpp" #include "runtime/thread.hpp" +// Timestamp of when the gc last processed the set of sampled objects. +static JfrTicks _last_sweep; + +// Condition variable to communicate that some sampled objects have been cleared by the gc +// and can therefore be removed from the sample priority queue. +static bool volatile _dead_samples = false; + +// The OopStorage instance is used to hold weak references to sampled objects. +// It is constructed and registered during VM initialization. This is a singleton +// that persist independent of the state of the ObjectSampler. +static OopStorage* _oop_storage = NULL; + +OopStorage* ObjectSampler::oop_storage() { return _oop_storage; } + +// Callback invoked by the GC after an iteration over the oop storage +// that may have cleared dead referents. num_dead is the number of entries +// already NULL or cleared by the iteration. +void ObjectSampler::oop_storage_gc_notification(size_t num_dead) { + if (num_dead != 0) { + // The ObjectSampler instance may have already been cleaned or a new + // instance was created concurrently. This allows for a small race where cleaning + // could be done again. 
+ Atomic::store(&_dead_samples, true); + _last_sweep = JfrTicks::now(); + } +} + +bool ObjectSampler::create_oop_storage() { + _oop_storage = OopStorageSet::create_weak("Weak JFR Old Object Samples"); + assert(_oop_storage != NULL, "invariant"); + _oop_storage->register_num_dead_callback(&oop_storage_gc_notification); + return true; +} + static ObjectSampler* _instance = NULL; static ObjectSampler& instance() { @@ -49,13 +85,14 @@ static ObjectSampler& instance() { } ObjectSampler::ObjectSampler(size_t size) : - _priority_queue(new SamplePriorityQueue(size)), - _list(new SampleList(size)), - _last_sweep(JfrTicks::now()), - _total_allocated(0), - _threshold(0), - _size(size), - _dead_samples(false) {} + _priority_queue(new SamplePriorityQueue(size)), + _list(new SampleList(size)), + _total_allocated(0), + _threshold(0), + _size(size) { + _last_sweep = JfrTicks::now(); + Atomic::store(&_dead_samples, false); +} ObjectSampler::~ObjectSampler() { delete _priority_queue; @@ -66,6 +103,7 @@ ObjectSampler::~ObjectSampler() { bool ObjectSampler::create(size_t size) { assert(SafepointSynchronize::is_at_safepoint(), "invariant"); + assert(_oop_storage != NULL, "should be already created"); assert(_instance == NULL, "invariant"); _instance = new ObjectSampler(size); return _instance != NULL; @@ -92,13 +130,11 @@ void ObjectSampler::destroy() { static volatile int _lock = 0; ObjectSampler* ObjectSampler::acquire() { - assert(is_created(), "invariant"); while (Atomic::cmpxchg(&_lock, 0, 1) == 1) {} return _instance; } void ObjectSampler::release() { - assert(is_created(), "invariant"); OrderAccess::fence(); _lock = 0; } @@ -150,9 +186,11 @@ void ObjectSampler::add(HeapWord* obj, size_t allocated, traceid thread_id, Java assert(thread != NULL, "invariant"); assert(thread->jfr_thread_local()->has_thread_blob(), "invariant"); - if (_dead_samples) { + if (Atomic::load(&_dead_samples)) { + // There's a small race where a GC scan might reset this to true, potentially + // causing a back-to-back scavenge. 
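The objectSample and objectSampler hunks above stop storing a raw oop in each ObjectSample and keep a WeakHandle backed by the sampler's OopStorage instead, so the collector clears dead referents during its normal weak-storage processing and the sampler simply observes a NULL on resolve(). The standalone model below captures that ownership shift with stand-in types; the real calls are WeakHandle::resolve/peek/release and OopStorageSet::create_weak, exactly as they appear in the patch.

    #include <cassert>
    #include <cstddef>
    #include <vector>

    struct Obj { int payload; };

    class WeakRegistry {                       // stand-in for an OopStorage of weak slots
      std::vector<Obj*> _slots;
     public:
      std::size_t add(Obj* o) { _slots.push_back(o); return _slots.size() - 1; }
      Obj* resolve(std::size_t slot) const { return _slots[slot]; }   // NULL once cleared
      void gc_clear(std::size_t slot) { _slots[slot] = nullptr; }     // done by the collector
    };

    int main() {
      WeakRegistry storage;
      Obj sampled{7};

      std::size_t handle = storage.add(&sampled);     // ObjectSample::set_object()
      assert(storage.resolve(handle) != nullptr);     // ObjectSample::object()

      storage.gc_clear(handle);                       // object became unreachable
      assert(storage.resolve(handle) == nullptr);     // ObjectSample::is_dead()
      return 0;
    }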
+ Atomic::store(&_dead_samples, false); scavenge(); - assert(!_dead_samples, "invariant"); } _total_allocated += allocated; @@ -199,12 +237,13 @@ void ObjectSampler::scavenge() { } current = next; } - _dead_samples = false; } void ObjectSampler::remove_dead(ObjectSample* sample) { assert(sample != NULL, "invariant"); assert(sample->is_dead(), "invariant"); + sample->release(); + ObjectSample* const previous = sample->prev(); // push span onto previous if (previous != NULL) { @@ -216,27 +255,6 @@ void ObjectSampler::remove_dead(ObjectSample* sample) { _list->release(sample); } -void ObjectSampler::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) { - assert(is_created(), "invariant"); - assert(SafepointSynchronize::is_at_safepoint(), "invariant"); - ObjectSampler& sampler = instance(); - ObjectSample* current = sampler._list->last(); - while (current != NULL) { - if (current->_object != NULL) { - if (is_alive->do_object_b(current->object_raw())) { - // The weakly referenced object is alive, update pointer - f->do_oop(const_cast(current->object_addr())); - } else { - // clear existing field to assist GC barriers - current->_object = NULL; - sampler._dead_samples = true; - } - } - current = current->next(); - } - sampler._last_sweep = JfrTicks::now(); -} - ObjectSample* ObjectSampler::last() const { return _list->last(); } @@ -267,6 +285,6 @@ ObjectSample* ObjectSampler::item_at(int index) { ); } -const JfrTicks& ObjectSampler::last_sweep() const { +const JfrTicks& ObjectSampler::last_sweep() { return _last_sweep; } diff --git a/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.hpp b/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.hpp index 4e75210488f..962735a309c 100644 --- a/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.hpp +++ b/src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.hpp @@ -30,9 +30,8 @@ typedef u8 traceid; -class BoolObjectClosure; class JavaThread; -class OopClosure; +class OopStorage; class ObjectSample; class SampleList; class SamplePriorityQueue; @@ -41,17 +40,17 @@ class SamplePriorityQueue; // making sure the samples are evenly distributed as // new entries are added and removed. class ObjectSampler : public CHeapObj { + friend class JfrRecorder; friend class LeakProfiler; + friend class ObjectSample; friend class StartOperation; friend class StopOperation; private: SamplePriorityQueue* _priority_queue; SampleList* _list; - JfrTicks _last_sweep; size_t _total_allocated; size_t _threshold; size_t _size; - bool _dead_samples; // Lifecycle explicit ObjectSampler(size_t size); @@ -66,24 +65,26 @@ class ObjectSampler : public CHeapObj { void scavenge(); void remove_dead(ObjectSample* sample); - // Called by GC - static void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f); - const ObjectSample* item_at(int index) const; ObjectSample* item_at(int index); int item_count() const; + // OopStorage + static bool create_oop_storage(); + static OopStorage* oop_storage(); + // Invoked by the GC post oop storage processing. 
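// The declaration that follows, oop_storage_gc_notification(), is the num_dead callback
// registered in create_oop_storage(). ObjectSample is listed as a friend above, presumably so
// the sample can reach this storage for its WeakHandle. A rough sketch of how the out-of-line
// ObjectSample definitions might look (illustrative only, not taken from this patch; assumes
// WeakHandle's resolve()/peek()/release() API and the oop_storage() accessor declared here):
//
//   const oop ObjectSample::object() const {
//     return _object.resolve();                        // strong resolve for use by JFR
//   }
//   bool ObjectSample::is_dead() const {
//     return _object.peek() == NULL;                   // cleared by the GC when unreachable
//   }
//   void ObjectSample::set_object(oop object) {
//     assert(_object.is_empty(), "should be empty");
//     Handle h(Thread::current(), object);
//     _object = WeakHandle(ObjectSampler::oop_storage(), h);
//   }
//   void ObjectSample::release() {                     // called from remove_dead()
//     _object.release(ObjectSampler::oop_storage());
//     _object = WeakHandle();
//   }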
+ static void oop_storage_gc_notification(size_t num_dead); + public: static ObjectSampler* sampler(); // For operations that require exclusive access (non-safepoint) static ObjectSampler* acquire(); static void release(); - + static const JfrTicks& last_sweep(); const ObjectSample* first() const; ObjectSample* last() const; const ObjectSample* last_resolved() const; void set_last_resolved(const ObjectSample* sample); - const JfrTicks& last_sweep() const; }; #endif // SHARE_JFR_LEAKPROFILER_SAMPLING_OBJECTSAMPLER_HPP diff --git a/src/hotspot/share/jfr/recorder/jfrRecorder.cpp b/src/hotspot/share/jfr/recorder/jfrRecorder.cpp index 6a899563641..254b5b5c27e 100644 --- a/src/hotspot/share/jfr/recorder/jfrRecorder.cpp +++ b/src/hotspot/share/jfr/recorder/jfrRecorder.cpp @@ -27,6 +27,7 @@ #include "jfr/dcmd/jfrDcmds.hpp" #include "jfr/instrumentation/jfrJvmtiAgent.hpp" #include "jfr/jni/jfrJavaSupport.hpp" +#include "jfr/leakprofiler/sampling/objectSampler.hpp" #include "jfr/periodic/jfrOSInterface.hpp" #include "jfr/periodic/sampling/jfrThreadSampler.hpp" #include "jfr/recorder/jfrRecorder.hpp" @@ -73,12 +74,20 @@ bool JfrRecorder::is_enabled() { return _enabled; } +bool JfrRecorder::create_oop_storages() { + // currently only a single weak oop storage for Leak Profiler + return ObjectSampler::create_oop_storage(); +} + bool JfrRecorder::on_create_vm_1() { if (!is_disabled()) { if (FlightRecorder || StartFlightRecording != NULL) { enable(); } } + if (!create_oop_storages()) { + return false; + } // fast time initialization return JfrTime::initialize(); } @@ -246,8 +255,9 @@ bool JfrRecorder::is_created() { } bool JfrRecorder::create_components() { - ResourceMark rm; - HandleMark hm; + // Move these down into the functions that might create handles! + ResourceMark rm(Thread::current()); + HandleMark hm(Thread::current()); if (!create_java_event_writer()) { return false; diff --git a/src/hotspot/share/jfr/recorder/jfrRecorder.hpp b/src/hotspot/share/jfr/recorder/jfrRecorder.hpp index 3b00303ea25..1fbda6544ef 100644 --- a/src/hotspot/share/jfr/recorder/jfrRecorder.hpp +++ b/src/hotspot/share/jfr/recorder/jfrRecorder.hpp @@ -45,6 +45,7 @@ class JfrRecorder : public JfrCHeapObj { static bool create_chunk_repository(); static bool create_java_event_writer(); static bool create_jvmti_agent(); + static bool create_oop_storages(); static bool create_os_interface(); static bool create_post_box(); static bool create_recorder_thread(); diff --git a/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp b/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp index a9fe041b6f3..f4a25d0e2f4 100644 --- a/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp +++ b/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -788,7 +788,7 @@ int CodeInstaller::estimate_stubs_size(JVMCI_TRAPS) { // perform data and call relocation on the CodeBuffer JVMCI::CodeInstallResult CodeInstaller::initialize_buffer(CodeBuffer& buffer, bool check_size, JVMCI_TRAPS) { - HandleMark hm; + HandleMark hm(Thread::current()); JVMCIObjectArray sites = this->sites(); int locs_buffer_size = JVMCIENV->get_length(sites) * (relocInfo::length_limit + sizeof(relocInfo)); diff --git a/src/hotspot/share/jvmci/jvmciCompiler.cpp b/src/hotspot/share/jvmci/jvmciCompiler.cpp index 712f2c5ba3a..f3e366a4621 100644 --- a/src/hotspot/share/jvmci/jvmciCompiler.cpp +++ b/src/hotspot/share/jvmci/jvmciCompiler.cpp @@ -57,8 +57,8 @@ void JVMCICompiler::bootstrap(TRAPS) { return; } _bootstrapping = true; - ResourceMark rm; - HandleMark hm; + ResourceMark rm(THREAD); + HandleMark hm(THREAD); if (PrintBootstrap) { tty->print("Bootstrapping JVMCI"); } @@ -144,11 +144,15 @@ void JVMCICompiler::compile_method(ciEnv* env, ciMethod* target, int entry_bci, // Print compilation timers and statistics void JVMCICompiler::print_timers() { + tty->print_cr(" JVMCI Compile Time: %7.3f s", stats()->total_time()); print_compilation_timers(); } // Print compilation timers and statistics void JVMCICompiler::print_compilation_timers() { - JVMCI_event_1("JVMCICompiler::print_timers"); - tty->print_cr(" JVMCI code install time: %6.3f s", _codeInstallTimer.seconds()); + double code_install_time = _codeInstallTimer.seconds(); + if (code_install_time != 0.0) { + tty->cr(); + tty->print_cr(" JVMCI code install time: %6.3f s", code_install_time); + } } diff --git a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp index 339324215dd..74035e538fe 100644 --- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp +++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp @@ -819,7 +819,7 @@ C2V_END C2V_VMENTRY_0(jint, installCode, (JNIEnv *env, jobject, jobject target, jobject compiled_code, jobject installed_code, jlong failed_speculations_address, jbyteArray speculations_obj)) - HandleMark hm; + HandleMark hm(THREAD); JNIHandleMark jni_hm(thread); JVMCIObject target_handle = JVMCIENV->wrap(target); @@ -880,7 +880,7 @@ C2V_END C2V_VMENTRY_0(jint, getMetadata, (JNIEnv *env, jobject, jobject target, jobject compiled_code, jobject metadata)) #if INCLUDE_AOT - HandleMark hm; + HandleMark hm(THREAD); assert(JVMCIENV->is_hotspot(), "AOT code is executed only in HotSpot mode"); JVMCIObject target_handle = JVMCIENV->wrap(target); @@ -968,7 +968,7 @@ C2V_VMENTRY(void, resetCompilationStatistics, (JNIEnv* env, jobject)) C2V_END C2V_VMENTRY_NULL(jobject, disassembleCodeBlob, (JNIEnv* env, jobject, jobject installedCode)) - HandleMark hm; + HandleMark hm(THREAD); if (installedCode == NULL) { JVMCI_THROW_MSG_NULL(NullPointerException, "installedCode is null"); @@ -1006,7 +1006,7 @@ C2V_VMENTRY_NULL(jobject, disassembleCodeBlob, (JNIEnv* env, jobject, jobject in C2V_END C2V_VMENTRY_NULL(jobject, getStackTraceElement, (JNIEnv* env, jobject, jobject jvmci_method, int bci)) - HandleMark hm; + HandleMark hm(THREAD); methodHandle method(THREAD, JVMCIENV->asMethod(jvmci_method)); JVMCIObject element = JVMCIENV->new_StackTraceElement(method, bci, JVMCI_CHECK_NULL); @@ -1018,7 +1018,7 @@ C2V_VMENTRY_NULL(jobject, executeHotSpotNmethod, (JNIEnv* env, jobject, jobject // and the return value would have to be wrapped as a JavaConstant. 
requireInHotSpot("executeHotSpotNmethod", JVMCI_CHECK_NULL); - HandleMark hm; + HandleMark hm(THREAD); JVMCIObject nmethod_mirror = JVMCIENV->wrap(hs_nmethod); nmethodLocker locker; @@ -1157,7 +1157,7 @@ C2V_VMENTRY_0(jboolean, setCountersSize, (JNIEnv* env, jobject, jint new_size)) C2V_END C2V_VMENTRY_0(jint, allocateCompileId, (JNIEnv* env, jobject, jobject jvmci_method, int entry_bci)) - HandleMark hm; + HandleMark hm(THREAD); if (jvmci_method == NULL) { JVMCI_THROW_0(NullPointerException); } @@ -2246,7 +2246,7 @@ C2V_VMENTRY_NULL(jlongArray, registerNativeMethods, (JNIEnv* env, jobject, jclas // Ensure the JVMCI shared library runtime is initialized. JVMCIEnv __peer_jvmci_env__(thread, false, __FILE__, __LINE__); JVMCIEnv* peerEnv = &__peer_jvmci_env__; - HandleMark hm; + HandleMark hm(THREAD); JVMCIObject receiver = runtime->get_HotSpotJVMCIRuntime(peerEnv); if (peerEnv->has_pending_exception()) { peerEnv->describe_pending_exception(true); diff --git a/src/hotspot/share/jvmci/jvmciEnv.cpp b/src/hotspot/share/jvmci/jvmciEnv.cpp index bbcbaeefdde..a900859b667 100644 --- a/src/hotspot/share/jvmci/jvmciEnv.cpp +++ b/src/hotspot/share/jvmci/jvmciEnv.cpp @@ -661,8 +661,8 @@ void JVMCIEnv::call_HotSpotJVMCIRuntime_bootstrapFinished (JVMCIObject runtime, } void JVMCIEnv::call_HotSpotJVMCIRuntime_shutdown (JVMCIObject runtime) { - HandleMark hm; JavaThread* THREAD = JavaThread::current(); + HandleMark hm(THREAD); if (is_hotspot()) { JavaCallArguments jargs; jargs.push_oop(Handle(THREAD, HotSpotJVMCI::resolve(runtime))); diff --git a/src/hotspot/share/jvmci/jvmciRuntime.cpp b/src/hotspot/share/jvmci/jvmciRuntime.cpp index 74508fd93d8..84392e4c430 100644 --- a/src/hotspot/share/jvmci/jvmciRuntime.cpp +++ b/src/hotspot/share/jvmci/jvmciRuntime.cpp @@ -925,9 +925,9 @@ void JVMCIRuntime::initialize(JVMCIEnv* JVMCIENV) { { MutexUnlocker unlock(JVMCI_lock); - HandleMark hm; - ResourceMark rm; JavaThread* THREAD = JavaThread::current(); + HandleMark hm(THREAD); + ResourceMark rm(THREAD); if (JVMCIENV->is_hotspot()) { HotSpotJVMCI::compute_offsets(CHECK_EXIT); } else { @@ -1013,7 +1013,7 @@ JVM_ENTRY_NO_ENV(void, JVM_RegisterJVMCINatives(JNIEnv *env, jclass c2vmClass)) JVMCIENV->runtime()->initialize(JVMCIENV); { - ResourceMark rm; + ResourceMark rm(thread); HandleMark hm(thread); ThreadToNativeFromVM trans(thread); @@ -1483,7 +1483,7 @@ void JVMCIRuntime::compile_method(JVMCIEnv* JVMCIENV, JVMCICompiler* compiler, c return; } - HandleMark hm; + HandleMark hm(thread); JVMCIObject receiver = get_HotSpotJVMCIRuntime(JVMCIENV); if (JVMCIENV->has_pending_exception()) { fatal_exception_in_compile(JVMCIENV, thread, "Exception during HotSpotJVMCIRuntime initialization"); diff --git a/src/hotspot/share/memory/dynamicArchive.cpp b/src/hotspot/share/memory/dynamicArchive.cpp index 578c35dc5c3..f0a22481668 100644 --- a/src/hotspot/share/memory/dynamicArchive.cpp +++ b/src/hotspot/share/memory/dynamicArchive.cpp @@ -575,7 +575,6 @@ class DynamicArchiveBuilder : ResourceObj { void verify_universe(const char* info) { if (VerifyBeforeExit) { log_info(cds)("Verify %s", info); - HandleMark hm; // Among other things, this ensures that Eden top is correct. 
Universe::heap()->prepare_for_verify(); Universe::verify(info); diff --git a/src/hotspot/share/memory/universe.cpp b/src/hotspot/share/memory/universe.cpp index 9b446376b69..61034adde9c 100644 --- a/src/hotspot/share/memory/universe.cpp +++ b/src/hotspot/share/memory/universe.cpp @@ -333,6 +333,7 @@ void initialize_basic_type_klass(Klass* k, TRAPS) { void Universe::genesis(TRAPS) { ResourceMark rm(THREAD); + HandleMark hm(THREAD); { AutoModifyRestore temporarily(_bootstrapping, true); @@ -1176,8 +1177,9 @@ void Universe::verify(VerifyOption option, const char* prefix) { "(of thread stacks below)"); ) - ResourceMark rm; - HandleMark hm; // Handles created during verification can be zapped + Thread* thread = Thread::current(); + ResourceMark rm(thread); + HandleMark hm(thread); // Handles created during verification can be zapped _verify_count++; FormatBuffer<> title("Verifying %s", prefix); diff --git a/src/hotspot/share/metaprogramming/logical.hpp b/src/hotspot/share/metaprogramming/logical.hpp new file mode 100644 index 00000000000..a488b94f8b9 --- /dev/null +++ b/src/hotspot/share/metaprogramming/logical.hpp @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_METAPROGRAMMING_LOGICAL_HPP +#define SHARE_METAPROGRAMMING_LOGICAL_HPP + +// Stand-ins for C++17 logical operations on types. + +#include + +// Stand-in for C++17 std::bool_constant. +template +using BoolConstant = std::integral_constant; + +// Stand-in for C++17 std::conjunction +template +struct Conjunction : public std::true_type {}; + +template +struct Conjunction : public T1 {}; + +template +struct Conjunction : + public std::conditional_t, T1> +{}; + +// Stand-in for C++17 std::disjunction. +template +struct Disjunction : public std::false_type {}; + +template +struct Disjunction : public T1 {}; + +template +struct Disjunction : + public std::conditional_t> +{}; + +// Stand-in for C++17 std::negation. 
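// A few illustrative uses of these stand-ins (not from this patch; assumes the definitions
// carry the same meaning as their C++17 counterparts std::bool_constant, std::conjunction,
// std::disjunction and std::negation):
//
//   static_assert(BoolConstant<true>::value, "like std::bool_constant<true>");
//   static_assert(Conjunction<std::is_integral<int>, std::is_signed<int>>::value,
//                 "true only if every trait in the list is true");
//   static_assert(Disjunction<std::is_pointer<int>, std::is_integral<int>>::value,
//                 "true if any trait in the list is true");
//   static_assert(Negation<std::false_type>::value, "inverts a trait");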
+template +using Negation = BoolConstant; + +#endif // SHARE_METAPROGRAMMING_LOGICAL_HPP diff --git a/src/hotspot/share/oops/klassVtable.cpp b/src/hotspot/share/oops/klassVtable.cpp index f1f99afae31..a3912f9bf1c 100644 --- a/src/hotspot/share/oops/klassVtable.cpp +++ b/src/hotspot/share/oops/klassVtable.cpp @@ -1199,7 +1199,7 @@ void klassItable::initialize_itable_for_interface(int method_table_offset, Insta assert(interf->is_interface(), "must be"); Array* methods = interf->methods(); int nof_methods = methods->length(); - HandleMark hm; + HandleMark hm(THREAD); Handle interface_loader (THREAD, interf->class_loader()); int ime_count = method_count_for_interface(interf); @@ -1603,7 +1603,6 @@ int VtableStats::array_entries = 0; void klassVtable::print_statistics() { ResourceMark rm; - HandleMark hm; VtableStats::compute(); tty->print_cr("vtable statistics:"); tty->print_cr("%6d classes (%d instance, %d array)", VtableStats::no_klasses, VtableStats::no_instance_klasses, VtableStats::no_array_klasses); diff --git a/src/hotspot/share/oops/objArrayOop.inline.hpp b/src/hotspot/share/oops/objArrayOop.inline.hpp index f7771ad6f09..5862edcc48c 100644 --- a/src/hotspot/share/oops/objArrayOop.inline.hpp +++ b/src/hotspot/share/oops/objArrayOop.inline.hpp @@ -35,21 +35,23 @@ inline HeapWord* objArrayOopDesc::base() const { return (HeapWord*) arrayOopDesc inline HeapWord* objArrayOopDesc::base_raw() const { return (HeapWord*) arrayOopDesc::base_raw(T_OBJECT); } template T* objArrayOopDesc::obj_at_addr(int index) const { - assert(is_within_bounds(index), "index out of bounds"); + assert(is_within_bounds(index), "index %d out of bounds %d", index, length()); return &((T*)base())[index]; } template T* objArrayOopDesc::obj_at_addr_raw(int index) const { - assert(is_within_bounds(index), "index out of bounds"); + assert(is_within_bounds(index), "index %d out of bounds %d", index, length()); return &((T*)base_raw())[index]; } inline oop objArrayOopDesc::obj_at(int index) const { + assert(is_within_bounds(index), "index %d out of bounds %d", index, length()); ptrdiff_t offset = UseCompressedOops ? obj_at_offset(index) : obj_at_offset(index); return HeapAccess::oop_load_at(as_oop(), offset); } inline void objArrayOopDesc::obj_at_put(int index, oop value) { + assert(is_within_bounds(index), "index %d out of bounds %d", index, length()); ptrdiff_t offset = UseCompressedOops ? 
obj_at_offset(index) : obj_at_offset(index); HeapAccess::oop_store_at(as_oop(), offset, value); } diff --git a/src/hotspot/share/oops/typeArrayOop.inline.hpp b/src/hotspot/share/oops/typeArrayOop.inline.hpp index 8b3f64b95e9..6215acc69ed 100644 --- a/src/hotspot/share/oops/typeArrayOop.inline.hpp +++ b/src/hotspot/share/oops/typeArrayOop.inline.hpp @@ -90,91 +90,111 @@ inline jdouble* typeArrayOopDesc::double_at_addr(int which) const { } inline jbyte typeArrayOopDesc::byte_at(int which) const { + assert(is_within_bounds(which), "index %d out of bounds %d", which, length()); ptrdiff_t offset = element_offset(which); return HeapAccess::load_at(as_oop(), offset); } inline void typeArrayOopDesc::byte_at_put(int which, jbyte contents) { + assert(is_within_bounds(which), "index %d out of bounds %d", which, length()); ptrdiff_t offset = element_offset(which); HeapAccess::store_at(as_oop(), offset, contents); } inline jboolean typeArrayOopDesc::bool_at(int which) const { + assert(is_within_bounds(which), "index %d out of bounds %d", which, length()); ptrdiff_t offset = element_offset(which); return HeapAccess::load_at(as_oop(), offset); } inline void typeArrayOopDesc::bool_at_put(int which, jboolean contents) { + assert(is_within_bounds(which), "index %d out of bounds %d", which, length()); ptrdiff_t offset = element_offset(which); HeapAccess::store_at(as_oop(), offset, jboolean(contents & 1)); } inline jchar typeArrayOopDesc::char_at(int which) const { + assert(is_within_bounds(which), "index %d out of bounds %d", which, length()); ptrdiff_t offset = element_offset(which); return HeapAccess::load_at(as_oop(), offset); } inline void typeArrayOopDesc::char_at_put(int which, jchar contents) { + assert(is_within_bounds(which), "index %d out of bounds %d", which, length()); ptrdiff_t offset = element_offset(which); HeapAccess::store_at(as_oop(), offset, contents); } inline jint typeArrayOopDesc::int_at(int which) const { + assert(is_within_bounds(which), "index %d out of bounds %d", which, length()); ptrdiff_t offset = element_offset(which); return HeapAccess::load_at(as_oop(), offset); } inline void typeArrayOopDesc::int_at_put(int which, jint contents) { + assert(is_within_bounds(which), "index %d out of bounds %d", which, length()); ptrdiff_t offset = element_offset(which); HeapAccess::store_at(as_oop(), offset, contents); } inline jshort typeArrayOopDesc::short_at(int which) const { + assert(is_within_bounds(which), "index %d out of bounds %d", which, length()); ptrdiff_t offset = element_offset(which); return HeapAccess::load_at(as_oop(), offset); } inline void typeArrayOopDesc::short_at_put(int which, jshort contents) { + assert(is_within_bounds(which), "index %d out of bounds %d", which, length()); ptrdiff_t offset = element_offset(which); HeapAccess::store_at(as_oop(), offset, contents); } inline jushort typeArrayOopDesc::ushort_at(int which) const { + assert(is_within_bounds(which), "index %d out of bounds %d", which, length()); ptrdiff_t offset = element_offset(which); return HeapAccess::load_at(as_oop(), offset); } inline void typeArrayOopDesc::ushort_at_put(int which, jushort contents) { + assert(is_within_bounds(which), "index %d out of bounds %d", which, length()); ptrdiff_t offset = element_offset(which); HeapAccess::store_at(as_oop(), offset, contents); } inline jlong typeArrayOopDesc::long_at(int which) const { + assert(is_within_bounds(which), "index %d out of bounds %d", which, length()); ptrdiff_t offset = element_offset(which); return HeapAccess::load_at(as_oop(), offset); } 
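// A small illustration of the checks added above (hypothetical values, not from the patch):
// the new asserts are debug-only, so product builds are unchanged, while debug builds now
// report both the offending index and the array length, e.g.
//
//   typeArrayOop a = ...;      // an array with length() == 10
//   a->byte_at_put(10, 0);     // debug build: assert fires with "index 10 out of bounds 10"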
inline void typeArrayOopDesc::long_at_put(int which, jlong contents) { + assert(is_within_bounds(which), "index %d out of bounds %d", which, length()); ptrdiff_t offset = element_offset(which); HeapAccess::store_at(as_oop(), offset, contents); } inline jfloat typeArrayOopDesc::float_at(int which) const { + assert(is_within_bounds(which), "index %d out of bounds %d", which, length()); ptrdiff_t offset = element_offset(which); return HeapAccess::load_at(as_oop(), offset); } inline void typeArrayOopDesc::float_at_put(int which, jfloat contents) { + assert(is_within_bounds(which), "index %d out of bounds %d", which, length()); ptrdiff_t offset = element_offset(which); HeapAccess::store_at(as_oop(), offset, contents); } inline jdouble typeArrayOopDesc::double_at(int which) const { + assert(is_within_bounds(which), "index %d out of bounds %d", which, length()); ptrdiff_t offset = element_offset(which); return HeapAccess::load_at(as_oop(), offset); } inline void typeArrayOopDesc::double_at_put(int which, jdouble contents) { + assert(is_within_bounds(which), "index %d out of bounds %d", which, length()); ptrdiff_t offset = element_offset(which); HeapAccess::store_at(as_oop(), offset, contents); } inline jbyte typeArrayOopDesc::byte_at_acquire(int which) const { + assert(is_within_bounds(which), "index %d out of bounds %d", which, length()); ptrdiff_t offset = element_offset(which); return HeapAccess::load_at(as_oop(), offset); } inline void typeArrayOopDesc::release_byte_at_put(int which, jbyte contents) { + assert(is_within_bounds(which), "index %d out of bounds %d", which, length()); ptrdiff_t offset = element_offset(which); HeapAccess::store_at(as_oop(), offset, contents); } @@ -184,19 +204,23 @@ inline void typeArrayOopDesc::release_byte_at_put(int which, jbyte contents) { // casting #ifdef _LP64 inline Symbol* typeArrayOopDesc::symbol_at(int which) const { + assert(is_within_bounds(which), "index %d out of bounds %d", which, length()); ptrdiff_t offset = element_offset(which); return (Symbol*)(jlong) HeapAccess::load_at(as_oop(), offset); } inline void typeArrayOopDesc::symbol_at_put(int which, Symbol* contents) { + assert(is_within_bounds(which), "index %d out of bounds %d", which, length()); ptrdiff_t offset = element_offset(which); HeapAccess::store_at(as_oop(), offset, (jlong)contents); } #else inline Symbol* typeArrayOopDesc::symbol_at(int which) const { + assert(is_within_bounds(which), "index %d out of bounds %d", which, length()); ptrdiff_t offset = element_offset(which); return (Symbol*)(jint) HeapAccess::load_at(as_oop(), offset); } inline void typeArrayOopDesc::symbol_at_put(int which, Symbol* contents) { + assert(is_within_bounds(which), "index %d out of bounds %d", which, length()); ptrdiff_t offset = element_offset(which); HeapAccess::store_at(as_oop(), offset, (jint)contents); } diff --git a/src/hotspot/share/oops/weakHandle.hpp b/src/hotspot/share/oops/weakHandle.hpp index 59525adb665..435bc1e85cd 100644 --- a/src/hotspot/share/oops/weakHandle.hpp +++ b/src/hotspot/share/oops/weakHandle.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -58,6 +58,9 @@ class WeakHandle { void print() const; void print_on(outputStream* st) const; + + bool is_empty() const { return _obj == NULL; } + oop* ptr_raw() const { return _obj; } }; #endif // SHARE_OOPS_WEAKHANDLE_HPP diff --git a/src/hotspot/share/opto/cfgnode.cpp b/src/hotspot/share/opto/cfgnode.cpp index 2dd4d8de5ab..b5dbd300d4c 100644 --- a/src/hotspot/share/opto/cfgnode.cpp +++ b/src/hotspot/share/opto/cfgnode.cpp @@ -1615,40 +1615,33 @@ static Node* is_absolute( PhaseGVN *phase, PhiNode *phi_root, int true_path) { // Check other phi input for subtract node Node *sub = phi_root->in(3 - phi_x_idx); + bool is_sub = sub->Opcode() == Op_SubF || sub->Opcode() == Op_SubD || + sub->Opcode() == Op_SubI || sub->Opcode() == Op_SubL; + // Allow only Sub(0,X) and fail out for all others; Neg is not OK - if( tzero == TypeF::ZERO ) { - if( sub->Opcode() != Op_SubF || - sub->in(2) != x || - phase->type(sub->in(1)) != tzero ) return NULL; + if (!is_sub || phase->type(sub->in(1)) != tzero || sub->in(2) != x) return NULL; + + if (tzero == TypeF::ZERO) { x = new AbsFNode(x); if (flip) { x = new SubFNode(sub->in(1), phase->transform(x)); } } else if (tzero == TypeD::ZERO) { - if( sub->Opcode() != Op_SubD || - sub->in(2) != x || - phase->type(sub->in(1)) != tzero ) return NULL; x = new AbsDNode(x); if (flip) { x = new SubDNode(sub->in(1), phase->transform(x)); } - } else if (tzero == TypeInt::ZERO) { - if (sub->Opcode() != Op_SubI || - sub->in(2) != x || - phase->type(sub->in(1)) != tzero) return NULL; + } else if (tzero == TypeInt::ZERO && Matcher::match_rule_supported(Op_AbsI)) { x = new AbsINode(x); if (flip) { x = new SubINode(sub->in(1), phase->transform(x)); } - } else { - if (sub->Opcode() != Op_SubL || - sub->in(2) != x || - phase->type(sub->in(1)) != tzero) return NULL; + } else if (tzero == TypeLong::ZERO && Matcher::match_rule_supported(Op_AbsL)) { x = new AbsLNode(x); if (flip) { x = new SubLNode(sub->in(1), phase->transform(x)); } - } + } else return NULL; return x; } @@ -2261,23 +2254,30 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) { } else { // We know that at least one MergeMem->base_memory() == this // (saw_self == true). If all other inputs also references this phi - // (directly or through data nodes) - it is dead loop. + // (directly or through data nodes) - it is a dead loop. bool saw_safe_input = false; for (uint j = 1; j < req(); ++j) { - Node *n = in(j); - if (n->is_MergeMem() && n->as_MergeMem()->base_memory() == this) - continue; // skip known cases + Node* n = in(j); + if (n->is_MergeMem()) { + MergeMemNode* mm = n->as_MergeMem(); + if (mm->base_memory() == this || mm->base_memory() == mm->empty_memory()) { + // Skip this input if it references back to this phi or if the memory path is dead + continue; + } + } if (!is_unsafe_data_reference(n)) { saw_safe_input = true; // found safe input break; } } - if (!saw_safe_input) - return top; // all inputs reference back to this phi - dead loop + if (!saw_safe_input) { + // There is a dead loop: All inputs are either dead or reference back to this phi + return top; + } // Phi(...MergeMem(m0, m1:AT1, m2:AT2)...) into // MergeMem(Phi(...m0...), Phi:AT1(...m1...), Phi:AT2(...m2...)) - PhaseIterGVN *igvn = phase->is_IterGVN(); + PhaseIterGVN* igvn = phase->is_IterGVN(); Node* hook = new Node(1); PhiNode* new_base = (PhiNode*) clone(); // Must eagerly register phis, since they participate in loops. 
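// Referring back to the is_absolute() change above (illustrative only): the folded pattern is
// the conditional-negate idiom, now gated on Matcher::match_rule_supported() for the integer
// variants, roughly
//
//   int v = (x < 0) ? (0 - x) : x;    // becomes AbsINode(x) when AbsI is supported
//   int w = (x < 0) ? x : (0 - x);    // "flipped" form, becomes 0 - AbsINode(x)
//
// and analogously for long, float and double.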
diff --git a/src/hotspot/share/opto/compile.cpp b/src/hotspot/share/opto/compile.cpp index d618d9e85bd..ac44213ed90 100644 --- a/src/hotspot/share/opto/compile.cpp +++ b/src/hotspot/share/opto/compile.cpp @@ -2832,15 +2832,21 @@ static void eval_operands(Node* n, uint& func1, uint& func2, uint& func3, ResourceHashtable& eval_map) { assert(is_vector_bitwise_op(n), ""); - func1 = eval_operand(n->in(1), eval_map); - if (is_vector_binary_bitwise_op(n)) { + if (is_vector_unary_bitwise_op(n)) { + Node* opnd = n->in(1); + if (VectorNode::is_vector_bitwise_not_pattern(n) && VectorNode::is_all_ones_vector(opnd)) { + opnd = n->in(2); + } + func1 = eval_operand(opnd, eval_map); + } else if (is_vector_binary_bitwise_op(n)) { + func1 = eval_operand(n->in(1), eval_map); func2 = eval_operand(n->in(2), eval_map); - } else if (is_vector_ternary_bitwise_op(n)) { + } else { + assert(is_vector_ternary_bitwise_op(n), "unknown operation"); + func1 = eval_operand(n->in(1), eval_map); func2 = eval_operand(n->in(2), eval_map); func3 = eval_operand(n->in(3), eval_map); - } else { - assert(is_vector_unary_bitwise_op(n), "not unary"); } } diff --git a/src/hotspot/share/opto/ifnode.cpp b/src/hotspot/share/opto/ifnode.cpp index 23cf1812d2b..9cbb62ecd8b 100644 --- a/src/hotspot/share/opto/ifnode.cpp +++ b/src/hotspot/share/opto/ifnode.cpp @@ -993,11 +993,15 @@ bool IfNode::fold_compares_helper(ProjNode* proj, ProjNode* success, ProjNode* f } if (lo && hi) { + Node* hook = new Node(1); + hook->init_req(0, lo); // Add a use to lo to prevent him from dying // Merge the two compares into a single unsigned compare by building (CmpU (n - lo) (hi - lo)) Node* adjusted_val = igvn->transform(new SubINode(n, lo)); if (adjusted_lim == NULL) { adjusted_lim = igvn->transform(new SubINode(hi, lo)); } + hook->del_req(0); // Just yank bogus edge + hook->destruct(); Node* newcmp = igvn->transform(new CmpUNode(adjusted_val, adjusted_lim)); Node* newbool = igvn->transform(new BoolNode(newcmp, cond)); diff --git a/src/hotspot/share/opto/type.cpp b/src/hotspot/share/opto/type.cpp index 342773ebf61..1a741234b7c 100644 --- a/src/hotspot/share/opto/type.cpp +++ b/src/hotspot/share/opto/type.cpp @@ -3254,7 +3254,7 @@ TypeOopPtr::TypeOopPtr(TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, Offse ciField* field = vk->get_field_by_offset(foffset, false); assert(field != NULL, "missing field"); BasicType bt = field->layout_type(); - _is_ptr_to_narrowoop = (bt == T_OBJECT || bt == T_ARRAY || T_INLINE_TYPE); + _is_ptr_to_narrowoop = UseCompressedOops && is_reference_type(bt); } } else if (klass()->is_instance_klass()) { if (this->isa_klassptr()) { @@ -3262,7 +3262,7 @@ TypeOopPtr::TypeOopPtr(TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, Offse } else if (_offset == Offset::bottom || _offset == Offset::top) { // unsafe access _is_ptr_to_narrowoop = UseCompressedOops; - } else { // exclude unsafe ops + } else { assert(this->isa_instptr(), "must be an instance ptr."); if (klass() == ciEnv::current()->Class_klass() && (this->offset() == java_lang_Class::klass_offset() || @@ -3279,12 +3279,17 @@ TypeOopPtr::TypeOopPtr(TYPES t, PTR ptr, ciKlass* k, bool xk, ciObject* o, Offse if (ik->is_inlinetype() && this->offset() == ik->as_inline_klass()->default_value_offset()) { // Special hidden field that contains the oop of the default inline type basic_elem_type = T_INLINE_TYPE; + _is_ptr_to_narrowoop = UseCompressedOops; } else { ciField* field = ik->get_field_by_offset(this->offset(), true); - assert(field != NULL, "missing field"); - basic_elem_type = 
field->layout_type(); + if (field != NULL) { + BasicType basic_elem_type = field->layout_type(); + _is_ptr_to_narrowoop = UseCompressedOops && is_reference_type(basic_elem_type); + } else { + // unsafe access + _is_ptr_to_narrowoop = UseCompressedOops; + } } - _is_ptr_to_narrowoop = UseCompressedOops && is_reference_type(basic_elem_type); } else { // Instance fields which contains a compressed oop references. ciInstanceKlass* ik = klass()->as_instance_klass(); diff --git a/src/hotspot/share/prims/jni.cpp b/src/hotspot/share/prims/jni.cpp index 14c0e417ba5..e315257ef15 100644 --- a/src/hotspot/share/prims/jni.cpp +++ b/src/hotspot/share/prims/jni.cpp @@ -4194,7 +4194,7 @@ static jint JNI_CreateJavaVM_inner(JavaVM **vm, void **penv, void *args) { JVMCICompiler* compiler = JVMCICompiler::instance(true, CATCH); compiler->bootstrap(THREAD); if (HAS_PENDING_EXCEPTION) { - HandleMark hm; + HandleMark hm(THREAD); vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION)); } } @@ -4228,7 +4228,7 @@ static jint JNI_CreateJavaVM_inner(JavaVM **vm, void **penv, void *args) { // otherwise no pending exception possible - VM will already have aborted JavaThread* THREAD = JavaThread::current(); if (HAS_PENDING_EXCEPTION) { - HandleMark hm; + HandleMark hm(THREAD); vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION)); } } diff --git a/src/hotspot/share/prims/jvm.cpp b/src/hotspot/share/prims/jvm.cpp index 3216ba76807..560f2791023 100644 --- a/src/hotspot/share/prims/jvm.cpp +++ b/src/hotspot/share/prims/jvm.cpp @@ -539,7 +539,15 @@ JVM_ENTRY_NO_ENV(jint, JVM_ActiveProcessorCount(void)) return os::active_processor_count(); JVM_END - +JVM_ENTRY_NO_ENV(jboolean, JVM_IsUseContainerSupport(void)) + JVMWrapper("JVM_IsUseContainerSupport"); +#ifdef LINUX + if (UseContainerSupport) { + return JNI_TRUE; + } +#endif + return JNI_FALSE; +JVM_END // java.lang.Throwable ////////////////////////////////////////////////////// diff --git a/src/hotspot/share/prims/jvmtiEnv.cpp b/src/hotspot/share/prims/jvmtiEnv.cpp index 08c04883233..b326958cabd 100644 --- a/src/hotspot/share/prims/jvmtiEnv.cpp +++ b/src/hotspot/share/prims/jvmtiEnv.cpp @@ -660,7 +660,7 @@ JvmtiEnv::AddToBootstrapClassLoaderSearch(const char* segment) { // lock the loader Thread* thread = Thread::current(); - HandleMark hm; + HandleMark hm(thread); Handle loader_lock = Handle(thread, SystemDictionary::system_loader_lock()); ObjectLocker ol(loader_lock, thread); @@ -697,7 +697,8 @@ JvmtiEnv::AddToSystemClassLoaderSearch(const char* segment) { // The phase is checked by the wrapper that called this function, // but this thread could be racing with the thread that is // terminating the VM so we check one more time. - HandleMark hm; + Thread* THREAD = Thread::current(); + HandleMark hm(THREAD); // create the zip entry (which will open the zip file and hence // check that the segment is indeed a zip file). 
@@ -708,9 +709,7 @@ JvmtiEnv::AddToSystemClassLoaderSearch(const char* segment) { delete zip_entry; // no longer needed // lock the loader - Thread* THREAD = Thread::current(); Handle loader = Handle(THREAD, SystemDictionary::java_system_loader()); - ObjectLocker ol(loader, THREAD); // need the path as java.lang.String @@ -915,11 +914,12 @@ jvmtiError JvmtiEnv::GetAllThreads(jint* threads_count_ptr, jthread** threads_ptr) { int nthreads = 0; Handle *thread_objs = NULL; - ResourceMark rm; - HandleMark hm; + Thread* current_thread = Thread::current(); + ResourceMark rm(current_thread); + HandleMark hm(current_thread); // enumerate threads (including agent threads) - ThreadsListEnumerator tle(Thread::current(), true); + ThreadsListEnumerator tle(current_thread, true); nthreads = tle.num_threads(); *threads_count_ptr = nthreads; @@ -1125,10 +1125,10 @@ JvmtiEnv::InterruptThread(jthread thread) { // info_ptr - pre-checked for NULL jvmtiError JvmtiEnv::GetThreadInfo(jthread thread, jvmtiThreadInfo* info_ptr) { - ResourceMark rm; - HandleMark hm; - JavaThread* current_thread = JavaThread::current(); + ResourceMark rm(current_thread); + HandleMark hm(current_thread); + ThreadsListHandle tlh(current_thread); // if thread is NULL the current thread is used @@ -1400,10 +1400,9 @@ JvmtiEnv::GetTopThreadGroups(jint* group_count_ptr, jthreadGroup** groups_ptr) { // info_ptr - pre-checked for NULL jvmtiError JvmtiEnv::GetThreadGroupInfo(jthreadGroup group, jvmtiThreadGroupInfo* info_ptr) { - ResourceMark rm; - HandleMark hm; - - JavaThread* current_thread = JavaThread::current(); + Thread* current_thread = Thread::current(); + ResourceMark rm(current_thread); + HandleMark hm(current_thread); Handle group_obj (current_thread, JNIHandles::resolve_external_guard(group)); NULL_CHECK(group_obj(), JVMTI_ERROR_INVALID_THREAD_GROUP); @@ -3183,7 +3182,6 @@ jvmtiError JvmtiEnv::GetBytecodes(Method* method, jint* bytecode_count_ptr, unsigned char** bytecodes_ptr) { NULL_CHECK(method, JVMTI_ERROR_INVALID_METHODID); - HandleMark hm; methodHandle mh(Thread::current(), method); jint size = (jint)mh->code_size(); jvmtiError err = allocate(size, bytecodes_ptr); diff --git a/src/hotspot/share/prims/jvmtiEnvBase.cpp b/src/hotspot/share/prims/jvmtiEnvBase.cpp index f4f046dc2de..2d7e9dde5df 100644 --- a/src/hotspot/share/prims/jvmtiEnvBase.cpp +++ b/src/hotspot/share/prims/jvmtiEnvBase.cpp @@ -622,8 +622,9 @@ JvmtiEnvBase::count_locked_objects(JavaThread *java_thread, Handle hobj) { return ret; // no Java frames so no monitors } - ResourceMark rm; - HandleMark hm; + Thread* current_thread = Thread::current(); + ResourceMark rm(current_thread); + HandleMark hm(current_thread); RegisterMap reg_map(java_thread); for(javaVFrame *jvf=java_thread->last_java_vframe(®_map); jvf != NULL; @@ -675,7 +676,7 @@ JvmtiEnvBase::get_current_contended_monitor(JavaThread *calling_thread, JavaThre if (obj == NULL) { *monitor_ptr = NULL; } else { - HandleMark hm; + HandleMark hm(current_jt); Handle hobj(current_jt, obj); *monitor_ptr = jni_reference(calling_thread, hobj); } @@ -693,8 +694,8 @@ JvmtiEnvBase::get_owned_monitors(JavaThread *calling_thread, JavaThread* java_th "call by myself or at direct handshake"); if (java_thread->has_last_Java_frame()) { - ResourceMark rm; - HandleMark hm; + ResourceMark rm(current_jt); + HandleMark hm(current_jt); RegisterMap reg_map(java_thread); int depth = 0; @@ -723,8 +724,9 @@ jvmtiError JvmtiEnvBase::get_locked_objects_in_frame(JavaThread* calling_thread, JavaThread* java_thread, javaVFrame *jvf, 
GrowableArray* owned_monitors_list, jint stack_depth) { jvmtiError err = JVMTI_ERROR_NONE; - ResourceMark rm; - HandleMark hm; + Thread* current_thread = Thread::current(); + ResourceMark rm(current_thread); + HandleMark hm(current_thread); GrowableArray* mons = jvf->monitors(); if (mons->is_empty()) { diff --git a/src/hotspot/share/prims/jvmtiExport.cpp b/src/hotspot/share/prims/jvmtiExport.cpp index 1b23f3b1560..32c30d3d761 100644 --- a/src/hotspot/share/prims/jvmtiExport.cpp +++ b/src/hotspot/share/prims/jvmtiExport.cpp @@ -105,7 +105,7 @@ class JvmtiThreadEventTransition : StackObj { JavaThread *_jthread; public: - JvmtiThreadEventTransition(Thread *thread) : _rm(), _hm() { + JvmtiThreadEventTransition(Thread *thread) : _rm(), _hm(thread) { if (thread->is_Java_thread()) { _jthread = (JavaThread *)thread; _saved_state = _jthread->thread_state(); diff --git a/src/hotspot/share/prims/jvmtiImpl.cpp b/src/hotspot/share/prims/jvmtiImpl.cpp index 362ec73b9ea..45633ccc255 100644 --- a/src/hotspot/share/prims/jvmtiImpl.cpp +++ b/src/hotspot/share/prims/jvmtiImpl.cpp @@ -697,7 +697,8 @@ void VM_GetOrSetLocal::doit() { return; } StackValueCollection *locals = _jvf->locals(); - HandleMark hm; + Thread* current_thread = Thread::current(); + HandleMark hm(current_thread); switch (_type) { case T_INT: locals->set_int_at (_index, _value.i); break; @@ -706,7 +707,7 @@ void VM_GetOrSetLocal::doit() { case T_DOUBLE: locals->set_double_at(_index, _value.d); break; case T_OBJECT: case T_INLINE_TYPE: { - Handle ob_h(Thread::current(), JNIHandles::resolve_external_guard(_value.l)); + Handle ob_h(current_thread, JNIHandles::resolve_external_guard(_value.l)); locals->set_obj_at (_index, ob_h); break; } diff --git a/src/hotspot/share/prims/jvmtiTagMap.cpp b/src/hotspot/share/prims/jvmtiTagMap.cpp index d88cdaf7c54..33d0ca3f409 100644 --- a/src/hotspot/share/prims/jvmtiTagMap.cpp +++ b/src/hotspot/share/prims/jvmtiTagMap.cpp @@ -841,7 +841,6 @@ void ClassFieldMap::add(int index, char type, int offset) { // of the given class. // ClassFieldMap* ClassFieldMap::create_map_of_static_fields(Klass* k) { - HandleMark hm; InstanceKlass* ik = InstanceKlass::cast(k); // create the field map @@ -866,7 +865,6 @@ ClassFieldMap* ClassFieldMap::create_map_of_static_fields(Klass* k) { // and private fields declared in superclasses and superinterfaces too). 
// ClassFieldMap* ClassFieldMap::create_map_of_instance_fields(oop obj) { - HandleMark hm; InstanceKlass* ik = InstanceKlass::cast(obj->klass()); // create the field map diff --git a/src/hotspot/share/prims/whitebox.cpp b/src/hotspot/share/prims/whitebox.cpp index a2f158a94c4..0716f4599f1 100644 --- a/src/hotspot/share/prims/whitebox.cpp +++ b/src/hotspot/share/prims/whitebox.cpp @@ -2125,11 +2125,11 @@ WB_ENTRY(jint, WB_HandshakeWalkStack(JNIEnv* env, jobject wb, jobject thread_han } public: - TraceSelfClosure() : HandshakeClosure("WB_TraceSelf"), _num_threads_completed(0) {} + TraceSelfClosure(Thread* thread) : HandshakeClosure("WB_TraceSelf"), _num_threads_completed(0) {} jint num_threads_completed() const { return _num_threads_completed; } }; - TraceSelfClosure tsc; + TraceSelfClosure tsc(Thread::current()); if (all_threads) { Handshake::execute(&tsc); diff --git a/src/hotspot/share/runtime/deoptimization.cpp b/src/hotspot/share/runtime/deoptimization.cpp index 563e501c0d7..95ab6f26ca0 100644 --- a/src/hotspot/share/runtime/deoptimization.cpp +++ b/src/hotspot/share/runtime/deoptimization.cpp @@ -241,7 +241,8 @@ static bool eliminate_allocations(JavaThread* thread, int exec_mode, CompiledMet } static void eliminate_locks(JavaThread* thread, GrowableArray* chunk, bool realloc_failures) { - HandleMark hm; + assert(thread == Thread::current(), "should be"); + HandleMark hm(thread); #ifndef PRODUCT bool first = true; #endif @@ -661,7 +662,7 @@ JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_m // but makes the entry a little slower. There is however a little dance we have to // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro ResetNoHandleMark rnhm; // No-op in release/product versions - HandleMark hm; + HandleMark hm(thread); frame stub_frame = thread->last_frame(); @@ -1621,8 +1622,9 @@ void Deoptimization::revoke_from_deopt_handler(JavaThread* thread, frame fr, Reg if (!UseBiasedLocking) { return; } - ResourceMark rm; - HandleMark hm; + assert(thread == Thread::current(), "should be"); + ResourceMark rm(thread); + HandleMark hm(thread); GrowableArray* objects_to_revoke = new GrowableArray(); get_monitors_from_stack(objects_to_revoke, thread, fr, map); @@ -1854,7 +1856,7 @@ static void post_deoptimization_event(CompiledMethod* nm, #endif // INCLUDE_JFR JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint trap_request)) { - HandleMark hm; + HandleMark hm(thread); // uncommon_trap() is called at the beginning of the uncommon trap // handler. Note this fact before we start generating temporary frames diff --git a/src/hotspot/share/runtime/handles.cpp b/src/hotspot/share/runtime/handles.cpp index 77bf47aa81c..c89fb29ef17 100644 --- a/src/hotspot/share/runtime/handles.cpp +++ b/src/hotspot/share/runtime/handles.cpp @@ -118,7 +118,7 @@ void HandleArea::oops_do(OopClosure* f) { } void HandleMark::initialize(Thread* thread) { - _thread = thread; + _thread = thread; // Not the current thread during thread creation. // Save area _area = thread->handle_area(); // Save current top diff --git a/src/hotspot/share/runtime/handles.hpp b/src/hotspot/share/runtime/handles.hpp index d49b07cbc79..0e00b7b1337 100644 --- a/src/hotspot/share/runtime/handles.hpp +++ b/src/hotspot/share/runtime/handles.hpp @@ -230,7 +230,7 @@ class HandleArea: public Arena { // // Handle h; // { -// HandleMark hm; +// HandleMark hm(THREAD); // h = Handle(THREAD, obj); // } // h()->print(); // WRONG, h destroyed by HandleMark destructor. 
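// With the no-argument constructor removed in the hunks below, the idiom shown in this comment
// now always names the thread explicitly; a typical call site (illustrative only) looks like
//
//   Thread* current = Thread::current();
//   ResourceMark rm(current);
//   HandleMark hm(current);
//   Handle h(current, obj);
//
// which avoids the hidden Thread::current() lookup the removed inline constructor performed.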
@@ -259,7 +259,6 @@ class HandleMark { // remove all chunks beginning with the next void chop_later_chunks(); public: - HandleMark(); // see handles_inline.hpp HandleMark(Thread* thread) { initialize(thread); } ~HandleMark(); diff --git a/src/hotspot/share/runtime/handles.inline.hpp b/src/hotspot/share/runtime/handles.inline.hpp index 2fad37f4e5d..7a690abf294 100644 --- a/src/hotspot/share/runtime/handles.inline.hpp +++ b/src/hotspot/share/runtime/handles.inline.hpp @@ -68,10 +68,6 @@ inline name##Handle::name##Handle(Thread* thread, type* obj) : _value(obj), _thr DEF_METADATA_HANDLE_FN(method, Method) DEF_METADATA_HANDLE_FN(constantPool, ConstantPool) -inline HandleMark::HandleMark() { - initialize(Thread::current()); -} - inline void HandleMark::push() { // This is intentionally a NOP. pop_and_restore will reset // values to the HandleMark further down the stack, typically diff --git a/src/hotspot/share/runtime/init.cpp b/src/hotspot/share/runtime/init.cpp index d2c844f2bb4..905a1259528 100644 --- a/src/hotspot/share/runtime/init.cpp +++ b/src/hotspot/share/runtime/init.cpp @@ -105,7 +105,6 @@ void vm_init_globals() { jint init_globals() { - HandleMark hm; management_init(); bytecodes_init(); classLoader_init1(); diff --git a/src/hotspot/share/runtime/java.cpp b/src/hotspot/share/runtime/java.cpp index ff7fb366e6f..dc1bc43a4ad 100644 --- a/src/hotspot/share/runtime/java.cpp +++ b/src/hotspot/share/runtime/java.cpp @@ -111,7 +111,6 @@ void print_method_profiling_data() { if (ProfileInterpreter COMPILER1_PRESENT(|| C1UpdateMethodData) && (PrintMethodData || CompilerOracle::should_print_methods())) { ResourceMark rm; - HandleMark hm; collected_profiled_methods = new GrowableArray(1024); SystemDictionary::methods_do(collect_profiled_methods); collected_profiled_methods->sort(&compare_methods); @@ -158,7 +157,6 @@ void collect_invoked_methods(Method* m) { void print_method_invocation_histogram() { ResourceMark rm; - HandleMark hm; collected_invoked_methods = new GrowableArray(1024); SystemDictionary::methods_do(collect_invoked_methods); collected_invoked_methods->sort(&compare_methods); diff --git a/src/hotspot/share/runtime/safepointMechanism.hpp b/src/hotspot/share/runtime/safepointMechanism.hpp index 084dd137727..81448747800 100644 --- a/src/hotspot/share/runtime/safepointMechanism.hpp +++ b/src/hotspot/share/runtime/safepointMechanism.hpp @@ -73,8 +73,6 @@ class SafepointMechanism : public AllStatic { static inline void arm_local_poll(JavaThread* thread); // Release semantics static inline void arm_local_poll_release(JavaThread* thread); - // Optional release - static inline void disarm_if_needed(JavaThread* thread, bool memory_order_release); // Setup the selected safepoint mechanism static void initialize(); diff --git a/src/hotspot/share/runtime/thread.cpp b/src/hotspot/share/runtime/thread.cpp index 33af3135ec2..6659c62c014 100644 --- a/src/hotspot/share/runtime/thread.cpp +++ b/src/hotspot/share/runtime/thread.cpp @@ -3271,8 +3271,10 @@ oop JavaThread::current_park_blocker() { void JavaThread::print_stack_on(outputStream* st) { if (!has_last_Java_frame()) return; - ResourceMark rm; - HandleMark hm; + + Thread* current_thread = Thread::current(); + ResourceMark rm(current_thread); + HandleMark hm(current_thread); RegisterMap reg_map(this); vframe* start_vf = last_java_vframe(®_map); @@ -3401,8 +3403,9 @@ void JavaThread::trace_stack_from(vframe* start_vf) { void JavaThread::trace_stack() { if (!has_last_Java_frame()) return; - ResourceMark rm; - HandleMark hm; + Thread* current_thread 
= Thread::current(); + ResourceMark rm(current_thread); + HandleMark hm(current_thread); RegisterMap reg_map(this); trace_stack_from(last_java_vframe(®_map)); } @@ -3925,8 +3928,6 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) { // Should be done after the heap is fully created main_thread->cache_global_variables(); - HandleMark hm; - { MutexLocker mu(Threads_lock); Threads::add(main_thread); } @@ -3973,6 +3974,7 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) { Arguments::update_vm_info_property(VM_Version::vm_info_string()); Thread* THREAD = Thread::current(); + HandleMark hm(THREAD); // Always call even when there are not JVMTI environments yet, since environments // may be attached late and JVMTI must track phases of VM execution diff --git a/src/hotspot/share/runtime/vframe.cpp b/src/hotspot/share/runtime/vframe.cpp index 1eac52a5094..7e3fcf27aa7 100644 --- a/src/hotspot/share/runtime/vframe.cpp +++ b/src/hotspot/share/runtime/vframe.cpp @@ -518,7 +518,6 @@ void vframeStreamCommon::security_next() { void vframeStreamCommon::skip_prefixed_method_and_wrappers() { ResourceMark rm; - HandleMark hm; int method_prefix_count = 0; char** method_prefixes = JvmtiExport::get_all_native_method_prefixes(&method_prefix_count); @@ -627,8 +626,9 @@ static void print_stack_values(const char* title, StackValueCollection* values) void javaVFrame::print() { - ResourceMark rm; - HandleMark hm; + Thread* current_thread = Thread::current(); + ResourceMark rm(current_thread); + HandleMark hm(current_thread); vframe::print(); tty->print("\t"); diff --git a/src/hotspot/share/runtime/vframeArray.cpp b/src/hotspot/share/runtime/vframeArray.cpp index cbfe4b1289b..759bb82445e 100644 --- a/src/hotspot/share/runtime/vframeArray.cpp +++ b/src/hotspot/share/runtime/vframeArray.cpp @@ -71,8 +71,10 @@ void vframeArrayElement::fill_in(compiledVFrame* vf, bool realloc_failures) { int index; { - ResourceMark rm; - HandleMark hm; + Thread* current_thread = Thread::current(); + ResourceMark rm(current_thread); + HandleMark hm(current_thread); + // Get the monitors off-stack GrowableArray* list = vf->monitors(); diff --git a/src/hotspot/share/services/attachListener.cpp b/src/hotspot/share/services/attachListener.cpp index c234b7aa734..ce189b3a92a 100644 --- a/src/hotspot/share/services/attachListener.cpp +++ b/src/hotspot/share/services/attachListener.cpp @@ -63,7 +63,7 @@ static InstanceKlass* load_and_initialize_klass(Symbol* sh, TRAPS) { static jint get_properties(AttachOperation* op, outputStream* out, Symbol* serializePropertiesMethod) { Thread* THREAD = Thread::current(); - HandleMark hm; + HandleMark hm(THREAD); // load VMSupport Symbol* klass = vmSymbols::jdk_internal_vm_VMSupport(); diff --git a/src/hotspot/share/services/heapDumper.cpp b/src/hotspot/share/services/heapDumper.cpp index fee88404f1f..fdc538b092f 100644 --- a/src/hotspot/share/services/heapDumper.cpp +++ b/src/hotspot/share/services/heapDumper.cpp @@ -830,7 +830,6 @@ void DumperSupport::dump_field_value(DumpWriter* writer, char type, oop obj, int // returns the size of the instance of the given class u4 DumperSupport::instance_size(Klass* k) { - HandleMark hm; InstanceKlass* ik = InstanceKlass::cast(k); u4 size = 0; @@ -843,7 +842,6 @@ u4 DumperSupport::instance_size(Klass* k) { } u4 DumperSupport::get_static_fields_size(InstanceKlass* ik, u2& field_count) { - HandleMark hm; field_count = 0; u4 size = 0; @@ -886,7 +884,6 @@ u4 DumperSupport::get_static_fields_size(InstanceKlass* ik, u2& field_count) { // dumps 
static fields of the given class void DumperSupport::dump_static_fields(DumpWriter* writer, Klass* k) { - HandleMark hm; InstanceKlass* ik = InstanceKlass::cast(k); // dump the field descriptors and raw values @@ -930,7 +927,6 @@ void DumperSupport::dump_static_fields(DumpWriter* writer, Klass* k) { // dump the raw values of the instance fields of the given object void DumperSupport::dump_instance_fields(DumpWriter* writer, oop o) { - HandleMark hm; InstanceKlass* ik = InstanceKlass::cast(o->klass()); for (FieldStream fld(ik, false, false); !fld.eos(); fld.next()) { @@ -943,7 +939,6 @@ void DumperSupport::dump_instance_fields(DumpWriter* writer, oop o) { // dumps the definition of the instance fields for a given class u2 DumperSupport::get_instance_fields_count(InstanceKlass* ik) { - HandleMark hm; u2 field_count = 0; for (FieldStream fldc(ik, true, true); !fldc.eos(); fldc.next()) { @@ -955,7 +950,6 @@ u2 DumperSupport::get_instance_fields_count(InstanceKlass* ik) { // dumps the definition of the instance fields for a given class void DumperSupport::dump_instance_field_descriptors(DumpWriter* writer, Klass* k) { - HandleMark hm; InstanceKlass* ik = InstanceKlass::cast(k); // dump the field descriptors @@ -1777,7 +1771,6 @@ void VM_HeapDumper::do_threads() { void VM_HeapDumper::doit() { - HandleMark hm; CollectedHeap* ch = Universe::heap(); ch->ensure_parsability(false); // must happen, even if collection does diff --git a/src/hotspot/share/services/threadService.cpp b/src/hotspot/share/services/threadService.cpp index e714bddb534..53382e2cb6e 100644 --- a/src/hotspot/share/services/threadService.cpp +++ b/src/hotspot/share/services/threadService.cpp @@ -566,8 +566,9 @@ StackFrameInfo::StackFrameInfo(javaVFrame* jvf, bool with_lock_info) { _class_holder = OopHandle(Universe::vm_global(), _method->method_holder()->klass_holder()); _locked_monitors = NULL; if (with_lock_info) { - ResourceMark rm; - HandleMark hm; + Thread* current_thread = Thread::current(); + ResourceMark rm(current_thread); + HandleMark hm(current_thread); GrowableArray* list = jvf->locked_monitors(); int length = list->length(); if (length > 0) { diff --git a/src/hotspot/share/utilities/debug.cpp b/src/hotspot/share/utilities/debug.cpp index c20c1c7de94..3b96f9cc5b0 100644 --- a/src/hotspot/share/utilities/debug.cpp +++ b/src/hotspot/share/utilities/debug.cpp @@ -355,8 +355,6 @@ void report_java_out_of_memory(const char* message) { class Command : public StackObj { private: ResourceMark rm; - ResetNoHandleMark rnhm; - HandleMark hm; bool debug_save; public: static int level; diff --git a/src/java.base/linux/classes/jdk/internal/platform/CgroupMetrics.java b/src/java.base/linux/classes/jdk/internal/platform/CgroupMetrics.java index fb42229f42c..12cb4b04444 100644 --- a/src/java.base/linux/classes/jdk/internal/platform/CgroupMetrics.java +++ b/src/java.base/linux/classes/jdk/internal/platform/CgroupMetrics.java @@ -160,7 +160,13 @@ public long getBlkIOServiced() { } public static Metrics getInstance() { + if (!isUseContainerSupport()) { + // Return null on -XX:-UseContainerSupport + return null; + } return CgroupSubsystemFactory.create(); } -} \ No newline at end of file + private static native boolean isUseContainerSupport(); + +} diff --git a/src/java.base/linux/native/libjava/CgroupMetrics.c b/src/java.base/linux/native/libjava/CgroupMetrics.c new file mode 100644 index 00000000000..8c9a9dd7a7e --- /dev/null +++ b/src/java.base/linux/native/libjava/CgroupMetrics.c @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2020, Red Hat, 
Inc. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "jni.h" +#include "jvm.h" + +#include "jdk_internal_platform_CgroupMetrics.h" + +JNIEXPORT jboolean JNICALL +Java_jdk_internal_platform_CgroupMetrics_isUseContainerSupport(JNIEnv *env, jclass ignored) +{ + return JVM_IsUseContainerSupport(); +} diff --git a/src/java.base/share/classes/java/lang/reflect/AnnotatedArrayType.java b/src/java.base/share/classes/java/lang/reflect/AnnotatedArrayType.java index d9b8223c758..fa9ccc03686 100644 --- a/src/java.base/share/classes/java/lang/reflect/AnnotatedArrayType.java +++ b/src/java.base/share/classes/java/lang/reflect/AnnotatedArrayType.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,6 +31,7 @@ * array type, whose component type may itself represent the annotated use of a * type. * + * @jls 10.1 Array Types * @since 1.8 */ public interface AnnotatedArrayType extends AnnotatedType { diff --git a/src/java.base/share/classes/java/lang/reflect/AnnotatedParameterizedType.java b/src/java.base/share/classes/java/lang/reflect/AnnotatedParameterizedType.java index b42530ef82e..fc32e910116 100644 --- a/src/java.base/share/classes/java/lang/reflect/AnnotatedParameterizedType.java +++ b/src/java.base/share/classes/java/lang/reflect/AnnotatedParameterizedType.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,6 +30,7 @@ * of a parameterized type, whose type arguments may themselves represent * annotated uses of types. 
* + * @jls 4.5 Parameterized Types * @since 1.8 */ public interface AnnotatedParameterizedType extends AnnotatedType { diff --git a/src/java.base/share/classes/java/lang/reflect/AnnotatedType.java b/src/java.base/share/classes/java/lang/reflect/AnnotatedType.java index a22e5c26336..662cdd42e36 100644 --- a/src/java.base/share/classes/java/lang/reflect/AnnotatedType.java +++ b/src/java.base/share/classes/java/lang/reflect/AnnotatedType.java @@ -37,6 +37,14 @@ * type annotations (JLS {@jls 9.7.4}) as the entity being * potentially annotated is a type. * + * @jls 4.1 The Kinds of Types and Values + * @jls 4.2 Primitive Types and Values + * @jls 4.3 Reference Types and Values + * @jls 4.4 Type Variables + * @jls 4.5 Parameterized Types + * @jls 4.8 Raw Types + * @jls 4.9 Intersection Types + * @jls 10.1 Array Types * @since 1.8 */ public interface AnnotatedType extends AnnotatedElement { diff --git a/src/java.base/share/classes/java/lang/reflect/AnnotatedTypeVariable.java b/src/java.base/share/classes/java/lang/reflect/AnnotatedTypeVariable.java index cab83f361c3..4d94bd38aec 100644 --- a/src/java.base/share/classes/java/lang/reflect/AnnotatedTypeVariable.java +++ b/src/java.base/share/classes/java/lang/reflect/AnnotatedTypeVariable.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,6 +30,7 @@ * type variable, whose declaration may have bounds which themselves represent * annotated uses of types. * + * @jls 4.4 Type Variables * @since 1.8 */ public interface AnnotatedTypeVariable extends AnnotatedType { diff --git a/src/java.base/share/classes/java/lang/reflect/AnnotatedWildcardType.java b/src/java.base/share/classes/java/lang/reflect/AnnotatedWildcardType.java index d46d269d343..8a211eab6bf 100644 --- a/src/java.base/share/classes/java/lang/reflect/AnnotatedWildcardType.java +++ b/src/java.base/share/classes/java/lang/reflect/AnnotatedWildcardType.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,6 +30,7 @@ * wildcard type argument, whose upper or lower bounds may themselves represent * annotated uses of types. * + * @jls 4.5.1 Type Arguments of Parameterized Types * @since 1.8 */ public interface AnnotatedWildcardType extends AnnotatedType { diff --git a/src/java.base/share/classes/java/lang/reflect/GenericArrayType.java b/src/java.base/share/classes/java/lang/reflect/GenericArrayType.java index c45399e7a24..ad4e047f748 100644 --- a/src/java.base/share/classes/java/lang/reflect/GenericArrayType.java +++ b/src/java.base/share/classes/java/lang/reflect/GenericArrayType.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2004, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,6 +28,8 @@ /** * {@code GenericArrayType} represents an array type whose component * type is either a parameterized type or a type variable. 
+ * + * @jls 10.1 Array Types * @since 1.5 */ public interface GenericArrayType extends Type { diff --git a/src/java.base/share/classes/java/lang/reflect/ParameterizedType.java b/src/java.base/share/classes/java/lang/reflect/ParameterizedType.java index a1b139894ed..62d6b7bf9e5 100644 --- a/src/java.base/share/classes/java/lang/reflect/ParameterizedType.java +++ b/src/java.base/share/classes/java/lang/reflect/ParameterizedType.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2004, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,7 +28,7 @@ /** * ParameterizedType represents a parameterized type such as - Collection<String>. + {@code Collection<String>}. * * <p>
A parameterized type is created the first time it is needed by a * reflective method, as specified in this package. When a @@ -42,6 +42,7 @@ * an equals() method that equates any two instances that share the * same generic type declaration and have equal type parameters. * + * @jls 4.5 Parameterized Types * @since 1.5 */ public interface ParameterizedType extends Type { diff --git a/src/java.base/share/classes/java/lang/reflect/Proxy.java b/src/java.base/share/classes/java/lang/reflect/Proxy.java index 1a2d7f668ab..2f2ec3bba34 100644 --- a/src/java.base/share/classes/java/lang/reflect/Proxy.java +++ b/src/java.base/share/classes/java/lang/reflect/Proxy.java @@ -674,12 +674,6 @@ private static void validateProxyInterfaces(ClassLoader loader, { Map<Class<?>, Boolean> interfaceSet = new IdentityHashMap<>(interfaces.size()); for (Class<?> intf : interfaces) { - /* - * Verify that the class loader resolves the name of this - * interface to the same Class object. - */ - ensureVisible(loader, intf); - /* * Verify that the Class object actually represents an * interface. @@ -688,6 +682,16 @@ private static void validateProxyInterfaces(ClassLoader loader, throw new IllegalArgumentException(intf.getName() + " is not an interface"); } + if (intf.isHidden()) { + throw new IllegalArgumentException(intf.getName() + " is a hidden interface"); + } + + /* + * Verify that the class loader resolves the name of this + * interface to the same Class object. + */ + ensureVisible(loader, intf); + /* * Verify that this interface is not a duplicate. */ @@ -905,7 +909,8 @@ private static Module getDynamicModule(ClassLoader loader) { * if any of the following restrictions is violated: *
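
The CgroupMetrics hunks above make getInstance() honour -XX:-UseContainerSupport by asking the VM through the new isUseContainerSupport() JNI call before any cgroup probing. The following sketch is illustrative only: CgroupMetrics and Metrics are internal jdk.internal.platform types (compiling this needs --add-exports), and the class name and printed messages are invented for the example.

    import jdk.internal.platform.CgroupMetrics;
    import jdk.internal.platform.Metrics;

    public class ContainerMetricsProbe {
        public static void main(String[] args) {
            // With -XX:-UseContainerSupport the native isUseContainerSupport()
            // check now short-circuits and getInstance() returns null instead
            // of consulting CgroupSubsystemFactory.
            Metrics m = CgroupMetrics.getInstance();
            if (m == null) {
                System.out.println("container metrics unavailable (disabled or unsupported)");
            } else {
                System.out.println("cgroup provider: " + m.getProvider());
            }
        }
    }

Run with -XX:-UseContainerSupport the null branch is taken; without the flag the factory result is returned as before.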
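The java.lang.reflect hunks only add @jls cross-references, but as a reminder of what ParameterizedType (JLS 4.5) and AnnotatedParameterizedType expose, here is a small self-contained sketch; the class and field names are made up for illustration.

    import java.lang.reflect.AnnotatedParameterizedType;
    import java.lang.reflect.Field;
    import java.lang.reflect.ParameterizedType;
    import java.util.List;

    public class ParameterizedTypeDemo {
        static List<String> names;  // a parameterized use, as in the Collection<String> example

        public static void main(String[] args) throws Exception {
            Field f = ParameterizedTypeDemo.class.getDeclaredField("names");

            // ParameterizedType: raw type plus actual type arguments.
            ParameterizedType pt = (ParameterizedType) f.getGenericType();
            System.out.println(pt.getRawType());                 // interface java.util.List
            System.out.println(pt.getActualTypeArguments()[0]);  // class java.lang.String

            // AnnotatedParameterizedType mirrors the same structure for the
            // annotated use of the type, one AnnotatedType per type argument.
            AnnotatedParameterizedType apt =
                    (AnnotatedParameterizedType) f.getAnnotatedType();
            System.out.println(apt.getAnnotatedActualTypeArguments()[0].getType());
        }
    }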
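The Proxy.java hunk reorders validateProxyInterfaces so that hidden interfaces are rejected with IllegalArgumentException before the loader-visibility check. A minimal sketch of what callers now see; the interface and handler names are invented, and creating an actual hidden interface (via MethodHandles.Lookup::defineHiddenClass) is only hinted at in a comment.

    import java.lang.reflect.InvocationHandler;
    import java.lang.reflect.Proxy;

    public class ProxyHiddenCheckDemo {
        interface Greeter { String greet(); }

        public static void main(String[] args) {
            InvocationHandler h = (proxy, method, argv) -> "hello";

            // Ordinary, loader-visible interfaces behave as before.
            Greeter g = (Greeter) Proxy.newProxyInstance(
                    Greeter.class.getClassLoader(),
                    new Class<?>[] { Greeter.class },
                    h);
            System.out.println(g.greet());

            // If one of the Class objects passed in is a hidden interface
            // (Class::isHidden returns true, e.g. for an interface defined with
            // MethodHandles.Lookup::defineHiddenClass), newProxyInstance now
            // throws IllegalArgumentException("... is a hidden interface")
            // before attempting the visibility check.
        }
    }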